diff --git "a/1133.jsonl" "b/1133.jsonl" new file mode 100644--- /dev/null +++ "b/1133.jsonl" @@ -0,0 +1,1866 @@ +{"seq_id":"24762989265","text":"\"\"\"\nCompute the running median of a sequence of numbers. \nThat is, given a stream of numbers, print out the median of the\n list so far on each new element.\n\nRecall that the median of an even-numbered list is the average \nof the two middle numbers.\n\"\"\"\n\ndef median(l: list)->int:\n length = len(l)\n if length == 1:\n return l[0]\n else:\n mid = 0\n even = length % 2\n mid = length // 2\n if even == 0:\n median = (l[mid-1] + l[mid]) / 2 \n return round(median, 1)\n else:\n return l[mid] \n\ndef runningMedian(s: list)->list:\n m = []\n for i in range(1, len(s)):\n sub = s[:i]\n m.append(median(sub))\n return m\n\nif __name__ == \"__main__\":\n runningMedian([2, 1, 5, 7, 2, 0, 5]) == [2, 1.5, 1, 3.0, 5, 6.0]","repo_name":"TetianaHrunyk/DailyCodingProblems","sub_path":"challenge33.py","file_name":"challenge33.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16101342768","text":"'''\n多项式回归\n'''\n# 多项式回归: 如果数据实际上比简单的直线更复杂,依然可以使用线性模型来拟合非线性数据。\n# 一个简单的方法是对每个特征进行加权后作为新的特征,然后训练一个线性模型在这个扩展的\n# 特征集。\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nm = 100\nX = 6 * np.random.rand(m, 1) - 3\ny = 0.5 * X**2 + X + 2 + np.random.randn(m, 1)\n\nplt.plot(X, y, \"b.\")\nplt.xlabel(\"$x_1$\", fontsize=18)\nplt.ylabel(\"$y$\", rotation=0, fontsize=18)\nplt.axis([-3, 3, 0, 10])\n\nfrom sklearn.preprocessing import PolynomialFeatures\n# 使用 Scikit-Learning 的 PolynomialFeatures 类进行训练数据集的转换,让训练集中每个\n# 特征的平方(2 次多项式)作为新特征。\npoly_features = PolynomialFeatures(degree=2, include_bias=False)\nX_poly = poly_features.fit_transform(X)\n\nprint(X_poly[0])\n\n# X_poly 现在包含原始特征并加上了这个特征的平方 。现在你可以在这个扩展训练集上\n# 使用 LinearRegression 模型进行拟合\n\nfrom sklearn.linear_model import LinearRegression\n\nlin_reg = LinearRegression()\nlin_reg.fit(X_poly, y)\nprint(lin_reg.intercept_, lin_reg.coef_)\n\nX_new=np.linspace(-3, 3, 100).reshape(100, 1)\nX_new_poly = poly_features.transform(X_new)\ny_new = lin_reg.predict(X_new_poly)\nplt.plot(X, y, \"b.\")\nplt.plot(X_new, y_new, \"r-\", linewidth=2, label=\"Predictions\")\nplt.xlabel(\"$x_1$\", fontsize=18)\nplt.ylabel(\"$y$\", rotation=0, fontsize=18)\nplt.legend(loc=\"upper left\", fontsize=14)\nplt.axis([-3, 3, 0, 10])\nplt.show()\n\n# 可以使用交叉验证来估计一个模型的泛化能力,另一种方法是观察学习曲线:画出模型在训练集上\n# 的表现,同时画出以训练集规模为自变量的训练集函数\n\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import train_test_split\n\ndef plot_learning_curves(model, X, y):\n X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2)\n train_errors, val_errors = [], []\n for m in range(1, len(X_train)):\n model.fit(X_train[:m], y_train[:m])\n y_train_predict = model.predict(X_train[:m])\n y_val_predict = model.predict(X_val)\n train_errors.append(mean_squared_error(y_train_predict, y_train[:m]))\n val_errors.append(mean_squared_error(y_val_predict, y_val))\n\n plt.xlabel(\"Training set size\", fontsize=18)\n plt.ylabel(\"RMSE\", rotation=0, fontsize=18)\n plt.plot(np.sqrt(train_errors), \"r-+\", linewidth=2, label=\"train\")\n plt.plot(np.sqrt(val_errors), \"b-\", linewidth=3, label=\"val\")\n\n\n# 简单线性回归模型的学习曲线\nlin_reg = LinearRegression()\nplot_learning_curves(lin_reg, X, y)\n\n# 上面的曲线表现了一个典型的欠拟合模型,两条曲线都到达高原地带并趋于稳定,并且最后\n# 两条曲线非常接近,同时误差值非常大。\n\nplt.show()\n\n# 在统计和机器学习领域有个重要的理论:一个模型的泛化误差由三个不同误差的和决\n# 定:\n# 
偏差:泛化误差的这部分误差是由于错误的假设决定的。例如实际是一个二次模型,\n# 你却假设了一个线性模型。一个高偏差的模型最容易出现欠拟合。\n# 方差:这部分误差是由于模型对训练数据的微小变化较为敏感,一个多自由度的模\n# 型更容易有高的方差(例如一个高阶多项式模型),因此会导致模型过拟合。\n# 不可约误差:这部分误差是由于数据本身的噪声决定的。降低这部分误差的唯一方\n# 法就是进行数据清洗(例如:修复数据源,修复坏的传感器,识别和剔除异常值)。","repo_name":"applepip/machine_learning","sub_path":"model_training/Polynomial_model_training.py","file_name":"Polynomial_model_training.py","file_ext":"py","file_size_in_byte":3676,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"38383583689","text":"from collections import defaultdict\nfrom typing import Dict, List\n\nfrom aocpuzzle import AoCPuzzle\n\n\nclass Puzzle10(AoCPuzzle):\n def common(self, input_data: List[str]) -> None:\n self.adapters = list(map(int, input_data))\n self.max_rating = max(self.adapters)\n self.target_rating = self.max_rating + 3\n\n def part1(self) -> int:\n adapters = self.adapters[:]\n curr_rating = 0\n\n adapters_left = set()\n adapters_left.add(0)\n\n diff_1, diff_3 = 0, 0\n\n while len(adapters_left) > 0:\n rating = adapters_left.pop()\n\n for next_rating_option in [rating + idx for idx in range(1, 4)]:\n if next_rating_option in adapters:\n difference = next_rating_option - curr_rating\n curr_rating = next_rating_option\n\n if difference == 1:\n diff_1 += 1\n\n if difference == 3 or curr_rating + 3 == self.target_rating:\n diff_3 += 1\n\n adapters.remove(next_rating_option)\n adapters_left.add(next_rating_option)\n return diff_1 * diff_3\n\n def count_ways(self, curr_rating: int) -> int:\n if curr_rating in self.cache:\n return self.cache[curr_rating]\n\n if curr_rating == self.target_rating:\n return 1\n\n count = 0\n\n for next_rating in [a for a in self.adapters if 1 <= a - curr_rating <= 3]:\n count += self.count_ways(next_rating)\n\n self.cache[curr_rating] = count\n\n return count\n\n def part2(self) -> int:\n curr_rating = 0\n self.cache: Dict[int, int] = defaultdict(int)\n self.adapters = self.adapters[:] + [self.target_rating]\n\n return self.count_ways(curr_rating)\n\n def test_cases(self, input_data: List[str]) -> int:\n part1_tests_1 = ['16', '10', '15', '5', '1', '11', '7', '19', '6', '12', '4']\n part1_tests_2 = [\n '28', '33', '18', '42', '31', '14', '46', '20', '48', '47', '24', '23', '49',\n '45', '19', '38', '39', '11', '1', '32', '25', '35', '8', '17', '7', '9', '4',\n '2', '34', '10', '3',\n ]\n\n self.common(part1_tests_1)\n assert self.part1() == 28\n assert self.part2() == 8\n\n self.common(part1_tests_2)\n assert self.part1() == 220\n assert self.part2() == 19208\n\n self.common(input_data)\n assert self.part1() == 2775\n assert self.part2() == 518344341716992\n\n return 3\n","repo_name":"cpallapolu/advent-of-code","sub_path":"src/years/2020/10/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1707946320","text":"import zope.component\nimport zope.interface\nfrom z3c.form import button\nfrom z3c.formui import form\nfrom zope.publisher.interfaces import NotFound\nfrom zope.traversing.browser import absoluteURL\n\nfrom z3c.wizard import interfaces\nfrom z3c.wizard.button import WizardButtonActions\n\n\ndef nameStep(step, name):\n \"\"\"Give a step a __name__.\"\"\"\n step.__name__ = name\n return step\n\n\n@zope.interface.implementer(interfaces.IWizard)\nclass Wizard(form.Form):\n \"\"\"Wizard form.\n\n The wizard is responsible for manage the steps and offers the wizard menu\n navigation and knows the step 
order. The wizard can check the conditions\n given from the steps. The wizard is also responsible for delegate the\n back, next and complete actions to the steps.\n\n This IWizard object is modelled as a Controller known from the MVC\n (Model, view, controller) patter version 2.0 and the step is implemented as\n a view.\n \"\"\"\n\n buttons = button.Buttons(interfaces.IWizardButtons)\n\n # customize this part if needed\n stepInterface = interfaces.IStep\n\n firstStepAsDefault = True\n adjustStep = True\n confirmationPageName = None\n nextURL = None\n\n cssActive = 'selected'\n cssInActive = None # None will skip class attribute in DOM element\n\n # for internal use\n __name__ = None\n step = None\n\n @property\n def baseURL(self):\n return absoluteURL(self, self.request)\n\n def setUpSteps(self):\n \"\"\"Return a list of steps. This implementation uses IStep adapters.\n\n Take a look at the addStep method defined in step.py. This method\n allows you to setup steps directly in the method and offers an API for\n customized step setup.\n \"\"\"\n steps = list(zope.component.getAdapters(\n (self.context, self.request, self), self.stepInterface))\n return [nameStep(step, name) for name, step in steps]\n\n def filterSteps(self, steps):\n \"\"\"Make sure to only select available steps and we give a name.\"\"\"\n return [step for step in steps if step.available]\n\n def orderSteps(self, steps):\n # order steps by it's weight\n return sorted(steps, key=lambda step: step.weight)\n\n @property\n def steps(self):\n steps = self.setUpSteps()\n steps = self.filterSteps(steps)\n return self.orderSteps(steps)\n\n @property\n def completed(self):\n for step in self.steps:\n if not step.completed:\n return False\n return True\n\n @property\n def isFirstStep(self):\n \"\"\"See interfaces.IWizard\"\"\"\n return self.step and self.step.__name__ == self.steps[0].__name__\n\n @property\n def isLastStep(self):\n \"\"\"See interfaces.IWizard\"\"\"\n return self.step and self.step.__name__ == self.steps[-1].__name__\n\n @property\n def showBackButton(self):\n \"\"\"Ask the step.\"\"\"\n return self.step and self.step.showBackButton\n\n @property\n def showNextButton(self):\n \"\"\"Ask the step.\"\"\"\n return self.step and self.step.showNextButton\n\n @property\n def showCompleteButton(self):\n \"\"\"Ask the step.\"\"\"\n return self.step.showCompleteButton\n\n @property\n def previousStepName(self):\n if self.step is None:\n return\n stepNames = [step.__name__ for step in self.steps]\n idx = stepNames.index(self.step.__name__)\n if idx == 0:\n return\n return stepNames[idx - 1]\n\n @property\n def nextStepName(self):\n if self.step is None:\n return\n stepNames = [step.__name__ for step in self.steps]\n idx = stepNames.index(self.step.__name__)\n if idx == len(stepNames) - 1:\n return\n return stepNames[idx + 1]\n\n @property\n def stepMenu(self):\n items = []\n append = items.append\n lenght = len(self.steps) - 1\n for idx, step in enumerate(self.steps):\n firstStep = False\n lastStep = False\n if step.visible:\n isSelected = self.step and self.step.__name__ == step.__name__\n cssClass = isSelected and self.cssActive or self.cssInActive\n if idx == 0:\n firstStep = True\n if idx == lenght:\n lastStep = True\n append({\n 'name': step.__name__,\n 'title': step.label,\n 'number': str(idx + 1),\n 'url': '{}/{}'.format(self.baseURL, step.__name__),\n 'selected': self.step.__name__ == step.__name__,\n 'class': cssClass,\n 'first': firstStep,\n 'last': lastStep\n })\n return items\n\n def getDefaultStep(self):\n 
\"\"\"Can return the first or first not completed step as default.\"\"\"\n # return first step if this option is set\n if self.firstStepAsDefault:\n return self.steps[0]\n # return first not completed step\n for step in self.steps:\n if not step.completed:\n return step\n # fallback to first step if all steps completed\n return self.steps[0]\n\n def doAdjustStep(self):\n # Make sure all previous steps got completed. If not, redirect to the\n # last incomplete step\n if not self.adjustStep:\n return False\n for step in self.steps:\n if step.__name__ is self.step.__name__:\n break\n if not step.completed:\n # prepare redirect to not completed step and return True\n self.nextURL = '{}/{}'.format(self.baseURL, step.__name__)\n return True\n # or return False\n return False\n\n def updateActions(self):\n self.actions = WizardButtonActions(self, self.request, self.context)\n self.actions.update()\n\n def update(self):\n if self.doAdjustStep():\n return\n self.updateActions()\n\n def publishTraverse(self, request, name):\n \"\"\"Traverse to step by it's name.\"\"\"\n # Remove HTML ending\n if '.' in name:\n rawName = name.rsplit('.', 1)[0]\n else:\n rawName = name\n # Find the active step\n for step in self.steps:\n if step.__name__ == rawName:\n self.step = step\n return self.step\n raise NotFound(self, name, request)\n\n def browserDefault(self, request):\n \"\"\"The default step is our browserDefault traversal setp.\"\"\"\n if self.step is None:\n step = self.getDefaultStep()\n # always return default step as default view for our wizard\n return self, (step.__name__,)\n\n def goToStep(self, stepName):\n self.nextURL = '{}/{}'.format(self.baseURL, stepName)\n\n def goToBack(self):\n # redirect to next step if previous get sucessfuly processed\n self.goToStep(self.previousStepName)\n\n def goToNext(self):\n # redirect to next step if previous get sucessfuly processed\n self.goToStep(self.nextStepName)\n\n def doBack(self, action):\n if self.step.doBack(action):\n self.goToBack()\n\n def doNext(self, action):\n if self.step.doNext(action):\n self.goToNext()\n\n def doComplete(self, action):\n if self.step.doComplete(action):\n # do finsih after step get completed is completed\n self.doFinish()\n\n def doFinish(self):\n \"\"\"Force redirect after doComplete if confirmationPageName is given.\"\"\"\n if self.confirmationPageName is not None:\n self.nextURL = '{}/{}'.format(\n absoluteURL(self.context, self.request),\n self.confirmationPageName)\n\n @button.handler(interfaces.IWizardButtons['back'])\n def handleBack(self, action):\n self.doBack(action)\n\n @button.handler(interfaces.IWizardButtons['next'])\n def handleNext(self, action):\n self.doNext(action)\n\n @button.handler(interfaces.IWizardButtons['complete'])\n def handleComplete(self, action):\n self.doComplete(action)\n\n def render(self, *args, **kws):\n raise NotImplementedError('render is no supported')\n\n def __repr__(self):\n return \"<{} '{}'>\".format(self.__class__.__name__, self.__name__)\n","repo_name":"zopefoundation/z3c.wizard","sub_path":"src/z3c/wizard/wizard.py","file_name":"wizard.py","file_ext":"py","file_size_in_byte":8313,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"39085447732","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport copy\nfrom functools import reduce\nfrom datetime import datetime, timedelta\nfrom pathlib import Path\nfrom plots import Plots\nfrom reading_data import 
ReadingData\n\nif __name__ == \"__main__\":\n preprocessing_data = ReadingData()\n preprocessing_data.reading_data('data/marzec_2014_2.xlsx', 'results/2014_marzec')\n preprocessing_data.reading_data('data/wrzesien_2014_2.xlsx', 'results/2014_wrzesien')\n\n path = 'results'\n directories = ['2014_marzec', '2014_wrzesien', '2015_czerwiec', '2015_wrzesien', '2016_marzec', '2016_wrzesien',\n '2017_czerwiec', '2017_wrzesien', '2018_czerwiec', '2018_wrzesien', '2019_sierpien']\n\n list_range_year = preprocessing_data.reading_question(directories, 'przedzial_wiekowy')\n list_education = preprocessing_data.reading_question(directories, 'wyksztalcenie')\n list_accomodation = preprocessing_data.reading_question(directories, 'miejsce_zamieszkania')\n list_A2 = preprocessing_data.reading_question(directories, 'A2')\n list_A3 = preprocessing_data.reading_question(directories, 'A3')\n fear_list_questions = ['B4', 'B5', 'C10', 'C11', 'C16']\n fear_columns_number = [1, 1, 5, 3, 11]\n list_fear_index = preprocessing_data.reading_question_to_index(directories, fear_list_questions,\n fear_columns_number)\n\n plot = Plots()\n list_range_year_dict = plot.calculate_structure_population_age_based_range(list_range_year, directories)\n list_education_dict = plot.calculate_structure_education(list_education, directories)\n list_accomodation_dict = plot.calculate_structure_accomodation(list_accomodation, directories)\n list_A2_dict = plot.calculate_A2(list_A2, directories)\n list_A2_dict_with_3_cat = plot.calculate_A2_with_3_cat(list_A2, directories)\n list_fear, list_fear_dict, list_fear_dict_3_cat, affective_dict, cognitive_dict, \\\n behavioral_dict = plot.calculate_fear_index(list_fear_index, directories)\n\n label_list = [\"Zdecydowanie tak\", \"Raczej tak\", \"Ani tak, ani nie\",\n \"Raczej nie\", \"Zdecydowanie nie\"]\n\n label_list_3_cat = [\"tak\", \"Ani tak, ani nie\", \"nie\"]\n\n label_list_fear = [\"bardzo niski\", \"raczej niski\", \"średni\",\n \"raczej wysoki\", \"bardzo wysoki\"]\n label_list_fear_3_cat = [\"niski\", \"średni\", \"wysoki\"]\n\n plot.create_time_series(list_A2_dict, 'A2', 5, label_list)\n plot.create_time_series(list_fear_dict, 'fear_index', 5, label_list_fear)\n\n\n\n\n\n\n\n\n","repo_name":"krzych27/data_analysis","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3604794266","text":"import torch\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport torch.nn as nn\n\nclass FGSM(object):\n def __init__(self,model):\n self.model = model\n\n def get_loss(self,xi,label_or_target,TARGETED):\n criterion = nn.CrossEntropyLoss()\n output = self.model.predict(xi)\n #print(output, label_or_target)\n loss = criterion(output, label_or_target)\n #print(loss)\n #print(c.size(),modifier.size())\n return loss\n\n def i_fgsm(self, input_xi, label_or_target, eta, TARGETED=False):\n \n yi = Variable(label_or_target.cuda())\n x_adv = Variable(input_xi.cuda(), requires_grad=True)\n for it in range(10):\n error = self.get_loss(x_adv,yi,TARGETED)\n if (it)%1==0:\n print(error.item()) \n self.model.get_gradient(error)\n #print(gradient)\n x_adv.grad.sign_()\n if TARGETED:\n x_adv.data = x_adv.data - eta* x_adv.grad \n else:\n x_adv.data = x_adv.data + eta* x_adv.grad\n #x_adv = Variable(x_adv.data, requires_grad=True)\n #error.backward()\n return x_adv\n\n def fgsm(self, input_xi, label_or_target, eta, TARGETED=False):\n \n yi = 
Variable(label_or_target.cuda())\n x_adv = Variable(input_xi.cuda(), requires_grad=True)\n\n error = self.get_loss(x_adv,yi,TARGETED)\n print(error.item()) \n self.model.get_gradient(error)\n #print(gradient)\n x_adv.grad.sign_()\n if TARGETED:\n x_adv.data = x_adv.data - eta* x_adv.grad \n else:\n x_adv.data = x_adv.data + eta* x_adv.grad\n #x_adv = Variable(x_adv.data, requires_grad=True)\n #error.backward()\n return x_adv \n\n def __call__(self, input_xi, label_or_target, eta=0.01, TARGETED=False, ITERATIVE=False, epsilon=None):\n if ITERATIVE:\n adv = self.i_fgsm(input_xi, label_or_target, eta, TARGETED)\n else:\n eta = epsilon\n adv = self.fgsm(input_xi, label_or_target, eta, TARGETED)\n return adv \n \n \n","repo_name":"cmhcbb/attackbox","sub_path":"attack/FGSM.py","file_name":"FGSM.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"31"} +{"seq_id":"36025360387","text":"from flask_restplus import fields\n\nfrom app import db\nfrom .. import api\n\n\nclass Context(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(50), unique=True)\n maxQuota = db.Column(db.Integer, default=500)\n usedQuota = db.Column(db.Integer, default=0)\n enabled = db.Column(db.Boolean, default=True)\n reseller_id = db.Column(db.Integer, db.ForeignKey('reseller.id'), nullable=False) \n customer_id = db.Column(db.Integer, db.ForeignKey('customer.id'), nullable=False) \n admin = db.Column(db.String(200))\n password = db.Column(db.String(200))\n\n @property\n def ox_id(self):\n return self.id\n\n @ox_id.setter\n def ox_id(self, ox_id):\n self.id = ox_id\n\n # Models\n register_model = api.model('Register Context', {\n 'name': fields.String(required=True),\n 'description': fields.String(),\n 'reseller_id': fields.Integer(),\n 'customer_id': fields.Integer()\n })\n \n resource_model = api.model('Context', {\n 'id': fields.Integer(),\n 'name': fields.String(),\n 'usedQuota': fields.Integer(),\n 'enabled': fields.Boolean(),\n 'ox_id': fields.Integer(),\n 'reseller_id': fields.Integer(),\n 'customer_id': fields.Integer(),\n })\n\n theme_model = api.model('Theming', {\n 'ctx_id': fields.Integer(),\n 'mainColor': fields.String(),\n 'logoURL': fields.String(),\n 'logoWidth': fields.Integer(default=60),\n }) \n\n\n","repo_name":"FelipeMaeda/ox-rest-api","sub_path":"app/beta/models/contexts.py","file_name":"contexts.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13711264148","text":"import gc\nimport os\nimport sys\nimport numpy as np\nimport tensorflow as tf\nfrom PIL import Image\nimport random\n\n\nclass train_data():\n def __init__(self, filepath):\n self.filepath = filepath\n assert '.npy' in filepath\n if not os.path.exists(filepath):\n print(\"[!] 
Data file not exists\")\n sys.exit(1)\n\n def __enter__(self):\n print(\"[*] Loading data...\")\n self.data = np.load(self.filepath)\n np.random.shuffle(self.data)\n print(\"[*] Load successfully...\")\n return self.data\n\n def __exit__(self, type, value, trace):\n del self.data\n gc.collect()\n print(\"In __exit__()\")\n\n\ndef load_data(filepath):\n return train_data(filepath=filepath)\n\n\ndef load_images(filelist):\n if not isinstance(filelist, list):\n im = Image.open(filelist).convert('L')\n return np.array(im).reshape(1, im.size[1], im.size[0], 1)\n data = []\n for file in filelist:\n im = Image.open(file).convert('L')\n data.append(np.array(im).reshape(1, im.size[1], im.size[0], 1))\n return data\n\n\ndef save_images(filepath, ground_truth, noisy_image=None, clean_image=None):\n ground_truth = np.squeeze(ground_truth)\n noisy_image = np.squeeze(noisy_image)\n clean_image = np.squeeze(clean_image)\n if not clean_image.any():\n cat_image = ground_truth\n else:\n cat_image = np.concatenate([ground_truth, noisy_image, clean_image], axis=1)\n im = Image.fromarray(cat_image.astype('uint8')).convert('L')\n im.save(filepath, 'png')\n\n\ndef tf_psnr(im1, im2):\n mse = tf.losses.mean_squared_error(labels=im2 * 255.0, predictions=im1 * 255.0)\n psnr = 10.0 * (tf.log(255.0 ** 2 / mse) / tf.log(10.0))\n return psnr\n\n\ndef cal_psnr(im1, im2):\n mse = ((im1.astype(np.float) - im2.astype(np.float)) ** 2).mean()\n psnr = 10 * np.log10(255 ** 2 / mse)\n return psnr\n\n\ndef np_psnr(im1, im2):\n mse = (((im1.astype(np.float))*255.0 - (im2.astype(np.float))*255.0) ** 2).mean()\n psnr = 10 * np.log10(255 ** 2 / mse)\n return psnr\n\n\ndef np_mpsnr(img1, img2):\n mse = np.zeros(img1.shape[2])\n psnr = np.zeros(img1.shape[2])\n for i in range(img1.shape[2]):\n im1 = img1[:,:,i]\n im2 = img2[:,:,i]\n mse[i]= (((im1.astype(np.float))*255.0 - (im2.astype(np.float))*255.0) ** 2).mean()\n psnr[i] = 10 * np.log10(255 ** 2 / mse[i])\n return np.mean(psnr)\n\n\ndef salt_and_pepper_noise(img, proportion):\n noise_img =img\n height,width =noise_img.shape[0],noise_img.shape[1]\n num = int(height*width*proportion)\n for i in range(num):\n w = random.randint(0,width-1)\n h = random.randint(0,height-1)\n if random.randint(0,1) ==0:\n noise_img[h,w] = 0\n else:\n noise_img[h,w] = 1\n return noise_img","repo_name":"lzz11834/SGIDN","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"31"} +{"seq_id":"70506096727","text":"import networkx as nx\nimport dgl\nimport torch as th\n\n# g_nx = nx.petersen_graph()\n# g_dgl = dgl.DGLGraph(g_nx)\n\nimport matplotlib.pyplot as plt\n# plt.subplot(121)\n# nx.draw(g_nx, with_labels=True)\n# plt.subplot(122)\n# nx.draw(g_dgl.to_networkx(), with_labels=True)\n\n# plt.show()\ng = dgl.DGLGraph()\ng.add_nodes(10)\n# A couple edges one-by-one\nfor i in range(1, 4):\n g.add_edge(i, 0)\n# A few more with a paired list\nsrc = list(range(5, 8)); dst = [1]*3\ng.add_edges(src, dst)\n# finish with a pair of tensors\nsrc = th.tensor([8, 9]); dst = th.tensor([0, 0])\ng.add_edges(src, dst)\n\nnx.draw(g.to_networkx(), with_labels=True)\nplt.show()\n\n","repo_name":"ashishu007/pytorch-stuff","sub_path":"dglstuff/dgl_ex2.py","file_name":"dgl_ex2.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"7492094436","text":"from flask import Flask, render_template,request\nfrom 
flask_babel import Babel, _\n\napp = Flask(__name__)\napp.config['BABEL_DEFAULT_LOCALE'] = 'zh'\nbabel = Babel(app)\n\n\n@babel.localeselector\ndef get_locale():\n return request.accept_languages.best_match(['zh', 'en'])\n\n\n@app.route('/')\ndef hello():\n day = _(\"Saturday\")\n\n return render_template('index.html', day=day)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n\n\"\"\"\n代码汇总:\n\n1、新建babel.cfg:\n[python: **.py]\n[jinja2: **/templates/**.html]\nextensions=jinja2.ext.autoescape,jinja2.ext.with_\n2、生成编译模板\npybabel extract -F babel.cfg -o messages.pot .\n3、翻译\npybabel init -i messages.pot -d translations -l zh_Hans-CN\n4、手动输入中文\nmessages.mo\n5、编译翻译结果\npybabel compile -d translations\n6、更新翻译\npybabel update -i messages.pot -d translations\n\"\"\"","repo_name":"ddxygq/PyCode","sub_path":"web/flask/hello-babel/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"zh","doc_type":"code","stars":22,"dataset":"github-code","pt":"31"} +{"seq_id":"34440713498","text":"data = input()\nrow = int(data[1])\ncol = int(ord(data[0])) - int(ord('a')) + 1\n\nmove = [(-2, -1), (-2, 1), (-1, -2), (1, -2), (2, -1), (2, 1), (-1, 2), (1, 2)]\nanswer = 0\n\nfor m in move:\n nrow = row + m[0]\n ncol = col + m[1]\n\n if(nrow > 8 or ncol > 8 or nrow < 1 or ncol < 1):\n continue\n\n answer += 1\n\nprint(answer)","repo_name":"rnrn99/codingtest_book","sub_path":"Implementation/4-3_왕실의나이트.py","file_name":"4-3_왕실의나이트.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41998263206","text":"input = [3, 5, 6, 1, 2, 4]\n\n\ndef is_number_exist(number, array):\n for n in array: # array 의 길이만큼 아래의 연산이 실행된다.\n if number == n: # 비교연산이 1번 실행된다.\n return True # N * 1 = N\n return False\n\n\nresult = is_number_exist(3, input)\nprint(result)\n\n #점근 계산법\n # 입력값이 좋을때는 1이고 안 좋을때는 N이 된다.\n # 즉 O(N), Ω(1)의 시간 복잡도를 가진 알고리즘이다.","repo_name":"MuveloperDev/sparta_algorithm","sub_path":"week_1/04_is_number_exist.py","file_name":"04_is_number_exist.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24369436889","text":"import random\n\n#./talkingLib.txt\nleFile = open(\"talkingLib.txt\")\ntxt = leFile.readlines()\n\n'''def selectRandomLine(filerino):\n num = random.randint(0, 10)\n while num < 5:\n filerino.readline()\n leString = filerino.readline()\n return leString'''\n\ndef init_bot(respuestas):\n\n while 1:\n inp = input('>').lower()\n\n if inp.startswith('hola'):\n print(random.choice(respuestas['saludos']))\n continue \n \n elif inp.startswith('que tal'):\n print('Muy bien y tu?');\n continue\n\n elif inp.startswith('adios'):\n print(random.choice(respuestas['despedidas']))\n break\n\n elif inp.startswith(\"cuentame un cuento\"):\n i = 0\n while i < 10:\n print (\" \" + random.choice(txt))\n i += 1\n leFile.close()\n continue\n\n\n elif inp.endswith('?'):\n print(random.choice(respuestas['cortas']))\n continue\n\n else:\n print(random.choice(respuestas['largas']))\n continue\n return\n\ndef main():\n respuestas_largas = [\"Te escucho!\",\n \"Esta claro...\",\n \"Que interesante.\",\n \"Vaya por dios!\",\n \"Que bien!\"]\n\n respuestas_cortas = [\"Si.\",\n \"No.\",\n \"Claro!\",\n \"Tal vez.\",\n \"Podria ser.\"]\n\n saludos = [\"Well hello!\",\n \"Weeeeeh!\",\n \"Eeeeeh que passsssa!!\"]\n\n despedidas = [\"Venga a pastar!\",\n \"Ale hasta luego!\"]\n\n dict = 
{'largas':respuestas_largas,'cortas':respuestas_cortas,'saludos':saludos,'despedidas':despedidas}\n\n print(\"Hey! Esto es un pequeño TalkingBot, dile adios para cerrar el programa.\")\n\n init_bot(dict)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Ironjanowar/Python","sub_path":"TalkingBot/talking_bot.py","file_name":"talking_bot.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"69889118810","text":"import discord\r\nimport random\r\nfrom discord.ext import commands\r\n\r\n\r\nintents = discord.Intents.default()\r\nintents.message_content = True\r\n\r\nbot = commands.Bot(command_prefix='!', intents=intents)\r\n\r\n@bot.event\r\nasync def on_ready():\r\n print(f'We have logged in as {bot.user}')\r\n\r\n@bot.command()\r\nasync def hello(ctx):\r\n await ctx.send(f'Привет! Я бот {bot.user}!')\r\n\r\n@bot.command(description='For when you wanna settle the score some other way')\r\nasync def choose(ctx, *choices: str):\r\n \"\"\"Chooses between multiple choices.\"\"\"\r\n await ctx.send(random.choice(choices))\r\n\r\n@bot.command()\r\nasync def heh(ctx, count_heh = 5):\r\n await ctx.send(\"he\" * count_heh)\r\n\r\n@bot.command()\r\nasync def joined(ctx, member: discord.Member):\r\n \"\"\"Says when a member joined.\"\"\"\r\n await ctx.send(f'{member.name} joined {discord.utils.format_dt(member.joined_at)}')\r\n\r\n@bot.command()\r\nasync def guess(ctx, count:int):\r\n answer = random.randint(1,10)\r\n if count == answer:\r\n await ctx.send('Right!')\r\n else:\r\n await ctx.send(f'Nah, no! The answer was {answer}.')\r\n\r\n@bot.command()\r\nasync def repeat(ctx, times: int, content='repeating...'):\r\n \"\"\"Repeats a message multiple times.\"\"\"\r\n for i in range(times):\r\n await ctx.send(content)\r\n\r\n\r\nbot.run(\"Token\")\r\n","repo_name":"yernur0/DiscordBot2","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6796807535","text":"n = int(input())\narr = list(map(int, input().split()))\nok = False\nfor i in range(1, len(arr)):\n if arr[i - 1] > 0 and arr[i] > 0:\n ok = True\n break\n if arr[i - 1] < 0 and arr[i] < 0:\n ok = True\n break\nif ok:\n print(\"YES\")\nelse:\n print(\"NO\")","repo_name":"moonidelight/Web_Dev","sub_path":"lab7/task1/informatics/array/e.py","file_name":"e.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72944690009","text":"# 239 : https://leetcode.com/problems/sliding-window-maximum/\n\n# 빈리스트 하나 만들어서 \n# nums의 리스트가 k를 몇번 돌릴수있는지 알아내야함\n# 알아 낸 후, 매번의 i:i+k 가장 큰값 확인 후 rl리스트�� 넣는다\n# rl 출력\n\n#nums = [1,3,-1,-3,5,3,6,7]\n#k = 3\n#out = [3,3,5,5,6,7]\n\ncounts = len(nums) - k + 1\n\nrl = []\n\nif len(nums) < counts:\n return nums\nelse:\n for i in range(counts):\n rl.append(max(nums[i:i+k]))\nreturn rl","repo_name":"algohell/ALGOHELL","sub_path":"239/pgo.py","file_name":"pgo.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"ko","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"16881808610","text":"from django.contrib.auth import get_user_model\nfrom django.test import Client, TestCase\n\nfrom ..models import Group, Post\nfrom .test_store import get_response\n\nUser = get_user_model()\n\n\nclass URL(TestCase):\n @classmethod\n def setUpClass(cls):\n 
super().setUpClass()\n cls.user = User.objects.create_user(username='azhuravlev1001')\n cls.author = User.objects.create_user(username='leo')\n cls.group = Group.objects.create(\n title='Тестовый заголовок',\n slug='test-slug',\n description='Тестовое описание группы'\n )\n cls.post = Post.objects.create(\n text='Тестовый текст поста',\n author=cls.author,\n group=cls.group,\n id=1\n )\n\n global URL_GUEST_WANTS_AND_GETS, URL_LIST, URL_NOT_FOR_GUEST\n global URL_TEMPLATES\n\n URL_TEMPLATES = {\n '/': 'posts/index.html',\n f'/group/{URL.group.slug}/': 'posts/group_list.html',\n f'/profile/{URL.user.username}/': 'posts/profile.html',\n f'/posts/{URL.post.id}/edit/': 'posts/create_post.html',\n f'/posts/{URL.post.id}/': 'posts/post_detail.html',\n '/create/': 'posts/create_post.html'\n }\n\n URL_LIST = (\n '/',\n f'/group/{URL.group.slug}/',\n f'/profile/{URL.user.username}/',\n f'/posts/{URL.post.id}/edit/',\n f'/posts/{URL.post.id}/',\n '/create/'\n )\n\n URL_NOT_FOR_GUEST = (\n f'/posts/{URL.post.id}/edit/',\n '/create/'\n )\n\n URL_GUEST_WANTS_AND_GETS = {\n '/create/': '/auth/login/?next=/create/',\n f'/posts/{URL.post.id}/edit/':\n f'/auth/login/?next=%2Fposts%2F{URL.post.id}%2Fedit%2F'\n }\n\n def setUp(self):\n self.by_guest = Client()\n self.by_non_author = Client()\n self.by_non_author.force_login(URL.user)\n self.by_author = Client()\n self.by_author.force_login(URL.author)\n\n def test_EachPageCanOpen(self):\n for at_address in URL_LIST:\n with self.subTest(address=at_address):\n self.assertEqual(\n get_response(at_address, self.by_author).status_code,\n 200)\n\n def test_GuestCannotOpenPages(self):\n for at_address in URL_NOT_FOR_GUEST:\n with self.subTest(address=at_address):\n self.assertNotEqual(\n get_response(at_address, self.by_guest).status_code, 200)\n\n def test_TemplatesMatchAddresses(self):\n for at_address, template in URL_TEMPLATES.items():\n with self.subTest(address=at_address):\n self.assertTemplateUsed(\n get_response(at_address, self.by_author), template)\n\n def test_GuestGoesRedirected(self):\n for at_address, end_address in URL_GUEST_WANTS_AND_GETS.items():\n with self.subTest(address=at_address):\n self.assertRedirects(\n get_response(at_address, self.by_guest, follow=True),\n end_address, status_code=302, target_status_code=200)\n\n def test_WrongPageHasError404(self):\n self.assertEqual(\n get_response('/null_page/', self.by_author).status_code, 404)\n\n def test_OnlyAuthorCanEdit(self):\n at_address = URL_LIST[3]\n self.assertEqual(\n get_response(at_address, self.by_author).status_code,\n 200, 'автор не смог открыть эту страницу')\n self.assertNotEqual(\n get_response(at_address, self.by_non_author).status_code,\n 200, 'зарегистрированный не-автор смог открыть')\n self.assertNotEqual(\n get_response(at_address, self.by_guest).status_code,\n 200, 'гость смог открыть')\n","repo_name":"azhuravlev1001/hw05_final","sub_path":"yatube/posts/tests/test_urls.py","file_name":"test_urls.py","file_ext":"py","file_size_in_byte":3945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73502896408","text":"import os\nimport codecs\nimport datetime\nimport fileutils\n\nclass ProcFiles(object):\n def __init__(self, args_dict):\n self.posts_dir = args_dict[\"posts_dir\"]\n self.output_dir = args_dict[\"output_dir\"]\n\n def process(self):\n \"\"\"Given a directory of posts, return a dict representing those posts\n and a dict representing the blog's config.\n Return: (post_list, config_dict)\n \"\"\"\n 
self.posts_dir = fileutils.add_slash_if_missing(self.posts_dir)\n post_list = []\n for post_filename in os.listdir(self.posts_dir):\n # Process config file\n if post_filename == \"_config\":\n config_dict = self._get_config_info(self.posts_dir+post_filename)\n continue\n # Process LaMark files\n if not post_filename.endswith(\".lm\"):\n continue\n post_info = self._get_post_info(self.posts_dir + post_filename)\n post_list.append(post_info)\n post_list.sort(key=lambda post_info: post_info['date'], reverse=True)\n post_list.append({\n 'type': 'toc',\n 'permalink': 'index.html',\n 'regen': True,\n })\n return (post_list, config_dict)\n\n def _get_post_info(self,post_filename):\n \"\"\"Given the filename of a post, populate a dict representing that post.\n \"\"\"\n post_info = {\n \"title\": None, # Post title\n \"author\": None, # Post author\n \"date\": None, # Date of post\n \"type\": None, # Type: page, post, rss, toc\n \"permalink\": None, # Permanent title of the post \"my-post.html\"\n \"desc\": None, # Post description\n \"body\": None, # Body of the post in LaMark\n \"html_body\": None,\n }\n # Use utf-8, otherwise the markdown module chokes on it.\n with codecs.open(post_filename, encoding='utf-8') as post_file:\n whitespace_count=0\n while True:\n line = post_file.next().strip()\n if line == \"\":\n break\n # Front matter can be surrounded by html comment tags if\n # needed.\n if line == \"\":\n continue\n colon_pos = line.find(\":\")\n if colon_pos == -1:\n raise Exception(\"Invalid front matter line: '%s'\" % line)\n arg_name = line[:colon_pos]\n if arg_name not in post_info.keys():\n raise Exception(\"Unrecognized front matter argument: '%s'\" %\n arg_name)\n post_info[arg_name] = line[colon_pos+1:].strip()\n # Body of post is the rest of the file.\n body = \"\"\n while True:\n try:\n body += post_file.next()\n except StopIteration:\n break\n post_info[\"body\"] = body.strip()\n post_info[\"html_body\"] = body.strip()\n # Convert date string to datetime obj.\n post_info[\"date\"] = datetime.datetime.strptime(\n post_info[\"date\"],\n \"%m-%d-%Y\")\n # Post must end in .html\n post_info[\"permalink\"] += \".html\"\n # Check if the file needs to be regenerated. If the output dir has\n # a file that's the same name as 'permalink', and that file is\n # more recently modified than the '.lm' file, then set regen to\n # False. 
Else True.\n post_stat = os.stat(post_filename)\n try:\n output_stat = os.stat(self.output_dir + post_info[\"permalink\"])\n if post_stat.st_mtime > output_stat.st_mtime:\n post_info[\"regen\"] = True\n else:\n post_info[\"regen\"] = False\n except OSError:\n # File could not be 'stat'd, so file probably doesn't exist yet,\n # meaning it needs to be generated.\n post_info[\"regen\"] = True\n self._validate_post(post_info)\n return post_info\n\n def _validate_post(self, post):\n optionals = [\"desc\"]\n for key in post:\n if key in optionals:\n continue\n if post[key] is None:\n raise Exception(\"Post '%s' is missing front matter '%s'\" %\n (post['title'], key))\n\n def _get_config_info(self,config_filename):\n config_info={\n 'home_url': None,\n 'blog_base_url': None,\n 'blog_title': None,\n 'desc': None,\n }\n with codecs.open(config_filename, encoding='utf-8') as config_file:\n for line in config_file:\n line = line.strip()\n # Skip empty lines or comments (lines beginning with #)\n if len(line) == 0 or line[0] == \"#\":\n continue\n # Colon separates argument name from argument value.\n colon_pos = line.find(\":\")\n if colon_pos == -1:\n continue\n arg_name = line[:colon_pos].strip()\n arg_val = line[colon_pos+1:].strip()\n config_info[arg_name] = arg_val\n required_args = [\n \"blog_base_url\",\n \"blog_title\",\n \"desc\",\n ]\n for arg_name in required_args:\n if config_info.get(arg_name, None) is None:\n raise Exception(\"Config file missing argument: '%s'\" % arg_name)\n return config_info\n","repo_name":"beala/paleoblogger","sub_path":"procfiles.py","file_name":"procfiles.py","file_ext":"py","file_size_in_byte":5728,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"6132581893","text":"\"\"\"Streamlit application to display a college basketball team's history\nfrom 1985-2020 seasons.\n\"\"\"\nimport streamlit as st\nimport pandas as pd\nimport altair as alt\nfrom db import get_db\nfrom st_functions import get_teams_list, is_ascending_rank\n\n# TODO cache data\n# TODO round fields\n\nif __name__ == \"__main__\":\n st.title(\"Explore a team's history in a variety of metrics!\")\n DB = get_db(config_file_name='docker_database.ini')\n\n # SEASONS = get_seasons_list(_db=DB)\n TEAMS = get_teams_list(_db=DB)\n TEAM = st.sidebar.selectbox(\"Select a team\", TEAMS)\n PREFIX = st.sidebar.selectbox(\"Select a prefix\", ['Tm', 'Opp'])\n PREFIX_LONGSTRING = 'Team' if PREFIX == 'Tm' else 'Opponent'\n OTHER_PREFIX = 'Opp' if PREFIX == 'Tm' else 'Tm'\n METRICS = ['PF', 'Margin',\n 'FGM', 'FGA',\n 'FG3M', 'FG3A',\n 'FG2M', 'FG2A',\n 'FTA', 'FTM',\n 'Ast', 'ORB',\n 'DRB', 'TRB',\n 'TO', 'Stl',\n 'Blk', 'Foul']\n METRIC = st.sidebar.selectbox(\"Select a metric\", METRICS)\n DENOM = st.sidebar.selectbox(\n 'Select a normalization', ['per40', 'perGame', 'perPoss']\n )\n DENOM_FIELD = 'Mins' if DENOM == 'per40' else DENOM[-4:]\n if DENOM == 'perPoss':\n DENOM_LONGSTRING = 'per possession'\n elif DENOM == 'per40':\n DENOM_LONGSTRING = 'per 40 mins'\n elif DENOM == 'perGame':\n DENOM_LONGSTRING = 'per Game'\n\n OA_BOOL = st.sidebar.checkbox(\"Opponent Adjust?\")\n OA_PREF = 'OA_' if OA_BOOL else ''\n OA_LONGSTRING = ' (opponent-adjusted)' if OA_BOOL else ''\n\n NORMALIZE_CONST = 40 if DENOM == 'per40' else 1\n\n # Get results from DB\n season_team_cursor = DB.seasonteams.find(\n {},\n {\n # Get OA metric fields\n PREFIX+METRIC: 1,\n PREFIX+DENOM_FIELD: 1,\n 'OppSum_'+OTHER_PREFIX+METRIC: 1,\n 'OppSum_'+OTHER_PREFIX+DENOM_FIELD: 1,\n # Get W/L 
record fields\n 'TmWin': 1,\n 'TmGame': 1,\n # Get required aggregate fields\n 'TmName': 1,\n 'Season': 1,\n '_id': 0\n }\n )\n season_team = pd.DataFrame(list(season_team_cursor))\n season_team['Season'] = season_team.Season.astype(str)\n\n # Opponent-adjust selected metric\n season_team[PREFIX+METRIC+DENOM] = season_team[PREFIX+METRIC] / season_team[PREFIX+DENOM_FIELD] * NORMALIZE_CONST\n season_team['OA_'+PREFIX+METRIC+DENOM] = \\\n (season_team[PREFIX+METRIC+DENOM]) - \\\n (\n (season_team['OppSum_'+OTHER_PREFIX+METRIC] - season_team[PREFIX+METRIC]) /\n (season_team['OppSum_'+OTHER_PREFIX+DENOM_FIELD] - season_team[PREFIX+DENOM_FIELD])\n ) * NORMALIZE_CONST\n\n # Determine team's regular-season record\n # TODO update with postseason games\n season_team['TmLoss'] = season_team['TmGame'] - season_team['TmWin']\n season_team['Record'] = season_team['TmWin'].map(str) + '-' + season_team['TmLoss'].map(str)\n\n # Rank each team's values within the season\n season_team['Rnk_'+OA_PREF+PREFIX+METRIC+DENOM] = season_team.groupby(\n 'Season'\n )[OA_PREF+PREFIX+METRIC+DENOM].rank(\n 'min', ascending=is_ascending_rank(PREFIX, METRIC)\n )\n\n # Create chart\n season_team_chart = season_team.loc[season_team.TmName == TEAM]\n TITLE_STRING = f\"{TEAM}: Rank of {PREFIX_LONGSTRING}'s {METRIC} {DENOM_LONGSTRING+OA_LONGSTRING} [1 is best]\"\n chart = alt.Chart(\n data=season_team_chart, \n title=TITLE_STRING\n ).mark_line(\n point=True\n ).encode(\n alt.X('Season'),\n alt.Y('Rnk_'+OA_PREF+PREFIX+METRIC+DENOM,\n scale=alt.Scale(domain=(353,1)),\n axis=alt.Axis(title='Rank')),\n tooltip=['Season',\n 'Record', \n OA_PREF+PREFIX+METRIC+DENOM, \n 'Rnk_'+OA_PREF+PREFIX+METRIC+DENOM]\n ).interactive()\n\n st.altair_chart(chart)\n","repo_name":"ryanofarrell/ncaa-basketball","sub_path":"code/streamlit/st_team_history.py","file_name":"st_team_history.py","file_ext":"py","file_size_in_byte":3969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19768143555","text":"s = list(input())\nt = list(input())\nn = len(s)\nanswer = \"No\"\nfor i in range(n):\n for j in range(n-1):\n s[n-1-j], s[n-2-j] = s[n-2-j], s[n-1-j]\n if s == t:\n answer = \"Yes\"\nprint(answer)","repo_name":"shimomura314/AtcoderCodes","sub_path":"ABC103/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"543568971","text":"from django.shortcuts import render\n\n\ndef list_children(page):\n html = ''\n return html\n\n\ndef sitemap_view(request):\n root_page = request.site.root_page\n\n html = list_children(root_page)\n\n return render(request, 'sitemap.html', {'sitemap_html': html})\n","repo_name":"City-of-Helsinki/digihel","sub_path":"digi/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"31"} +{"seq_id":"37138587209","text":"\"\"\"Module for posting to facebook\"\"\"\nimport os\nimport facepy\nfrom yawt.utils import cfg, load_file\n\n\ndef post_fb(post, link=None):\n \"\"\"Post message to facebook\"\"\"\n token_file = os.path.expanduser(cfg('YAWT_MICROPOST_FB_ACCESS_TOKEN_FILE'))\n access_tok = load_file(token_file)\n graph = facepy.GraphAPI(access_tok)\n\n if link:\n print(\"trying force facebook to scrape URL...\")\n graph.post('/', id=link, scrape=True)\n print(\"trying to post to facebook...\")\n response = graph.post('me/feed', message=post, 
link=link)\n else:\n response = graph.post('me/feed', message=post)\n print(\"response: \"+str(response))\n fid = None\n retid = response['id']\n if retid:\n pids = retid.split('_')\n if len(pids) < 2:\n print(\"unexpected id format\")\n fid = pids[1]\n posturl = cfg('YAWT_MICROPOST_FB_POST_URL')\n metadata = {}\n if fid:\n metadata['fbpost'] = posturl.format(fid)\n return metadata\n","repo_name":"drivet/yawt","sub_path":"yawtext/facebook.py","file_name":"facebook.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"5964505312","text":"class Solution:\n def exist(self, board: List[List[str]], word: str) -> bool:\n if not board: return False\n m, n = len(board), len(board[0])\n \n def search(i, j, word, used):\n if not word:\n return True\n if not (0<=i dict:\n return ALL_VALUE\n\n\ndef set_all_value(value: dict):\n ALL_VALUE.update(value)\n\n\n# mysql\nMYSQL_HOST = \"shop-xo.hctestedu.com\"\nMYSQL_port = 3306\nMYSQL_USERNAME = \"api_test\"\nMYSQL_PASSWORD = \"Aa9999!\"\nMYSQL_DB = \"shopxo_hctested\"\n\nif __name__ == '__main__':\n print(__current_path)\n print(ROOT_PATH)\n","repo_name":"zxc123620/interFramework","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9004243141","text":"import os\nimport numpy as np\nfrom scipy.io import wavfile\n\n# Path to the LibriMix folder and selected subfolder\nwsj0_path = '/isip/Students-ro/SpeechSeparation/wsj0-mix'\npublic_path = '/isip/Public/spruenken/wsj0-2mix_normed/'\nsubfolder = ['mix', 's1', 's2']\n\n\ndef bundle_wav_files(dataset, cutout_size=1, wsj0_mix='wsj0-2mix', sampling_rate=8000, sampling_length='min'):\n \"\"\" Convert the .wav-files of a given data set into an 2D-array (.npy-file)\n :param dataset: 'cv', 'tr', 'tt'\n :param cutout_size: length of cutout in seconds\n :param wsj0_mix: 'wsj0-2mix', 'wsj0-3mix' (static für 2mix)\n :param sampling_rate: '8000', '16000'\n :param sampling_length: 'min', 'max'\n \"\"\"\n # Paths\n\n\n # Calculate cutout size and select wav_type-variable\n cutout = sampling_rate * cutout_size\n wav_type = \"wav{}k\".format(sampling_rate // 1000)\n wsj0_mix = \"2speakers\" # Static due to the given folder structure\n\n # Path to the subfolder containing the different types of data for the network\n folder_path = os.path.join(wsj0_path, wsj0_mix, wav_type, sampling_length, dataset)\n\n for subdir in subfolder:\n npy_array = []\n # Subfolder with type of sounds\n subdir_path = os.path.join(folder_path, subdir)\n for file in sorted(os.listdir(subdir_path)):\n # Path including .wav attachment\n file_path = os.path.join(subdir_path, file)\n # Read time series and scale to float32 Wertebereich\n time_series = wavfile.read(file_path)[1]\n time_series = time_series.astype(np.float32) / np.iinfo(np.int16).max\n # Split time series into one seconds sub-arrays\n n = len(time_series)\n for i in range(0, n-(n % cutout), cutout):\n sub_array = []\n sub_array = time_series[i:i+cutout]\n npy_array.append(sub_array)\n # Save the numpy array under the corresponding path\n arr = np.array(npy_array)\n # Adapt the storage path to that of LibriMix\n dataset_correlation = {'cv': 'dev', 'tr': 'train', 'tt': 'test'}\n mix_correlation = {'mix': 'mix_clean', 's1': 's1', 's2': 's2'}\n save_path = os.path.join(public_path, wav_type, sampling_length, dataset_correlation[dataset], 
mix_correlation[subdir])\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n save_name = 'wsj0-2mix_as_array'\n np.save(os.path.join(save_path, save_name + '.npy'), arr)\n np.savez_compressed(os.path.join(save_path, save_name + '.npz'), arr=arr, allow_pickle=True, pickle_protocol=2)\n print('Saved under: ', os.path.join(save_path, save_name + '.npy'))\n\n\nif __name__ == '__main__':\n for j in ['cv', 'tr', 'tt']: # cv := val, tr := train, tt := test\n bundle_wav_files(j)\n\n print('Datensatz für 8000 wurde erstellt')\n\n for j in ['cv', 'tr', 'tt']: # cv := val, tr := train, tt := test\n bundle_wav_files(j, sampling_rate=16000)\n","repo_name":"moibrgit/MA23","sub_path":"04_SepFormer_Ref/Code/datasets/wsj0-2mix/wsj0_to_npy.py","file_name":"wsj0_to_npy.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20951794574","text":"import sys\nimport os\nfrom time import gmtime, strftime\nimport re\nimport base64\nimport argparse\nimport glob\n\n##################\n# Argument Setup\n##################\nparser = argparse.ArgumentParser(description=\"SignWriting 2010 packing script takes a directory of files and writes a single data file.\"\n\t,epilog=\"Source SVG and completed TTF available online https://github.com/slevinski/signwriting_2010_fonts\")\nparser.add_argument(\"directory\", nargs=\"?\", help=\"name of the sub-directory in sources for the subfont files\")\nparser.add_argument(\"-f\",\"--force\", help=\"overwrite existing font files\", action=\"store_true\")\nparser.add_argument(\"-n\",\"--name\", metavar=\"filename\", help=\"name of data file\")\nparser.add_argument(\"-m\",\"--minimize\", metavar=\"factor\", help=\"for SVG, minimization factor for coordinate space\")\nparser.add_argument(\"-p\",\"--precision\", help=\"for SVG, number of decimal places for rounding\", default=\"NA\")\nparser.add_argument(\"-s\",\"--simplify\", help=\"for SVG, remove extra text\", action=\"store_true\")\nparser.add_argument(\"-t\",\"--test\", help=\"write one example to the screen\", action=\"store_true\")\nparser.add_argument(\"-r\",\"--reserved\", default=\"SignWriting 2010\", help=\"Reserved Font Name, default of %(default)s\")\n\nargs = parser.parse_args()\n\n\n\n##################\n# # initializing\n##################\nsourceDir = \"../source/\"\n\nif not args.directory:\n\tdirectories = os.walk( os.path.join(sourceDir,'.')).next()[1]\n\tdirectories.remove('other_svg')\n\tdirectories.remove('templates')\n\tif not len(directories):\n\t\tprint(\"\")\n\t\tprint(\"FAILURE: no directory available for packing \" + sourceDir)\n\telse:\n\t\tprint()\n\t\tprint(\"Please specify a directory from \" + sourceDir)\n\n\t\tfor dir in directories:\n\t\t\tprint(\"python pack.py \" + dir)\n\tsys.exit()\n\nfontDir = sourceDir + args.directory + \"/\"\nif args.name:\n\targs.directory = args.name\n\next = (args.directory[:3]).lower()\n\ndataFile = sourceDir + args.directory + \".dat\"\n\nif os.path.exists(dataFile) and not args.test:\n\tif args.force:\n\t\tprint(\"Overwriting data file \" + dataFile)\n\telse:\n\t\tprint()\n\t\tprint(\"FAILURE: Data file already exists: \" + dataFile)\n\t\tprint(\"Move file or use -f to force the file creation\")\n\t\tprint()\n\t\tsys.exit(-1)\n\nif os.path.exists(fontDir):\n\tprint(\"input directory \" + fontDir)\n\tprint(\"output data file \" + dataFile)\nelse:\n\tprint(f\"FAILURE: directory {fontDir} does not exist\")\n\tsys.exit(-1)\n\nif not args.test:\n\tsys.stdout = 
open(dataFile,'w') #redirect all prints to this log file\n\nprint(\"# SignWriting 2010 is released under the SIL Open Font License, Version 1.1.\")\nprint(\"# http://scripts.sil.org/OFL\")\nprint(\"#\")\nprint(\"# This Font Software is Copyright (c) 1974-2014\")\nprint(\"# Center For Sutton Movement Writing, Inc.\")\nprint(\"#\")\nprint(\"# The symbols of SignWriting 2010 were designed by Valerie Sutton (sutton@signwriting.org),\")\nprint(\"#\\t inventor of the SignWriting Script\")\nprint(\"#\")\nprint(\"# The symbol images were refined by Adam Frost (frost@signwriting.org).\")\nprint(\"#\")\nprint(\"# The symbols were encoded, transformed, and refactored by Stephen E Slevinski Jr (slevin@signpuddle.net).\")\nprint(\"#\")\nprint(\"# Reserved Font Name: \" + args.reserved)\nprint(\"#\")\nprint(\"# SignWriting 2010 Packed Data\")\nprint(\"# ------------------------------------\")\nprint(\"#\\tinput directory: \" + args.directory)\nprint(\"#\\toutput data file: \" + dataFile.replace(sourceDir,\"\"))\nprint(\"#\\tprocessed: \" + strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()))\nprint(\"# ------------------------------------\")\nprint(\"# https://github.com/slevinski/signwriting_2010_tools\")\nprint(\"#\")\nprint(\"# created with command:\",)\nfor item in sys.argv:\n\tif \" \" in item:\n\t\tprint(f'\"{item}\"')\n\telse:\n\t\tprint(item)\nprint()\nprint(\"#\")\nfiles = glob.glob(fontDir + \"*\" + ext)\nfor file in files:\n\tname = file.split('/')[-1].split('.')[0]\n\twith open(file, \"rb\") as image_file:\n\t\tdata = image_file.read()\n\t\tif not ext==\"svg\":\n\t\t\tencoded_string = base64.b64encode(data)\n\t\t\tprint(name + \"\\t\" + encoded_string)\n\t\telse:\n\t\t\t#cleanup for various svg sources\n\t\t\tdata = data.replace(\"\\n\",\" \")\n\t\t\tstart = data.index(\"\", start)+4\n\t\t\tglines = data[start:end]\n\t\t\tif args.precision != \"NA\":\n\t\t\t\tglines = re.sub(r'\\.[0-9]+',\n\t\t\t\tlambda m: ((\"%.\" + args.precision + \"f\") % float(m.group().strip()))[1:],\n\t\t\t\tglines).replace(\".\" + \"0\"*int(args.precision),\"\")\n\t\t\tif args.simplify:\n\t\t\t\tglines = glines.replace(' fill=\"#000000\" stroke=\"none\"',\"\")\n\t\t\tif args.minimize:\n\t\t\t\tstart = glines.index(\"translate(\")\n\t\t\t\tend = glines.index(\")\", start)+1\n\t\t\t\ttranslate =glines[start:end]\n\t\t\t\tstart = translate.index(\"(\")+1\n\t\t\t\tend = translate.index(\",\", start)\n\t\t\t\ttransx =int(translate[start:end])/int(args.minimize)\n\t\t\t\tstart = translate.index(\",\")+1\n\t\t\t\tend = translate.index(\")\", start)\n\t\t\t\ttransy =int(translate[start:end])/int(args.minimize)\n\t\t\t\tglines = glines.replace(translate,\"translate(\" + str(transx) + \",\" + str(transy) + \")\")\n\n\t\t\t\tstart = glines.index(\"scale(\")\n\t\t\t\tend = glines.index(\")\", start)+1\n\t\t\t\tscale =glines[start:end]\n\t\t\t\tstart = scale.index(\"(\")+1\n\t\t\t\tend = scale.index(\",\", start)\n\t\t\t\tscalex =float(scale[start:end])/int(args.minimize)\n\t\t\t\tstart = scale.index(\",\")+1\n\t\t\t\tend = scale.index(\")\", start)\n\t\t\t\tscaley =float(scale[start:end])/int(args.minimize)\n\t\t\t\tglines=glines.replace(scale,\"scale(\" + str(scalex) + \",\" + str(scaley) + \")\")\n\n\t\t\tprint(name + \"\\t\" + glines)\n\t\tif args.test:\n\t\t\tsys.exit()\n","repo_name":"Slevinski/signwriting_2010_tools","sub_path":"tools/pack.py","file_name":"pack.py","file_ext":"py","file_size_in_byte":5354,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} 
+{"seq_id":"16169517959","text":"from django.shortcuts import redirect, render \nfrom django.http import HttpResponse, JsonResponse \nfrom todo_app.models import Todo\n\nconvert_into_boolean = {\n \"0\": False,\n \"1\": True,\n}\norder_to_string = {\n \"0\": \"created_at\",\n \"1\": \"-created_at\"\n}\n\n\n\ndef index(request):\n search = request.GET.get(\"todoSearch\")\n completed = request.GET.get(\"completed\")\n order = request.GET.get(\"order\")\n all_todos = Todo.objects.all() \n if search != None: \n all_todos = all_todos.filter(title__icontains = search)\n if completed != None:\n value = convert_into_boolean.get(completed)\n all_todos = Todo.objects.filter(completed=value)\n if order != None:\n value = order_to_string.get(order)\n all_todos = all_todos.order_by(value)\n \n data = {\n \"todo\": all_todos\n }\n return render(request, \"index.html\", context=data)\n\ndef add_view(request):\n if request.method == \"GET\":\n return HttpResponse(\"Invalid Method\")\n else:\n todo_input = request.POST['todoInput']\n print(\"todo_input:\", todo_input)\n Todo.objects.create(title=todo_input)\n return redirect('todo_index')\n\ndef detailed_view(request, todo_id):\n if request.method == \"POST\":\n return HttpResponse(\"Invalid Method\")\n else:\n try:\n todo_object = Todo.objects.get(id=todo_id)\n print(todo_object)\n data = {\n 'id': todo_object.id,\n 'title': todo_object.title,\n 'completed': todo_object.completed,\n 'created_at': todo_object.created_at,\n 'updated_at': todo_object.updated_at,\n }\n return JsonResponse(data)\n except Todo.DoesNotExist:\n return HttpResponse(\"Error Todo not found\")\ndef delete_todo(request, todo_id):\n if request.method == \"GET\":\n return HttpResponse(\"Invalid Method\")\n else:\n try:\n todo_object = Todo.objects.get(id=todo_id)\n todo_object.delete()\n return redirect('todo_index')\n except Todo.DoesNotExist:\n return HttpResponse(\"Error Todo not found\")\n \ndef mark_view(request, todo_id):\n if request.method == \"GET\":\n return HttpResponse(\"Invalid Method\")\n else:\n try:\n todo_object = Todo.objects.get(id=todo_id)\n todo_object.completed = True \n todo_object.save()\n return redirect('todo_index')\n except Todo.DoesNotExist:\n return HttpResponse(\"Error Todo not found\")","repo_name":"Venugopalreddygithub/django-batch3","sub_path":"todo_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39115231406","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nAdvent of code 2021, day x\nblackstream-x’ solution\n\"\"\"\n\n\nimport logging\n\nimport helpers\n\n\nSPAWN_PERIOD = 7\nNEW_FISH = 8\n\n\ndef spawn_days(initial_count, days):\n \"\"\"Days when a fish with initial_count spawns\"\"\"\n return range(initial_count + 1, days + 1, SPAWN_PERIOD)\n\n\ndef population_after(days, fishes):\n \"\"\"Return the population count after {days} days\"\"\"\n population = len(fishes)\n spawn_events = [None] + [0] * days\n for fish in fishes:\n for day in spawn_days(fish, days):\n spawn_events[day] += 1\n #\n #\n for day in range(1, days + 1):\n new_fish = spawn_events[day]\n logging.debug(\"Day #%s: %s spawned\", day, new_fish)\n population += new_fish\n for days_offset in spawn_days(NEW_FISH, days - day):\n spawn_events[day + days_offset] += new_fish\n #\n #\n return population\n\n\n@helpers.timer\ndef part1(reader):\n \"\"\"Part 1\"\"\"\n days = 80\n for line in reader.lines():\n fishes = [int(item) for item in 
line.split(\",\")]\n return population_after(days, fishes)\n #\n\n\n@helpers.timer\ndef part2(reader):\n \"\"\"Part 2\"\"\"\n days = 256\n for line in reader.lines():\n fishes = [int(item) for item in line.split(\",\")]\n return population_after(days, fishes)\n #\n\n\nif __name__ == \"__main__\":\n helpers.solve_puzzle(part1, part2)\n\n\n# vim: fileencoding=utf-8 sw=4 ts=4 sts=4 expandtab autoindent syntax=python:\n","repo_name":"blackstream-x/advent-of-code2021","sub_path":"solutions/day6.py","file_name":"day6.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39093431822","text":"import sys, string\nimport os\nimport socket\nimport time\nimport operator\nimport boto3\nimport json\nfrom pyspark.sql import SparkSession\nfrom datetime import datetime\nimport pyspark.sql.functions as F\n\nif __name__ == \"__main__\":\n\n spark = SparkSession\\\n .builder\\\n .appName(\"Ethereum\")\\\n .getOrCreate()\n\n def check_transactions(line):\n try:\n fields = line.split(',')\n if len(fields)!=15:\n return False\n \n float(fields[7])\n return True\n except:\n return False\n\n \n\n s3_data_repository_bucket = os.environ['DATA_REPOSITORY_BUCKET']\n\n s3_endpoint_url = os.environ['S3_ENDPOINT_URL']+':'+os.environ['BUCKET_PORT']\n s3_access_key_id = os.environ['AWS_ACCESS_KEY_ID']\n s3_secret_access_key = os.environ['AWS_SECRET_ACCESS_KEY']\n s3_bucket = os.environ['BUCKET_NAME']\n\n hadoopConf = spark.sparkContext._jsc.hadoopConfiguration()\n hadoopConf.set(\"fs.s3a.endpoint\", s3_endpoint_url)\n hadoopConf.set(\"fs.s3a.access.key\", s3_access_key_id)\n hadoopConf.set(\"fs.s3a.secret.key\", s3_secret_access_key)\n hadoopConf.set(\"fs.s3a.path.style.access\", \"true\")\n hadoopConf.set(\"fs.s3a.connection.ssl.enabled\", \"false\") \n \n \n \n transactions = spark.sparkContext.textFile(\"s3a://\" + s3_data_repository_bucket + \"/ECS765/ethereum-parvulus/transactions.csv\")\n trans = transactions.filter(check_transactions)\n \n trans_map = trans.map(lambda x: (x.split(',')[5], x.split(',')[6], x.split(',')[7], x.split(',')[11]))\n naming_columns = ['from_address', 'to_address', 'value',' timestamp']\n DataFrame = trans_map.toDF(naming_columns)\n df = DataFrame.filter(F.col('from_address') == F.col('to_address'))\n trans_rdd = df.rdd.map(lambda x: ((x[0],x[1]), float(x[2])))\n output = trans_rdd.reduceByKey(lambda x, y: x+y)\n top10 = output.takeOrdered(10, key = lambda x: -x[1])\n \n my_bucket_resource = boto3.resource('s3',\n endpoint_url='http://' + s3_endpoint_url,\n aws_access_key_id=s3_access_key_id,\n aws_secret_access_key=s3_secret_access_key)\n\n \n now = datetime.now() # current date and time\n date_time = now.strftime(\"%d-%m-%Y_%H:%M:%S\")\n \n my_result_object = my_bucket_resource.Object(s3_bucket,'ethereum_partd3_' + date_time + '/top10_washtrade.txt')\n my_result_object.put(Body=json.dumps(top10))\n\n \n spark.stop()\n","repo_name":"sunrita007/Big-Data-Project","sub_path":"Big Data CW 2/Part D/Wash Trading/wash_trading.py","file_name":"wash_trading.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"32364507107","text":"# 2. Given a list of strings delete all special chars in each one (non-alphanumeric chars). 
\n# Initiates variables and prompts user input\ninput_string = input('Input a string: ')\noutput_string = \"\"\n\n#Process: Loops through string and adds alphanumeric characters to output_string\nfor char in input_string:\n if char.isalnum(): #Checks if a char is alphanumeric\n output_string += char #If so it adds it to output_string\n\n#Output\nprint(output_string)\n","repo_name":"saulbg/10_Excercises","sub_path":"Python_Special_Chars.py","file_name":"Python_Special_Chars.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25397869620","text":"#*****************************************************************************************\r\n#*Copyright (c) UFF - Federal Fluminense University 2022 *\r\n#* *\r\n#*Author(s): Brenda Gomes Gouveia *\r\n#* *\r\n#*Esse arquivo faz parte de um projeto da disciplina Internet das Coisas, ministrada pela*\r\n#*professora Flávia Delicato *\r\n#* *\r\n#*O arquivo se inscreve nos tópicos, e envia-los para o InfluxDB *\r\n#*****************************************************************************************\r\nfrom fastapi import FastAPI\r\nfrom paho import mqtt\r\nfrom fastapi_mqtt import FastMQTT, MQTTConfig\r\nfrom influxdb_client import InfluxDBClient, Point, WritePrecision\r\nfrom influxdb_client.client.write_api import SYNCHRONOUS\r\nimport datetime\r\n\r\nmqtt_broker = 'broker.mqttdashboard.com'\r\nmqtt_port = 1883\r\nmqtt_topic = \"casa/comodos1\"\r\nmqtt_topic2= \"casa/temperatura1\"\r\nfilenamet=\"temperatura.txt\"\r\nfilenamec=\"comodo.txt\"\r\n\r\napp = FastAPI()\r\n\r\nmqtt_config = MQTTConfig(host = mqtt_broker,port = mqtt_port,keepalive = 60)\r\n\r\nmqtt = FastMQTT(config=mqtt_config)\r\n\r\nmqtt.init_app(app)\r\n\r\ntoken = \"1MI7_9MBv6pDLHJKv9NDhmNFMCfaOK3s-6HFgCwRXyT2LM2d29Rtcq9QN0dV2oH-IbEmCh7mMzpvWqR7jSPedg==\"\r\norg = \"UFF-Internet das Coisas\"\r\nbucket = \"trabalho\"\r\n\r\nclient = InfluxDBClient(url=\"http://localhost:8086\", token=token)\r\n\r\nwrite_api = client.write_api(write_options=SYNCHRONOUS)\r\n\r\n@mqtt.on_connect()\r\ndef connect(client, flags, rc, properties):\r\n mqtt.client.subscribe(mqtt_topic) #subscribing mqtt topic\r\n mqtt.client.subscribe(mqtt_topic2)\r\n print(\"Connected: \", client, flags, rc, properties)\r\n\r\n@mqtt.on_message()\r\nasync def message(client, topic, payload, qos, properties):\r\n print(\"Received message: \",topic, payload.decode(), qos, properties)\r\n\r\n \r\n\r\n@mqtt.on_disconnect()\r\ndef disconnect(client, packet, exc=None):\r\n print(\"Disconnected\")\r\n\r\n@mqtt.on_subscribe()\r\ndef subscribe(client, mid, qos, properties):\r\n print(\"subscribed\", client, mid, qos, properties)\r\n \r\n@mqtt.subscribe(mqtt_topic) \r\nasync def get_dado(client,topic, payload, qos, properties): #recebe o tópico cômodos\r\n print(\"data: \", topic, payload.decode(), qos, properties)\r\n s=payload.decode()\r\n x=s.split(\" \")\r\n data = \"casa,comodo=\"+ x[2]+\" valor=1\" #converte para o protocolo em linha\r\n write_api.write(bucket, org, data) #envia para o InfluxDB\r\n\r\n@mqtt.subscribe(mqtt_topic2)\r\nasync def get_temperatura(client,topic,payload,qos,properties):\r\n print(\"data: \", topic, payload.decode(), qos, properties)\r\n s=payload.decode()\r\n x=s.split(\" \")\r\n data = \"casa,temperatura=\"+ x[2]+\" valor=\"+x[3] #converte para o protocolo em linha\r\n write_api.write(bucket, org, data) #envia para o InfluxDB\r\n\r\n\r\n@app.get(\"/teste\")\r\nasync def teste():\r\n return 
{\"data\"}\r\n \r\n \r\n ","repo_name":"brenda-gouveia/IoT_project","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":3206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28636667452","text":"# encoding: utf-8\n__author__ = 'pax'\n\nimport unittest\nfrom objekte_intro.raum1 import Raum\n\nclass PartikelTest(unittest.TestCase):\n\n def test_eigenschaften(self):\n r = Raum(\"Kiesweg\", \"Du stehst auf einem verlassenen Kiesweg.\")\n self.assertEqual(r.name, \"Kiesweg\")\n self.assertEqual(r.beschreibung, \"Du stehst auf einem verlassenen Kiesweg.\")\n self.assertListEqual(r.verbindungen, [])\n","repo_name":"ckunz175/efinfo2017","sub_path":"objekte_intro/raum1_test.py","file_name":"raum1_test.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42961210575","text":"def countBits(x):\n count = 0\n while x:\n count += (x & 1)\n x >>= 1\n return count\n\nline = input().split()\nn = int(line[0])\nm = int(line[1])\n\nresult = 0\nfor index in range(n, m + 1):\n result += countBits(index)\nprint(result)\n","repo_name":"cyberskeleton/sandbox","sub_path":"2019-11-24hw3.py","file_name":"2019-11-24hw3.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36141998705","text":"import os\n\ndirs = ['executables','io','staticfiles','submittedcodes','media']\n\ndef main():\n\t# create executables directory\n\tBASE_DIR = os.getcwd()\n\texecdir = os.path.join(BASE_DIR,'executables')\n\tif not os.path.exists(execdir):\n\t\ttry:\n\t\t\tos.makedirs(execdir)\n\t\texcept:\n\t\t\tprint('Error occured when creating directory \\'executables\\'')\n\t\t\traise\n\tfor d in dirs:\n\t\tdirpath = os.path.join(BASE_DIR, d)\n\t\tif not os.path.exists(dirpath):\n\t\t\tprint('{} directory could not be found'.format(d))\n\t\t\traise\n\tprint('everything is good')\t\n\t\t\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"pv0804034/nplonlinejudge","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39409294307","text":"#!/usr/bin/env python3\nfrom time import sleep\nimport threading\nimport traceback\nimport sys\nimport argparse\n\nfrom logzero import logger\nfrom neocore.Fixed8 import Fixed8\nfrom neocore.UInt256 import UInt256\nfrom neocore.UInt160 import UInt160\nfrom twisted.internet import reactor, task\n\nfrom neo.Core.Blockchain import Blockchain\nfrom neo.Core.CoinReference import CoinReference\nfrom neo.Core.TX.Transaction import TransactionOutput, ContractTransaction, TXFeeError\nfrom neo.Core.TX.TransactionAttribute import TransactionAttribute, TransactionAttributeUsage\nfrom neo.Implementations.Blockchains.LevelDB.LevelDBBlockchain import LevelDBBlockchain\nfrom neo.Implementations.Wallets.peewee.UserWallet import UserWallet\nfrom neo.Network.NodeLeader import NodeLeader\nfrom neo.Prompt.Commands import Send, Wallet\nfrom neo.Prompt.Utils import get_asset_id, lookup_addr_str, get_asset_amount\nfrom neo.Settings import settings\nfrom neo.SmartContract.ContractParameterContext import ContractParametersContext\nfrom neo.Wallets.utils import to_aes_key\n\nWALLET_PWD = \"nspcc\"\nWALLET_PATH = \"/wallets/wallet\"\nMASTER_WALLET_PWD = \"coz\"\nMASTER_WALLET_PATH = 
\"/neo-python/neo-privnet.wallet\"\nWALLET_DB_PATH = \"/wallets/db.log\"\nBLOCK_AMOUNT = 10\n\n# user defined params\nTX_FILE = \"/root/raw.txs\"\nPREMADE_NEO = 10\nPREMADE_GAS = 2.0\nTX_NEO = 3\nTX_GAS = 1.0\nTOTAL_AMOUNT = 1000\n\n\ndef read_wallet_db():\n with open(WALLET_DB_PATH, 'r') as f:\n database = f.read().splitlines()\n return database\n\ndef write_raw_db(hashes, path):\n with open(path, 'w') as f:\n for hash in hashes:\n f.write(hash.decode(\"ascii\")+\"\\n\")\n\ndef process_transaction(wallet, contract_tx, scripthash_from=None, scripthash_change=None, fee=None, owners=None, user_tx_attributes=None):\n try:\n tx = wallet.MakeTransaction(tx=contract_tx,\n change_address=scripthash_change,\n fee=fee,\n from_addr=scripthash_from)\n except ValueError:\n print(\"Insufficient funds. No unspent outputs available for building the transaction.\\n\"\n \"If you are trying to sent multiple transactions in 1 block, then make sure you have enough 'vouts'\\n.\"\n \"Use `wallet unspent` and `wallet address split`, or wait until the first transaction is processed before sending another.\")\n raise Exception('oh no')\n except TXFeeError as e:\n print(e)\n raise Exception('oh no')\n\n if tx is None:\n print(\"Insufficient funds\")\n raise Exception('oh no')\n\n try:\n input_coinref = wallet.FindCoinsByVins(tx.inputs)[0]\n source_addr = input_coinref.Address\n for order in tx.outputs:\n dest_addr = order.Address\n value = order.Value.ToString() # fixed8\n if order.AssetId == Blockchain.Default().SystemShare().Hash:\n asset_name = 'NEO'\n else:\n asset_name = 'GAS'\n\n if source_addr != dest_addr:\n print(f\"Sending {value} {asset_name} from {source_addr} to {dest_addr}\")\n else:\n print(f\"Returning {value} {asset_name} as change to {dest_addr}\")\n print(\" \")\n\n standard_contract = wallet.GetStandardAddress()\n\n if scripthash_from is not None:\n signer_contract = wallet.GetContract(scripthash_from)\n else:\n signer_contract = wallet.GetContract(standard_contract)\n\n if not signer_contract.IsMultiSigContract and owners is None:\n data = standard_contract.Data\n tx.Attributes = [TransactionAttribute(usage=TransactionAttributeUsage.Script,\n data=data)]\n\n # insert any additional user specified tx attributes\n tx.Attributes = tx.Attributes + user_tx_attributes\n\n context = ContractParametersContext(tx, isMultiSig=signer_contract.IsMultiSigContract)\n wallet.Sign(context)\n\n if context.Completed:\n tx.scripts = context.GetScripts()\n relayed = NodeLeader.Instance().Relay(tx)\n\n if relayed:\n wallet.SaveTransaction(tx)\n return tx\n else:\n print(\"Could not relay tx %s \" % tx.Hash.ToString())\n raise Exception('oh no')\n\n else:\n print(\"Transaction initiated, but the signature is incomplete. 
Use the `sign` command with the information below to complete signing.\")\n print(json.dumps(context.ToJson(), separators=(',', ':')))\n raise Exception('oh no')\n\n except Exception as e:\n print(\"Could not send: %s \" % e)\n traceback.print_stack()\n traceback.print_exc()\n\n return\n\ndef construct_send_many(wallet, outgoing, start, data, asset, amount):\n logger.info(\"Constructing %s : %d-%d\" % (asset, start, start+outgoing-1))\n output = []\n for i in range(outgoing):\n try:\n assetId = get_asset_id(wallet, asset)\n address_to = data[start+i]\n scripthash_to = lookup_addr_str(wallet, address_to)\n if scripthash_to is None:\n logger.debug(\"invalid destination address\")\n return\n f8amount = get_asset_amount(amount, assetId)\n if f8amount is False:\n logger.debug(\"invalid amount\")\n return\n tx_output = TransactionOutput(AssetId=assetId, Value=f8amount, script_hash=scripthash_to)\n output.append(tx_output)\n except KeyboardInterrupt:\n print('Transaction cancelled')\n return\n contract_tx = ContractTransaction(outputs=output)\n\n scripthash_from = None\n scripthash_change = None\n owners = None\n user_tx_attributes = []\n fee = Fixed8.Zero()\n\n return [contract_tx, scripthash_from, scripthash_change, fee, owners, user_tx_attributes]\n\ndef create_raw_transaction(walletpath, source, dest, txidNeo, txidGas, n):\n # const for asset id\n gas_asset_id = Blockchain.SystemCoin().Hash\n neo_asset_id = Blockchain.SystemShare().Hash\n\n # open source wallet for later transaction signing\n wallet = UserWallet.Open(walletpath, to_aes_key(WALLET_PWD))\n\n source_script_hash = wallet.ToScriptHash(source)\n destination_script_hash = wallet.ToScriptHash(dest)\n\n contract_tx = ContractTransaction()\n contract_tx.raw_tx = True\n\n # here we creating vin\n input1 = CoinReference(prev_hash=UInt256.ParseString(txidNeo), prev_index=int(n))\n input2 = CoinReference(prev_hash=UInt256.ParseString(txidGas), prev_index=int(n))\n contract_tx.inputs = [input1, input2]\n\n # here we creating vout (src [10 NEO] -> { dst [3 NEO]; src [7 NEO] })\n send_to_destination_output1 = TransactionOutput(AssetId=neo_asset_id, Value=Fixed8.FromDecimal(TX_NEO), script_hash=destination_script_hash)\n return_change_output1 = TransactionOutput(AssetId=neo_asset_id, Value=Fixed8.FromDecimal(PREMADE_NEO-TX_NEO), script_hash=source_script_hash)\n return_change_output2 = TransactionOutput(AssetId=gas_asset_id, Value=Fixed8.FromDecimal(TX_GAS), script_hash=source_script_hash)\n contract_tx.outputs = [send_to_destination_output1, return_change_output1, return_change_output2]\n\n # time to sign\n context = ContractParametersContext(contract_tx)\n wallet.Sign(context)\n\n # confirmation scripts\n contract_tx.scripts = context.GetScripts()\n\n raw_tx = contract_tx.ToArray()\n return raw_tx\n\ndef construct_raw_many(outgoing, start, data, txidNeo, txidGas):\n output = []\n for i in range(outgoing):\n try:\n pos = start + i\n filename = WALLET_PATH + \"%d\" % pos\n tx = create_raw_transaction(filename, data[pos], data[pos + TOTAL_AMOUNT], txidNeo, txidGas, i)\n output.append(tx)\n logger.info(\"Rawed transaction %d\" % pos)\n\n except KeyboardInterrupt:\n print('Transaction cancelled')\n return\n\n return output\n\ndef main_routine():\n # Here we awaiting local node to synchronize with private-net\n while True:\n if Blockchain.Default().Height != Blockchain.Default().HeaderHeight or Blockchain.Default().Height < 10:\n logger.info(\"...awaits %s/%s\" % (Blockchain.Default().Height, Blockchain.Default().HeaderHeight))\n sleep(2)\n 
else:\n            break\n\n    bc_height = Blockchain.Default().Height\n    logger.info(\"Synchronized. Height %s Now open wallet:\" % bc_height)\n    txsNeo = []\n    txsGas = []\n    hashes = []\n\n    try:\n        wallet = UserWallet.Open(MASTER_WALLET_PATH, to_aes_key(MASTER_WALLET_PWD))\n        loop = task.LoopingCall(wallet.ProcessBlocks)\n        loop.start(.5)\n\n        logger.info(\"Wallet opened\")\n\n        wallet_db = read_wallet_db()\n\n        sleep(5)\n        # In this block we transfer NEO assets from master wallet to generated wallets\n        for i in range(int(TOTAL_AMOUNT/BLOCK_AMOUNT)):\n            pre_tx = construct_send_many(wallet, BLOCK_AMOUNT, i*BLOCK_AMOUNT, wallet_db, 'NEO', str(PREMADE_NEO))\n            funds_source_script_hash = wallet.ToScriptHash(wallet.Addresses[0])\n            tx = process_transaction(wallet, contract_tx=pre_tx[0], scripthash_from=funds_source_script_hash,\n                                     scripthash_change=pre_tx[2], fee=pre_tx[3], owners=pre_tx[4],\n                                     user_tx_attributes=pre_tx[5])\n            if tx is None:\n                continue\n            tx_hash = tx.Hash.ToString()\n            txsNeo.append(tx_hash)\n            while True:\n                # Try to find transaction in blockchain\n                sleep(0.5)\n                _tx, height = Blockchain.Default().GetTransaction(tx_hash)\n                if height > 0:\n                    break\n            sleep(1)\n\n        # In this block we transfer GAS assets from master wallet to generated wallets\n        for i in range(int(TOTAL_AMOUNT/BLOCK_AMOUNT)):\n            pre_tx = construct_send_many(wallet, BLOCK_AMOUNT, i*BLOCK_AMOUNT, wallet_db, 'GAS', str(PREMADE_GAS))\n            funds_source_script_hash = wallet.ToScriptHash(wallet.Addresses[0])\n            tx = process_transaction(wallet, contract_tx=pre_tx[0], scripthash_from=funds_source_script_hash,\n                                     scripthash_change=pre_tx[2], fee=pre_tx[3], owners=pre_tx[4],\n                                     user_tx_attributes=pre_tx[5])\n            if tx is None:\n                continue\n            tx_hash = tx.Hash.ToString()\n            txsGas.append(tx_hash)\n            while True:\n                # Try to find transaction in blockchain\n                sleep(0.5)\n                _tx, height = Blockchain.Default().GetTransaction(tx_hash)\n                if height > 0:\n                    break\n            sleep(1)\n\n        loop.stop()\n        wallet.Close()\n        logger.info(\"Wallet closed\")\n        sleep(2)\n        logger.info(\"Generating raw transactions\")\n\n        # In this block we generate raw transactions\n        for i in range(int(TOTAL_AMOUNT/BLOCK_AMOUNT)):\n            hashes += construct_raw_many(BLOCK_AMOUNT, i*BLOCK_AMOUNT, wallet_db, txsNeo[i], txsGas[i])\n\n        write_raw_db(hashes, TX_FILE)\n\n\n    except Exception as ex:\n        logger.info(ex)\n        traceback.print_stack()\n        traceback.print_exc()\n        reactor.stop()\n        return\n\n    # After main_routine we stop the application\n    reactor.stop()\n    return\n\n\ndef main():\n    # Parse args\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"-n\", help=\"total amount of transactions\", type=int)\n    parser.add_argument(\"--walletneo\", help=\"premade NEO in wallet\", type=int)\n    parser.add_argument(\"--walletgas\", help=\"premade GAS in wallet\", type=int)\n    parser.add_argument(\"--txneo\", help=\"amount of sending NEO\", type=int)\n    parser.add_argument(\"--txfee\", help=\"tx fee\", type=float)\n    parser.add_argument(\"-f\", help=\"file to save raw transactions\", type=str)\n    args = parser.parse_args()\n\n    global TOTAL_AMOUNT\n    global PREMADE_GAS\n    global PREMADE_NEO\n    global TX_NEO\n    global TX_GAS\n    global TX_FILE\n\n    if args.n:\n        TOTAL_AMOUNT = args.n\n    if args.walletgas:\n        PREMADE_GAS = args.walletgas\n    if args.walletneo:\n        PREMADE_NEO = args.walletneo\n    if args.txneo:\n        TX_NEO = args.txneo\n    if args.txfee:\n        TX_GAS = PREMADE_GAS - args.txfee  # GAS returned as change; the remainder (args.txfee) is left as the network fee\n    if args.f:\n        TX_FILE = args.f\n\n    # Use the private net\n    settings.setup_privnet()\n\n    # Setup the blockchain\n    blockchain = LevelDBBlockchain(settings.chain_leveldb_path)\n    
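# make this LevelDB chain the default instance that Blockchain.Default() returns below
    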
Blockchain.RegisterBlockchain(blockchain)\n dbloop = task.LoopingCall(Blockchain.Default().PersistBlocks)\n dbloop.start(.5)\n NodeLeader.Instance().Start()\n\n d = threading.Thread(target=main_routine)\n d.setDaemon(True)\n d.start()\n\n # Awaiting exit here\n reactor.run()\n logger.info(\"Shutting down.\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"nspcc-dev/neo-test-flow","sub_path":"scripts/tx-gen.py","file_name":"tx-gen.py","file_ext":"py","file_size_in_byte":12929,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"14595206216","text":"#!/usr/bin/env python3\n\"\"\"\n Get table of allele presence/absence from an MAF file.\n\"\"\"\n\nimport argparse\nimport collections\nimport gzip\nimport itertools\nimport sys\n\nMafSeq = collections.namedtuple('MafSeq', 'seq_name start aligned_bases strand contig_length seq')\n\ndef parse_maf_haplotypes(ih):\n record = {'a':{}, 'haplotypes':collections.defaultdict(set),\n 'strain_count':collections.defaultdict(int)}\n for line in ih:\n if line.startswith('a'):\n if len(record['haplotypes']) > 0:\n yield record\n record = {'a':{}, 'haplotypes':collections.defaultdict(set),\n 'strain_count':collections.defaultdict(int)}\n record['a'] = {x.split('=')[0]:x.split('=')[1] for x in line.strip().split()[1:]}\n elif line.startswith('s'):\n fields = line.strip().split()[1:]\n strain = fields[0].split('.')[0]\n record['strain_count'][strain] += 1\n record['haplotypes'][fields[5]].add(strain)\n else:\n continue\n yield record\n\ndef get_vars_from_haplotypes(haplotypes):\n h = list(haplotypes.items())\n l = len(h[0][0])\n ret = {}\n\n ## First find SNPs\n snps = []\n for i in range(l):\n alleles = collections.defaultdict(set)\n for j in range(len(h)):\n b = (h[j][0][i]).upper()\n if b not in {'A', 'T', 'C', 'G'}:\n continue\n alleles[b].add(j)\n if len(alleles) > 1:\n for a, idxs in alleles.items():\n snps.append((str(i) + '-' + a, idxs))\n ret[str(i+1) + '-' + a + '-snp'] = set()\n for j in idxs:\n for s in h[j][1]:\n ret[str(i+1) + '-' + a + '-snp'].add(s)\n\n ## Then find indels\n indels = collections.defaultdict(set)\n for j in range(len(h)):\n d = False\n s = None\n e = None\n for i in range(l):\n b = (h[j][0][i])\n if (b != '-' or i == l-1) and d:\n e = i\n if e == l-1: e +=1\n indels[(s+1, e)].add(j)\n d = False\n elif b == '-' and not d:\n s = i\n d = True\n\n ## Produce output table\n for se, idxs in sorted(indels.items()):\n drs = '_'.join(map(str, se)) + '-del'\n irs = '_'.join(map(str, se)) + '-ins'\n ret[drs] = set()\n ret[irs] = set()\n done = set()\n for j in idxs:\n done.add(j)\n for s in h[j][1]:\n ret[drs].add(s)\n for j in range(len(h)):\n if j not in done:\n for s in h[j][1]:\n ret[irs].add(s)\n\n return ret\n\n\n\nparser = argparse.ArgumentParser(usage=__doc__)\nparser.add_argument('maf')\nparser.add_argument('strains')\nargs = parser.parse_args()\n\n\nstrains = []\nwith open(args.strains, 'rt') as h:\n for line in h:\n strains.append(line.strip())\n\nsys.stdout.write('contig\\tpos\\trs\\t' + '\\t'.join(strains) + '\\n')\nopen_fun = open\nif args.maf.endswith('.gz'):\n open_fun = gzip.open\nwith open_fun(args.maf, 'rt') as ih:\n for record in parse_maf_haplotypes(ih):\n var_table = get_vars_from_haplotypes(record['haplotypes'])\n for rs, strain_list in var_table.items():\n sys.stdout.write(record['a']['label'] + '\\t' +\n rs.split('-')[0] + '\\t' +\n record['a']['label'] + '-' + rs)\n for strain in strains:\n if strain in strain_list:\n sys.stdout.write('\\t1')\n 
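# '1' = strain carries this variant allele; '0' (below) = it does not
                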
else:\n sys.stdout.write('\\t0')\n sys.stdout.write('\\n')\n sys.stdout.write(record['a']['label'] + '\\t' +\n '0' + '\\t' +\n record['a']['label'] + '-copynumber')\n for strain in strains:\n sys.stdout.write('\\t' + str(record['strain_count'][strain]))\n sys.stdout.write('\\n')\n","repo_name":"brendane/asymmetric_selection_manuscript_code_for_review","sub_path":"ensifer_variants/helper_scripts/variants_from_maf.py","file_name":"variants_from_maf.py","file_ext":"py","file_size_in_byte":3985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13708224528","text":"def open_dataset(filename):\n import csv\n rows = []\n with open(filename, 'r', encoding='utf-8') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=';')\n fields = next(csv_reader)\n for row in csv_reader:\n rows.append(row)\n return fields, rows\n\n\ndef convert_1(filename):\n for row in open_dataset(filename)[1]:\n print(f'{row[0]}: {row[2]}')\n\n\ndef convert_2(filename):\n for row in open_dataset(filename)[1]:\n print(f'{row[1]} (email: {row[2]})')\n\n\ndef convert_3(filename):\n dct = {}\n for row in open_dataset(filename)[1]:\n dct.setdefault(row[2].split('@')[1], [])\n dct[row[2].split('@')[1]].append(row[0])\n for k, v in dct.items():\n print(f\"{k} ==> {', '.join(v)}\")\n\n\ndef convert_4(filename):\n from random import sample\n fields, rows = open_dataset(filename)\n fields.append(\"Password\")\n for row in rows:\n row.append(''.join(map(str, sample(range(10), 4))))\n print(*fields, sep=';')\n print(*[';'.join(row) for row in rows], sep='\\n')\n","repo_name":"kkravchenkodev/main_academy","sub_path":"lesson_9/homework/convert/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30505703571","text":"#\n# @lc app=leetcode id=1156 lang=python3\n#\n# [1156] Swap For Longest Repeated Character Substring\n#\n\n# @lc code=start\n\n\nclass Solution:\n def count(self, c, text):\n ans = 0\n f, g = 0, 0\n\n for i in range(len(text)):\n if c == text[i]:\n f += 1\n g += 1\n else:\n f = g+1\n g = 0\n\n ans = max(ans, f, g)\n return ans\n\n def maxRepOpt1(self, text: str) -> int:\n if not text:\n return 0\n\n d = collections.Counter(text)\n\n ans = 0\n for char in d.keys():\n ans = max(ans, min(self.count(char, text), d[char]))\n\n return ans\n# @lc code=end\n","repo_name":"naseeihity/leetcode-daily","sub_path":"dp/1156.swap-for-longest-repeated-character-substring.py","file_name":"1156.swap-for-longest-repeated-character-substring.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20490580711","text":"\n# coding: utf-8\n\n\nfrom py2neo import Graph, Node, Relationship, GraphService\nimport os\nfrom tqdm import tqdm\nimport re\n\nNEO4J_USER = 'neo4j'\nNEO4J_PASSWORD = '123456'\nNEO4J_HOST = 'localhost'\n\n\nclass NeoGraph:\n def __init__(self, gcp=None):\n if gcp:\n #self.g = Graph('bolt://neo4j:123456@35.230.134.163:7687')\n self.g = Graph(host=gcp['NEO4J_HOST'], port=gcp['NEO4J_PORT'], user=gcp['NEO4J_USER'],\n password=gcp['NEO4J_PASSWORD'])\n else:\n self.g = Graph(host=NEO4J_HOST, user=NEO4J_USER,\n password=NEO4J_PASSWORD)\n\n def truncate(self):\n \"\"\"Remove all nodes in the graph\"\"\"\n print(\"----- Truncating graph -----\")\n tx = self.g.begin()\n result = tx.run('MATCH (n) DETACH DELETE n')\n tx.commit()\n return 
result\n\n def add_companies(self, df):\n print(\"----- Starting Add companies process -----\")\n tx = self.g.begin()\n for _, x in tqdm(df.iterrows(), total=len(df)):\n if x['ticker'] != \"NA\":\n n = Node(\"Ticker\", name=x['ticker'], company=x['name'],\n sector=x['sector'], variation_coefficient=x['var_coef'])\n tx.create(n)\n tx.commit()\n self.g.run(\"CREATE INDEX ON :Ticker(name)\")\n print(\"----- Add companies process complete -----\")\n\n def create_links(self, df):\n print(\"----- Starting relationship creation process -----\")\n for _, x in tqdm(df.iterrows(), total=df.shape[0]):\n cypher = f\"MATCH (s1:Ticker {{name:\\'{x['ticker1']}\\'}}),(s2:Ticker {{name:\\'{x['ticker2']}\\'}}) CREATE (s1)-[:CORR {{corr : {x['cor']}, id : '{x['id']}'}}]->(s2)\"\n self.g.run(cypher)\n print(\"-----Relationship creation process complete -----\")\n\n def add_tickers(self, df):\n print(\"----- Starting Add companies process -----\")\n tx = self.g.begin()\n for _, x in tqdm(df.iterrows(), total=len(df)):\n\n n = Node(\"Ticker\", ticker=x['ticker'], company=x['name'],\n sector=x['sector'], )\n tx.create(n)\n tx.commit()\n self.g.run(\"CREATE INDEX ON :Ticker(ticker)\")\n print(\"----- Add companies process complete -----\")\n\n def add_funds(self, df):\n print(\"----- Starting Add Funds process -----\")\n tx = self.g.begin()\n for _, x in tqdm(df.iterrows(), total=len(df)):\n\n n = Node(\"Fund\", name=x['name'])\n tx.create(n)\n tx.commit()\n print(\"----- Add Funds process complete -----\")\n\n def link_funds_to_tickers(self, funds_tickers):\n print(\"----- Starting relationship creation process -----\")\n for fund in tqdm(funds_tickers, total=len(funds_tickers)):\n print(fund)\n for x in funds_tickers[fund]:\n if x['TICKER'] and x['VALUE'] > 0:\n\n if re.sub('[^a-zA-Z]+', '', x['PUT/CALL']):\n pc = re.sub('[^a-zA-Z]+', '',\n x['PUT/CALL']).upper()\n cypher = f\"MATCH (f1:Fund {{name:\\'{fund}\\'}}),(t1:Ticker {{ticker:\\'{x['TICKER']}\\'}}) CREATE (f1)-[:INVESTMENT {{valuex$1000 : {x['VALUE']}, shares : {x['SHARES']}, put_call : '{pc}'}}]->(t1)\"\n else:\n cypher = f\"MATCH (f1:Fund {{name:\\'{fund}\\'}}),(t1:Ticker {{ticker:\\'{x['TICKER']}\\'}}) CREATE (f1)-[:INVESTMENT {{valuex$1000 : {x['VALUE']}, shares : {x['SHARES']}}}]->(t1)\"\n try:\n self.g.run(cypher)\n except:\n print(\"Failed \", fund, x)\n print(\"-----Relationship creation process complete -----\")\n","repo_name":"DL4L/13F-Network","sub_path":"neo4j_funcs.py","file_name":"neo4j_funcs.py","file_ext":"py","file_size_in_byte":3722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8674045554","text":"import csv\nfrom Airport import Airport\nfrom math import sin, cos, sqrt, atan2, radians\n\n\nclass AllAirports:\n def __init__(self):\n self.load_airports('./data/airport.csv')\n\n def load_airports(self, file_path, encoding=\"utf8\"):\n all_airports = {}\n country_code = {}\n currency = {}\n # find the country Code\n with open(\"./data/countrycurrency.csv\", \"r\") as file:\n lines = csv.reader(file)\n header = next(lines)\n for line in lines:\n country_code[line[0]] = line[1]\n # print(country_code['Afghanistan'])\n # print(header)\n file.close()\n\n with open(\"./data/countryrates.csv\", \"r\") as file:\n lines = csv.reader(file)\n for line in lines:\n currency[line[1]] = line[2]\n file.close()\n # print(currency)\n with open(file_path, 'r') as file:\n allData = csv.reader(file)\n for data in allData:\n try:\n code = country_code[data[3]]\n rate = currency[code]\n 
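# lookups built above: country name -> currency code -> conversion rate
                    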
all_airports[data[4]] = Airport(\n                        idx=data[0], code=data[4], name=data[1], city=data[2], country=data[3], lat=data[6], lon=data[7], rate=rate)\n                except KeyError as e:\n                    print(\"Error happen\", e)\n        file.close()\n\n        self.all_airports = all_airports\n        # for airport in all_airports.items():\n        #     print(airport)\n\n    def calculateAirportDistance(self, lat1, lon1, lat2, lon2):\n        # approximate radius of earth in km\n        R = 6373.0\n        # convert the caller's coordinates (CSV fields are strings) instead of hardcoded test values\n        lat1 = radians(float(lat1))\n        lon1 = radians(float(lon1))\n        lat2 = radians(float(lat2))\n        lon2 = radians(float(lon2))\n\n        dlon = lon2 - lon1\n        dlat = lat2 - lat1\n\n        a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n        c = 2 * atan2(sqrt(a), sqrt(1 - a))\n\n        distance = R * c\n        return distance\n\n    def airportDistance(self, aiport1_code, airport2_code):\n        airport1 = self.all_airports[aiport1_code]\n        airport2 = self.all_airports[airport2_code]\n        distance = self.calculateAirportDistance(\n            lat1=airport1.lat, lon1=airport1.lon, lat2=airport2.lat, lon2=airport2.lon)\n        return distance\n\n    def getTicketPrice(self, start, end):\n        airport_1 = self.all_airports[start]\n        distance = self.airportDistance(start, end)\n        fare = distance * float(airport_1.rate)  # rate comes from the CSV as a string\n        return fare\n\n\nTravelAgency = AllAirports()\n\nfare = TravelAgency.getTicketPrice('HEA', 'WKM')\n\nprint(f\"Ticket Fare is : {fare}\")\n","repo_name":"ruhulaminjr/Phitron-Cse-Fundamental-Course","sub_path":"python/oop/flight-scheduler/All_Airports.py","file_name":"All_Airports.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"10139990732","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef awgn(x,snr):\r\n    snr = 10**(snr/10.0)\r\n    xpower = np.sum(x**2)\r\n    mu = 0\r\n    npower = xpower/snr\r\n    sigma = np.sqrt(npower)\r\n    return np.random.normal(mu,sigma,size=len(x)),sigma\r\n\r\nN = 200\r\ns = np.zeros(N)\r\nsig_db = 35\r\ns[50] = np.sqrt(10**(sig_db/10.0))\r\n[w1,sigma] = awgn(s,5) ####this w is constant if awgn is called only once\r\ns1 = s+w1 \r\ntotal = 10*np.log(np.sum(s1**2)/N)####estimate sigma\r\nprint('noise power(one pulse period) : %ddB'%total)\r\nprint('signal power(one pulse period) ; %ddB'%sig_db)\r\n\r\nw0 = np.zeros((N,10))\r\nfor k in range(10):\r\n    [w,sigma] = awgn(s,5)\r\n    w0[:,k] = w\r\n\r\ns0 = np.concatenate((s+w0[:,0],s+w0[:,1],s+w0[:,2],s+w0[:,3],s+w0[:,4],s+w0[:,5],s+w0[:,6],s+w0[:,7],s+w0[:,8],s+w0[:,9]))##this just generates the same sequence\r\nsc0 = np.zeros(N)\r\n\r\n\r\nfor j in range(10):\r\n    sc0 = sc0 + s0[j*N:(j+1)*N]\r\n\r\nsc = sc0\r\n#sc = s+w ###this for no accumulation\r\nfig1 = plt.figure()\r\nplt.plot(10*np.log10(s1**2),label='one period clutter')\r\nplt.ylabel('Amplitude/dB')\r\nplt.xlabel('range')\r\nfig1.show()\r\n\r\n####cfar####\r\n#sc = sc**2\r\npfa = 1e-4\r\ng = 2\r\nr = 10\r\nn = 2*r\r\nr_cell = np.zeros(n)\r\ntest = np.zeros(N)\r\ntest_ideal = np.zeros(N)\r\nsig_loc = np.zeros(N)\r\nsigma0 = sigma**2\r\n\r\nfor i in range(N):\r\n    if i-g-r>=0 and i+g+r<=N-1:\r\n        r_cell = np.concatenate((sc[i-g-r:i-g-1],sc[i+g+1:r+i+g]))\r\n        \r\n    sigma2_e = np.sum(r_cell**2)/n\r\n    #print(sigma2_e)\r\n    a_temp = pfa**(-1/n)\r\n    alpha = n*(a_temp-1)\r\n    alpha0 = -np.log(pfa)\r\n    #print(alpha)\r\n    test_ideal[i] = alpha0*sigma0 ###sigma appears in square not in db,10sigma\r\n    test[i] = alpha*sigma2_e\r\n    #test[i] = sigma2_e\r\n    if test[i]<sc[i]**2 and i-g-r>=0 and i+g+r<=N-1:\r\n        sig_loc[i]=1\r\n\r\nfig2 = plt.figure()\r\n\r\nplt.plot(10*np.log10(sc**2),'r-',label='signal with white gaussian 
noise')\r\nplt.ylabel('Amplitude/dB')\r\nplt.xlabel('range')\r\nplt.legend(loc='lower right')\r\nplt.hold(True)\r\n#plt.plot(test,'b-')\r\nplt.plot(10*np.log10(test),'b-.',label='the adaptive threshold')\r\nplt.legend(loc='lower right')\r\nplt.hold(False)\r\nfig2.show()\r\n\r\nfig3 = plt.figure()\r\n\r\nplt.plot(sig_loc,label='the estimate location')\r\nplt.legend(loc='upper right')\r\nplt.xlabel('range')\r\nfig3.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"jensonlin/SomeRadarDetectProgram","sub_path":"phaseaccumulate.py","file_name":"phaseaccumulate.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"31"} +{"seq_id":"30068823280","text":"# **************************************************************************** #\n# #\n# ::: :::::::: #\n# main.py :+: :+: :+: #\n# +:+ +:+ +:+ #\n# By: tbrizon +#+ +:+ +#+ #\n# +#+#+#+#+#+ +#+ #\n# Created: 2019/11/06 10:21:22 by tbrizon #+# #+# #\n# Updated: 2019/11/07 10:11:20 by tbrizon ### ########.fr #\n# #\n# **************************************************************************** #\n\nimport os\nimport time\n\"\"\"\n print( args)\n for i in args:\n print(i)\n\"\"\"\ndef log(function):\n def wrap(*args):\n print(\"avant\")\n function()\n print(\"apres\")\n return function()\n return wrap\n\n\ndef what_are_the_vars(*args, **kwargs):\n i = 0\n obj = ObjectC\n if not args and not kwargs:\n return \n for lst_v in args:\n \"s{}tr\".format(\"cc\")\n setattr(obj, \"var_{}\".format(i), lst_v)\n i+=1\n for k, value, in kwargs.items():\n setattr(obj, k, value)\n return (ObjectC)\n \n\nclass ObjectC(object):\n def __init__(self):\n pass\n@log\ndef doom_printer(obj):\n if obj is None:\n print(\"ERROR\")\n print(\"end\")\n return\n for attr in dir(obj):\n if attr[0] != '_':\n value = getattr(obj, attr)\n print(\"{}: {}\".format(attr, value))\n print(\"end\")\n\nif __name__ == \"__main__\":\n obj = what_are_the_vars(7)\n doom_printer(obj)\n obj = what_are_the_vars(\"ft_lol\", \"Hi\")\n doom_printer(obj)\n obj = what_are_the_vars()\n doom_printer(obj)\n obj = what_are_the_vars(12, \"Yes\", [0, 0, 0], a=10, hello=\"world\")\n doom_printer(obj)\n obj = what_are_the_vars(42, a=10, var_0=\"world\")\n doom_printer(obj)\n","repo_name":"tbrizon/bootcamp_python","sub_path":"day02/ex01/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6704702022","text":"from rest_framework import serializers\nfrom books.models import Book #Book serializers ke liye\nfrom django.contrib.auth import get_user_model#User Authentication\n\nclass BookSerializer(serializers.ModelSerializer): #serializes book objects\n class Meta:\n model = Book\n fields = ('title','subtitle','author','isbn')\n\n\nclass UserSerializer(serializers.ModelSerializer): #serializes user objects\n class Meta:\n model = get_user_model()\n fields = ('id', 'username')\n","repo_name":"Thakur-Govind/Library_project","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1551829395","text":"import pygame\nfrom pygame.surface import Surface, SurfaceType\nfrom typing import Union\n\n\nclass Circle:\n \"\"\"\n This class represents the circle which appears during the game.\n \"\"\"\n def __init__(self, radius, 
position, color):\n self.radius = radius\n self.position = position\n self.color = color\n self.rect_obj = pygame.Rect(position[0] - radius, position[1] - radius,\n 2 * radius, 2 * radius)\n\n def draw(self, window: Union[Surface, SurfaceType]):\n \"\"\"\n This circle will be drawn into the window.\n\n :param window: The main window of the game.\n :return:\n \"\"\"\n pygame.draw.circle(window, self.color, self.position, self.radius)\n\n def update_rect_position(self):\n \"\"\"\n The rectangular object's position that wraps the circle will be\n adjusted accordingly.\n :return:\n \"\"\"\n self.rect_obj.x = self.position[0] - self.radius\n self.rect_obj.y = self.position[1] - self.radius\n","repo_name":"RaduTheMan/bubble_buster","sub_path":"states/game_in_progress/circle.py","file_name":"circle.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36172664618","text":"# Contains the logic for unpacking user configuration file\n__author__ = \"Matteo Golin\"\n\n# Imports\nimport json\nfrom dataclasses import dataclass, field\nfrom enum import StrEnum\nfrom typing import Any, Self\n\n# Constants (note that trailing +1 is for inclusivity in range() object)\nPOWER_RANGE: tuple[int, int] = (-3, 16 + 1)\nVALID_SPREADING_FACTORS: list[int] = [7, 8, 9, 10, 11, 12]\nVALID_BANDWIDTHS: list[int] = [125, 250, 500]\nSYNC_RANGE: tuple[int, int] = (0, 256 + 1)\nPREAMBLE_RANGE: tuple[int, int] = (0, 65_535 + 1)\nLF_RANGE: tuple[int, int] = (433_050_000, 434_790_000 + 1)\nHF_RANGE: tuple[int, int] = (863_000_000, 870_000_000 + 1)\n\n# Types\nJSON = dict[str, Any]\n\n\nclass ModulationModes(StrEnum):\n \"\"\"Modulation types for the RN2483 radio.\"\"\"\n\n LORA = \"lora\"\n FSK = \"fsk\"\n\n\nclass CodingRates(StrEnum):\n \"\"\"Coding rates for the RN2483 radio.\"\"\"\n\n FOUR_FIFTHS = \"4/5\"\n FOUR_SIXTHS = \"4/6\"\n FOUR_SEVENTHS = \"4/7\"\n FOUR_EIGHTS = \"4/8\"\n\n\n@dataclass\nclass RadioParameters:\n\n \"\"\"\n Represents a collection of parameters for the RN2483 radio settings.\n\n modulation: The modulation type.\n frequency: The frequency in Hz.\n power: The 15th state has an output power of 14.1dBm for 868MHz and 13.6dBm for 433MHz.\n spread_factor: Higher spreading factor means slower transmissions, but system will have better reception and less\n error.\n coding_rate: The ratio of actual data to error-correcting data.\n bandwidth: The bandwidth allocated to the transmission.\n preamble_len: The length of the transmission used to synchronize the receiver.\n cyclic_redundancy: Enable or disable cyclic redudancy check used to detect errors in the received signal.\n iqi: Invert IQ function enabled/disabled.\n sync_word: The radio sync word.\n \"\"\"\n\n modulation: ModulationModes = ModulationModes.LORA\n frequency: int = 433_050_000\n power: int = 15\n spread_factor: int = 9\n coding_rate: CodingRates = CodingRates.FOUR_SEVENTHS\n bandwidth: int = 500\n preamble_len: int = 6\n cyclic_redundancy: bool = True\n iqi: bool = False\n sync_word: str = \"0x43\"\n\n def __post_init__(self):\n if self.frequency not in range(*LF_RANGE) and self.frequency not in range(*HF_RANGE):\n raise ValueError(\n f\"Frequency '{self.frequency}' not in low frequency range {LF_RANGE} or high frequency range {HF_RANGE}\"\n )\n\n if self.power not in range(*POWER_RANGE):\n raise ValueError(f\"Power '{self.power}' not within allowed range {POWER_RANGE}\")\n\n if self.spread_factor not in VALID_SPREADING_FACTORS:\n raise 
ValueError(f\"Spread factor '{self.spread_factor}' invalid; must be one of {VALID_SPREADING_FACTORS}\")\n\n if self.preamble_len not in range(*PREAMBLE_RANGE):\n raise ValueError(f\"Preamble length '{self.preamble_len}' not within allowed range of {PREAMBLE_RANGE}\")\n\n if int(self.sync_word, 16) not in range(*SYNC_RANGE):\n raise ValueError(f\"Sync word '{self.sync_word}' not within allowed range of {SYNC_RANGE}\")\n self.sync_word = self.sync_word[2:] # Remove 0x\n\n @classmethod\n def from_json(cls, data: JSON) -> Self:\n \"\"\"Builds a new RadioParameters object from JSON data found in a config file.\"\"\"\n\n # Radio parameters are either initialized with an explicitly defined value from the config file, or\n # are assigned a default value.\n return cls(\n modulation=ModulationModes(data.get(\"modulation\", \"lora\")),\n frequency=data.get(\"frequency\", 433_050_000),\n power=data.get(\"power\", 15),\n spread_factor=data.get(\"spread_factor\", 9),\n coding_rate=CodingRates(data.get(\"coding_rate\", \"4/7\")),\n bandwidth=data.get(\"bandwidth\", 500),\n preamble_len=data.get(\"preamble_len\", 6),\n cyclic_redundancy=data.get(\"cyclic_redundancy\", True),\n iqi=data.get(\"iqi\", False),\n sync_word=data.get(\"sync_word\", \"0x43\"),\n )\n\n def __iter__(self):\n yield \"modulation\", self.modulation.value\n yield \"frequency\", self.frequency\n yield \"power\", self.power\n yield \"spread_factor\", self.spread_factor\n yield \"coding_rate\", self.coding_rate.value\n yield \"bandwidth\", self.bandwidth\n yield \"preamble_len\", self.preamble_len\n yield \"cyclic_redundancy\", self.cyclic_redundancy\n yield \"iqi\", self.iqi\n yield \"sync_word\", self.sync_word\n\n\n@dataclass\nclass Config:\n\n \"\"\"Contains settings for the ground station process.\"\"\"\n\n radio_parameters: RadioParameters = field(default_factory=RadioParameters)\n approved_callsigns: dict[str, str] = field(default_factory=dict)\n\n def __post_init__(self):\n if len(self.approved_callsigns) == 0:\n raise ValueError(\"You must provide at least one approved callsign.\")\n\n @classmethod\n def from_json(cls, data: JSON) -> Self:\n \"\"\"Creates a new Config object from the JSON data contained in the user config file.\"\"\"\n\n return cls(\n radio_parameters=RadioParameters.from_json(data.get(\"radio_params\", dict())), # type:ignore\n approved_callsigns=data.get(\"approved_callsigns\", dict()), # type:ignore\n )\n\n\ndef load_config(filepath: str) -> Config:\n \"\"\"Returns a Config object created from a configuration JSON file.\"\"\"\n\n with open(filepath, \"r\") as file:\n data = json.load(file)\n\n return Config.from_json(data)\n","repo_name":"CarletonURocketry/ground-station","sub_path":"modules/misc/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":5491,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"38099379812","text":"# 205. 
Isomorphic Strings\n# Given two strings s and t, determine whether they are isomorphic.\n# Two strings are isomorphic if the characters in s can be replaced to obtain t.\n# Every occurrence of a character must be replaced by the same character, and the order of characters\n# must be preserved. No two characters may map to the same character, but a character may map to itself.\n#\n# Example 1:\n# Input: s = \"egg\", t = \"add\"\n# Output: true\n#\n# Example 2:\n# Input: s = \"foo\", t = \"bar\"\n# Output: false\n#\n# Example 3:\n# Input: s = \"paper\", t = \"title\"\n# Output: true\n#\n# Note:\n# You may assume s and t have the same length.\n\n\ndef isIsomorphic(s, t):  # couldn't think of a better way, so brute force\n    \"\"\"\n    :type s: str\n    :type t: str\n    :rtype: bool\n    \"\"\"\n    if len(s) != len(t):\n        return False\n    n = len(s)\n    d = {}\n    for i in range(n):\n        if t[i] not in d.values():\n            if s[i] not in d:\n                d[s[i]] = t[i]\n            else:\n                return False\n        else:\n            if s[i] not in d:\n                return False\n            else:\n                if d[s[i]] != t[i]:\n                    return False\n    return True\n\nprint(isIsomorphic('aa', 'ab'))","repo_name":"wulalala17/leetcode","sub_path":"leetcode/2020/December/205isIsomorphic.py","file_name":"205isIsomorphic.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
+{"seq_id":"5809168651","text":"def solution(cost, order):\n    \n    order.sort()\n    _order = [order[0]]\n    for i, (m, n) in enumerate(order[1:]):\n        _order.append([m - order[i][0], n])\n\n    stack = []\n    for m, n in _order:\n        while stack:\n            _m, _n = stack[-1]\n            if _m / _n < m / n:\n                break\n            stack.pop()\n            m, n = m + _m, n + _n\n        stack.append([m, n])\n    \n    # part 3\n    answer = 0\n    for m, n in stack:\n        p_prev = 0\n        for t, p in cost:\n            if m * t >= n:\n                break\n            answer += (n - m * t) * (p - p_prev)\n            p_prev = p\n    \n    return answer","repo_name":"Liebestraum1/Algorithm_Python","sub_path":"Programmers/community_learning/1_4_bicycle_factory.py","file_name":"1_4_bicycle_factory.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"11274377097","text":"import sys\nfrom util.log import *\nfrom util.cases import *\nfrom util.sql import *\nfrom util.dnodes import *\n\n\nclass TDTestCase:\n    def init(self, conn, logSql):\n        tdLog.debug(\"start to execute %s\" % __file__)\n        tdSql.init(conn.cursor(), logSql)\n\n    def run(self):\n        tdSql.prepare()\n        tdSql.execute(\"drop database if exists tdb\")\n        tdSql.execute(\"create database if not exists tdb keep 3650\")\n        tdSql.execute(\"use tdb\")\n\n        tdSql.execute(\n            \"create table stb1 (time timestamp, c1 int) TAGS (t1 int)\"\n        )\n\n        tdSql.execute(\n            \"insert into t1 using stb1 tags(1) values (now - 1m, 1)\"\n        )\n        tdSql.execute(\n            \"insert into t1 using stb1 tags(1) values (now - 2m, 2)\"\n        )\n        tdSql.execute(\n            \"insert into t1 using stb1 tags(1) values (now - 3m, 3)\"\n        )\n\n        res = tdSql.getColNameList(\"select count(*) from t1 interval(1m)\")\n        assert res[0] == 'time'\n\n    def stop(self):\n        tdSql.close()\n        tdLog.success(\"%s successfully executed\" % __file__)\n\ntdCases.addWindows(__file__, TDTestCase())\ntdCases.addLinux(__file__, TDTestCase())\n","repo_name":"lx1zhong/TDengine","sub_path":"tests/pytest/query/queryPriKey.py","file_name":"queryPriKey.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
+{"seq_id":"72265256089","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='ContestSubmission',\n            fields=[\n                ('id', 
models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('run_id', models.CharField(max_length=191, blank=True)),\n ('compiler_id', models.CharField(max_length=191, blank=True)),\n ('send_error', models.TextField(null=True, blank=True)),\n ('got_verdict', models.BooleanField(default=False)),\n ('full_response', models.TextField(null=True, blank=True)),\n ('verdict', models.TextField(null=True, blank=True)),\n ('precompile_checks', models.TextField(null=True, blank=True)),\n ('compile_log', models.TextField(null=True, blank=True)),\n ('used_time', models.IntegerField(null=True, blank=True)),\n ('used_memory', models.IntegerField(null=True, blank=True)),\n ('error', models.TextField(null=True, blank=True)),\n ('message', models.TextField(null=True, blank=True)),\n ('test_number', models.IntegerField(null=True, blank=True)),\n ('create_time', models.DateTimeField(auto_now_add=True)),\n ('update_time', models.DateTimeField(auto_now=True)),\n ('sended_notify', models.BooleanField(default=False)),\n ('author', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","repo_name":"znick/anytask","sub_path":"anytask/anycontest/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"31"} +{"seq_id":"32933856372","text":"#!/usr/bin/env python\n\nimport os\nimport tweepy\nimport Queue\nimport threading\nimport json\nfrom sets import Set\nfrom time import sleep\n\nerr = False\nif not \"TWITTER_CONSUMER_KEY\" in os.environ:\n print(\"Environment variable TWITTER_CONSUMER_KEY not set!\")\n err = True\nelse:\n consumer_key = os.environ.get(\"TWITTER_CONSUMER_KEY\", \"\")\n\nif not \"TWITTER_CONSUMER_SECRET\" in os.environ:\n print(\"Environment variable TWITTER_CONSUMER_SECRET not set!\")\n err = True\nelse:\n consumer_secret = os.environ.get(\"TWITTER_CONSUMER_SECRET\", \"\")\n\nif not \"TWITTER_OAUTH_TOKEN\" in os.environ:\n print(\"Environment variable TWITTER_OAUTH_TOKEN not set!\")\n err = True\nelse:\n oauth_token = os.environ.get(\"TWITTER_OAUTH_TOKEN\", \"\")\n\nif not \"TWITTER_OAUTH_SECRET\" in os.environ:\n print(\"Environment variable TWITTER_OAUTH_SECRET not set!\")\n err = True\nelse:\n oauth_secret = os.environ.get(\"TWITTER_OAUTH_SECRET\", \"\")\n\ntwitter_track = os.environ.get(\"TWITTER_TRACK\")\nif not \"TWITTER_TRACK\" in os.environ:\n print(\"WARNING: Environment variable TWITTER_TRACK not set; nothing will be streamed!\")\nelse:\n print(\"Tracking\", twitter_track)\n\nif not err:\n try:\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(oauth_token, oauth_secret)\n except:\n print(\"Could not set API tokens\")\n err = True\n\nif not err:\n try:\n api = tweepy.API(auth)\n except:\n print(\"Could not authenticate with Twitter\")\n err = True\n\nif err:\n print(\"ERROR: Twitter API communication failed\")\n while True:\n sleep(100)\n\n\nclass RobotListener(tweepy.StreamListener):\n def on_status(self, status):\n # Filter out retweets because we don't get the full text and there's no real\n # point in going and looking up the original just to print it again -- people\n # should say unique things! 
:)\n if (not status.retweeted) and ('RT @' not in status.text):\n #print(\"FROM: @\" + status.user.screen_name + \" (\" + status.user.name + \")\")\n #print(\"TEXT: \" + status.text)\n q.put(status)\n #print(q.qsize())\n \n def on_error(self, status_code):\n print(\"ERROR! Status code: \", status_code)\n return False\n\ndef robot(letter):\n print(\"ROBOT:\", letter)\n os.system(\"screen -S robot -X stuff '\" + letter + \"^M'\")\n\ndef display_letter(letter):\n #print(\"DISPLAY:\", letter)\n if letter == \"\\n\":\n letter = \"
\"\n message = { \"newmessage\": False, \"letter\": letter }\n json.dump(message, fifo)\n fifo.write(\"\\n\")\n fifo.flush()\n\ndef display_new(handle):\n print(\"New tweet from \", handle)\n message = { \"newmessage\": True, \"name\": handle }\n json.dump(message, fifo)\n fifo.write(\"\\n\")\n fifo.flush()\n\ndef twitter_thread():\n print(\"Starting twitter thread\")\n robotListener = RobotListener()\n robot = tweepy.Stream(auth = api.auth, listener = robotListener)\n robot.filter(track=[twitter_track])\n\n\nboard_chars = Set('abcdefghijklmnopqrstuvwxyz0123456789(,@!?:.)')\nq = Queue.Queue()\nfifo = open('messages', 'w')\n\nt = threading.Thread(target=twitter_thread)\nt.daemon = True\nt.start()\n\nwhile True:\n if not q.empty():\n # Grab a tweet and show it\n tweet = q.get()\n display_new(\"@\" + tweet.user.screen_name + \" (\" + tweet.user.name + \")\")\n # We'll show it character by character to allow the robot to move\n for c in tweet.text:\n display_letter(c)\n if Set(c.lower()).issubset(board_chars):\n robot(c.lower())\n sleep(2)\n sleep(5)\n","repo_name":"mccollam/ouijarobot","sub_path":"pi/stream.py","file_name":"stream.py","file_ext":"py","file_size_in_byte":3602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"38556351800","text":"import heapq\ndef solution(jobs):\n answer = 0\n start = -1 # 특정 작업의 시작 시간\n time = 0 # 전체 소요 시간\n heapq.heapify(jobs)\n heap = []\n cnt = 0\n while cnt finW[ss]:\n # forward k, ning que wu lan\n ss1 = s[k:k + wl]\n dicW[ss1] -= 1\n if dicW[ss1] < finW[ss1]: count -= 1\n k += wl\n if count == cnt:\n # result get\n res.append(k)\n dicW[s[k:k + wl]] -= 1\n count -= 1\n k += wl\n else: # not a word when processing\n dicW = collections.defaultdict(int)\n count = 0\n k = j + wl\n j += wl\n return res\n\nprint(Solution().findSubstring(\"abaababbaba\", [\"ba\",\"ab\",\"ab\"]))\nprint(Solution().findSubstring(\"barfoogfoobarthefoobarman\", [\"bar\",\"foo\",\"the\"]))\nprint(Solution().findSubstring('foobarthebarfooman', ['foo', 'bar']))\nprint(Solution().findSubstring(\"wordgoodgoodgoodbestword\", [\"word\",\"good\",\"best\",\"good\"]))\n# print(Solution().findSubstring(\"lingmindraboofooowingdingbarrwingmonkeypoundcake\", [\"fooo\",\"barr\",\"wing\",\"ding\",\"wing\"]))","repo_name":"SuperMartinYang/learning_algorithm","sub_path":"leetcode/hard/Substring_with_Concatenation_of_All_Words.py","file_name":"Substring_with_Concatenation_of_All_Words.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5869077728","text":"# File: BST_Cipher.py\n\n# Description:\n\n# Student Name: Neha Kondaveeti\n\n# Student UT EID: nk8975\n\n# Course Name: CS 313E\n\n# Unique Number: 51120\n\n# Date Created: 04/18\n\n# Date Last Modified: 04/18\n\n\nimport sys\n\nclass Node(object):\n def __init__(self, data, lChild = None, rChild = None):\n self.data = data\n self.lChild = lChild\n self.rChild = rChild\n\nclass Tree (object):\n # the init() function creates the binary search tree with the\n # encryption string. 
\n # call the remove other function to have correct string within class\n # If the encryption string contains any\n # character other than the characters 'a' through 'z' or the\n # space character drop that character.\n\n def __init__ (self, encrypt_str):\n self.root = None\n self.encrypt_str = encrypt_str.lower()\n emptystr = \"\"\n for i in encrypt_str:\n if ord(i) > 96 and ord(i) < 123 or i == \" \":\n emptystr += i\n self.encrypt_str = emptystr\n for i in range(len(self.encrypt_str)):\n self.insert(self.encrypt_str[i])\n \n # the insert() function adds a node containing a character in\n # the binary search tree. If the character already exists, it\n # does not add that character. There are no duplicate characters\n # in the binary search tree.\n def insert (self, ch):\n new = Node(ch)\n if self.root == None:\n self.root = new\n return\n else:\n current = self.root\n parent = self.root\n while current != None:\n parent = current\n if current.data == ch:\n return\n elif ch < current.data:\n current = current.lChild\n else:\n current = current.rChild\n if ch < parent.data:\n parent.lChild = new\n return\n else:\n parent.rChild = new\n\n\n # the search() function will search for a character in the binary\n # search tree and return a string containing a series of lefts\n # (<) and rights (>) needed to reach that character. It will\n # return a blank string if the character does not exist in the tree.\n # It will return * if the character is the root of the tree.\n def search (self, ch):\n current = self.root\n # search val in tree\n if current.data == ch:\n return \"*\"\n astring = ''\n while current != None and current.data != ch:\n if ch > current.data:\n astring += \">\"\n current = current.rChild\n elif ch < current.data:\n astring += \"<\"\n current = current.lChild\n return astring\n\n # the traverse() function will take string composed of a series of\n # lefts (<) and rights (>) and return the corresponding \n # character in the binary search tree. It will return an empty string\n # if the input parameter does not lead to a valid character in the tree.\n def traverse (self, st):\n # opposite of search, oging to be used in decryption\n if len(st) == 0:\n return \"\"\n else:\n current = self.root\n if st == \"*\":\n return current.data\n for i in st:\n if current == None:\n return ''\n elif i == \">\" and current.rChild != None:\n current = current.rChild\n elif i == \"<\" and current.lChild != None:\n current = current.lChild\n return current.data\n\n # the encrypt() function will take a string as input parameter, convert\n # it to lower case, and return the encrypted string. 
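Each character is replaced by its search() path of '<' and '>' moves, with '!' separating characters. 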
It will ignore\n # all digits, punctuation marks, and special characters.\n def encrypt (self, st):\n encrypt_string = \"\"\n # make all lower\n st = st.lower()\n for i in st:\n if i in self.encrypt_str:\n encrypt_string += self.search(i) + \"!\"\n else:\n encrypt_string += str(i)\n final = encrypt_string[:-1]\n return final\n\n # the decrypt() function will take a string as input parameter, and\n # return the decrypted string.\n def decrypt (self, st):\n astring = \"\"\n dec = st.split(\"!\")\n for i in dec:\n astring += self.traverse(i)\n return astring\n \ndef main():\n # read encrypt string\n line = sys.stdin.readline()\n encrypt_str = line.strip()\n\n # create a Tree object\n the_tree = Tree (encrypt_str)\n\n # read string to be encrypted\n line = sys.stdin.readline()\n str_to_encode = line.strip()\n\n # print the encryption\n print (the_tree.encrypt(str_to_encode))\n\n # read the string to be decrypted\n line = sys.stdin.readline()\n str_to_decode = line.strip()\n \n # print the decryption\n print (the_tree.decrypt(str_to_decode))\n \nif __name__ == \"__main__\":\n main()","repo_name":"nehakondaveeti/CSprojects","sub_path":"BST_Cipher.py","file_name":"BST_Cipher.py","file_ext":"py","file_size_in_byte":4447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21608767894","text":"# retreiving the argv variable function from python sys package\r\n\r\nfrom sys import argv\r\n\r\n# defining the argument variables\r\n\r\nscript, filename = argv\r\n\r\n#some print functions with \"filename\" being called for in the curly brackets\r\n\r\nprint(f\"We're going to erase {filename}.\")\r\nprint(\"If you dont want that, hit CTR:-C (^C).\")\r\nprint(\"If you do want that, hit RETURN.\")\r\n\r\n# if you press RETURN, the script will continue, if you press ctrl c then it will terminate the script via \"keyboard interrupt\"\r\n\r\ninput(\"?\")\r\n\r\nprint(\"Opening the file...\")\r\n\r\n# assigning the variable, 'target' to the filename called for in the argv, in this case, text.txt\r\n# the 'w' is giving a mode to the file we are opening\r\n# the w mode is allowing us to write into the chosen file\r\n\r\ntarget = open(filename, 'w')\r\n\r\nprint(\"Truncating the file. 
Goodbye!\")\r\ntarget.truncate()\r\n\r\nprint(\"Now I'm going to ask you for three lines.\")\r\n\r\nline1 = input(\"line 1: \")\r\nline2 = input(\"line 2: \")\r\nline3 = input(\"line 3: \")\r\n\r\nprint(\"I'm going to write these to the file.\")\r\n\r\ntarget.write(line1)\r\ntarget.write(\"\\n\")\r\ntarget.write(line2)\r\ntarget.write(\"\\n\")\r\ntarget.write(line3)\r\ntarget.write(\"\\n\")\r\n\r\nprint(\"And finally, we close it.\")\r\ntarget.close()\r\n","repo_name":"atamanbillor/Learning-Python-The-Hard-Way","sub_path":"ex16.py","file_name":"ex16.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70804302807","text":"import time\n\nimport win32api\nfrom threading import Thread\n\nimport win32con\n\n\nclass Bot:\n\n def __init__(self, power_key):\n self._power_key = power_key\n self._listener = Listener()\n self._macros = []\n self._stop = True\n\n def start(self):\n self._stop = False\n thread = Thread(target=self._run)\n thread.start()\n\n def stop(self):\n self._stop = True\n\n def set_single_macro(self, key, func):\n self._listener.set_macro(Macro(key, func, single=True))\n\n def set_cyclical_macro(self, key, func):\n self._listener.set_macro(Macro(key, func))\n\n def set_cyclical_hold_macro(self, key, func):\n self._listener.set_hold_macro(Macro(key, func))\n\n def _run(self):\n down = False\n print('start bot')\n try:\n while not self._stop:\n if win32api.GetAsyncKeyState(self._power_key) < 0 and win32api.GetAsyncKeyState(win32con.VK_CONTROL) < 0 and not down:\n down = True\n if not self._listener.work:\n self._listener.start()\n else:\n self._listener.stop()\n if win32api.GetAsyncKeyState(self._power_key) >= 0:\n down = False\n time.sleep(0.001)\n finally:\n self._listener.stop()\n print('stop bot')\n\n\nclass Listener:\n\n def __init__(self):\n self._stop = True\n self._macros = []\n self._hold_macros = []\n self._keys_down = {}\n\n def start(self):\n self._stop = False\n thread = Thread(target=self._run)\n thread.start()\n\n def stop(self):\n self._stop = True\n\n def set_macro(self, macro):\n self._macros.append(macro)\n\n def set_hold_macro(self, macro):\n self._hold_macros.append(macro)\n\n @property\n def work(self):\n return not self._stop\n\n def _run(self):\n print('start listener')\n try:\n while not self._stop:\n for macro in self._macros:\n if win32api.GetAsyncKeyState(macro.key) < 0 and not self._keys_down[macro.key]:\n self._keys_down[macro.key] = True\n if not macro.work:\n macro.start()\n else:\n macro.stop()\n if win32api.GetAsyncKeyState(macro.key) >= 0:\n self._keys_down[macro.key] = False\n for macro in self._hold_macros:\n if win32api.GetAsyncKeyState(macro.key) < 0 and not self._keys_down[macro.key]:\n self._keys_down[macro.key] = True\n if not macro.work:\n macro.start()\n if win32api.GetAsyncKeyState(macro.key) >= 0:\n self._keys_down[macro.key] = False\n macro.stop()\n time.sleep(0.001)\n finally:\n for macro in self._macros:\n macro.stop()\n for macro in self._hold_macros:\n macro.stop()\n print('stop listener')\n\n\nclass Macro:\n\n def __init__(self, key, func, single=False):\n self.key = key\n self._stop = True\n self._func = func\n self._single = single\n self.t = 0\n\n @property\n def work(self):\n return not self._stop\n\n def start(self):\n self._stop = False\n thread = Thread(target=self._run)\n thread.start()\n\n def stop(self):\n self._stop = True\n\n def _run(self):\n print('start macro')\n if self._single:\n self._func()\n self._stop = 
True\n else:\n while not self._stop:\n self._func()\n print('stop macro')\n","repo_name":"IAmTomaton/easybot","sub_path":"easybot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40676989087","text":"import logging\nimport re\nfrom datetime import datetime\n\nfrom odoo import api, fields, models, registry\nfrom odoo.exceptions import UserError\nfrom odoo.fields import first\nfrom odoo.osv import expression\nfrom odoo.tools import float_is_zero, frozendict\nfrom odoo.tools.translate import _\n\nfrom odoo.addons.base_iban.models.res_partner_bank import pretty_iban\n\nfrom . import efattura\n\n_logger = logging.getLogger(__name__)\n\nWT_CODES_MAPPING = {\n \"RT01\": \"ritenuta\",\n \"RT02\": \"ritenuta\",\n \"RT03\": \"inps\",\n \"RT04\": \"enasarco\",\n \"RT05\": \"enpam\",\n \"RT06\": \"other\",\n}\n\n\nclass WizardImportFatturapa(models.TransientModel):\n _name = \"wizard.import.fatturapa\"\n _description = \"Import E-bill\"\n\n e_invoice_detail_level = fields.Selection(\n [\n (\"0\", \"Minimum\"),\n (\"1\", \"Tax rate\"),\n (\"2\", \"Maximum\"),\n ],\n string=\"E-bills Detail Level\",\n help=\"Minimum level: Bill is created with no lines; \"\n \"User will have to create them, according to what specified in \"\n \"the electronic bill.\\n\"\n \"Tax rate level: Rate level: an invoice line is created for each \"\n \"rate present in the electronic invoice\\n\"\n \"Maximum level: every line contained in the electronic bill \"\n \"will create a line in the bill.\",\n required=True,\n )\n price_decimal_digits = fields.Integer(\n \"Prices decimal digits\",\n required=True,\n help=\"Decimal digits used in prices computation. This is needed to correctly \"\n \"import e-invoices with many decimal digits, not being forced to \"\n \"increase decimal digits of all your prices. \"\n 'Otherwise, increase \"Product Price\" precision.',\n )\n quantity_decimal_digits = fields.Integer(\n \"Quantities decimal digits\",\n required=True,\n help='Decimal digits used for quantity field. See \"Prices decimal digits\".',\n )\n discount_decimal_digits = fields.Integer(\n \"Discounts decimal digits\",\n required=True,\n help='Decimal digits used for discount field. 
See \"Prices decimal digits\".',\n )\n\n def _get_selected_model(self):\n context = self.env.context\n model_name = context.get(\"active_model\")\n return model_name\n\n def _get_selected_records(self):\n context = self.env.context\n ids = context.get(\"active_ids\", [])\n model_name = self._get_selected_model()\n attachments = self.env[model_name].browse(ids)\n return attachments\n\n def _check_attachment(self, attachment):\n if attachment.in_invoice_ids:\n raise UserError(_(\"File %s is linked to bills yet.\", attachment.name))\n\n def _extract_supplier(self, fatturapa_attachment):\n return fatturapa_attachment.xml_supplier_id\n\n @api.model\n def default_get(self, fields_list):\n res = super().default_get(fields_list)\n res[\"price_decimal_digits\"] = self.env[\"decimal.precision\"].precision_get(\n \"Product Price\"\n )\n res[\"quantity_decimal_digits\"] = self.env[\"decimal.precision\"].precision_get(\n \"Product Unit of Measure\"\n )\n res[\"discount_decimal_digits\"] = self.env[\"decimal.precision\"].precision_get(\n \"Discount\"\n )\n res[\"e_invoice_detail_level\"] = \"2\"\n\n fatturapa_attachments = self._get_selected_records()\n partners = self.env[\"res.partner\"].browse()\n for fatturapa_attachment in fatturapa_attachments:\n self._check_attachment(fatturapa_attachment)\n partners |= self._extract_supplier(fatturapa_attachment)\n if len(partners) == 1:\n res[\"e_invoice_detail_level\"] = partners[0].e_invoice_detail_level\n if partners[0].e_invoice_price_decimal_digits >= 0:\n res[\"price_decimal_digits\"] = partners[\n 0\n ].e_invoice_price_decimal_digits\n if partners[0].e_invoice_quantity_decimal_digits >= 0:\n res[\"quantity_decimal_digits\"] = partners[\n 0\n ].e_invoice_quantity_decimal_digits\n if partners[0].e_invoice_discount_decimal_digits >= 0:\n res[\"discount_decimal_digits\"] = partners[\n 0\n ].e_invoice_discount_decimal_digits\n return res\n\n def CountryByCode(self, CountryCode):\n country_model = self.env[\"res.country\"]\n return country_model.search([(\"code\", \"=\", CountryCode)])\n\n def ProvinceByCode(self, provinceCode):\n province_model = self.env[\"res.country.state\"]\n return province_model.search(\n [(\"code\", \"=\", provinceCode), (\"country_id.code\", \"=\", \"IT\")]\n )\n\n def reset_inconsistencies(self):\n \"\"\"\n Clean all existing inconsistencies.\n Note that inconsistencies are in all environments.\n \"\"\"\n for env in self.env.all.envs:\n env_context = dict(env.context)\n env_context.pop(\"inconsistencies\", None)\n env.context = frozendict(env_context)\n\n def get_inconsistencies(self):\n \"\"\"\n Get all existing inconsistencies.\n \"\"\"\n return self.env.context.get(\"inconsistencies\", \"\")\n\n def log_inconsistency(self, message):\n \"\"\"\n Add `message` to existing inconsistencies.\n Note that inconsistencies are in all environments.\n \"\"\"\n inconsistencies = self.get_inconsistencies()\n if message not in inconsistencies:\n if inconsistencies:\n inconsistencies += \"\\n\"\n inconsistencies += message\n # we can't set\n # self = self.with_context(inconsistencies=inconsistencies)\n # because self is a locale variable.\n # Environments are weakly referenced,\n # so they might disappear if they are no more referenced.\n # Save the inconsistencies in all the environments\n # to avoid losing them.\n for env in self.env.all.envs:\n env_context = dict(env.context)\n env_context.setdefault(\"inconsistencies\", inconsistencies)\n env.context = frozendict(env_context)\n\n def check_partner_base_data(self, partner_id, 
DatiAnagrafici):\n partner = self.env[\"res.partner\"].browse(partner_id)\n if (\n DatiAnagrafici.Anagrafica.Denominazione\n and partner.name != DatiAnagrafici.Anagrafica.Denominazione\n ):\n self.log_inconsistency(\n _(\n \"Company Name field contains '%(name)s'.\"\n \" Your System contains '%(partner)s'\"\n )\n % {\n \"name\": DatiAnagrafici.Anagrafica.Denominazione,\n \"partner\": partner.name,\n }\n )\n if (\n DatiAnagrafici.Anagrafica.Nome\n and partner.firstname != DatiAnagrafici.Anagrafica.Nome\n ):\n self.log_inconsistency(\n _(\n \"Name field contains '%(name)s'.\"\n \" Your System contains '%(firstname)s'\"\n )\n % {\n \"name\": DatiAnagrafici.Anagrafica.Nome,\n \"firstname\": partner.firstname,\n }\n )\n if (\n DatiAnagrafici.Anagrafica.Cognome\n and partner.lastname != DatiAnagrafici.Anagrafica.Cognome\n ):\n self.log_inconsistency(\n _(\n \"Surname field contains '%(surname)s'.\"\n \" Your System contains '%(lastname)s'\"\n )\n % {\n \"surname\": DatiAnagrafici.Anagrafica.Cognome,\n \"lastname\": partner.lastname,\n }\n )\n\n def getPartnerBase(self, DatiAnagrafici): # noqa: C901\n if not DatiAnagrafici:\n return False\n partner_model = self.env[\"res.partner\"]\n cf = DatiAnagrafici.CodiceFiscale or False\n vat = False\n if DatiAnagrafici.IdFiscaleIVA:\n id_paese = DatiAnagrafici.IdFiscaleIVA.IdPaese.upper()\n id_codice = re.sub(r\"\\W+\", \"\", DatiAnagrafici.IdFiscaleIVA.IdCodice).upper()\n # Format Italian VAT ID to always have 11 char\n # to avoid validation error when creating the given partner\n if id_paese == \"IT\" and not id_codice.startswith(\"IT\"):\n vat = \"IT{}\".format(id_codice.rjust(11, \"0\")[:11])\n # XXX maybe San Marino needs special formatting too?\n else:\n vat = id_codice\n partners = partner_model\n res_partner_rule = self.sudo().env.ref(\n \"base.res_partner_rule\", raise_if_not_found=False\n )\n if vat:\n domain = [(\"vat\", \"=\", vat)]\n if (\n self.env.context.get(\"from_attachment\")\n and res_partner_rule\n and res_partner_rule.active\n ):\n att = self.env.context.get(\"from_attachment\")\n domain.extend(\n [\n \"|\",\n (\"company_id\", \"child_of\", att.company_id.id),\n (\"company_id\", \"=\", False),\n ]\n )\n partners = partner_model.search(domain)\n if not partners and cf:\n domain = [(\"fiscalcode\", \"=\", cf)]\n if (\n self.env.context.get(\"from_attachment\")\n and res_partner_rule\n and res_partner_rule.active\n ):\n att = self.env.context.get(\"from_attachment\")\n domain.extend(\n [\n \"|\",\n (\"company_id\", \"child_of\", att.company_id.id),\n (\"company_id\", \"=\", False),\n ]\n )\n partners = partner_model.search(domain)\n commercial_partner_id = False\n if len(partners) > 1:\n for partner in partners:\n if (\n commercial_partner_id\n and partner.commercial_partner_id.id != commercial_partner_id\n ):\n self.log_inconsistency(\n _(\n \"Two distinct partners with \"\n \"VAT number %(vat)s or Fiscal Code %(fiscalcode)s already \"\n \"present in db.\"\n )\n % {\"vat\": vat, \"fiscalcode\": cf}\n )\n return False\n commercial_partner_id = partner.commercial_partner_id.id\n if partners:\n if not commercial_partner_id:\n commercial_partner_id = partners[0].commercial_partner_id.id\n self.check_partner_base_data(commercial_partner_id, DatiAnagrafici)\n return commercial_partner_id\n else:\n # partner to be created\n country_id = False\n if DatiAnagrafici.IdFiscaleIVA:\n CountryCode = DatiAnagrafici.IdFiscaleIVA.IdPaese\n countries = self.CountryByCode(CountryCode)\n if countries:\n country_id = countries[0].id\n else:\n raise 
UserError(\n _(\"Country Code %s not found in system.\") % CountryCode\n )\n vals = {\n \"vat\": vat,\n \"fiscalcode\": cf,\n \"is_company\": (\n DatiAnagrafici.Anagrafica.Denominazione and True or False\n ),\n \"eori_code\": DatiAnagrafici.Anagrafica.CodEORI or \"\",\n \"country_id\": country_id,\n }\n if DatiAnagrafici.Anagrafica.Nome:\n vals[\"firstname\"] = DatiAnagrafici.Anagrafica.Nome\n if DatiAnagrafici.Anagrafica.Cognome:\n vals[\"lastname\"] = DatiAnagrafici.Anagrafica.Cognome\n if DatiAnagrafici.Anagrafica.Denominazione:\n vals[\"name\"] = DatiAnagrafici.Anagrafica.Denominazione\n\n return partner_model.create(vals).id\n\n def getCedPrest(self, cedPrest):\n partner_model = self.env[\"res.partner\"]\n # Assume that any non-IT VAT coming from SdI is correct\n partner_id = self.with_context(\n fatturapa_in_skip_no_it_vat_check=True,\n ).getPartnerBase(cedPrest.DatiAnagrafici)\n no_contact_update = False\n if partner_id:\n no_contact_update = partner_model.browse(\n partner_id\n ).electronic_invoice_no_contact_update\n fiscalPosModel = self.env[\"fatturapa.fiscal_position\"]\n if partner_id and not no_contact_update:\n partner_company_id = partner_model.browse(partner_id).company_id.id\n vals = {\n \"street\": \" \".join(\n map(\n str,\n filter(\n None, (cedPrest.Sede.Indirizzo, cedPrest.Sede.NumeroCivico)\n ),\n )\n ),\n \"zip\": cedPrest.Sede.CAP,\n \"city\": cedPrest.Sede.Comune,\n \"register\": cedPrest.DatiAnagrafici.AlboProfessionale or \"\",\n }\n if cedPrest.DatiAnagrafici.ProvinciaAlbo:\n ProvinciaAlbo = cedPrest.DatiAnagrafici.ProvinciaAlbo\n prov = self.ProvinceByCode(ProvinciaAlbo)\n if not prov:\n self.log_inconsistency(\n _(\"Register Province ( %s ) not present \" \"in your system\")\n % ProvinciaAlbo\n )\n else:\n vals[\"register_province\"] = prov[0].id\n if cedPrest.Sede.Provincia:\n Provincia = cedPrest.Sede.Provincia\n prov_sede = self.ProvinceByCode(Provincia)\n if not prov_sede:\n self.log_inconsistency(\n _(\"Province ( %s ) not present in your system\") % Provincia\n )\n else:\n vals[\"state_id\"] = prov_sede[0].id\n\n vals[\"register_code\"] = cedPrest.DatiAnagrafici.NumeroIscrizioneAlbo\n vals[\"register_regdate\"] = cedPrest.DatiAnagrafici.DataIscrizioneAlbo\n\n if cedPrest.DatiAnagrafici.RegimeFiscale:\n rfPos = cedPrest.DatiAnagrafici.RegimeFiscale\n FiscalPos = fiscalPosModel.search([(\"code\", \"=\", rfPos)])\n if not FiscalPos:\n raise UserError(\n _(\"Tax Regime %s not present in your system.\") % rfPos\n )\n else:\n vals[\"register_fiscalpos\"] = FiscalPos[0].id\n\n if cedPrest.IscrizioneREA:\n REA = cedPrest.IscrizioneREA\n offices = self.ProvinceByCode(REA.Ufficio)\n rea_nr = REA.NumeroREA\n\n if not offices:\n office_id = False\n self.log_inconsistency(\n _(\n \"REA Office Province Code ( %s ) not present in \"\n \"your system\"\n )\n % REA.Ufficio\n )\n else:\n office_id = offices[0].id\n vals[\"rea_office\"] = office_id\n\n rea_domain = [\n (\"rea_code\", \"=\", rea_nr),\n (\"company_id\", \"=\", partner_company_id),\n (\"id\", \"!=\", partner_id),\n ]\n if office_id:\n rea_domain.append((\"rea_office\", \"=\", office_id))\n rea_partners = partner_model.search(rea_domain)\n if rea_partners:\n rea_names = \", \".join(rea_partners.mapped(\"name\"))\n p_name = partner_model.browse(partner_id).name\n self.log_inconsistency(\n _(\n \"Current invoice is from {} with REA Code\"\n \" {}. Yet it seems that partners {} have the same\"\n \" REA Code. 
This code should be unique; please fix\"\n \" it.\"\n ).format(p_name, rea_nr, rea_names)\n )\n else:\n vals[\"rea_code\"] = REA.NumeroREA\n\n vals[\"rea_capital\"] = REA.CapitaleSociale or 0.0\n vals[\"rea_member_type\"] = REA.SocioUnico or False\n vals[\"rea_liquidation_state\"] = REA.StatoLiquidazione or False\n\n if cedPrest.Contatti:\n if cedPrest.Contatti.Telefono:\n vals[\"phone\"] = cedPrest.Contatti.Telefono\n if cedPrest.Contatti.Email:\n vals[\"email\"] = cedPrest.Contatti.Email\n partner_model.browse(partner_id).write(vals)\n return partner_id\n\n def getCarrirerPartner(self, Carrier):\n partner_model = self.env[\"res.partner\"]\n partner_id = self.getPartnerBase(Carrier.DatiAnagraficiVettore)\n no_contact_update = False\n if partner_id:\n no_contact_update = partner_model.browse(\n partner_id\n ).electronic_invoice_no_contact_update\n if partner_id and not no_contact_update:\n vals = {\n \"license_number\": Carrier.DatiAnagraficiVettore.NumeroLicenzaGuida\n or \"\",\n }\n partner_model.browse(partner_id).write(vals)\n return partner_id\n\n def _prepare_generic_line_data(self, line):\n retLine = {}\n account_taxes = self.get_account_taxes(line.AliquotaIVA, line.Natura)\n if account_taxes:\n retLine[\"tax_ids\"] = [fields.Command.set([account_taxes[0].id])]\n else:\n retLine[\"tax_ids\"] = [fields.Command.clear()]\n return retLine\n\n def _get_default_product_taxes(self, tax_field_name):\n \"\"\"Return default tax for field `product.product.`.\"\"\"\n company = self.env.company\n default_taxes_ids = self.env[\"ir.default\"].get(\n \"product.product\",\n tax_field_name,\n company_id=company.id,\n )\n tax_model = self.env[\"account.tax\"]\n if default_taxes_ids is not None:\n default_taxes = tax_model.browse(default_taxes_ids)\n default_tax = first(default_taxes)\n else:\n default_tax = tax_model.browse()\n return default_tax\n\n def _get_account_tax_domain(self, amount):\n return [\n (\"type_tax_use\", \"=\", \"purchase\"),\n (\"amount\", \"=\", amount),\n ]\n\n def _get_zero_kind_account_tax(self, Natura):\n tax_amount = 0\n tax_domain = self._get_account_tax_domain(tax_amount)\n tax_domain = expression.AND(\n [\n tax_domain,\n [\n (\"kind_id.code\", \"=\", Natura),\n ],\n ]\n )\n account_taxes = self.env[\"account.tax\"].search(\n tax_domain,\n order=\"sequence\",\n )\n account_tax = first(account_taxes)\n if not account_taxes:\n self.log_inconsistency(\n _(\n \"No tax with percentage \"\n \"%(percentage)s and nature %(nature)s found. Please configure this tax.\",\n percentage=tax_amount,\n nature=Natura,\n )\n )\n elif len(account_taxes) > 1:\n self.log_inconsistency(\n _(\n \"Too many taxes with percentage \"\n \"%(percentage)s and nature %(nature)s found. 
\"\n \"Tax %(tax)s with lower priority has \"\n \"been set on invoice lines.\",\n percentage=tax_amount,\n nature=Natura,\n tax=account_tax.description,\n )\n )\n return account_tax\n\n def _get_amount_account_tax(self, tax_amount):\n tax_domain = self._get_account_tax_domain(tax_amount)\n tax_domain = expression.AND(\n [\n tax_domain,\n [\n (\"price_include\", \"=\", False),\n # partially deductible VAT must be set by user\n (\"children_tax_ids\", \"=\", False),\n ],\n ]\n )\n account_taxes = self.env[\"account.tax\"].search(\n tax_domain,\n order=\"sequence\",\n )\n account_tax = first(account_taxes)\n if not account_taxes:\n self.log_inconsistency(\n _(\n \"XML contains tax with percentage '%s' \"\n \"but it does not exist in your system\",\n tax_amount,\n )\n )\n # check if there are multiple taxes with\n # same percentage\n elif len(account_taxes) > 1:\n # just logging because this is an usual case: see split payment\n _logger.warning(\n _(\n \"Too many taxes with percentage equals \"\n \"to '%s'.\\nFix it if required\",\n tax_amount,\n )\n )\n # if there are multiple taxes with same percentage\n # and there is a default tax with this percentage,\n # set taxes list equal to supplier_taxes_id\n default_tax = self._get_default_product_taxes(\"supplier_taxes_id\")\n if default_tax and default_tax.amount == tax_amount:\n account_tax = default_tax\n return account_tax\n\n def get_account_taxes(self, AliquotaIVA, Natura):\n tax_amount = float(AliquotaIVA)\n if tax_amount == 0.0 and Natura:\n account_tax = self._get_zero_kind_account_tax(Natura)\n else:\n account_tax = self._get_amount_account_tax(tax_amount)\n return account_tax\n\n def get_line_product(self, line, partner):\n product = self.env[\"product.product\"].browse()\n\n # Search the product using supplier infos\n supplier_info = self.env[\"product.supplierinfo\"]\n partner_supplier_info = supplier_info.search(\n [\n (\"partner_id\", \"=\", partner.id),\n ]\n )\n found_supplier_infos = supplier_info.browse()\n if len(line.CodiceArticolo or []) == 1:\n supplier_code = line.CodiceArticolo[0].CodiceValore\n found_supplier_infos = supplier_info.search(\n [\n (\"id\", \"in\", partner_supplier_info.ids),\n (\"product_code\", \"=\", supplier_code),\n ]\n )\n if not found_supplier_infos:\n supplier_name = line.Descrizione\n found_supplier_infos = supplier_info.search(\n [\n (\"id\", \"in\", partner_supplier_info.ids),\n (\"product_name\", \"=\", supplier_name),\n ]\n )\n\n if found_supplier_infos:\n products = found_supplier_infos.mapped(\"product_id\")\n if len(products) == 1:\n product = first(products)\n else:\n templates = found_supplier_infos.mapped(\"product_tmpl_id\")\n if len(templates) == 1:\n product = templates.product_variant_id\n\n if not product and partner.e_invoice_default_product_id:\n product = partner.e_invoice_default_product_id\n return product\n\n def adjust_accounting_data(self, product, line_vals):\n account = self.get_credit_account(product)\n line_vals[\"account_id\"] = account.id\n\n new_tax = None\n if len(product.product_tmpl_id.supplier_taxes_id) == 1:\n new_tax = product.product_tmpl_id.supplier_taxes_id[0]\n elif len(account.tax_ids) == 1:\n new_tax = account.tax_ids[0]\n line_tax = self.env[\"account.tax\"]\n if line_vals.get(\"tax_ids\") and line_vals[\"tax_ids\"][0] == fields.Command.SET:\n line_tax_id = line_vals[\"tax_ids\"][0][2][0]\n line_tax = self.env[\"account.tax\"].browse(line_tax_id)\n if new_tax and line_tax and new_tax != line_tax:\n if new_tax._get_tax_amount() != line_tax._get_tax_amount():\n 
self.log_inconsistency(\n _(\n \"XML contains tax %(line_tax)s. \"\n \"Product %(product)s has tax %(new_tax)s. Using \"\n \"the XML one\"\n )\n % {\n \"line_tax\": line_tax.name,\n \"product\": product.name,\n \"new_tax\": new_tax.name,\n }\n )\n else:\n # If product has the same amount of the one in XML,\n # I use it. Typical case: 22% det 50%\n line_vals[\"tax_ids\"] = [(6, 0, [new_tax.id])]\n\n # move_line.tax_ids\n # move_line.name\n # move_line.sequence\n # move_line.account_id\n # move_line.price_unit\n # move_line.quantity\n def _prepareInvoiceLineAliquota(self, credit_account_id, line, nline):\n retLine = {}\n account_taxes = self.get_account_taxes(line.AliquotaIVA, line.Natura)\n if account_taxes:\n retLine[\"tax_ids\"] = [fields.Command.set([account_taxes[0].id])]\n else:\n retLine[\"tax_ids\"] = [fields.Command.clear()]\n\n retLine.update(\n {\n \"name\": \"Riepilogo Aliquota {}\".format(line.AliquotaIVA),\n \"sequence\": nline,\n \"account_id\": credit_account_id,\n \"price_unit\": float(abs(line.ImponibileImporto)),\n }\n )\n return retLine\n\n # move_line.name\n # move_line.sequence\n # move_line.account_id\n # move_line.price_unit\n # move_line.quantity\n # move_line.discount\n # move_line.admin_ref\n # move_line.invoice_line_tax_wt_ids\n def _prepareInvoiceLine(self, credit_account_id, line, wt_founds=False):\n retLine = self._prepare_generic_line_data(line)\n retLine.update(\n {\n \"name\": line.Descrizione,\n \"sequence\": int(line.NumeroLinea),\n \"account_id\": credit_account_id,\n \"price_unit\": float(line.PrezzoUnitario),\n \"display_type\": \"product\",\n }\n )\n if line.Quantita is None:\n retLine[\"quantity\"] = 1.0\n else:\n retLine[\"quantity\"] = float(line.Quantita)\n if (\n float(line.PrezzoUnitario)\n and line.Quantita\n and float(line.Quantita)\n and line.ScontoMaggiorazione # Quantita not required\n ):\n retLine[\"discount\"] = self._computeDiscount(line)\n if line.RiferimentoAmministrazione:\n retLine[\"admin_ref\"] = line.RiferimentoAmministrazione\n if wt_founds and line.Ritenuta:\n retLine[\"invoice_line_tax_wt_ids\"] = [(6, 0, [x.id for x in wt_founds])]\n\n return retLine\n\n def _prepareRelDocsLine(self, invoice_id, line, doc_type):\n res = []\n lineref = line.RiferimentoNumeroLinea or False\n IdDoc = line.IdDocumento or \"Error\"\n Data = line.Data or False\n NumItem = line.NumItem or \"\"\n Code = line.CodiceCommessaConvenzione or \"\"\n Cig = line.CodiceCIG or \"\"\n Cup = line.CodiceCUP or \"\"\n invoice_lineid = False\n if lineref:\n for numline in lineref:\n invoice_lineid = False\n invoice_line_model = self.env[\"account.move.line\"]\n invoice_lines = invoice_line_model.search(\n [\n (\"move_id\", \"=\", invoice_id),\n (\"sequence\", \"=\", int(numline)),\n ]\n )\n if invoice_lines:\n invoice_lineid = invoice_lines[0].id\n val = {\n \"type\": doc_type,\n \"name\": IdDoc,\n \"lineRef\": numline,\n \"invoice_line_id\": invoice_lineid,\n \"invoice_id\": invoice_id,\n \"date\": Data,\n \"numitem\": NumItem,\n \"code\": Code,\n \"cig\": Cig,\n \"cup\": Cup,\n }\n res.append(val)\n else:\n val = {\n \"type\": doc_type,\n \"name\": IdDoc,\n \"invoice_line_id\": invoice_lineid,\n \"invoice_id\": invoice_id,\n \"date\": Data,\n \"numitem\": NumItem,\n \"code\": Code,\n \"cig\": Cig,\n \"cup\": Cup,\n }\n res.append(val)\n return res\n\n def _prepareWelfareLine(self, invoice_id, line):\n TipoCassa = line.TipoCassa or False\n AlCassa = line.AlCassa and (float(line.AlCassa) / 100) or None\n ImportoContributoCassa = (\n line.ImportoContributoCassa and 
float(line.ImportoContributoCassa) or None\n )\n ImponibileCassa = line.ImponibileCassa and float(line.ImponibileCassa) or None\n AliquotaIVA = line.AliquotaIVA and (float(line.AliquotaIVA) / 100) or None\n Ritenuta = line.Ritenuta or \"\"\n Natura = line.Natura or False\n kind_id = False\n if Natura:\n kind = self.env[\"account.tax.kind\"].search([(\"code\", \"=\", Natura)])\n if not kind:\n self.log_inconsistency(_(\"Tax kind %s not found\") % Natura)\n else:\n kind_id = kind[0].id\n\n RiferimentoAmministrazione = line.RiferimentoAmministrazione or \"\"\n WelfareTypeModel = self.env[\"welfare.fund.type\"]\n if not TipoCassa:\n raise UserError(_(\"Welfare Fund is not defined.\"))\n WelfareType = WelfareTypeModel.search([(\"name\", \"=\", TipoCassa)])\n\n res = {\n \"welfare_rate_tax\": AlCassa,\n \"welfare_amount_tax\": ImportoContributoCassa,\n \"welfare_taxable\": ImponibileCassa,\n \"welfare_Iva_tax\": AliquotaIVA,\n \"subjected_withholding\": Ritenuta,\n \"kind_id\": kind_id,\n \"pa_line_code\": RiferimentoAmministrazione,\n \"invoice_id\": invoice_id,\n }\n if not WelfareType:\n raise UserError(\n _(\"Welfare Fund %s not present in your system.\") % TipoCassa\n )\n else:\n res[\"name\"] = WelfareType[0].id\n\n return res\n\n def _prepareDiscRisePriceLine(self, line_id, line):\n Tipo = line.Tipo or False\n Percentuale = line.Percentuale and float(line.Percentuale) or 0.0\n Importo = line.Importo and float(line.Importo) or 0.0\n res = {\n \"percentage\": Percentuale,\n \"amount\": Importo,\n self.env.context.get(\"drtype\"): line_id,\n }\n res[\"name\"] = Tipo\n\n return res\n\n def _computeDiscount(self, DettaglioLinea):\n line_total = float(DettaglioLinea.PrezzoTotale)\n line_unit = line_total / float(DettaglioLinea.Quantita)\n discount = (1 - (line_unit / float(DettaglioLinea.PrezzoUnitario))) * 100.0\n return discount\n\n def _addGlobalDiscount(self, invoice_id, DatiGeneraliDocumento):\n discount = 0.0\n if (\n DatiGeneraliDocumento.ScontoMaggiorazione\n and self.e_invoice_detail_level == \"2\"\n ):\n invoice = self.env[\"account.move\"].browse(invoice_id)\n for DiscRise in DatiGeneraliDocumento.ScontoMaggiorazione:\n if DiscRise.Percentuale:\n amount = invoice.amount_total * (float(DiscRise.Percentuale) / 100)\n if DiscRise.Tipo == \"SC\":\n discount -= amount\n elif DiscRise.Tipo == \"MG\":\n discount += amount\n elif DiscRise.Importo:\n if DiscRise.Tipo == \"SC\":\n discount -= float(DiscRise.Importo)\n elif DiscRise.Tipo == \"MG\":\n discount += float(DiscRise.Importo)\n company = invoice.company_id\n global_discount_product = company.sconto_maggiorazione_product_id\n credit_account = self.get_credit_account(\n product=global_discount_product,\n )\n line_vals = {\n \"move_id\": invoice_id,\n \"name\": _(\"Global bill discount from document general data\"),\n \"account_id\": credit_account.id,\n \"price_unit\": discount,\n \"quantity\": 1,\n }\n if global_discount_product:\n line_vals[\"product_id\"] = global_discount_product.id\n line_vals[\"name\"] = global_discount_product.name\n self.adjust_accounting_data(global_discount_product, line_vals)\n else:\n line_vals[\"tax_ids\"] = [fields.Command.clear()]\n self.env[\"account.move.line\"].with_context(\n check_move_validity=False\n ).create(line_vals)\n return True\n\n def _createPaymentsLine(self, payment_id, line, partner_id, invoice):\n details = line.DettaglioPagamento or False\n if details:\n PaymentModel = self.env[\"fatturapa.payment.detail\"]\n PaymentMethodModel = self.env[\"fatturapa.payment_method\"]\n BankModel = 
self.env[\"res.bank\"]\n PartnerBankModel = self.env[\"res.partner.bank\"]\n for dline in details:\n method = PaymentMethodModel.search(\n [(\"code\", \"=\", dline.ModalitaPagamento)]\n )\n if not method:\n raise UserError(\n _(\"Payment method %s is not defined in your system.\")\n % dline.ModalitaPagamento\n )\n val = {\n \"recipient\": dline.Beneficiario,\n \"fatturapa_pm_id\": method[0].id,\n \"payment_term_start\": dline.DataRiferimentoTerminiPagamento\n or False,\n \"payment_days\": dline.GiorniTerminiPagamento or 0,\n \"payment_due_date\": dline.DataScadenzaPagamento or False,\n \"payment_amount\": dline.ImportoPagamento or 0.0,\n \"post_office_code\": dline.CodUfficioPostale or \"\",\n \"recepit_surname\": dline.CognomeQuietanzante or \"\",\n \"recepit_name\": dline.NomeQuietanzante or \"\",\n \"recepit_cf\": dline.CFQuietanzante or \"\",\n \"recepit_title\": dline.TitoloQuietanzante or \"1\",\n \"payment_bank_name\": dline.IstitutoFinanziario or \"\",\n \"payment_bank_iban\": dline.IBAN or \"\",\n \"payment_bank_abi\": dline.ABI or \"\",\n \"payment_bank_cab\": dline.CAB or \"\",\n \"payment_bank_bic\": dline.BIC or \"\",\n \"payment_bank\": False,\n \"prepayment_discount\": dline.ScontoPagamentoAnticipato or 0.0,\n \"max_payment_date\": dline.DataLimitePagamentoAnticipato or False,\n \"penalty_amount\": dline.PenalitaPagamentiRitardati or 0.0,\n \"penalty_date\": dline.DataDecorrenzaPenale or False,\n \"payment_code\": dline.CodicePagamento or \"\",\n \"payment_data_id\": payment_id,\n }\n bank = False\n payment_bank_id = False\n if dline.BIC:\n banks = BankModel.search([(\"bic\", \"=\", dline.BIC.strip())])\n if not banks:\n if not dline.IstitutoFinanziario:\n self.log_inconsistency(\n _(\n \"Name of Bank with BIC '%s' is not set.\"\n \" Can't create bank\"\n )\n % dline.BIC\n )\n else:\n bank = BankModel.create(\n {\n \"name\": dline.IstitutoFinanziario,\n \"bic\": dline.BIC,\n }\n )\n else:\n bank = banks[0]\n if dline.IBAN:\n iban = dline.IBAN.strip()\n SearchDom = [\n (\"acc_number\", \"=\", pretty_iban(iban)),\n (\"partner_id\", \"=\", partner_id),\n ]\n payment_bank_id = False\n payment_banks = PartnerBankModel.search(SearchDom)\n if not payment_banks and not bank:\n self.log_inconsistency(\n _(\n \"BIC is required and not exist in Xml\\n\"\n \"Curr bank data is: \\n\"\n \"IBAN: %(iban)s\\n\"\n \"Bank Name: %(bank)s\\n\"\n )\n % {\n \"iban\": iban or \"\",\n \"bank\": dline.IstitutoFinanziario or \"\",\n }\n )\n elif not payment_banks and bank:\n existing_account = PartnerBankModel.search(\n [\n (\"acc_number\", \"=\", iban),\n (\"company_id\", \"=\", invoice.company_id.id),\n ]\n )\n if existing_account:\n self.log_inconsistency(\n _(\"Bank account %s already exists\") % iban\n )\n else:\n payment_bank_id = PartnerBankModel.create(\n {\n \"acc_number\": iban,\n \"partner_id\": partner_id,\n \"bank_id\": bank.id,\n \"bank_name\": dline.IstitutoFinanziario or bank.name,\n \"bank_bic\": dline.BIC or bank.bic,\n }\n ).id\n if payment_banks:\n payment_bank_id = payment_banks[0].id\n\n if payment_bank_id:\n val[\"payment_bank\"] = payment_bank_id\n PaymentModel.create(val)\n return True\n\n # TODO sul partner?\n def set_StabileOrganizzazione(self, CedentePrestatore, invoice):\n if CedentePrestatore.StabileOrganizzazione:\n invoice.efatt_stabile_organizzazione_indirizzo = (\n CedentePrestatore.StabileOrganizzazione.Indirizzo\n )\n invoice.efatt_stabile_organizzazione_civico = (\n CedentePrestatore.StabileOrganizzazione.NumeroCivico\n )\n invoice.efatt_stabile_organizzazione_cap 
= (\n CedentePrestatore.StabileOrganizzazione.CAP\n )\n invoice.efatt_stabile_organizzazione_comune = (\n CedentePrestatore.StabileOrganizzazione.Comune\n )\n invoice.efatt_stabile_organizzazione_provincia = (\n CedentePrestatore.StabileOrganizzazione.Provincia\n )\n invoice.efatt_stabile_organizzazione_nazione = (\n CedentePrestatore.StabileOrganizzazione.Nazione\n )\n\n def _get_journal_domain(self, company):\n return [\n (\"type\", \"=\", \"purchase\"),\n (\"company_id\", \"=\", company.id),\n ]\n\n def get_journal(self, company):\n domain = self._get_journal_domain(company)\n journal = self.env[\"account.journal\"].search(\n domain,\n limit=1,\n )\n if not journal:\n exception = self._get_missing_journal_exception(company)\n raise exception\n return journal\n\n def _get_missing_journal_exception(self, company):\n return UserError(\n _(\n \"Define a purchase journal for this company: '%(name)s' (id: %(id)s).\",\n name=company.name,\n id=company.id,\n )\n )\n\n def create_e_invoice_line(self, line):\n vals = {\n \"line_number\": int(line.NumeroLinea or 0),\n \"service_type\": line.TipoCessionePrestazione,\n \"name\": line.Descrizione,\n \"qty\": float(line.Quantita or 0),\n \"uom\": line.UnitaMisura,\n \"period_start_date\": line.DataInizioPeriodo,\n \"period_end_date\": line.DataFinePeriodo,\n \"unit_price\": float(line.PrezzoUnitario or 0),\n \"total_price\": float(line.PrezzoTotale or 0),\n \"tax_amount\": float(line.AliquotaIVA or 0),\n \"wt_amount\": line.Ritenuta,\n \"tax_kind\": line.Natura,\n \"admin_ref\": line.RiferimentoAmministrazione,\n }\n einvoiceline = self.env[\"einvoice.line\"].create(vals)\n if line.CodiceArticolo:\n for caline in line.CodiceArticolo:\n self.env[\"fatturapa.article.code\"].create(\n {\n \"name\": caline.CodiceTipo or \"\",\n \"code_val\": caline.CodiceValore or \"\",\n \"e_invoice_line_id\": einvoiceline.id,\n }\n )\n if line.ScontoMaggiorazione:\n for DiscRisePriceLine in line.ScontoMaggiorazione:\n DiscRisePriceVals = self.with_context(\n drtype=\"e_invoice_line_id\"\n )._prepareDiscRisePriceLine(einvoiceline.id, DiscRisePriceLine)\n self.env[\"discount.rise.price\"].create(DiscRisePriceVals)\n if line.AltriDatiGestionali:\n for dato in line.AltriDatiGestionali:\n self.env[\"einvoice.line.other.data\"].create(\n {\n \"name\": dato.TipoDato,\n \"text_ref\": dato.RiferimentoTesto,\n \"num_ref\": float(dato.RiferimentoNumero or 0),\n \"date_ref\": dato.RiferimentoData,\n \"e_invoice_line_id\": einvoiceline.id,\n }\n )\n return einvoiceline\n\n def get_credit_account(self, product=None):\n \"\"\"\n Try to get default credit account for invoice line looking in\n\n 1) product (if provided)\n 2) journal\n 3) company default.\n\n :param product: Product whose expense account will be used\n :return: The account found\n \"\"\"\n credit_account = self.env[\"account.account\"].browse()\n\n # If there is a product, get its default expense account\n if product:\n template = product.product_tmpl_id\n accounts_dict = template.get_product_accounts()\n credit_account = accounts_dict[\"expense\"]\n\n company = self.env.company\n # Search in journal\n journal = self.get_journal(company)\n if not credit_account:\n credit_account = journal.default_account_id\n\n # Search in company defaults\n if not credit_account:\n credit_account = (\n self.env[\"ir.property\"]\n .with_company(company)\n ._get(\"property_account_expense_categ_id\", \"product.category\")\n )\n\n if not credit_account:\n raise UserError(\n _(\n \"Please configure Default Credit Account \"\n \"in Journal 
'{journal}' \"\n \"or check default expense account \"\n \"for company '{company}'.\"\n ).format(\n journal=journal.display_name,\n company=company.display_name,\n )\n )\n\n return credit_account\n\n def _get_currency(self, FatturaBody):\n # currency 2.1.1.2\n currency_code = FatturaBody.DatiGenerali.DatiGeneraliDocumento.Divisa\n currency = self.env[\"res.currency\"].search(\n [\n (\"name\", \"=\", currency_code),\n ]\n )\n if not currency:\n raise UserError(\n _(\n \"No currency found with code %s.\",\n currency_code,\n )\n )\n return currency\n\n def _get_fiscal_document_type(self, FatturaBody):\n fiscal_document_type_code = (\n FatturaBody.DatiGenerali.DatiGeneraliDocumento.TipoDocumento\n )\n if fiscal_document_type_code:\n fiscal_document_type = self.env[\"fiscal.document.type\"].search(\n [\n (\"code\", \"=\", fiscal_document_type_code),\n ],\n limit=1,\n )\n if not fiscal_document_type:\n raise UserError(\n _(\n \"Document type %s not handled.\",\n fiscal_document_type_code,\n )\n )\n else:\n fiscal_document_type = self.env[\"fiscal.document.type\"].browse()\n return fiscal_document_type\n\n def _get_invoice_type(self, fiscal_document_type):\n if fiscal_document_type.code == \"TD04\":\n invoice_type = \"in_refund\"\n else:\n invoice_type = \"in_invoice\"\n return invoice_type\n\n def _get_received_date(self, attachment):\n received_date = attachment.e_invoice_received_date\n if not received_date:\n received_date = attachment.create_date\n received_date = received_date.date()\n return received_date\n\n def _prepare_invoice_values(self, fatt, fatturapa_attachment, FatturaBody, partner):\n company = self.env.company\n currency = self._get_currency(FatturaBody)\n purchase_journal = self.get_journal(company)\n comment = \"\"\n\n # 2.1.1\n fiscal_document_type = self._get_fiscal_document_type(FatturaBody)\n invoice_type = self._get_invoice_type(fiscal_document_type)\n\n # 2.1.1.11\n causLst = FatturaBody.DatiGenerali.DatiGeneraliDocumento.Causale\n if causLst:\n for rel_doc in causLst:\n comment += rel_doc + \"\\n\"\n if comment:\n comment = \"
\" + comment + \"
\"\n\n e_invoice_received_date = self._get_received_date(fatturapa_attachment)\n\n e_invoice_date = datetime.strptime(\n FatturaBody.DatiGenerali.DatiGeneraliDocumento.Data, \"%Y-%m-%d\"\n ).date()\n\n delivery_partner_id = partner.address_get([\"delivery\"])[\"delivery\"]\n delivery_partner = self.env[\"res.partner\"].browse(delivery_partner_id)\n fiscal_position = self.env[\"account.fiscal.position\"]._get_fiscal_position(\n partner,\n delivery=delivery_partner,\n )\n\n invoice_data = {\n \"e_invoice_received_date\": e_invoice_received_date,\n \"invoice_date\": e_invoice_date,\n \"date\": e_invoice_received_date\n if company.in_invoice_registration_date == \"rec_date\"\n else e_invoice_date,\n \"fiscal_document_type_id\": fiscal_document_type.id,\n \"sender\": fatt.FatturaElettronicaHeader.SoggettoEmittente or False,\n \"move_type\": invoice_type,\n \"partner_id\": partner.id,\n \"currency_id\": currency.id,\n \"journal_id\": purchase_journal.id,\n # 'origin': xmlData.datiOrdineAcquisto,\n \"fiscal_position_id\": fiscal_position.id,\n \"invoice_payment_term_id\": partner.property_supplier_payment_term_id.id,\n \"company_id\": company.id,\n \"fatturapa_attachment_in_id\": fatturapa_attachment.id,\n \"narration\": comment,\n }\n\n # 2.1.1.12\n self.set_art73(FatturaBody, invoice_data)\n\n self.set_e_invoice_lines(FatturaBody, invoice_data)\n return invoice_data\n\n def invoiceCreate(self, fatt, fatturapa_attachment, FatturaBody, partner_id):\n partner_model = self.env[\"res.partner\"]\n partner = partner_model.browse(partner_id)\n invoice_data = self._prepare_invoice_values(\n fatt,\n fatturapa_attachment,\n FatturaBody,\n partner,\n )\n\n # 2.1.1.5\n found_withholding_taxes = self.set_withholding_tax(FatturaBody, invoice_data)\n\n invoice = self.env[\"account.move\"].create(invoice_data)\n credit_account = self.get_credit_account()\n\n invoice_lines = []\n # 2.2.1\n invoice_lines.extend(\n self.set_invoice_line_ids(\n FatturaBody,\n credit_account.id,\n partner,\n found_withholding_taxes,\n invoice,\n )\n )\n\n # 2.1.1.7\n invoice_lines.extend(\n self.set_welfares_fund(\n FatturaBody, credit_account.id, invoice, found_withholding_taxes\n )\n )\n\n # 2.1.1.10\n invoice_lines.extend(self.set_efatt_rounding(FatturaBody, invoice))\n\n invoice.with_context(check_move_validity=False).update(\n {\"invoice_line_ids\": [(6, 0, invoice_lines)]}\n )\n\n invoice._onchange_invoice_line_wt_ids()\n\n rel_docs_dict = {\n # 2.1.2\n \"order\": FatturaBody.DatiGenerali.DatiOrdineAcquisto,\n # 2.1.3\n \"contract\": FatturaBody.DatiGenerali.DatiContratto,\n # 2.1.4\n \"agreement\": FatturaBody.DatiGenerali.DatiConvenzione,\n # 2.1.5\n \"reception\": FatturaBody.DatiGenerali.DatiRicezione,\n # 2.1.6\n \"invoice\": FatturaBody.DatiGenerali.DatiFattureCollegate,\n }\n\n for rel_doc_key, rel_doc_data in rel_docs_dict.items():\n if not rel_doc_data:\n continue\n for rel_doc in rel_doc_data:\n doc_datas = self._prepareRelDocsLine(invoice.id, rel_doc, rel_doc_key)\n for doc_data in doc_datas:\n # Note for v12: must take advantage of batch creation\n self.env[\"fatturapa.related_document_type\"].create(doc_data)\n\n # 2.1.7\n self.set_activity_progress(FatturaBody, invoice)\n\n # 2.1.8\n self.set_ddt_data(FatturaBody, invoice)\n\n # 2.1.9\n self.set_delivery_data(FatturaBody, invoice)\n\n # 2.2.2\n self.set_summary_data(FatturaBody, invoice)\n\n # 2.1.10\n self.set_parent_invoice_data(FatturaBody, invoice)\n\n # 2.3\n self.set_vehicles_data(FatturaBody, invoice)\n\n # 2.4\n self.set_payments_data(FatturaBody, 
invoice, partner_id)\n\n # 2.5\n self.set_attachments_data(FatturaBody, invoice)\n\n self._addGlobalDiscount(\n invoice.id, FatturaBody.DatiGenerali.DatiGeneraliDocumento\n )\n\n if self.e_invoice_detail_level != \"1\":\n self.set_roundings(FatturaBody, invoice)\n\n self.set_vendor_bill_data(FatturaBody, invoice)\n\n # this can happen with refunds with negative amounts\n invoice.process_negative_lines()\n return invoice\n\n def set_vendor_bill_data(self, FatturaBody, invoice):\n if not invoice.invoice_date:\n invoice.update(\n {\n \"invoice_date\": datetime.strptime(\n FatturaBody.DatiGenerali.DatiGeneraliDocumento.Data, \"%Y-%m-%d\"\n ).date(),\n }\n )\n if not invoice.ref:\n today = fields.Date.context_today(self)\n x = invoice.line_ids.filtered(\n lambda line: line.account_id.account_type\n in (\"asset_receivable\", \"liability_payable\")\n ).sorted(lambda line: line.date_maturity or today)\n if x:\n x[-1].name = FatturaBody.DatiGenerali.DatiGeneraliDocumento.Numero\n invoice.ref = FatturaBody.DatiGenerali.DatiGeneraliDocumento.Numero\n if not invoice.payment_reference:\n invoice.payment_reference = invoice.ref\n\n def set_parent_invoice_data(self, FatturaBody, invoice):\n ParentInvoice = FatturaBody.DatiGenerali.FatturaPrincipale\n if ParentInvoice:\n parentinv_vals = {\n \"related_invoice_code\": ParentInvoice.NumeroFatturaPrincipale or \"\",\n \"related_invoice_date\": ParentInvoice.DataFatturaPrincipale or False,\n }\n invoice.write(parentinv_vals)\n\n def set_vehicles_data(self, FatturaBody, invoice):\n Vehicle = FatturaBody.DatiVeicoli\n if Vehicle:\n veicle_vals = {\n \"vehicle_registration\": Vehicle.Data or False,\n \"total_travel\": Vehicle.TotalePercorso or \"\",\n }\n invoice.write(veicle_vals)\n\n def set_attachments_data(self, FatturaBody, invoice):\n invoice_id = invoice.id\n AttachmentsData = FatturaBody.Allegati\n if AttachmentsData:\n self.env[\"fatturapa.attachment.in\"].extract_attachments(\n AttachmentsData, invoice_id\n )\n\n def set_ddt_data(self, FatturaBody, invoice):\n invoice_id = invoice.id\n DdtDatas = FatturaBody.DatiGenerali.DatiDDT\n if not DdtDatas:\n return\n invoice_line_model = self.env[\"account.move.line\"]\n DdTModel = self.env[\"fatturapa.related_ddt\"]\n for DdtDataLine in DdtDatas:\n if not DdtDataLine.RiferimentoNumeroLinea:\n DdTModel.create(\n {\n \"name\": DdtDataLine.NumeroDDT or \"\",\n \"date\": DdtDataLine.DataDDT or False,\n \"invoice_id\": invoice_id,\n }\n )\n else:\n for numline in DdtDataLine.RiferimentoNumeroLinea:\n invoice_lines = invoice_line_model.search(\n [\n (\"move_id\", \"=\", invoice_id),\n (\"sequence\", \"=\", int(numline)),\n ]\n )\n invoice_lineid = False\n if invoice_lines:\n invoice_lineid = invoice_lines[0].id\n DdTModel.create(\n {\n \"name\": DdtDataLine.NumeroDDT or \"\",\n \"date\": DdtDataLine.DataDDT or False,\n \"invoice_id\": invoice_id,\n \"invoice_line_id\": invoice_lineid,\n }\n )\n\n def set_art73(self, FatturaBody, invoice_data):\n if FatturaBody.DatiGenerali.DatiGeneraliDocumento.Art73:\n invoice_data[\"art73\"] = True\n\n def set_roundings(self, FatturaBody, invoice):\n rounding = 0.0\n if FatturaBody.DatiBeniServizi.DatiRiepilogo:\n for summary in FatturaBody.DatiBeniServizi.DatiRiepilogo:\n rounding += float(summary.Arrotondamento or 0.0)\n if FatturaBody.DatiGenerali.DatiGeneraliDocumento:\n summary = FatturaBody.DatiGenerali.DatiGeneraliDocumento\n rounding += float(summary.Arrotondamento or 0.0)\n\n if rounding:\n arrotondamenti_attivi_account_id = (\n 
self.env.company.arrotondamenti_attivi_account_id\n )\n if not arrotondamenti_attivi_account_id:\n raise UserError(\n _(\"Round up account is not set \" \"in Accounting Settings\")\n )\n\n arrotondamenti_passivi_account_id = (\n self.env.company.arrotondamenti_passivi_account_id\n )\n if not arrotondamenti_passivi_account_id:\n raise UserError(\n _(\"Round down account is not set \" \"in Accounting Settings\")\n )\n\n arrotondamenti_tax_id = self.env.company.arrotondamenti_tax_id\n if not arrotondamenti_tax_id:\n self.log_inconsistency(_(\"Round up and down tax is not set\"))\n\n line_sequence = max(invoice.invoice_line_ids.mapped(\"sequence\"), default=1)\n line_vals = []\n for summary in FatturaBody.DatiBeniServizi.DatiRiepilogo:\n # XXX fallisce cattivo se non trova l'imposta Arrotondamento\n to_round = float(summary.Arrotondamento or 0.0)\n if to_round != 0.0:\n account_taxes = self.get_account_taxes(\n summary.AliquotaIVA, summary.Natura\n )\n arrotondamenti_account_id = (\n arrotondamenti_passivi_account_id.id\n if to_round > 0.0\n else arrotondamenti_attivi_account_id.id\n )\n invoice_line_tax_id = (\n account_taxes[0].id\n if account_taxes\n else arrotondamenti_tax_id.id\n )\n name = _(\"Rounding down\") if to_round > 0.0 else _(\"Rounding up\")\n line_sequence += 1\n upd_vals = {\n \"sequence\": line_sequence,\n \"move_id\": invoice.id,\n \"name\": name,\n \"account_id\": arrotondamenti_account_id,\n \"price_unit\": to_round,\n \"tax_ids\": [(6, 0, [invoice_line_tax_id])],\n }\n # Valutare se in caso di importazione senza rounding sia meglio\n # lavorare su debito e credito invece di\n # mettere una tassa sul valore !!\n # if to_round<0:\n # upd_vals[\"debit\"]= abs(to_round)\n # else:\n # upd_vals[\"credit\"]= abs(to_round)\n line_vals.append(upd_vals)\n\n if line_vals:\n self.env[\"account.move.line\"].with_context(\n check_move_validity=False\n ).create(line_vals)\n\n def set_efatt_rounding(self, FatturaBody, invoice):\n invoice_line_model = self.env[\"account.move.line\"]\n invoice_line_ids = []\n if FatturaBody.DatiGenerali.DatiGeneraliDocumento.Arrotondamento:\n invoice.efatt_rounding = float(\n FatturaBody.DatiGenerali.DatiGeneraliDocumento.Arrotondamento\n )\n if invoice.efatt_rounding != 0:\n if invoice.efatt_rounding > 0:\n arrotondamenti_account_id = (\n self.env.company.arrotondamenti_passivi_account_id\n )\n if not arrotondamenti_account_id:\n raise UserError(\n _(\"Round down account is not set \" \"in Accounting Settings\")\n )\n name = _(\"Rounding down\")\n else:\n arrotondamenti_account_id = (\n self.env.company.arrotondamenti_attivi_account_id\n )\n if not arrotondamenti_account_id:\n raise UserError(\n _(\"Round up account is not set \" \"in Accounting Settings\")\n )\n name = _(\"Rounding up\")\n upd_vals = {\n \"move_id\": invoice.id,\n \"name\": name,\n \"account_id\": arrotondamenti_account_id.id,\n \"price_unit\": invoice.efatt_rounding,\n \"quantity\": 1,\n \"tax_ids\": [fields.Command.set([])],\n }\n self.create_and_get_line_id(\n invoice_line_ids, invoice_line_model, upd_vals\n )\n return invoice_line_ids\n\n def set_activity_progress(self, FatturaBody, invoice):\n invoice_id = invoice.id\n SalDatas = FatturaBody.DatiGenerali.DatiSAL\n if SalDatas:\n SalModel = self.env[\"fatturapa.activity.progress\"]\n for SalDataLine in SalDatas:\n SalModel.create(\n {\n \"fatturapa_activity_progress\": SalDataLine.RiferimentoFase or 0,\n \"invoice_id\": invoice_id,\n }\n )\n\n def _get_last_due_date(self, DatiPagamento):\n dates = []\n for PaymentLine in 
DatiPagamento or []:\n details = PaymentLine.DettaglioPagamento\n if details:\n for dline in details:\n if dline.DataScadenzaPagamento:\n dates.append(fields.Date.to_date(dline.DataScadenzaPagamento))\n dates.sort(reverse=True)\n return dates\n\n def set_payments_data(self, FatturaBody, invoice, partner_id):\n invoice_id = invoice.id\n PaymentsData = FatturaBody.DatiPagamento\n partner = self.env[\"res.partner\"].browse(partner_id)\n if not partner.property_supplier_payment_term_id:\n due_dates = self._get_last_due_date(FatturaBody.DatiPagamento)\n if due_dates:\n self.env[\"account.move\"].browse(\n invoice_id\n ).invoice_date_due = due_dates[0]\n if PaymentsData:\n PaymentDataModel = self.env[\"fatturapa.payment.data\"]\n PaymentTermsModel = self.env[\"fatturapa.payment_term\"]\n for PaymentLine in PaymentsData:\n cond = PaymentLine.CondizioniPagamento or False\n if not cond:\n raise UserError(_(\"Payment method code not found in document.\"))\n terms = PaymentTermsModel.search([(\"code\", \"=\", cond)])\n if not terms:\n raise UserError(_(\"Payment method code %s is incorrect.\") % cond)\n else:\n term_id = terms[0].id\n PayDataId = PaymentDataModel.create(\n {\"payment_terms\": term_id, \"invoice_id\": invoice_id}\n ).id\n self._createPaymentsLine(PayDataId, PaymentLine, partner_id, invoice)\n\n def set_withholding_tax(self, FatturaBody, invoice_data):\n Withholdings = FatturaBody.DatiGenerali.DatiGeneraliDocumento.DatiRitenuta\n if not Withholdings:\n return None\n\n withholding_tax_model = self.env[\"withholding.tax\"]\n found_withholding_taxes = withholding_tax_model.browse()\n e_withholding_taxes_values = []\n for Withholding in Withholdings:\n payment_reason_code = Withholding.CausalePagamento\n withholding_taxes = withholding_tax_model.search(\n [(\"payment_reason_id.code\", \"=\", payment_reason_code)],\n )\n if not withholding_taxes:\n raise UserError(\n _(\n \"The bill contains withholding tax with \"\n \"payment reason %s, \"\n \"but such a tax is not found in your system. 
Please \"\n \"set it.\",\n payment_reason_code,\n )\n )\n\n withholding_tax_amount = Withholding.AliquotaRitenuta\n e_withholding_tax_type = Withholding.TipoRitenuta\n withholding_tax_type = WT_CODES_MAPPING[e_withholding_tax_type]\n for withholding_tax in withholding_taxes:\n if (\n withholding_tax.tax == float(withholding_tax_amount)\n and withholding_tax_type == withholding_tax.wt_types\n ):\n found_withholding_taxes |= withholding_tax\n break\n else:\n raise UserError(\n _(\n \"No withholding tax found with document payment \"\n \"reason %(reason)s rate %(rate)s and type %(type)s.\",\n reason=payment_reason_code,\n rate=withholding_tax_amount,\n type=withholding_tax_type,\n )\n )\n\n e_withholding_tax_values = {\n \"name\": e_withholding_tax_type,\n \"amount\": Withholding.ImportoRitenuta,\n }\n e_withholding_taxes_values.append(e_withholding_tax_values)\n\n invoice_data[\"ftpa_withholding_ids\"] = [\n (0, 0, withholding_tax_values)\n for withholding_tax_values in e_withholding_taxes_values\n ]\n return found_withholding_taxes\n\n def set_welfares_fund(self, FatturaBody, credit_account_id, invoice, wt_founds):\n invoice_line_model = self.env[\"account.move.line\"]\n invoice_line_ids = []\n if self.e_invoice_detail_level == \"2\":\n\n Welfares = (\n FatturaBody.DatiGenerali.DatiGeneraliDocumento.DatiCassaPrevidenziale\n )\n if Welfares:\n WelfareFundLineModel = self.env[\"welfare.fund.data.line\"]\n for welfareLine in Welfares:\n WalfarLineVals = self._prepareWelfareLine(invoice.id, welfareLine)\n WelfareFundLineModel.create(WalfarLineVals)\n\n if welfareLine.TipoCassa == \"TC07\":\n continue\n\n line_vals = self._prepare_generic_line_data(welfareLine)\n line_vals.update(\n {\n \"name\": _(\"Welfare Fund: %s\") % welfareLine.TipoCassa,\n \"price_unit\": float(welfareLine.ImportoContributoCassa),\n \"move_id\": invoice.id,\n \"account_id\": credit_account_id,\n \"quantity\": 1,\n }\n )\n if welfareLine.Ritenuta:\n if not wt_founds:\n raise UserError(\n _(\n \"Welfare Fund data %s has withholding tax but no \"\n \"withholding tax was found in the system.\"\n )\n % welfareLine.TipoCassa\n )\n line_vals[\"invoice_line_tax_wt_ids\"] = [\n (6, 0, [wt.id for wt in wt_founds])\n ]\n if self.env.company.cassa_previdenziale_product_id:\n cassa_previdenziale_product = (\n self.env.company.cassa_previdenziale_product_id\n )\n line_vals[\"product_id\"] = cassa_previdenziale_product.id\n line_vals[\"name\"] = cassa_previdenziale_product.name\n self.adjust_accounting_data(\n cassa_previdenziale_product, line_vals\n )\n self.create_and_get_line_id(\n invoice_line_ids, invoice_line_model, line_vals\n )\n return invoice_line_ids\n\n def _convert_datetime(self, dtstring):\n ret = False\n try:\n dt = datetime.strptime(dtstring, \"%Y-%m-%dT%H:%M:%S\")\n if dt:\n ret = dt.strftime(\"%Y-%m-%d %H:%M:%S\")\n except (TypeError, ValueError): # pylint: disable=except-pass\n pass\n return ret\n\n def set_delivery_data(self, FatturaBody, invoice):\n Delivery = FatturaBody.DatiGenerali.DatiTrasporto\n if Delivery:\n delivery_id = self.getCarrirerPartner(Delivery)\n delivery_dict = {\n \"carrier_id\": delivery_id,\n \"transport_vehicle\": Delivery.MezzoTrasporto or \"\",\n \"transport_reason\": Delivery.CausaleTrasporto or \"\",\n \"number_items\": Delivery.NumeroColli or 0,\n \"description\": Delivery.Descrizione or \"\",\n \"unit_weight\": Delivery.UnitaMisuraPeso or 0.0,\n \"gross_weight\": Delivery.PesoLordo or 0.0,\n \"net_weight\": Delivery.PesoNetto or 0.0,\n \"pickup_datetime\": 
self._convert_datetime(Delivery.DataOraRitiro)\n or False,\n \"transport_date\": Delivery.DataInizioTrasporto or False,\n \"delivery_datetime\": self._convert_datetime(Delivery.DataOraConsegna)\n or False,\n \"delivery_address\": \"\",\n \"ftpa_incoterms\": Delivery.TipoResa,\n }\n\n if Delivery.IndirizzoResa:\n delivery_dict[\"delivery_address\"] = \"{}, {}\\n{} - {}\\n{} {}\".format(\n Delivery.IndirizzoResa.Indirizzo or \"\",\n Delivery.IndirizzoResa.NumeroCivico or \"\",\n Delivery.IndirizzoResa.CAP or \"\",\n Delivery.IndirizzoResa.Comune or \"\",\n Delivery.IndirizzoResa.Provincia or \"\",\n Delivery.IndirizzoResa.Nazione or \"\",\n )\n invoice.write(delivery_dict)\n\n def set_summary_data(self, FatturaBody, invoice):\n invoice_id = invoice.id\n Summary_datas = FatturaBody.DatiBeniServizi.DatiRiepilogo\n summary_data_model = self.env[\"fatturapa.summary.data\"]\n if Summary_datas:\n for summary in Summary_datas:\n summary_line = {\n \"tax_rate\": summary.AliquotaIVA or 0.0,\n \"non_taxable_nature\": summary.Natura or False,\n \"incidental_charges\": summary.SpeseAccessorie or 0.0,\n \"rounding\": summary.Arrotondamento or 0.0,\n \"amount_untaxed\": summary.ImponibileImporto or 0.0,\n \"amount_tax\": summary.Imposta or 0.0,\n \"payability\": summary.EsigibilitaIVA or False,\n \"law_reference\": summary.RiferimentoNormativo or \"\",\n \"invoice_id\": invoice_id,\n }\n summary_data_model.create(summary_line)\n\n def set_e_invoice_lines(self, FatturaBody, invoice_data):\n e_invoice_lines = self.env[\"einvoice.line\"].browse()\n for line in FatturaBody.DatiBeniServizi.DettaglioLinee:\n e_invoice_lines |= self.create_e_invoice_line(line)\n if e_invoice_lines:\n invoice_data[\"e_invoice_line_ids\"] = [(6, 0, e_invoice_lines.ids)]\n\n def _set_invoice_lines(\n self, product, invoice_line_data, invoice_lines, invoice_line_model\n ):\n\n if product:\n invoice_line_data[\"product_id\"] = product.id\n self.adjust_accounting_data(product, invoice_line_data)\n self.create_and_get_line_id(\n invoice_lines, invoice_line_model, invoice_line_data\n )\n\n # move_id\n # account_id\n def set_invoice_line_ids(\n self, FatturaBody, credit_account_id, partner, wt_founds, invoice\n ):\n invoice_lines = []\n invoice_line_model = self.env[\"account.move.line\"]\n if self.e_invoice_detail_level == \"1\":\n for nline, line in enumerate(FatturaBody.DatiBeniServizi.DatiRiepilogo):\n invoice_line_data = self._prepareInvoiceLineAliquota(\n credit_account_id, line, nline\n )\n invoice_line_data[\"move_id\"] = invoice.id\n\n product = partner.e_invoice_default_product_id\n self._set_invoice_lines(\n product, invoice_line_data, invoice_lines, invoice_line_model\n )\n\n elif self.e_invoice_detail_level == \"2\":\n for line in FatturaBody.DatiBeniServizi.DettaglioLinee:\n invoice_line_data = self._prepareInvoiceLine(\n credit_account_id, line, wt_founds\n )\n invoice_line_data[\"move_id\"] = invoice.id\n\n product = self.get_line_product(line, partner)\n self._set_invoice_lines(\n product, invoice_line_data, invoice_lines, invoice_line_model\n )\n return invoice_lines\n\n def check_invoice_amount(self, invoice, FatturaElettronicaBody):\n dgd = FatturaElettronicaBody.DatiGenerali.DatiGeneraliDocumento\n if dgd.ScontoMaggiorazione and dgd.ImportoTotaleDocumento:\n # assuming that, if someone uses\n # DatiGeneraliDocumento.ScontoMaggiorazione, also fills\n # DatiGeneraliDocumento.ImportoTotaleDocumento\n ImportoTotaleDocumento = float(dgd.ImportoTotaleDocumento)\n if not float_is_zero(\n invoice.amount_total - 
ImportoTotaleDocumento, precision_digits=2\n ):\n self.log_inconsistency(\n _(\n \"Bill total %(amount_total)s is different \"\n \"from document total amount %(document_total_amount)s\"\n )\n % {\n \"amount_total\": invoice.amount_total,\n \"document_total_amount\": ImportoTotaleDocumento,\n }\n )\n else:\n # else, we can only check DatiRiepilogo if\n # DatiGeneraliDocumento.ScontoMaggiorazione is not present,\n # because otherwise DatiRiepilogo and odoo invoice total would\n # differ\n amount_untaxed = invoice.compute_xml_amount_untaxed(FatturaElettronicaBody)\n if not float_is_zero(\n invoice.amount_untaxed - amount_untaxed, precision_digits=2\n ):\n self.log_inconsistency(\n _(\n \"Computed amount untaxed %(amount_untaxed)s is \"\n \"different from summary data %(summary_data)s\"\n )\n % {\n \"amount_untaxed\": invoice.amount_untaxed,\n \"summary_data\": amount_untaxed,\n }\n )\n\n def get_invoice_obj(self, fatturapa_attachment):\n xml_string = fatturapa_attachment.ir_attachment_id.get_xml_string()\n return efattura.CreateFromDocument(xml_string)\n\n def create_and_get_line_id(self, invoice_line_ids, invoice_line_model, upd_vals):\n invoice_line_id = (\n invoice_line_model.with_context(check_move_validity=False)\n .create(upd_vals)\n .id\n )\n invoice_line_ids.append(invoice_line_id)\n\n def _set_decimal_precision(self, precision_name, field_name):\n precision = self.env[\"decimal.precision\"].search(\n [(\"name\", \"=\", precision_name)], limit=1\n )\n different_precisions = original_precision = None\n if precision:\n precision_id = precision.id\n original_precision = precision.digits\n different_precisions = self[field_name] != original_precision\n if different_precisions:\n with registry(self.env.cr.dbname).cursor() as new_cr:\n # We need a new env (and cursor) because 'digits' property of Float\n # fields is retrieved with a new LazyCursor,\n # see class Float at odoo.fields,\n # so we need to write (commit) to DB in order to make the new\n # precision available\n new_env = api.Environment(new_cr, self.env.uid, self.env.context)\n new_precision = new_env[\"decimal.precision\"].browse(precision_id)\n new_precision.sudo().write({\"digits\": self[field_name]})\n new_cr.commit()\n return precision, different_precisions, original_precision\n\n def _restore_original_precision(self, precision, original_precision):\n with registry(self.env.cr.dbname).cursor() as new_cr:\n new_env = api.Environment(new_cr, self.env.uid, self.env.context)\n new_price_precision = new_env[\"decimal.precision\"].browse(precision.id)\n new_price_precision.sudo().write({\"digits\": original_precision})\n new_cr.commit()\n\n def _get_invoice_partner_id(self, fatt):\n cedentePrestatore = fatt.FatturaElettronicaHeader.CedentePrestatore\n partner_id = self.getCedPrest(cedentePrestatore)\n return partner_id\n\n def importFatturaPA(self):\n self.ensure_one()\n\n (\n price_precision,\n different_price_precisions,\n original_price_precision,\n ) = self._set_decimal_precision(\"Product Price\", \"price_decimal_digits\")\n (\n qty_precision,\n different_qty_precisions,\n original_qty_precision,\n ) = self._set_decimal_precision(\n \"Product Unit of Measure\", \"quantity_decimal_digits\"\n )\n (\n discount_precision,\n different_discount_precisions,\n original_discount_precision,\n ) = self._set_decimal_precision(\"Discount\", \"discount_decimal_digits\")\n\n new_invoices = []\n # convert to dict in order to be able to modify context\n fatturapa_attachments = self._get_selected_records()\n self.env.context = 
dict(self.env.context)\n for fatturapa_attachment in fatturapa_attachments:\n self.reset_inconsistencies()\n self._check_attachment(fatturapa_attachment)\n\n fatt = self.get_invoice_obj(fatturapa_attachment)\n cedentePrestatore = fatt.FatturaElettronicaHeader.CedentePrestatore\n # 1.2\n partner_id = self._get_invoice_partner_id(fatt)\n # 1.3\n TaxRappresentative = fatt.FatturaElettronicaHeader.RappresentanteFiscale\n # 1.5\n Intermediary = (\n fatt.FatturaElettronicaHeader.TerzoIntermediarioOSoggettoEmittente\n )\n\n generic_inconsistencies = \"\"\n existing_inconsistencies = self.get_inconsistencies()\n if existing_inconsistencies:\n generic_inconsistencies = existing_inconsistencies + \"\\n\\n\"\n\n xmlproblems = getattr(fatt, \"_xmldoctor\", None)\n if xmlproblems: # None or []\n generic_inconsistencies += \"\\n\".join(xmlproblems) + \"\\n\\n\"\n\n # 2\n for fattura in fatt.FatturaElettronicaBody:\n\n # reset inconsistencies\n self.reset_inconsistencies()\n\n invoice = self.invoiceCreate(\n fatt, fatturapa_attachment, fattura, partner_id\n )\n\n self.set_StabileOrganizzazione(cedentePrestatore, invoice)\n if TaxRappresentative:\n tax_partner_id = self.getPartnerBase(\n TaxRappresentative.DatiAnagrafici\n )\n invoice.write({\"tax_representative_id\": tax_partner_id})\n if Intermediary:\n Intermediary_id = self.getPartnerBase(Intermediary.DatiAnagrafici)\n invoice.write({\"intermediary\": Intermediary_id})\n new_invoices.append(invoice.id)\n self.check_invoice_amount(invoice, fattura)\n\n invoice.set_einvoice_data(fattura)\n\n existing_inconsistencies = self.get_inconsistencies()\n if existing_inconsistencies:\n invoice_inconsistencies = existing_inconsistencies\n else:\n invoice_inconsistencies = \"\"\n invoice.inconsistencies = (\n generic_inconsistencies + invoice_inconsistencies\n )\n\n if price_precision and different_price_precisions:\n self._restore_original_precision(price_precision, original_price_precision)\n if qty_precision and different_qty_precisions:\n self._restore_original_precision(qty_precision, original_qty_precision)\n if discount_precision and different_discount_precisions:\n self._restore_original_precision(\n discount_precision, original_discount_precision\n )\n\n return {\n \"view_type\": \"form\",\n \"name\": \"Electronic Bills\",\n \"view_mode\": \"tree,form\",\n \"res_model\": \"account.move\",\n \"type\": \"ir.actions.act_window\",\n \"domain\": [(\"id\", \"in\", new_invoices)],\n }\n","repo_name":"OCA/l10n-italy","sub_path":"l10n_it_fatturapa_in/wizard/wizard_import_fatturapa.py","file_name":"wizard_import_fatturapa.py","file_ext":"py","file_size_in_byte":81066,"program_lang":"python","lang":"en","doc_type":"code","stars":115,"dataset":"github-code","pt":"31"} +{"seq_id":"12127112389","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom threading import Thread, Event\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom time import sleep\n\nimport os\nfrom dotenv import load_dotenv\n\n\nclass Bot(Thread):\n def __init__(self):\n Thread.__init__(self)\n self._stop = Event()\n self.total_request = 100\n self.today_count = 0\n self.page = 1\n self.setdriver()\n \n def stop(self): \n self._stop.set() \n \n def stopped(self): \n return self._stop.isSet()\n\n def init_task_info(self):\n load_dotenv()\n self.DEFAULT_PROFILE = 
os.getenv('DEFAULT_PROFILE')\n self.channel_link = \"https://www.youtube.com/c/DanLok/about\"\n\n def setdriver(self):\n try:\n self.init_task_info()\n options = webdriver.FirefoxOptions()\n options.add_argument(\"--start-maximized\")\n # options.headless = True\n profile = webdriver.FirefoxProfile(self.DEFAULT_PROFILE)\n self.driver = webdriver.Firefox(executable_path=\"geckodriver.exe\", options=options, firefox_profile=profile)\n \n except Exception as e:\n print(e)\n \n def close(self):\n try:\n self.driver.quit()\n except Exception as e:\n print(e)\n\n def _next_page(self):\n self.page += 1\n self.driver.get(f'{self.SEARCH_LINK}&page={self.page}')\n print('next page : ', self.page)\n\n def _click_button_with_label(self, label):\n try:\n self.driver.find_element(By.XPATH, f'//button/span[text()=\"{label}\"]').click()\n return True\n except Exception as e:\n print(e)\n return False\n\n\n def get_email(self):\n try:\n WebDriverWait(self.driver, 5).until(EC.element_to_be_clickable((By.XPATH, '//tp-yt-paper-button[@id=\"button\"]/*[text()=\"View email address\"]'))).click()\n # view_email_button_ele = self.driver.find_element(By.XPATH, '//tp-yt-paper-button[@id=\"button\"]/*[text()=\"View email address\"]').find_element_by_xpath('..')\n # view_email_button_ele.click()\n print(\"clicked view email button!\")\n WebDriverWait(self.driver, 5).until(EC.frame_to_be_available_and_switch_to_it((By.XPATH,\"//iframe[@title='reCAPTCHA']\")))\n WebDriverWait(self.driver, 5).until(EC.element_to_be_clickable((By.XPATH, '//span[@id=\"recaptcha-anchor\"]'))).click()\n self.driver.switch_to.default_content()\n sleep(2)\n WebDriverWait(self.driver, 5).until(EC.element_to_be_clickable((By.XPATH, '//button[@id=\"submit-btn\"]'))).click()\n email = WebDriverWait(self.driver, 5).until(EC.presence_of_element_located((By.XPATH, '//a[@id=\"email\"]'))).text\n # email = self.driver.find_element_by_xpath('//a[@id=\"email\"]').text\n return email\n except Exception as e:\n print(e)\n return False\n\n def perform_task(self):\n try:\n self.driver.get(self.channel_link)\n business_email = self.get_email()\n print(business_email)\n except Exception as e:\n print(e)\n\n def run(self):\n while True:\n self.perform_task()\n \n\ndef main():\n my_bot = Bot()\n my_bot.start()\n\n\nif __name__ == '__main__':\n main()","repo_name":"SS-FS-58/LinkedIn-Auto-Request-Connection","sub_path":"youtube_business_email.py","file_name":"youtube_business_email.py","file_ext":"py","file_size_in_byte":3553,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"10734157754","text":"'''\r\nTitle:\r\nAuthor: Nikhil Nayyar\r\nDate Created: 16/05/19\r\n'''\r\nimport pygame, random, time\r\n\r\n### Classes\r\nclass myClass:\r\n\tdef __init__(self, x=0, y=0):\r\n\t\tself.x = x\r\n\t\tself.y = y\r\n\t\tself.pos = (self.x, self.y)\r\n\t\tself.surface = pygame.Surface((0, 0), pygame.SRCALPHA, 32)\r\n\t\tself.red = 0\r\n\t\tself.green = 0\r\n\t\tself.blue = 0\r\n\t\tself.colour = (self.red, self.green, self.blue)\r\n\r\n\tdef getSurface(self): # encapsulation\r\n\t\treturn self.surface\r\n\r\n\tdef getPos(self):\r\n\t\treturn self.pos\r\n\r\n\tdef getX(self):\r\n\t\treturn self.x\r\n\r\n\tdef getY(self):\r\n\t\treturn self.y\r\n\r\n\tdef getWidth(self):\r\n\t\treturn self.width\r\n\r\n\tdef getHeight(self):\r\n\t\treturn self.height\r\n\r\n\tdef setPos(self, pos):\r\n\t\tself.x = pos[0]\r\n\t\tself.y = pos[1]\r\n\t\tself.pos = (self.x, self.y)\r\n\r\n\tdef setColour(self, colour):\r\n\t\tself.colour 
= colour\r\n\r\n\r\nclass box(myClass):\r\n\tdef __init__(self, width, height, x=0, y=0):\r\n\t\tmyClass.__init__(self, x, y)\r\n\t\tself.width = width\r\n\t\tself.height = height\r\n\t\tself.dim = (self.width, self.height)\r\n\t\tself.surface = pygame.Surface(self.dim, pygame.SRCALPHA, 32)\r\n\t\tself.surface.fill(self.colour)\r\n\r\n\tdef setColour(self, colour):\r\n\t\tself.colour = colour\r\n\t\tself.surface.fill(self.colour)\r\n\r\n\r\nclass text(myClass):\r\n\tdef __init__(self, content, fontSize=24):\r\n\t\tmyClass.__init__(self)\r\n\t\tself.width = self.surface.get_rect()[2]\r\n\t\tself.height = self.surface.get_rect()[3]\r\n\t\tself.font = 'Pokemon GB.ttf'\r\n\t\tself.fontFam = self.font\r\n\t\tself.fontSize = fontSize\r\n\t\tself.font = pygame.font.SysFont(self.fontFam, self.fontSize)\r\n\t\tself.content = content\r\n\t\tself.surface = self.font.render(self.content, 1, self.colour)\r\n\r\n\tdef setColour(self, colour):\r\n\t\tmyClass.setColour(self, colour)\r\n\t\tself.surface = self.font.render(self.content, 1, self.colour)\r\n\r\n\tdef setFont(self, fontFam):\r\n\t\tself.fontFam = fontFam\r\n\t\tself.font = pygame.font.SysFont(self.fontFam, self.fontSize)\r\n\t\tself.surface = self.font.render(self.content, 1, self.colour)\r\n\r\n\tdef setContent(self, content):\r\n\t\tself.content = content\r\n\t\tself.surface = self.font.render(self.content, 1, self.colour)\r\n\r\n\tdef getText(self):\r\n\t\treturn myClass.getSurface(self)\r\n\r\n\r\nclass mySprite(myClass):\r\n\tdef __init__(self, fileName):\r\n\t\tmyClass.__init__(self)\r\n\t\tself.surface = pygame.image.load(fileName).convert_alpha()\r\n\t\tself.width = self.surface.get_rect()[2]\r\n\t\tself.height = self.surface.get_rect()[3]\r\n\r\n\tdef resize(self, width, height):\r\n\t\tself.width = width\r\n\t\tself.height = height\r\n\t\tself.dim = (self.width, self.height)\r\n\t\tself.surface = pygame.transform.smoothscale(self.getSurface(), self.dim)\r\n\r\n\tdef rotate(self):\r\n\t\tself.surface = pygame.transform.rotate(self.surface, 270)\r\n\r\n\r\nclass attack(mySprite):\r\n\tdef __init__(self, filename, name, damage, powerPoints, type, attackType, accuracy):\r\n\t\tmySprite.__init__(self, filename)\r\n\t\tself.name = name\r\n\t\tself.damage = damage\r\n\t\tself.powerPoints = powerPoints\r\n\t\tself.type = type\r\n\t\tself.attackType = attackType\r\n\t\tself.accuracy = accuracy\r\n\r\n\tdef getDamage(self):\r\n\t\treturn self.damage\r\n\r\n\tdef getType(self):\r\n\t\treturn self.type\r\n\r\n\tdef getAttackType(self):\r\n\t\treturn self.attackType\r\n\r\n\tdef getAccuracy(self):\r\n\t\treturn self.accuracy\r\n\r\n\tdef getName(self):\r\n\t\treturn self.name\r\n\r\n\tdef __str__(self):\r\n\t\treturn self.name\r\n\r\n\r\nclass pokemon(mySprite):\r\n\tdef __init__(self, name, filename, type, stats, attacks, weakness, resistance, immunites):\r\n\t\tmySprite.__init__(self, filename)\r\n\t\tself.name = name\r\n\t\tself.type = type\r\n\t\tself.stats = stats\r\n\t\tself.attacks = attacks\r\n\t\tself.weakness = weakness\r\n\t\tself.resistance = resistance\r\n\t\tself.immunites = immunites\r\n\r\n\tdef getName(self):\r\n\t\treturn self.name\r\n\r\n\tdef getType(self):\r\n\t\treturn self.type\r\n\r\n\tdef getStats(self):\r\n\t\treturn self.stats\r\n\r\n\tdef getAttacks(self):\r\n\t\treturn self.attacks\r\n\r\n\tdef getWeakness(self):\r\n\t\treturn self.weakness\r\n\r\n\tdef getResistance(self):\r\n\t\treturn self.resistance\r\n\r\n\tdef getImmunites(self):\r\n\t\treturn self.immunites\r\n\r\n\r\nclass Battle:\r\n\tdef 
__init__(self):\r\n\t\tself.state = 0\r\n\t\tself.poke1 = 0\r\n\t\tself.poke2 = 0\r\n\t\tself.userWin = 0\r\n\t\tself.cpuWin = 0\r\n\t\tself.attack = 0\r\n\t\tself.immune = 0\r\n\t\tself.end = 0\r\n\t\tself.u = 0 # user index number for party pokemon\r\n\t\tself.c = 0 # cpu index number for party pokemon\r\n\t\tself.spd1 = 0\r\n\t\tself.spd2 = 0\r\n\t\tself.cpuHp = 0\r\n\t\tself.userHp = 0\r\n\t\tself.tempcpuHp = 0\r\n\t\tself.tempUserHp = 0\r\n\t\tself.turns = 0\r\n\t\tself.askAttack = 0\r\n\t\tself.cpuFaint = 0\r\n\t\tself.userFaint = 0\r\n\t\tself.start = 1\r\n\r\n\tdef update(self, pkmnParty1, pkmnParty2, disp, key, bbox): # pkmnparty1 is user, pkmnparty2 is cpu\r\n\t\tif self.cpuWin == 0 and self.userWin == 0: # show the pokemon sprites while now one has won\r\n\t\t\tuserPkN = text(str(pkmnParty1[self.u].getName()), 36)\r\n\t\t\tcpuPkN = text(str(pkmnParty2[self.c].getName()), 36)\r\n\t\t\tuserHp = text(\"Hp: \" + str(self.userHp), 36)\r\n\t\t\tcpuHp = text(\"Hp: \" + str(self.cpuHp), 36)\r\n\t\t\tcpuPkmnName2 = pkmnParty2[self.c].getName()\r\n\t\t\tpkmnName2 = pkmnParty1[self.u].getName()\r\n\t\t\tif self.start == 1:\r\n\t\t\t\tchalText = text(\"You are challenged by\", 36)\r\n\t\t\t\tchalText.setPos((0, 600 - 85))\r\n\t\t\t\tdisp.blit(chalText.getText(), chalText.getPos())\r\n\t\t\t\tchalTextMore = text(\"Battle Tower Trainer!\", 36)\r\n\t\t\t\tchalTextMore.setPos((0, 600 - 45))\r\n\t\t\t\tdisp.blit(chalTextMore.getText(), chalTextMore.getPos())\r\n\t\t\t\tpygame.display.flip()\r\n\t\t\t\ttime.sleep(3)\r\n\t\t\t\tdisp.blit(bbox.getSurface(), bbox.getPos())\r\n\t\t\t\tbatIntroText = text(\"Battle Tower Trainer\", 36)\r\n\t\t\t\tmorebatText = text(\"sent out \" + str(cpuPkmnName2), 36)\r\n\t\t\t\tbatIntroText.setPos((0, 600 - 85))\r\n\t\t\t\tdisp.blit(batIntroText.getText(), batIntroText.getPos())\r\n\t\t\t\tmorebatText.setPos((0, 600 - 45))\r\n\t\t\t\tdisp.blit(morebatText.getText(), morebatText.getPos())\r\n\t\t\t\tpygame.display.flip()\r\n\t\t\t\ttime.sleep(3)\r\n\t\t\t\tdisp.blit(bbox.getSurface(), bbox.getPos())\r\n\t\t\t\tuserBatText = text(\"You sent out \" + str(pkmnName2), 36)\r\n\t\t\t\tuserBatText.setPos((0, 600 - 85))\r\n\t\t\t\tdisp.blit(userBatText.getText(), userBatText.getPos())\r\n\t\t\t\tpygame.display.flip()\r\n\t\t\t\ttime.sleep(2)\r\n\t\t\t\tself.start = 0\r\n\t\t\tuserPkN.setPos((800 - 200, 400))\r\n\t\t\tcpuPkN.setPos((0, 30))\r\n\t\t\tuserHp.setPos((800 - 200, 450))\r\n\t\t\tcpuHp.setPos((0, 60))\r\n\t\t\tdisp.blit(userPkN.getText(), userPkN.getPos())\r\n\t\t\tdisp.blit(cpuPkN.getText(), cpuPkN.getPos())\r\n\t\t\tdisp.blit(userHp.getText(), userHp.getPos())\r\n\t\t\tdisp.blit(cpuHp.getText(), cpuHp.getPos())\r\n\t\t\tdisp.blit(pkmnParty1[self.u].getSurface(), pkmnParty1[self.u].getPos())\r\n\t\t\tdisp.blit(pkmnParty2[self.c].getSurface(), pkmnParty2[self.c].getPos())\r\n\r\n\r\n\t\tif self.state == 0: # at the beginging of a turn get the speed of each pokemon to determine who goes first\r\n\t\t\tself.spd1 = pkmnParty1[self.u].getStats()[5]\r\n\t\t\tself.spd2 = pkmnParty2[self.c].getStats()[5]\r\n\r\n\r\n\t\t\tif self.spd1 > self.spd2:\r\n\t\t\t\tself.poke1 = 1\r\n\t\t\telse:\r\n\t\t\t\tself.poke2 = 1\r\n\r\n\t\t\tself.state = 1\r\n\r\n\t\tif self.state == 1: # get the Hp stat of each pokemon or if a pokemon dies get the new hp of only that pokemon\r\n\r\n\t\t\tif self.cpuFaint == 1:\r\n\t\t\t\tself.tempcpuHp = pkmnParty2[self.c].getStats()[0] # get Hp of cpu\r\n\t\t\t\t# hp calc\r\n\t\t\t\tself.cpuHp = 2 * self.tempcpuHp * 50\r\n\t\t\t\tself.cpuHp = self.cpuHp 
/ 100\r\n\t\t\t\tself.cpuHp = self.cpuHp + 60\r\n\t\t\t\tself.state = 2\r\n\t\t\t\tself.cpuFaint = 0\r\n\r\n\t\t\telif self.userFaint == 1:\r\n\t\t\t\tself.tempUserHp = pkmnParty1[self.u].getStats()[0] # get Hp of user\r\n\t\t\t\t# hp calc\r\n\t\t\t\tself.userHp = 2 * self.tempUserHp * 50\r\n\t\t\t\tself.userHp = self.userHp / 100\r\n\t\t\t\tself.userHp = self.userHp + 60\r\n\t\t\t\tself.state = 2\r\n\t\t\t\tself.userFaint = 0\r\n\r\n\r\n\t\t\telif self.turns == 0:\r\n\t\t\t\tself.tempcpuHp = pkmnParty2[self.c].getStats()[0] # get Hp of cpu\r\n\t\t\t\t# hp calc\r\n\t\t\t\tself.cpuHp = 2 * self.tempcpuHp * 50\r\n\t\t\t\tself.cpuHp = self.cpuHp / 100\r\n\t\t\t\tself.cpuHp = self.cpuHp + 60\r\n\r\n\t\t\t\tself.tempUserHp = pkmnParty1[self.u].getStats()[0] # get Hp of user\r\n\t\t\t\t# hp calc\r\n\t\t\t\tself.userHp = 2 * self.tempUserHp * 50\r\n\t\t\t\tself.userHp = self.userHp / 100\r\n\t\t\t\tself.userHp = self.userHp + 60\r\n\t\t\t\tself.state = 2\r\n\r\n\t\t\tself.cpuHp = int(self.cpuHp)\r\n\t\t\tself.userHp = int(self.userHp)\r\n\t\tif self.state == 2: # get the input from the user to chose what attack to use\r\n\t\t\tattk1 = pkmnParty1[self.u].getAttacks()[0]\r\n\t\t\tattN1 = text(\"(1) \" + str(attk1), 36)\r\n\t\t\tattN1.setPos((0, 600 - 85))\r\n\t\t\tattk2 = pkmnParty1[self.u].getAttacks()[1]\r\n\t\t\tattN2 = text(\"(2) \" + str(attk2), 36)\r\n\t\t\tattN2.setPos((0, 600 - 45))\r\n\t\t\tdisp.blit(attN1.getText(), attN1.getPos())\r\n\t\t\tdisp.blit(attN2.getText(), attN2.getPos())\r\n\t\t\tself.askAttack = 0\r\n\t\t\tif key[pygame.K_1]:\r\n\t\t\t\tself.askAttack = 1\r\n\t\t\t\tself.turns = 0\r\n\t\t\t\tself.state = 3\r\n\t\t\telif key[pygame.K_2]:\r\n\t\t\t\tself.askAttack = 2\r\n\t\t\t\tself.turns = 0\r\n\t\t\t\tself.state = 3\r\n\t\t\treturn\r\n\r\n\t\tif self.state == 3:\r\n\t\t\tif self.turns == 2: # if a turn has passed and no one fainted go back to state 2 to get attack inputs\r\n\t\t\t\tself.state = 2\r\n\t\t\t\treturn\r\n\r\n\t\t\tif self.poke1 == 1: # users turn\r\n\t\t\t\tpkmnName = pkmnParty1[self.u].getName()\r\n\t\t\t\tpkmnAttkName = pkmnParty1[self.u].getAttacks()[self.askAttack - 1]\r\n\r\n\t\t\t\t# text stuff\r\n\t\t\t\tpkAction = text(str(pkmnName) + \" used \" + str(pkmnAttkName), 36)\r\n\t\t\t\tpkAction.setPos((0, 600 - 45))\r\n\t\t\t\tmonkey = 1\r\n\t\t\t\tif monkey == 1: # make text appear for 3 seconds\r\n\t\t\t\t\tdisp.blit(pkAction.getText(), pkAction.getPos())\r\n\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\ttime.sleep(2)\r\n\t\t\t\t\tdisp.blit(bbox.getSurface(), bbox.getPos())\r\n\t\t\t\t\tmonkey = 0\r\n\r\n\t\t\t\tattTimer = 1\r\n\t\t\t\tif attTimer == 1: # make attack appear for 3 seconds\r\n\t\t\t\t\tdisp.blit(pkmnParty1[self.u].getAttacks()[self.askAttack - 1].getSurface(), pkmnParty1[self.u].getAttacks()[self.askAttack - 1].getPos())\r\n\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\ttime.sleep(1)\r\n\t\t\t\t\tattTimer = 0\r\n\r\n\t\t\t\ttypeAttack = pkmnParty1[self.u].getAttacks()[self.askAttack - 1].getType() # get the type of attakc (rock, grounnd etc)\r\n\t\t\t\tfor i in range(len(pkmnParty2[self.c].getImmunites())): # check for immunites\r\n\t\t\t\t\tif typeAttack == pkmnParty2[self.c].getImmunites()[i]:\r\n\t\t\t\t\t\timmText = text(\"It does not effect \" + str(pkmnParty2[self.c].getName()), 36)\r\n\t\t\t\t\t\timmText.setPos((0, 600-45))\r\n\t\t\t\t\t\tooga = 1\r\n\t\t\t\t\t\tif ooga == 1:\r\n\t\t\t\t\t\t\tdisp.blit(immText.getText(), 
immText.getPos())\r\n\t\t\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\t\t\ttime.sleep(2)\r\n\t\t\t\t\t\t\tdisp.blit(bbox.getSurface(), bbox.getPos())\r\n\t\t\t\t\t\t\tooga = 0\r\n\t\t\t\t\t\tself.immune = 1\r\n\t\t\t\t\t\tself.poke1 = 0\r\n\t\t\t\t\t\tself.poke2 = 1\r\n\t\t\t\t\t\treturn\r\n\r\n\t\t\t\tpokeAccur = random.randint(1, 100) # choose random number\r\n\t\t\t\tif pokeAccur > pkmnParty1[self.u].getAttacks()[self.askAttack - 1].getAccuracy(): # check if move hits based on attack accuracy\r\n\t\t\t\t\tmissText = text(str(pkmnParty2[self.c].getName()) + \" avoided the attack\", 36)\r\n\t\t\t\t\tmissText.setPos((0, 600 - 45))\r\n\t\t\t\t\tbooga = 1\r\n\t\t\t\t\tif booga == 1:\r\n\t\t\t\t\t\tdisp.blit(missText.getText(), missText.getPos())\r\n\t\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\t\ttime.sleep(2)\r\n\t\t\t\t\t\tdisp.blit(bbox.getSurface(), bbox.getPos())\r\n\t\t\t\t\t\tbooga = 0\r\n\t\t\t\t\tself.turns +=1\r\n\t\t\t\t\tself.poke1 = 0\r\n\t\t\t\t\tself.poke2 = 1\r\n\t\t\t\t\treturn\r\n\r\n\t\t\t\tattack = pkmnParty1[self.u].getAttacks()[self.askAttack - 1].getDamage() # get damage of chosen attack\r\n\r\n\t\t\t\tattackType = pkmnParty1[self.u].getAttacks()[\r\n\t\t\t\t\tself.askAttack - 1].getAttackType() # check if attack is physical or special\r\n\r\n\t\t\t\tif attackType == 1: # get the appropriate defense based on the attack type\r\n\t\t\t\t\tcpuDef = pkmnParty2[self.c].getStats()[2]\r\n\t\t\t\telse:\r\n\t\t\t\t\tcpuDef = pkmnParty2[self.c].getStats()[4]\r\n\r\n\t\t\t\tif attackType == 1: # get the appropriate attack stat based on teh attack type\r\n\t\t\t\t\tpkmnAttk = pkmnParty1[self.u].getStats()[1]\r\n\t\t\t\telse:\r\n\t\t\t\t\tpkmnAttk = pkmnParty1[self.u].getStats()[3]\r\n\r\n\t\t\t\t#print(\"initial cpu Hp: \" + str(self.cpuHp))\r\n\r\n\t\t\t\t# damage calc\r\n\t\t\t\tdamage = 2 * 50\r\n\t\t\t\tdamage = damage / 5\r\n\t\t\t\tdamage += 2\r\n\t\t\t\tdamage = damage * attack\r\n\t\t\t\tdamage = damage * pkmnAttk\r\n\t\t\t\tdamage = damage / cpuDef\r\n\t\t\t\tdamage = damage / 50\r\n\t\t\t\tdamage += 2\r\n\r\n\t\t\t\tfor i in range(len(pkmnParty1[\r\n\t\t\t\t\t\t\t\t\t self.u].getType())): # checks if attack is STAB (same type attack bonus) and if so multiply attack by 1.5\r\n\t\t\t\t\tif typeAttack == pkmnParty1[self.u].getType()[i]:\r\n\t\t\t\t\t\tdamage = damage * 1.5\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tpass\r\n\r\n\t\t\t\tfor i in range(len(pkmnParty2[self.c].getWeakness())): # checks if attack is super effective\r\n\t\t\t\t\tif typeAttack == pkmnParty2[self.c].getWeakness()[i]:\r\n\t\t\t\t\t\tdamage = damage * 2\r\n\t\t\t\t\t\ttime.sleep(1)\r\n\t\t\t\t\t\tsupText = text(\"Its super effective\", 36)\r\n\t\t\t\t\t\tsupText.setPos((0, 600 - 45))\r\n\t\t\t\t\t\tmonkeyagg = 1\r\n\t\t\t\t\t\tif monkeyagg == 1: # make text appear for 3 seconds\r\n\t\t\t\t\t\t\tdisp.blit(supText.getText(), supText.getPos())\r\n\t\t\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\t\t\ttime.sleep(2.5)\r\n\t\t\t\t\t\t\tdisp.blit(bbox.getSurface(), bbox.getPos())\r\n\t\t\t\t\t\t\tmonkeyagg = 0\r\n\r\n\r\n\t\t\t\tfor i in range(len(pkmnParty2[self.c].getResistance())): # checks if attack is not very effective\r\n\t\t\t\t\tif typeAttack == pkmnParty2[self.c].getResistance()[i]:\r\n\t\t\t\t\t\tdamage = damage * 0.5\r\n\t\t\t\t\t\tnotText = text(\"Its not very effective\", 36)\r\n\t\t\t\t\t\tnotText.setPos((0, 600 - 45))\r\n\t\t\t\t\t\tmoremonkey = 1\r\n\t\t\t\t\t\tif moremonkey == 1: # make text appear for 3 seconds\r\n\t\t\t\t\t\t\tdisp.blit(notText.getText(), 
notText.getPos())\r\n\t\t\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\t\t\ttime.sleep(2.5)\r\n\t\t\t\t\t\t\tdisp.blit(bbox.getSurface(), bbox.getPos())\r\n\t\t\t\t\t\t\tmoremonkey = 0\r\n\r\n\t\t\t\tcrit = random.randint(1, 100)\r\n\t\t\t\tif crit > 96: # check for a critical hit (4.16 % chance)\r\n\t\t\t\t\tdamage = damage * 1.5\r\n\t\t\t\t\tcritText = text(\"A critical hit!\", 36)\r\n\t\t\t\t\tcritText.setPos((0, 600 - 45))\r\n\t\t\t\t\tmonkeymoremanmonkey = 1\r\n\t\t\t\t\tif monkeymoremanmonkey == 1: # make text appear for 3 seconds\r\n\t\t\t\t\t\tdisp.blit(critText.getText(), critText.getPos())\r\n\t\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\t\ttime.sleep(2.5)\r\n\t\t\t\t\t\tmonkeymoremanmonkey = 0\r\n\r\n\t\t\t\telse:\r\n\t\t\t\t\tpass\r\n\r\n\t\t\t\troll = random.randint(1, 25)\r\n\t\t\t\tdamage = damage - roll\r\n\r\n\t\t\t\tif self.immune == 1:\r\n\t\t\t\t\tdamage = 0\r\n\t\t\t\t\tself.immune = 0\r\n\r\n\t\t\t\t#print(\"damage given: \" + str(damage))\r\n\t\t\t\tdisp.blit(bbox.getSurface(), bbox.getPos())\r\n\t\t\t\tself.cpuHp = self.cpuHp - damage\r\n\t\t\t\tself.cpuHp = int(self.cpuHp)\r\n\t\t\t\tif self.cpuHp < 0:\r\n\t\t\t\t\tself.cpuHp = 0\r\n\r\n\t\t\t\t\tfaintText = text(str(pkmnParty2[self.c].getName()) + \" fainted\", 36)\r\n\t\t\t\t\tfaintText.setPos((0, 600 - 45))\r\n\t\t\t\t\tmonkeyman = 1\r\n\t\t\t\t\tif monkeyman == 1:\r\n\t\t\t\t\t\tdisp.blit(faintText.getText(), faintText.getPos())\r\n\t\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\t\ttime.sleep(2.5)\r\n\t\t\t\t\t\tmonkeyman = 0\r\n\r\n\t\t\t\t\tpkmnParty2.pop(self.c)\r\n\r\n\t\t\t\t\tself.cpuFaint = 1\r\n\t\t\t\t\t#print(\"final cpuHP: \" + str(self.cpuHp))\r\n\t\t\t\t\tif len(pkmnParty2) == 0:\r\n\t\t\t\t\t\tself.userWin = 1\r\n\t\t\t\t\t\tself.poke2 = 0\r\n\t\t\t\t\t\tself.poke1 = 0\r\n\t\t\t\t\t\treturn\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#print(\"mons fainted\")\r\n\t\t\t\t\t\tself.state = 0\r\n\t\t\t\t\t\tself.poke1 = 0\r\n\t\t\t\t\t\tself.poke2 = 1\r\n\t\t\t\t\t\treturn\r\n\r\n\t\t\t\t#print(\"final cpuHP: \" + str(self.cpuHp))\r\n\t\t\t\tself.poke1 = 0\r\n\t\t\t\tself.poke2 = 1\r\n\t\t\t\tself.turns += 1\r\n\t\t\t\treturn\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif self.poke2 == 1: ### CPUS TURN\r\n\t\t\t\t#print(self.cpuHp)\r\n\t\t\t\tcpuAttacks = pkmnParty2[self.c].getAttacks()\r\n\t\t\t\tcpuAskAttack = cpuAttacks[0]\r\n\r\n\t\t\t\tfor i in range(len(pkmnParty1[self.u].getWeakness())): # cpu checks if it has a super effective attack so it will use it\r\n\t\t\t\t\tfor j in range(len(cpuAttacks)):\r\n\t\t\t\t\t\tif cpuAttacks[j].getType() == pkmnParty1[self.u].getWeakness()[i]:\r\n\t\t\t\t\t\t\tcpuAskAttack = cpuAttacks[j]\r\n\t\t\t\t\t\t\t#print(cpuAskAttack)\r\n\r\n\t\t\t\tcpuTypeAttack = cpuAskAttack.getType()\r\n\t\t\t\tfor i in range(len(pkmnParty1[self.u].getImmunites())): # check for immunites\r\n\t\t\t\t\tif cpuAskAttack.getType() != pkmnParty1[self.u].getImmunites()[i]:\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tcpuAskAttack = cpuAttacks[1]\r\n\t\t\t\t\tif cpuTypeAttack == pkmnParty1[self.u].getImmunites()[i]:\r\n\t\t\t\t\t\timmText = text(\"It does not effect \" + str(pkmnParty1[self.u].getName()), 36)\r\n\t\t\t\t\t\timmText.setPos((0, 600-45))\r\n\t\t\t\t\t\tooga = 1\r\n\t\t\t\t\t\tif ooga == 1:\r\n\t\t\t\t\t\t\tdisp.blit(immText.getText(), immText.getPos())\r\n\t\t\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\t\t\ttime.sleep(2)\r\n\t\t\t\t\t\t\tdisp.blit(bbox.getSurface(), bbox.getPos())\r\n\t\t\t\t\t\t\tooga = 0\r\n\t\t\t\t\t\tself.immune = 1\r\n\t\t\t\t\t\tself.poke1 = 1\r\n\t\t\t\t\t\tself.poke2 = 
0\r\n\t\t\t\t\t\treturn\r\n\r\n\r\n\r\n\r\n\t\t\t\tcpuPkmnName = pkmnParty2[self.c].getName()\r\n\t\t\t\tcpuPkmnAttkName = cpuAskAttack.getName()\r\n\t\t\t\t# text stuff\r\n\t\t\t\tcpuAction = text(str(cpuPkmnName) + \" used \" + str(cpuPkmnAttkName), 36)\r\n\t\t\t\tcpuAction.setPos((0, 600 - 45))\r\n\t\t\t\tcpumonkey = 1\r\n\t\t\t\tif cpumonkey == 1: # make text appear for 3 seconds\r\n\t\t\t\t\tdisp.blit(cpuAction.getText(), cpuAction.getPos())\r\n\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\ttime.sleep(2)\r\n\t\t\t\t\tdisp.blit(bbox.getSurface(), bbox.getPos())\r\n\t\t\t\t\tcpumonkey = 0\r\n\r\n\t\t\t\tattTimer = 1\r\n\t\t\t\tif attTimer == 1: # make attack appear for 1 seconds\r\n\t\t\t\t\tdisp.blit(cpuAskAttack.getSurface(), cpuAskAttack.getPos())\r\n\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\ttime.sleep(1)\r\n\t\t\t\t\tattTimer = 0\r\n\r\n\t\t\t\tcpuTypeAttack = cpuAskAttack.getType() # get the type of attakc (rock, grounnd etc)\r\n\t\t\t\tcpupokeAccur = random.randint(1, 100) # choose random number\r\n\t\t\t\tif cpupokeAccur > cpuAskAttack.getAccuracy(): # check if move hits based on attack accuracy\r\n\t\t\t\t\tmissText = text(str(pkmnParty1[self.u].getName()) + \" avoided the attack\", 36)\r\n\t\t\t\t\tmissText.setPos((0, 600 - 45))\r\n\t\t\t\t\tbooga = 1\r\n\t\t\t\t\tif booga == 1:\r\n\t\t\t\t\t\tdisp.blit(missText.getText(), missText.getPos())\r\n\t\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\t\ttime.sleep(2)\r\n\t\t\t\t\t\tdisp.blit(bbox.getSurface(), bbox.getPos())\r\n\t\t\t\t\t\tbooga = 0\r\n\t\t\t\t\tself.poke1 = 1\r\n\t\t\t\t\tself.poke2 = 0\r\n\t\t\t\t\treturn\r\n\r\n\t\t\t\tcpuAttack = cpuAskAttack.getDamage() # damage of cpu's attack\r\n\r\n\t\t\t\tcpuAttackType = cpuAskAttack.getAttackType() # check if attack is physical or special\r\n\r\n\t\t\t\tif cpuAttackType == 1: # get the appropriate defense based on the attack type\r\n\t\t\t\t\tuserDef = pkmnParty1[self.u].getStats()[2]\r\n\t\t\t\telse:\r\n\t\t\t\t\tuserDef = pkmnParty1[self.u].getStats()[4]\r\n\r\n\t\t\t\tif cpuAttackType == 1: # get the appropriate attack stat based on teh attack type\r\n\t\t\t\t\tcpupkmnAttk = pkmnParty2[self.c].getStats()[1]\r\n\t\t\t\telse:\r\n\t\t\t\t\tcpupkmnAttk = pkmnParty2[self.c].getStats()[3]\r\n\r\n\t\t\t\t#print(\"inital user hp: \" + str(self.userHp))\r\n\r\n\t\t\t\t# damage calc\r\n\t\t\t\tcpuDamage = 2 * 50\r\n\t\t\t\tcpuDamage = cpuDamage / 5\r\n\t\t\t\tcpuDamage += 2\r\n\t\t\t\tcpuDamage = cpuDamage * cpuAttack\r\n\t\t\t\tcpuDamage = cpuDamage * cpupkmnAttk\r\n\t\t\t\tcpuDamage = cpuDamage / userDef\r\n\t\t\t\tcpuDamage = cpuDamage / 50\r\n\t\t\t\tcpuDamage += 2\r\n\r\n\t\t\t\tfor i in range(len(pkmnParty2[self.c].getType())): # checks if attack is STAB (same type attack bonus) and if so multiply attack by 1.5\r\n\t\t\t\t\tif cpuTypeAttack == pkmnParty2[self.c].getType()[i]:\r\n\t\t\t\t\t\tcpuDamage = cpuDamage * 1.5\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tpass\r\n\r\n\t\t\t\tfor i in range(len(pkmnParty1[self.u].getWeakness())): # checks if attack is super effective\r\n\t\t\t\t\tif cpuTypeAttack == pkmnParty1[self.u].getWeakness()[i]:\r\n\t\t\t\t\t\tcpuDamage = cpuDamage * 2\r\n\t\t\t\t\t\tsupText = text(\"Its super effective\", 36)\r\n\t\t\t\t\t\tsupText.setPos((0, 600 - 45))\r\n\t\t\t\t\t\tmonkeyagg = 1\r\n\t\t\t\t\t\tif monkeyagg == 1: # make text appear for 3 seconds\r\n\t\t\t\t\t\t\tdisp.blit(supText.getText(), supText.getPos())\r\n\t\t\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\t\t\ttime.sleep(2.5)\r\n\t\t\t\t\t\t\tdisp.blit(bbox.getSurface(), 
bbox.getPos())\r\n\t\t\t\t\t\t\tmonkeyagg = 0\r\n\r\n\t\t\t\tfor i in range(len(pkmnParty1[self.u].getResistance())): # checks if attack is not very effective\r\n\t\t\t\t\tif cpuTypeAttack == pkmnParty1[self.u].getResistance()[i]:\r\n\t\t\t\t\t\tcpuDamage = cpuDamage * 0.5\r\n\t\t\t\t\t\tnotText = text(\"Its not very effective\", 36)\r\n\t\t\t\t\t\tnotText.setPos((0, 600 - 45))\r\n\t\t\t\t\t\tmoremonkey = 1\r\n\t\t\t\t\t\tif moremonkey == 1: # make text appear for 3 seconds\r\n\t\t\t\t\t\t\tdisp.blit(notText.getText(), notText.getPos())\r\n\t\t\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\t\t\ttime.sleep(2.5)\r\n\t\t\t\t\t\t\tdisp.blit(bbox.getSurface(), bbox.getPos())\r\n\t\t\t\t\t\t\tmoremonkey = 0\r\n\r\n\t\t\t\tcpucrit = random.randint(1, 100)\r\n\t\t\t\tif cpucrit > 96: # check for a critical hit (4.16 % chance)\r\n\t\t\t\t\tcpuDamage = cpuDamage * 1.5\r\n\t\t\t\t\tcritText = text(\"A critical hit!\", 36)\r\n\t\t\t\t\tcritText.setPos((0, 600 - 45))\r\n\t\t\t\t\tmonkeymoremanmonkey = 1\r\n\t\t\t\t\tif monkeymoremanmonkey == 1: # make text appear for 3 seconds\r\n\t\t\t\t\t\tdisp.blit(critText.getText(), critText.getPos())\r\n\t\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\t\ttime.sleep(2.5)\r\n\t\t\t\t\t\tmonkeymoremanmonkey = 0\r\n\t\t\t\telse:\r\n\t\t\t\t\tpass\r\n\r\n\t\t\t\tif self.immune == 1:\r\n\t\t\t\t\tcpuDamage = 0\r\n\t\t\t\t\tself.immune = 0\r\n\t\t\t\t#print(\"damage given from cpu: \" + str(cpuDamage))\r\n\r\n\t\t\t\tself.userHp = self.userHp - cpuDamage\r\n\t\t\t\tself.userHp = int(self.userHp)\r\n\t\t\t\t#print(\"final user Hp: \" + str(self.userHp))\r\n\t\t\t\tdisp.blit(bbox.getSurface(), bbox.getPos())\r\n\r\n\r\n\t\t\t\tif self.userHp < 0:\r\n\t\t\t\t\tself.userHp = 0\r\n\t\t\t\t\tfaintText = text(str(pkmnParty1[self.u].getName()) + \" fainted\", 36)\r\n\t\t\t\t\tfaintText.setPos((0, 600 - 45))\r\n\t\t\t\t\tmonkeyman = 1\r\n\t\t\t\t\tif monkeyman == 1:\r\n\t\t\t\t\t\tdisp.blit(faintText.getText(), faintText.getPos())\r\n\t\t\t\t\t\tpygame.display.flip()\r\n\t\t\t\t\t\ttime.sleep(2.5)\r\n\t\t\t\t\t\tmonkeyman = 0\r\n\t\t\t\t\tpkmnParty1.pop(self.u)\r\n\t\t\t\t\t#print(\"monkey faint activated\")\r\n\t\t\t\t\tself.userFaint = 1\r\n\t\t\t\t\t#print(self.userFaint)\r\n\t\t\t\t\t#print(\"final user Hp: \" + str(self.userHp))\r\n\t\t\t\t\tif len(pkmnParty1) == 0:\r\n\t\t\t\t\t\t#print('monkey')\r\n\t\t\t\t\t\tself.poke2 = 0\r\n\t\t\t\t\t\tself.poke1 = 0\r\n\t\t\t\t\t\tself.cpuWin = 1\r\n\t\t\t\t\t\treturn\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#print(\"mons fainted\")\r\n\t\t\t\t\t\tself.state = 0\r\n\t\t\t\t\t\tself.poke1 = 0\r\n\t\t\t\t\t\tself.poke2 = 0\r\n\t\t\t\t\t\tprint(self.state)\r\n\t\t\t\t\t\treturn\r\n\r\n\t\t\t\tself.poke2 = 0\r\n\t\t\t\tself.poke1 = 1\r\n\t\t\t\tself.turns += 1\r\n\t\t\t\treturn\r\n\r\n\t\t\tif self.cpuWin == 1:\r\n\t\t\t\t#print(\"You ded cpu win\")\r\n\t\t\t\tself.end = 2\r\n\r\n\t\t\tif self.userWin == 1:\r\n\t\t\t\t#print(\"you killed pokeman woo yay\")\r\n\t\t\t\tself.end = 1\r\n\r\n\t\treturn self.end\r\n\r\nclass Select: # chose a party of 3 pokemon\r\n\tdef __init__(self):\r\n\t\tself.bp = [False, False, False, False, False, False]\r\n\t\tself.selection = []\r\n\r\n\tdef choseMon(self, key):\r\n\t\tif len(self.selection) == 3:\r\n\t\t\treturn self.selection\r\n\r\n\t\tif key[pygame.K_1] and not self.bp[0]: # makes sure you dont press same button twice\r\n\t\t\tself.bp[0] = True\r\n\t\t\tself.selection.append(0) # add the index of pokemon\r\n\r\n\t\tif key[pygame.K_2] and not self.bp[1]:\r\n\t\t\tself.bp[1] = 
True\r\n\t\t\tself.selection.append(1)\r\n\r\n\t\tif key[pygame.K_3] and not self.bp[2]:\r\n\t\t\tself.bp[2] = True\r\n\t\t\tself.selection.append(2)\r\n\r\n\t\tif key[pygame.K_4] and not self.bp[3]:\r\n\t\t\tself.bp[3] = True\r\n\t\t\tself.selection.append(3)\r\n\r\n\t\tif key[pygame.K_5] and not self.bp[4]:\r\n\t\t\tself.bp[4] = True\r\n\t\t\tself.selection.append(4)\r\n\r\n\t\tif key[pygame.K_6] and not self.bp[5]:\r\n\t\t\tself.bp[5] = True\r\n\t\t\tself.selection.append(5)\r\n\r\n\t\treturn [0]","repo_name":"Nikhil6767/Games","sub_path":"Pokemon_Project/Classes.py","file_name":"Classes.py","file_ext":"py","file_size_in_byte":22249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25012508375","text":"class PlayerCharacter:\n    Membership=True\n    def __init__(self,name,age):\n        if PlayerCharacter.Membership:\n            self.name=name #Attributes\n            self.age=age #Attributes\n\n    def Shout(self): #Methods\n        print(f\"My name is {self.name}\")\n\n\nplayer1=PlayerCharacter(\"Aditya\",27)\n\nprint(player1.name,player1.age)\n\nplayer1.Shout()","repo_name":"Aditya-A-Pardeshi/Coding-Hands-On","sub_path":"4 Python_Programs/Programs to demo python concepts/oop4.py","file_name":"oop4.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"75087011606","text":"import math\nimport numpy as np\nfrom typing import Sequence\nfrom qib.lattice import AbstractLattice\nfrom qib.lattice.shifted_lattice_convention import ShiftedLatticeConvention\n\n\nclass BrickLattice(AbstractLattice):\n    \"\"\"\n    Brick lattice.\n    The lattice has n full rectangles per row and m full rectangles per column.\n    \"\"\"\n    def __init__(self, shape: Sequence[int], pbc=False, delete=False, convention: ShiftedLatticeConvention=ShiftedLatticeConvention.COLS_SHIFTED_UP):\n        if len(shape) != 2:\n            raise NotImplementedError(f\"Brick lattices require 2 dimensions, {len(shape)} were given\")\n        self.shape = tuple(shape)\n        self.convention = convention\n        self.shape_square = self._shape_square\n        self.nsites_square = self._nsites_square\n        self.delete = delete\n        if pbc is True:\n            # TODO: add pbc in adjacency matrix\n            raise NotImplementedError(\"The brick lattice doesn't hold periodic boundary conditions yet\")\n        self.pbc = pbc\n\n    @property\n    def nsites(self) -> int:\n        \"\"\"\n        Number of lattice sites.\n        If delete=False, it includes the 2 extra points if they are needed.\n        \"\"\"\n        if not self.delete and ((self.convention == ShiftedLatticeConvention.COLS_SHIFTED_UP and self.shape[1]>1) or (self.convention == ShiftedLatticeConvention.ROWS_SHIFTED_LEFT and self.shape[0]>1)):\n            return 2*self.shape[0]*self.shape[1] + 2*(self.shape[0]+self.shape[1]) + 2\n        else:\n            return 2*self.shape[0]*self.shape[1] + 2*(self.shape[0]+self.shape[1])\n\n    @property\n    def ndim(self) -> int:\n        \"\"\"\n        Number of spatial dimensions.\n        \"\"\"\n        return len(self.shape)
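\n\n    # Illustrative example (sketch): for shape=(2, 3) with the default\n    # COLS_SHIFTED_UP convention and delete=False,\n    #   _shape_square  = (2*2 + 2, 3 + 1) = (6, 4)  ->  _nsites_square = 24\n    #   nsites         = 2*2*3 + 2*(2 + 3) + 2      = 24\n    # The two counts agree by construction: (2n + 2)*(m + 1) = 2nm + 2*(n + m) + 2.\n\n    @property\n    def _shape_square(self) -> tuple:\n        \"\"\"\n        Shape of the equivalent square lattice.\n        Includes the 2 extra points.\n        \"\"\"\n        if self.convention == ShiftedLatticeConvention.COLS_SHIFTED_UP:\n            if self.shape[1]>1:\n                nrows_square = 2*self.shape[0]+2\n            else:\n                nrows_square = 2*self.shape[0]+1\n            ncols_square = self.shape[1]+1\n        if self.convention == ShiftedLatticeConvention.ROWS_SHIFTED_LEFT:\n            if self.shape[0]>1:\n                ncols_square = 2*self.shape[1]+2\n            else:\n                ncols_square = 2*self.shape[1]+1\n            nrows_square = self.shape[0]+1\n        return 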
(nrows_square,ncols_square)\n\n @property\n def _nsites_square(self) -> int:\n \"\"\"\n Number of lattice sites in the equivalent square lattice.\n Includes the 2 extra points.\n \"\"\"\n return self.shape_square[0]*self.shape_square[1]\n\n def adjacency_matrix(self):\n \"\"\"\n Construct the adjacency matrix, indicating nearest neighbors.\n Brick lattice embedded in a square grid::\n\n _ _ _\n | |_| |_| |\n |_| |_| |_|\n . |_| |_| .\n\n If delete == True, the 2 extra points are eliminated from the adjacency matrix.\n Otherwise, they are just disconnected (corresponding rows and columns are 0)\n \"\"\"\n # An equivalent square graph is built.\n if self.convention == ShiftedLatticeConvention.COLS_SHIFTED_UP:\n d_square = 0\n parity_shift_condition = (self.shape[1]%2 == 1)\n\n if self.convention == ShiftedLatticeConvention.ROWS_SHIFTED_LEFT:\n d_square = 1\n parity_shift_condition = (self.shape[0]>1)\n\n adj = np.zeros((self.nsites_square, self.nsites_square), dtype=int)\n idx = np.arange(self.nsites_square).reshape(self.shape_square)\n # the y axis for COLS_SHIFTED_UP and x axis for ROWS_SHIFTED_LEFT are treated like the square graph case.\n # the other axis only has half of the connections.\n for d in range(self.ndim):\n for s in [-1, 1]:\n ids = np.roll(idx, s, axis=d)\n # single out axis `d`\n seld = (math.prod(self.shape_square[:d]), self.shape_square[d], math.prod(self.shape_square[d+1:]))\n idx_cut = idx.reshape(seld)\n ids_cut = ids.reshape(seld)\n if s == 1:\n idx_cut = idx_cut[:, 1:, :]\n ids_cut = ids_cut[:, 1:, :]\n elif s == -1:\n idx_cut = idx_cut[:, :-1, :]\n ids_cut = ids_cut[:, :-1, :]\n else:\n assert False\n if d == d_square:\n for (i, j) in zip(idx_cut.reshape(-1), ids_cut.reshape(-1)):\n adj[i, j] = 1\n else:\n for (i, j) in zip(idx_cut.reshape(-1), ids_cut.reshape(-1)):\n if parity_shift_condition:\n if (s == -1 and (i+i//self.shape_square[1])%2 == 0) or (s == 1 and (i+i//self.shape_square[1])%2 == 1):\n adj[i, j] = 1\n else:\n if (s == -1 and i%2 == 0) or (s == 1 and i%2 == 1):\n adj[i, j] = 1\n if self.delete:\n adj = self._delete_extra_points(adj)\n else:\n adj = self._disconnect_extra_points(adj)\n return adj\n\n def _delete_extra_points(self, adj):\n \"\"\"\n Deletes the 2 extra points from the adjacency matrix.\n \"\"\"\n if self.convention == ShiftedLatticeConvention.COLS_SHIFTED_UP and self.shape[1]>1:\n adj = np.delete(adj, (self.shape_square[0]-1)*self.shape_square[1], 0)\n adj = np.delete(adj, (self.shape_square[0]-1)*self.shape_square[1], 1)\n if self.shape_square[1]%2 == 0:\n adj = np.delete(adj, -1, 0)\n adj = np.delete(adj, -1, 1)\n else:\n adj = np.delete(adj, self.shape_square[1]-1, 0)\n adj = np.delete(adj, self.shape_square[1]-1, 1)\n if self.convention == ShiftedLatticeConvention.ROWS_SHIFTED_LEFT and self.shape[0]>1:\n adj = np.delete(adj, self.shape_square[1]-1, 0)\n adj = np.delete(adj, self.shape_square[1]-1, 1)\n if self.shape_square[0]%2 == 1:\n adj = np.delete(adj, (self.shape_square[0]-1)*self.shape_square[1]-1, 0)\n adj = np.delete(adj, (self.shape_square[0]-1)*self.shape_square[1]-1, 1)\n else:\n adj = np.delete(adj, -1, 0)\n adj = np.delete(adj, -1, 1)\n\n return adj\n\n def _disconnect_extra_points(self, adj):\n \"\"\"\n Disconnects the 2 extra points from the adjacency matrix.\n They are still counted in, but are not connected anymore to the rest of the lattice.\n \"\"\"\n if self.convention == ShiftedLatticeConvention.COLS_SHIFTED_UP and self.shape[1]>1:\n adj[(self.shape_square[0]-1)*self.shape_square[1], :] = 0\n adj[:, 
(self.shape_square[0]-1)*self.shape_square[1]] = 0\n if self.shape_square[1]%2 == 0:\n adj[-1, :] = 0\n adj[:, -1] = 0\n else:\n adj[self.shape_square[1]-1, :] = 0\n adj[:, self.shape_square[1]-1] = 0\n if self.convention == ShiftedLatticeConvention.ROWS_SHIFTED_LEFT and self.shape[0]>1:\n adj[self.shape_square[1]-1, :] = 0\n adj[:, self.shape_square[1]-1] = 0\n if self.shape_square[0]%2 == 1:\n adj[(self.shape_square[0]-1)*self.shape_square[1], :] = 0\n adj[:, (self.shape_square[0]-1)*self.shape_square[1]] = 0\n else:\n adj[-1, :] = 0\n adj[:, -1] = 0\n\n return adj\n\n def index_to_coord(self, i: int) -> tuple:\n \"\"\"\n Map linear index to the equivalent square lattice coordinate.\n If self.delete=True the two extra points of the equivalent square lattice are not counted in.\n \"\"\"\n shift = 0\n if self.delete:\n assert i < self.nsites\n if self.convention == ShiftedLatticeConvention.COLS_SHIFTED_UP and self.shape[1] > 1:\n if i >= self.shape_square[1]-1 and self.shape[1]%2 == 0:\n shift += 1\n if i >= (self.shape_square[0]-1)*self.shape_square[1]-shift:\n shift += 1\n if self.convention == ShiftedLatticeConvention.ROWS_SHIFTED_LEFT and self.shape[0] > 1:\n if i >= self.shape_square[1]-1:\n shift += 1\n if i >= (self.shape_square[0]-1)*self.shape_square[1]-shift and self.shape[0]%2 == 0:\n shift += 1\n return np.unravel_index((i+shift), self.shape_square)\n\n def coord_to_index(self, c) -> int:\n \"\"\"\n Map lattice coordinate to the equivalent square lattice coordinate.\n If delete=True the two extra points of the equivalent square lattice are not counted in.\n \"\"\"\n shift = 0\n if self.delete:\n if self.convention == ShiftedLatticeConvention.COLS_SHIFTED_UP and self.shape[1] > 1:\n # even and odd columns specific cases\n if self.shape[1]%2 == 0:\n if c[0] == 0 and c[1] == self.shape_square[1]-1:\n return None\n elif c[0] > 0:\n shift += 1\n else:\n if c[0] == self.shape_square[0]-1 and c[1] == self.shape_square[1]-1:\n return None\n # common shift for even and odd cases\n if c[0] == self.shape_square[0]-1:\n if c[1] == 0:\n return None\n else:\n shift += 1\n if self.convention == ShiftedLatticeConvention.ROWS_SHIFTED_LEFT and self.shape[0] > 1:\n # even and odd columns specific cases\n if self.shape[0]%2 == 0:\n if c[0] == self.shape_square[0]-1:\n if c[1] == 0:\n return None\n else:\n shift += 1\n else:\n if c[0] == self.shape_square[0]-1 and c[1] == self.shape_square[1]-1:\n return None\n # common shift for even and odd cases\n if c[0] == 0:\n if c[1] == self.shape_square[1]-1:\n return None\n else:\n shift += 1\n return int(np.ravel_multi_index(c, self.shape_square)) - shift\n","repo_name":"qc-tum/qib","sub_path":"src/qib/lattice/brick_lattice.py","file_name":"brick_lattice.py","file_ext":"py","file_size_in_byte":10550,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"31"} +{"seq_id":"71348820568","text":"def reverse(number):\n\trevnumber = str(number)\n\trevnumber = int(revnumber[::-1])\n\treturn revnumber\n\ndef allodd(number):\n\tnumber = str(number)\n\tif '2' in number or '4' in number or '6' in number or '8' in number:\n\t\treturn False\n\telse:\n\t\treturn True\n\n\ncount = 0\n\nfor n in xrange(1,1000000000):\n\tif allodd(n+reverse(n)):\n\t\tcount +=1\n\t","repo_name":"arshaver/Project-Euler","sub_path":"euler145.py","file_name":"euler145.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"37404316257","text":"import 
unittest\nimport math\n\n\nclass TriangleNotValidArgumentException(Exception):\n    pass\n\n\nclass TriangleNotExistException(Exception):\n    pass\n\n\nclass Triangle:\n\n    def __init__(self, coordinates):\n        if self._valid_arguments(coordinates):\n            if self._valid_triangle(coordinates):\n                self.a, self.b, self.c = [x for x in coordinates]\n\n    def _valid_triangle(self, coordinates):\n        if 2 * max(coordinates) >= sum(coordinates) or sum([x for x in coordinates if x > 0]) != sum(coordinates):\n            raise TriangleNotExistException(\"Can't create a triangle with these arguments\")\n        else:\n            return True\n\n    def _valid_arguments(self, coordinates):\n        try:\n            if type(coordinates) != tuple or len(coordinates) != 3 or sum([x for x in coordinates if isinstance(x, (int,float))]) != sum(coordinates):\n                raise TriangleNotValidArgumentException(\"Not valid arguments\")\n            else:\n                return True\n        except TypeError:\n            raise TriangleNotValidArgumentException(\"Not valid arguments\")\n\n    def get_area(self):\n        perimetr = (self.a+self.b+self.c)/2\n        S = math.sqrt(perimetr * (perimetr - self.a) * (perimetr - self.b) * (perimetr - self.c))\n        return S\n\n\nclass TriangleTest(unittest.TestCase):\n\n    def test_triangle(self):\n        self.assertEqual(Triangle((3, 4, 5)).get_area(), 6.0)\n\n    def test_not_valid_arguments(self):\n        with self.assertRaises(TriangleNotValidArgumentException):\n            Triangle((2,3))\n\n    def test_not_valid_triangle(self):\n        with self.assertRaises(TriangleNotExistException):\n            Triangle((1,2,3))\n\n\n\n\n\n\nvalid_test_data = [\n    (3, 4, 5),\n    (26, 25, 3),\n    (30, 29, 5),\n    (87, 55, 34),\n    (120, 109, 13),\n    (123, 122, 5)\n]\nfor data in valid_test_data:\n    print(Triangle(data).get_area())\n\nnot_valid_triangle = [\n    (1, 2, 3),\n    (1, 1, 2),\n    (7, 7, 15),\n    (100, 7, 90),\n    (17, 18, 35),\n    (127, 17, 33),\n    (145, 166, 700),\n    (1000, 2000, 1),\n    (717, 17, 7),\n    (0, 7, 7),\n    (-7, 7, 7)\n]\nfor data in not_valid_triangle:\n    try:\n        Triangle(data)\n\n    except TriangleNotExistException as e:\n        print(e)\n#\n# not_valid_arguments = [\n# ('3', 4, 5),\n# ('a', 2, 3),\n# 'string',\n# (7, 2),\n# (7, 7, 7, 7),\n# 10\n# ]\n# for data in not_valid_arguments:\n# try:\n# Triangle(data)\n# except TriangleNotValidArgumentException as e:\n# print(e)\n","repo_name":"ignat24/PythonExamples","sub_path":"unittest.py","file_name":"unittest.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17106205720","text":"from LinkedList import LinkedListNode\r\nfrom LinkedList import LinkedList\r\n\r\n\r\ndef partition(head, x):\r\n    before = before_head = LinkedListNode(0)\r\n    after = after_head = LinkedListNode(0)\r\n    while head:\r\n        if head.val < x:\r\n            before.next = head\r\n            before = before.next\r\n        else:\r\n            after.next = head\r\n            after = after.next\r\n        head = head.next\r\n    after.next = None\r\n    before.next = after_head.next\r\n    return before_head.next\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    lList = LinkedList([1, 4, 3, 2, 5, 2])\r\n    partition(lList.head, 3)\r\n    lList.printNode()\r\n","repo_name":"xiaolinangela/cracking-the-coding-interview-soln","sub_path":"Ch2-LinkedLists/2.4-Partition.py","file_name":"2.4-Partition.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"69874272088","text":"import argparse\nimport os\nfrom tqdm import tqdm\nimport numpy as np\nfrom collections import defaultdict\nimport cv2\nimport glob
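\n\n\n# Illustrative sanity check (sketch): for a 1x2 frame whose ground-truth pixel\n# (255, 255, 255) is foreground and (0, 0, 0) is background, while the generated\n# mask marks both pixels as foreground, calculate_metrics below yields\n# iou == 0.5, precision == 0.5 and recall == 1.0 at threshold=0:\n#\n#     gen = np.full((1, 2), 255, dtype=np.uint8)\n#     gt = np.zeros((1, 2, 3), dtype=np.uint8)\n#     gt[0, 0] = 255\n#     calculate_metrics(gen, gt)['iou']  # 0.5\ndef 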
calculate_metrics(gen_mask, gt_mask, threshold=0, skip_undetected=False):\n uncertain_zone = np.all(gt_mask == 128, axis=2)\n gt_human_mask = np.any(gt_mask > 0, axis=2) & (~uncertain_zone)\n\n gen_mask_bin = gen_mask > threshold\n gen_mask_bin = gen_mask_bin & (~uncertain_zone)\n\n if gt_human_mask.sum() == 0:\n return defaultdict(lambda: np.nan)\n if skip_undetected and gen_mask_bin.sum() == 0:\n return defaultdict(lambda: np.nan)\n\n overlap_sum = (gen_mask_bin & gt_human_mask).sum()\n union_sum = (gen_mask_bin | gt_human_mask).sum()\n\n gen_mask_soft = gen_mask / 255.0 * (~uncertain_zone).astype(int)\n overlap_sum_soft = (gen_mask_soft * gt_human_mask.astype(int)).sum()\n union_sum_soft = (gen_mask_soft + gt_human_mask.astype(int)).clip(0, 1).sum()\n\n metrics = dict()\n if overlap_sum == 0:\n return defaultdict(lambda: np.nan)\n metrics['iou'] = overlap_sum / union_sum\n metrics['f1'] = 2 * overlap_sum / (union_sum + overlap_sum)\n metrics['precision'] = overlap_sum / gen_mask_bin.sum()\n metrics['recall'] = overlap_sum / gt_human_mask.sum()\n\n metrics['iou_soft'] = overlap_sum_soft / union_sum_soft\n metrics['f1_soft'] = 2 * overlap_sum_soft / (union_sum_soft + overlap_sum_soft)\n # metrics['precision'] = overlap_sum_soft / gen_mask_soft.sum()\n # metrics['recall'] = overlap_sum_soft / gt_human_mask.sum()\n\n return metrics\n\n\ndef eval_dir(gen_masks_dir, gt_masks_dir, threshold=64):\n metric_list = defaultdict(list)\n\n if not os.path.isdir(gen_masks_dir):\n print(f'No directory {gen_masks_dir}')\n return metric_list\n gen_masks_paths = [\n os.path.join(gen_masks_dir, f) for f in os.listdir(gen_masks_dir) if f.endswith('.png')\n ]\n assert len(os.listdir(gt_masks_dir)) >= len(\n gen_masks_paths\n ), f'More generated masks than gt: {len(gen_masks_paths)} vs {gt_masks_dir}'\n\n for gen_mask_path in gen_masks_paths:\n gen_mask = cv2.imread(gen_mask_path, 0)\n assert gen_mask is not None, gen_mask_path\n\n gt_path = os.path.join(gt_masks_dir, os.path.basename(gen_mask_path).replace('_img.png', '.png'))\n gt_mask = cv2.imread(gt_path)\n assert gt_mask is not None, gt_path\n assert gen_mask.shape == gt_mask.shape[:2], (gen_mask.shape, gt_mask.shape)\n\n metrics = calculate_metrics(gen_mask, gt_mask, threshold=threshold)\n for key, value in metrics.items():\n metric_list[key].append(value)\n\n return metric_list\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'gen_mask_dirnames', help='Names of folders with generated masks separated by a comma'\n )\n parser.add_argument('gt_mask_dirname', help='Folder name with ground truth masks')\n parser.add_argument('parent_dir', help='Folder name with ground truth masks')\n parser.add_argument('--csv_dir', help='Where to write results as .csv for each metric')\n args = parser.parse_args()\n\n os.makedirs(args.csv_dir, exist_ok=True)\n assert os.path.isdir(args.parent_dir), f'Not a directory: {args.parent_dir}'\n gen_mask_dirnames = [d.strip() for d in args.gen_mask_dirnames.split(',')]\n\n vid_names = sorted(\n [d for d in os.listdir(args.parent_dir) if os.path.isdir(os.path.join(args.parent_dir, d))]\n )\n metric_rows = defaultdict(lambda: [[''] + vid_names + ['Mean']])\n\n for method in tqdm(gen_mask_dirnames):\n tqdm.write(f'Method in progress: {method}')\n metric_lists = defaultdict(lambda: [method])\n for dir_name in vid_names:\n dir_metrics = eval_dir(\n os.path.join(args.parent_dir, dir_name, method),\n os.path.join(args.parent_dir, dir_name, args.gt_mask_dirname),\n )\n for key in 
set(metric_lists.keys()).union(dir_metrics.keys()):\n metric_lists[key].append(\n np.nanmean(dir_metrics[key]) if len(dir_metrics[key]) > 0 else 0.0\n )\n metric_means = {key: np.nanmean(values[1:]) for key, values in metric_lists.items()}\n for metric in metric_lists:\n metric_rows[metric].append(metric_lists[metric] + [metric_means[metric]])\n tqdm.write(f'Finished!\\n-------------')\n for metric, rows in metric_rows.items():\n np.savetxt(\n os.path.join(args.csv_dir, f'{metric}.csv'),\n np.array(rows).astype(str),\n fmt='%s',\n delimiter=',',\n )\n","repo_name":"EgorNemchinov/HumanBGSegmentation","sub_path":"scripts/eval_dir.py","file_name":"eval_dir.py","file_ext":"py","file_size_in_byte":4609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31714013910","text":"import cv2\nimport numpy as np\n\ncap = cv2.VideoCapture(0)\n\ncv2.namedWindow('configure MinMax HSV')\n\ndef nothing(x):\n pass\n\nerode_dialate_kernel = np.ones((5,5), np.uint8)\nHSV_minmax = ['HMin','SMin','VMin','HMax','SMax','VMax']\nvalue = [179,255,255,179,255,255]\n\nfor string,val in zip(HSV_minmax,value):\n cv2.createTrackbar(string, 'configure MinMax HSV', 0, val,nothing)\n \ncv2.setTrackbarPos('HMax', 'configure MinMax HSV', 179)\ncv2.setTrackbarPos('SMax', 'configure MinMax HSV', 255)\ncv2.setTrackbarPos('VMax', 'configure MinMax HSV', 255)\n\n# Initialize HSV min/max values\nhMin = sMin = vMin = hMax = sMax = vMax = 0\n\n\nwhile True:\n _,frame = cap.read()\n frame = cv2.resize(frame,(650,500))\n blurred = cv2.GaussianBlur(frame, (15,15),0)\n hsv = cv2.cvtColor(blurred,cv2.COLOR_BGR2HSV)\n\n hMin = cv2.getTrackbarPos('HMin', 'configure MinMax HSV')\n sMin = cv2.getTrackbarPos('SMin', 'configure MinMax HSV')\n vMin = cv2.getTrackbarPos('VMin', 'configure MinMax HSV')\n hMax = cv2.getTrackbarPos('HMax', 'configure MinMax HSV')\n sMax = cv2.getTrackbarPos('SMax', 'configure MinMax HSV')\n vMax = cv2.getTrackbarPos('VMax', 'configure MinMax HSV')\n \n lower = np.array([hMin, sMin, vMin])\n upper = np.array([hMax, sMax, vMax])\n \n mask = cv2.inRange(hsv, lower,upper)\n mask = cv2.erode(mask,erode_dialate_kernel,iterations=2)\n mask = cv2.dilate(mask,erode_dialate_kernel,iterations=2)\n\n \n #cv2.imshow(\"main frame\",frame)\n cv2.imshow(\"mask\",mask)\n cv2.imshow(\"blur\",blurred)\n\n if cv2.waitKey(1) & 0xff == ord(\"q\"):\n break\n\n \nprint(lower,upper)\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"Thehunk1206/Color-detecting-and-tracking","sub_path":"get_HSV_for_color.py","file_name":"get_HSV_for_color.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"4327000085","text":"import base\n\nclass CephGeneralStatsPlugin(base.Base):\n\tdef __init__(self,cluster,cache,timestamp,c,k):\n\t\t#initialise\n\t\tbase.Base.__init__(self,cluster,cache,timestamp,c,k)\n\n\tdef gather_metrics(self):\n\t\t'''\n\t\tSubclassed method of base.py which is called by loader.py\n\t\tReturns array of points collected by the plugin\n\t\t'''\n\t\tself.logger.info('Gathering metrics')\n\t\tpoints=[]\n\n\t\ttry:\n\t\t\tpoints.extend(self.get_storage_stats())\n\t\t\tpoints.extend(self.get_quorum_stats())\n\t\texcept Exception as e:\n\t\t\tself.logger.error('Did not manage to get general stats: {0}'.format(e))\n\n\t\treturn points\n\n\tdef get_storage_stats(self):\n\t\t'''\n\t\tCollects storage metrics about the cluster.\n\t\tReturns array of points, formatted into the 
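InfluxDB line protocol format.\n\t\tmetrics collected:\n\t\t\t-total_bytes\n\t\t\t-total_used_bytes\n\t\t\t-total_avail_bytes\n\t\t'''\n\t\t# Illustrative example (sketch): with hypothetical totals total_bytes=1000 and\n\t\t# total_used_bytes=250, this emits one point per stat, tagged\n\t\t# {'type':'general','metric':stat} with field {'value':statValue},\n\t\t# plus percentage_space_used == (250.0/1000.0)*100 == 25.0.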
format of the line protocol\n\t\tmetrics collected:\n\t\t\t-total_bytes\n\t\t\t-total_used_bytes\n\t\t\t-total_avail_bytes\n\t\t'''\n\t\tself.logger.info('Getting storage metrics')\n\t\t#run ceph command\n\t\toutput = self.execute_command(True,'ceph','df','--format','json')\n\n\t\tif output is None:\n\t\t\tself.logger.error('No output received from \"ceph df --format json\"')\n\t\t\treturn []\n\n\t\tpoints = []\n\n\t\tclusterStats = output['stats']\n\t\tfor stat in ('total_bytes', 'total_used_bytes', 'total_avail_bytes'):\n\t\t\tstatValue = clusterStats.get(stat, 0)\n\t\t\tpoints.append(self.create_measurement(\n\t\t\t\t{'type':'general','metric':stat},\n\t\t\t\t{'value':statValue}))\n\n\t\t#calculate percentage space used\n\t\tpercentage = (float(clusterStats['total_used_bytes'])/float(clusterStats['total_bytes']))*100\n\t\tpoints.append(self.create_measurement(\n\t\t\t{'type':'general','metric':'percentage_space_used'},\n\t\t\t{'value':percentage}))\n\n\t\treturn points\n\n\n\tdef get_quorum_stats(self):\n\t\t'''\n\t\tCollects monitor quorum metrics about the cluster.\n\t\tReturns array of points, formatted into the format of the line protocol\n\t\tmetrics collected:\n\t\t\t-mons_up\n\t\t\t-quorum\n\t\t\t-ratio_in_quorum\n\t\t'''\n\t\tself.logger.info('Getting quorum metrics')\n\t\t#run ceph command\n\t\toutput = self.execute_command(True,'ceph','mon','dump','--format','json')\n\n\t\tif output is None:\n\t\t\tself.logger.error('No output received from \"ceph mon dump --format json\"')\n\t\t\treturn []\n\n\t\tpoints=[]\n\t\tmonNum= len(output['mons'])\n\t\tquorum= len(output['quorum'])\n\t\t#cast to float so the ratio is not truncated by integer division\n\t\tpercentage = (float(quorum)/monNum)*100\n\t\t#get number of monitors\n\t\tpoints.append(self.create_measurement({'type':'general','metric':'mons_up'},{'value':monNum}))\n\t\t#get number of monitors in quorum\n\t\tpoints.append(self.create_measurement({'type':'general','metric':'quorum'},{'value':quorum}))\n\t\t#get percentage\n\t\tpoints.append(self.create_measurement({'type':'general','metric':'ratio_in_quorum'},{'value':percentage}))\n\n\t\treturn points\n\n","repo_name":"stfc/ceph-InfluxDB-metricsCollector","sub_path":"plugins/ceph_general_metrics_plugin.py","file_name":"ceph_general_metrics_plugin.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"}\n{"seq_id":"40506905769","text":"import os\nimport sys\nimport unittest\nimport warnings\nfrom contextlib import redirect_stderr, redirect_stdout\nfrom io import StringIO\nfrom timeit import Timer\nfrom typing import Any, Callable, Dict, List, Optional, Union\n\nimport numpy\nfrom numpy.testing import assert_allclose\n\n\ndef unit_test_going():\n    \"\"\"\n    Tells whether the script is running under unit tests.\n    Avoids unit tests being very long.\n    \"\"\"\n    going = int(os.environ.get(\"UNITTEST_GOING\", 0))\n    return going == 1\n\n\ndef ignore_warnings(warns: List[Warning]) -> Callable:\n    \"\"\"\n    Catches warnings.\n\n    :param warns: warnings to ignore\n    \"\"\"\n\n    def wrapper(fct):\n        if warns is None:\n            raise AssertionError(f\"warns cannot be None for '{fct}'.\")\n\n        def call_f(self):\n            with warnings.catch_warnings():\n                warnings.simplefilter(\"ignore\", warns)\n                return fct(self)\n\n        return call_f\n\n    return wrapper\n\n\ndef measure_time(\n    stmt: Union[str, Callable],\n    context: Optional[Dict[str, Any]] = None,\n    repeat: int = 10,\n    number: int = 50,\n    warmup: int = 1,\n    div_by_number: bool = True,\n    max_time: Optional[float] = 
None,\n) -> Dict[str, Any]:\n \"\"\"\n Measures a statement and returns the results as a dictionary.\n\n :param stmt: string or callable\n :param context: variable to know in a dictionary\n :param repeat: average over *repeat* experiment\n :param number: number of executions in one row\n :param warmup: number of iteration to do before starting the\n real measurement\n :param div_by_number: divide by the number of executions\n :param max_time: execute the statement until the total goes\n beyond this time (approximatively), *repeat* is ignored,\n *div_by_number* must be set to True\n :return: dictionary\n\n .. runpython::\n :showcode:\n\n from teachcompute.ext_test_case import measure_time\n from math import cos\n\n res = measure_time(lambda: cos(0.5))\n print(res)\n\n See `Timer.repeat `_\n for a better understanding of parameter *repeat* and *number*.\n The function returns a duration corresponding to\n *number* times the execution of the main statement.\n\n .. versionchanged:: 0.4\n Parameter *max_time* was added.\n \"\"\"\n if not callable(stmt) and not isinstance(stmt, str):\n raise TypeError(\n f\"stmt is not callable or a string but is of type {type(stmt)!r}.\"\n )\n if context is None:\n context = {}\n\n if isinstance(stmt, str):\n tim = Timer(stmt, globals=context)\n else:\n tim = Timer(stmt)\n\n if warmup > 0:\n warmup_time = tim.timeit(warmup)\n else:\n warmup_time = 0\n\n if max_time is not None:\n if not div_by_number:\n raise ValueError(\n \"div_by_number must be set to True of max_time is defined.\"\n )\n i = 1\n total_time = 0\n results = []\n while True:\n for j in (1, 2):\n number = i * j\n time_taken = tim.timeit(number)\n results.append((number, time_taken))\n total_time += time_taken\n if total_time >= max_time:\n break\n if total_time >= max_time:\n break\n ratio = (max_time - total_time) / total_time\n ratio = max(ratio, 1)\n i = int(i * ratio)\n\n res = numpy.array(results)\n tw = res[:, 0].sum()\n ttime = res[:, 1].sum()\n mean = ttime / tw\n ave = res[:, 1] / res[:, 0]\n dev = (((ave - mean) ** 2 * res[:, 0]).sum() / tw) ** 0.5\n mes = dict(\n average=mean,\n deviation=dev,\n min_exec=numpy.min(ave),\n max_exec=numpy.max(ave),\n repeat=1,\n number=tw,\n ttime=ttime,\n )\n else:\n res = numpy.array(tim.repeat(repeat=repeat, number=number))\n if div_by_number:\n res /= number\n\n mean = numpy.mean(res)\n dev = numpy.mean(res**2)\n dev = (dev - mean**2) ** 0.5\n mes = dict(\n average=mean,\n deviation=dev,\n min_exec=numpy.min(res),\n max_exec=numpy.max(res),\n repeat=repeat,\n number=number,\n ttime=res.sum(),\n )\n\n if \"values\" in context:\n if hasattr(context[\"values\"], \"shape\"):\n mes[\"size\"] = context[\"values\"].shape[0]\n else:\n mes[\"size\"] = len(context[\"values\"])\n else:\n mes[\"context_size\"] = sys.getsizeof(context)\n mes[\"warmup_time\"] = warmup_time\n return mes\n\n\nclass ExtTestCase(unittest.TestCase):\n _warns = []\n\n def assertEndsWith(self, string, suffix):\n if not string.endswith(suffix):\n raise AssertionError(f\"{string!r} does not end with {suffix!r}.\")\n\n def assertExists(self, name):\n if not os.path.exists(name):\n raise AssertionError(f\"File or folder {name!r} does not exists.\")\n\n def assertEqualArray(\n self,\n expected: numpy.ndarray,\n value: numpy.ndarray,\n atol: float = 0,\n rtol: float = 0,\n ):\n self.assertEqual(expected.dtype, value.dtype)\n self.assertEqual(expected.shape, value.shape)\n assert_allclose(expected, value, atol=atol, rtol=rtol)\n\n def assertAlmostEqual(\n self,\n expected: numpy.ndarray,\n 
value: numpy.ndarray,\n        atol: float = 0,\n        rtol: float = 0,\n    ):\n        if not isinstance(expected, numpy.ndarray):\n            expected = numpy.array(expected)\n        if not isinstance(value, numpy.ndarray):\n            value = numpy.array(value).astype(expected.dtype)\n        self.assertEqualArray(expected, value, atol=atol, rtol=rtol)\n\n    def assertRaise(self, fct: Callable, exc_type: Exception):\n        try:\n            fct()\n        except exc_type as e:\n            if not isinstance(e, exc_type):\n                raise AssertionError(f\"Unexpected exception {type(e)!r}.\")\n            return\n        raise AssertionError(\"No exception was raised.\")\n\n    def assertEmpty(self, value: Any):\n        if value is None:\n            return\n        if len(value) == 0:\n            return\n        raise AssertionError(f\"value is not empty: {value!r}.\")\n\n    def assertNotEmpty(self, value: Any):\n        if value is None:\n            raise AssertionError(f\"value is empty: {value!r}.\")\n        if isinstance(value, (list, dict, tuple, set)):\n            if len(value) == 0:\n                raise AssertionError(f\"value is empty: {value!r}.\")\n\n    def assertStartsWith(self, prefix: str, full: str):\n        if not full.startswith(prefix):\n            raise AssertionError(f\"prefix={prefix!r} does not start string {full!r}.\")\n\n    @classmethod\n    def tearDownClass(cls):\n        for name, line, w in cls._warns:\n            warnings.warn(f\"\\n{name}:{line}: {type(w)}\\n  {str(w)}\")\n\n    def capture(self, fct: Callable):\n        \"\"\"\n        Runs a function and captures standard output and error.\n\n        :param fct: function to run\n        :return: result of *fct*, output, error\n        \"\"\"\n        sout = StringIO()\n        serr = StringIO()\n        with redirect_stdout(sout):\n            with redirect_stderr(serr):\n                res = fct()\n        return res, sout.getvalue(), serr.getvalue()\n","repo_name":"sdpython/teachcompute","sub_path":"teachcompute/ext_test_case.py","file_name":"ext_test_case.py","file_ext":"py","file_size_in_byte":7441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}\n{"seq_id":"73547814487","text":"\n\nif __name__ == \"__main__\":\n    # N = 3\n    # sample = [\"10 40 70\", \"20 50 80\", \"30 60 90\"]\n    # data = [[int(j) for j in sample[i].split()] for i in range(N)]\n    data = [[int(j) for j in input().split()] for i in range(int(input()))]\n    last_idx = -1\n    sum_data = 0\n    for i in range(len(data)):\n        # start from column 0 unless it was picked for the previous row; this\n        # also guarantees max_idx is defined even when column 0 stays the max\n        if last_idx != 0:\n            max_data = data[i][0]\n            max_idx = 0\n        else:\n            max_data = float('-inf')\n            max_idx = -1\n        for j in range(1,3):\n            if data[i][j] > max_data and j != last_idx:\n                max_data = data[i][j]\n                max_idx = j\n        last_idx = max_idx\n        sum_data += max_data\n    print(sum_data)\n","repo_name":"cafenoctua/algorithms_data_structures","sub_path":"src/part5/test5-1.py","file_name":"test5-1.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}\n{"seq_id":"9650187284","text":"#!/usr/bin/python\n### coding: utf-8\n#__author__ = 'Przemyslaw Teodorski'\n\nimport sys\nfrom functools import partial\nfrom PyQt4 import QtGui\nfrom databaseObjects import Word, Answer\nfrom model import Interface\n\n\nclass MainWindowOld(QtGui.QMainWindow):\n\n    #languages = {1: }\n\n    spanishButtonLabel = str('Español')\n    spanishSpecialCharacters = ['ñ', 'ó', 'á', 'ú', 'é', 'í']\n    mode = {'es', 'en'}\n\n    englishButtonLabel = str('English')\n\n    def __init__(self):\n        super(MainWindowOld, self).__init__()\n\n    def initUI(self, onAddWordModel, closeConnection, onStartTestModel, onCheckAnswerModel,\n               onNextTestWordModel, onContinueWithTest):\n\n        self.onAddWordModel = onAddWordModel\n        self.closeConnection = closeConnection\n        self.onStartTestModel = onStartTestModel\n        self.onCheckAnswerModel = onCheckAnswerModel\n        self.onNextTestWordModel 
= onNextTestWordModel\n self.continueWithTest = onContinueWithTest\n\n self.spanishButton = QtGui.QPushButton(self.spanishButtonLabel)\n self.spanishButton.clicked.connect(self.onSpanishDict)\n\n self.englishButton = QtGui.QPushButton(self.englishButtonLabel)\n self.englishButton.clicked.connect(self.onEnglishDict)\n\n self.mainWidget = QtGui.QWidget()\n mainLayout = QtGui.QGridLayout()\n mainLayout.addWidget(self.spanishButton, 0,0,2,1)\n mainLayout.addWidget(self.englishButton, 1,0,2,1)\n self.mainWidget.setLayout(mainLayout)\n\n self.setCentralWidget(self.mainWidget)\n self.setGeometry(200, 200, 600, 500)\n self.setWindowTitle('Words Memory')\n self.show()\n\n def onSpanishDict(self):\n self.languageMode = 1\n self.addWordButton = QtGui.QPushButton('Añadir una palabra')\n self.addWordButton.clicked.connect(self.onAddWordWindow)\n\n self.startTestButton = QtGui.QPushButton('Empezar test')\n self.startTestButton.clicked.connect(self.onStartTest)\n\n subWidget = QtGui.QWidget()\n newLayout = QtGui.QGridLayout()\n newLayout.addWidget(self.addWordButton, 0,0,2,1)\n newLayout.addWidget(self.startTestButton, 1,0,2,1)\n subWidget.setLayout(newLayout)\n\n self.setCentralWidget(subWidget)\n\n def onEnglishDict(self):\n self.languageMode = 2\n self.addWordButton = QtGui.QPushButton('Add new word')\n self.addWordButton.clicked.connect(self.onAddWordWindow)\n\n self.startTestButton = QtGui.QPushButton('Start test')\n self.startTestButton.clicked.connect(self.onStartTest)\n\n subWidget = QtGui.QWidget()\n newLayout = QtGui.QGridLayout()\n newLayout.addWidget(self.addWordButton, 0,0,2,1)\n newLayout.addWidget(self.startTestButton, 1,0,2,1)\n subWidget.setLayout(newLayout)\n\n self.setCentralWidget(subWidget)\n\n def swapToSpanishCharacters(self, text):\n if len(text):\n root = str(text)[0:-1]\n l = str(text)[-1]\n if l == 'ń':\n self.newWordLine.setText(root + 'ñ')\n elif l == 'ó':\n self.newWordLine.setText(root + 'ó')\n elif l == 'ą':\n self.newWordLine.setText(root + 'á')\n elif l == 'ę':\n self.newWordLine.setText(root + 'é')\n\n\n def onAddWordWindow(self):\n self.newWordLine = QtGui.QLineEdit()\n #connect newWordLine with spanish characters\n self.newWordLine.textEdited.connect(self.swapToSpanishCharacters)\n\n self.translation = QtGui.QLineEdit()\n self.enTranslation = QtGui.QLineEdit()\n self.comment = QtGui.QPlainTextEdit()\n\n wordLabel = QtGui.QLabel('Palabra')\n translationLabel = QtGui.QLabel('Traslación polaco')\n enTranslationLabel = QtGui.QLabel('Traslación ingles')\n commentLabel = QtGui.QLabel('Comentario')\n\n self.acceptButton = QtGui.QPushButton('Ok')\n self.acceptButton.clicked.connect(self.onAcceptNewWord)\n\n self.cancelButton = QtGui.QPushButton('Cancel')\n self.cancelButton.clicked.connect(self.onCancelAddWord)\n\n layout = QtGui.QGridLayout()\n layout.addWidget(self.getWidgetSpecialLetters(), 0, 0, 1, 2)\n layout.addWidget(wordLabel, 1, 0, 1, 1)\n layout.addWidget(self.newWordLine, 1, 1, 1, 2)\n layout.addWidget(translationLabel, 2, 0, 1, 1)\n layout.addWidget(self.translation, 2, 1, 1, 2)\n\n layout.addWidget(enTranslationLabel, 3, 0, 1, 1)\n layout.addWidget(self.enTranslation, 3, 1, 1, 2)\n\n layout.addWidget(commentLabel, 4, 0, 1, 1)\n layout.addWidget(self.comment, 4, 1, 1, 2)\n\n layout.addWidget(self.acceptButton, 5, 0, 1, 1)\n layout.addWidget(self.cancelButton, 6, 0, 1, 1)\n\n subWidget = QtGui.QWidget()\n subWidget.setLayout(layout)\n self.setCentralWidget(subWidget)\n\n def getWidgetSpecialLetters(self):\n if self.languageMode == 1:\n #spanish dictionary\n 
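# functools.partial is used below (rather than a lambda) so that each\n            # button captures its own character at creation time; a plain lambda in\n            # this loop would late-bind 't' and every button would insert the last\n            # character in the list.\n            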
buttonWidget = QtGui.QWidget()\n            layout = QtGui.QHBoxLayout()\n            for i in self.spanishSpecialCharacters:\n                b = QtGui.QPushButton(str(i))\n                t = b.text()\n                b.clicked.connect(partial(self.onSpecialCharacterClicked, t))\n                layout.addWidget(b)\n            buttonWidget.setLayout(layout)\n            return buttonWidget\n\n        else:\n            pass\n\n    def onSpecialCharacterClicked(self, other):\n        text = self.newWordLine.text()\n        text += other\n        self.newWordLine.setText(text)\n\n    def checkAnswer(self):\n        a = str(self.translation.text())\n        self.onCheckAnswerModel(a)\n\n    def onStartTest(self):\n        self.onStartTestModel(self.languageMode)\n\n    def showTestView(self):\n        self.newWordLine = QtGui.QLabel()\n\n        self.translation = QtGui.QLineEdit()\n        self.enTranslation = QtGui.QLineEdit()\n\n        self.correctPLTranslation = QtGui.QLineEdit()\n        self.correctPLTranslation.setReadOnly(True)\n        self.correctPLTranslation.hide()\n\n        self.comment = QtGui.QPlainTextEdit()\n        self.comment.hide()\n\n        wordLabel = QtGui.QLabel('Palabra')\n        translationLabel = QtGui.QLabel('Traslación polaco')\n        enTranslationLabel = QtGui.QLabel('Traslación ingles')\n        commentLabel = QtGui.QLabel('Comentario')\n\n        self.nextButton = QtGui.QPushButton('Siguiente')\n        self.nextButton.clicked.connect(self.onNextTestWordModel)\n\n        self.acceptButton = QtGui.QPushButton('Ok')\n        self.acceptButton.clicked.connect(self.checkAnswer)\n\n        self.cancelButton = QtGui.QPushButton('Cancelar')\n        self.cancelButton.clicked.connect(self.onCancelAddWord)\n\n        layout = QtGui.QGridLayout()\n        layout.addWidget(self.getWidgetSpecialLetters(), 0, 0, 1, 2)\n        layout.addWidget(wordLabel, 2, 0, 1, 1)\n        layout.addWidget(self.newWordLine, 2, 1, 1, 1)\n\n        layout.addWidget(translationLabel, 3, 0, 1, 1)\n        layout.addWidget(self.translation, 3, 1, 1, 1)\n        layout.addWidget(self.correctPLTranslation, 4, 1, 1, 1)\n\n        layout.addWidget(enTranslationLabel, 5, 0, 1, 1)\n        layout.addWidget(self.enTranslation, 5, 1, 1, 1)\n\n        layout.addWidget(commentLabel, 6, 0, 1, 1)\n        layout.addWidget(self.comment, 6, 1, 1, 2)\n\n        layout.addWidget(self.nextButton, 7, 0, 1, 1)\n        layout.addWidget(self.acceptButton, 7, 1, 1, 1)\n        layout.addWidget(self.cancelButton, 8, 0, 1, 1)\n\n        subWidget = QtGui.QWidget()\n        subWidget.setLayout(layout)\n        self.setCentralWidget(subWidget)\n\n    def windowContinue(self):\n        label = ('Continuar con las palabras que no supiste?')\n        ret = QtGui.QMessageBox.question(self, 'Atención', label, QtGui.QMessageBox.Cancel | QtGui.QMessageBox.Ok,\n                                         QtGui.QMessageBox.Ok)\n        if ret == QtGui.QMessageBox.Ok:\n            self.continueWithTest()\n\n    def congratulations(self):\n        pass\n\n    def onCancelAddWord(self):\n        if self.languageMode == 1:\n            self.onSpanishDict()\n        else:\n            pass\n\n    def clearFields(self):\n        self.translation.setText('')\n        self.enTranslation.setText('')\n        self.newWordLine.setText('')\n        # QPlainTextEdit has no setText; setPlainText must be used instead\n        self.comment.setPlainText('')\n\n    def onAcceptNewWord(self):\n        print('accept ui')\n        newWord = str(self.newWordLine.text())\n        polishTranslation = str(self.translation.text())\n        englishTranslation = str(self.enTranslation.text())\n        comment = str(self.comment.toPlainText())\n        w = Word(self.languageMode, newWord, polishTranslation, englishTranslation, comment=comment)\n        self.onAddWordModel(w)\n        self.clearFields()\n\n    def validateNewWord(self):\n        if not str(self.newWordLine.text()):\n            return False\n        elif not str(self.translation.text()):\n            return False\n        return True\n\n    def closeEvent(self, event):\n        self.closeConnection()\n        event.accept()\n        print('after close event')\n\n\ndef main():\n    app = QtGui.QApplication(sys.argv)\n    ui = MainWindowOld()\n    interface = Interface(ui)\n    
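# Interface presumably wires the model callbacks into the view and calls\n    # ui.initUI itself, which is why the direct call below stays commented out.\n    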
#ui.initUI()\n sys.exit(app.exec_())\n\nif __name__ == '__main__':\n main()","repo_name":"przemek1899/wordsMemory_Desktop_pyqt4","sub_path":"mainWindowOld.py","file_name":"mainWindowOld.py","file_ext":"py","file_size_in_byte":9193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"266789491","text":"\nfrom collections import Counter\n\nwith open('2021/week_2/day_14/polymerinput.txt') as f:\n polyinputs = [f.strip() for f in f.readlines()]\n\ndef base_poly_and_pairs():\n poly = polyinputs[0]\n\n keys = [pair[0:2] for pair in polyinputs[2:]]\n vals = [pair[6] for pair in polyinputs[2:]]\n\n pairs = {keys[i]: vals[i] for i in range(len(keys))}\n\n return poly, pairs\n\npoly, pairs = base_poly_and_pairs()\n\ndef pair_insertion(steps, poly, pairs):\n for _ in range(steps):\n new_poly = []\n chunks = [poly[i:i+2] for i in range(len(poly)-1)]\n for c in chunks:\n if c in pairs.keys():\n new_poly += c[0] + pairs[c]\n\n new_poly.append(chunks[-1][1])\n poly = ''.join(new_poly)\n\n element_count = Counter(poly)\n return element_count.most_common()[0][1] - element_count.most_common()[-1][1]\n\nprint(pair_insertion(10, poly, pairs))\n\n\n\n\n\n","repo_name":"JackIHill/AdventOfCode","sub_path":"2021/week_2/day_14/day14_puzzles.py","file_name":"day14_puzzles.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27306376199","text":"N, L = map(int, input().split())\n\nnot_eat = N * (L - 1) + N * (N + 1) // 2\n\ntmp_abs = float('INF')\nans = 0\n\nfor i in range(1, N+1):\n eat_i = not_eat - (L + i - 1)\n tmp = abs(not_eat - eat_i)\n if tmp < tmp_abs:\n ans = eat_i\n tmp_abs = tmp\n \nprint(ans)","repo_name":"yuuLab/algorithms","sub_path":"atcoder/abc-100-149/131/b/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6992209129","text":"import os, os.path\nimport time, datetime\n# os.chdir(os.path.join(os.getcwd(),'src')) if os.path.basename(os.getcwd())!='src' else None\nimport pandas as pd\nimport numpy as np\nfrom src.utils.tools import db\nfrom src.projects.dima.tabletools import table_create, sql_command, tablecheck\nfrom src.projects.tall_tables.talltables_handler import ingesterv2\nfrom src.projects.dima.tabletools import table_create, tablecheck\nfrom src.projects.aero.aero_model import update_model\n\n\"\"\"\nX 1.read directory that holds outputs\nX 2.concatenate dataframe\nX 2.5 Plotid\n3.choose model_run_id / ModelRunKey\n- for now modelrun lookup + modeloutputs will be in dima db\n\n4. 
check if lookuptable exists +\n check if model run id exists +\n update modelrun lookup table with appropriate id\n4.update model_run + model run loookup table on postgres\n\"\"\"\n# p = r\"C:\\Users\\kbonefont\\Desktop\\aero_flux_2\"\n# df = txt_read(p)\n# df['Source'] = 'AIM'\n# df\n# table_create(df, \"aero_runs\", \"aero\")\ndef txt_read(path):\n df_dict = {}\n testset = [\"20184145384203B2_flux\",\"20184145384203B1_flux\",\"20184145374203B2_flux\"]\n count = 1\n for i in os.listdir(path):\n #if file is not an excelfile\n if os.path.splitext(i)[1]!=\".xlsx\":\n # debug block\n # if os.path.splitext(i)[0] in [i for i in testset]:\n # file = os.path.join(path,i)\n # created_time = os.path.getctime(file)\n # parsed_ctime = time.ctime(created_time)\n # date_ctime = datetime.datetime.strptime(parsed_ctime, \"%a %b %d %H:%M:%S %Y\")\n # # print(date_ctime)\n # complete = os.path.join(path,i)\n # temp = pd.read_table(complete, sep=\"\\t\", low_memory=False)\n # df_dict.update({f\"df{count}\":temp})\n # count+=1\n\n # get date/time for modelrun\n file = os.path.join(path,i)\n created_time = os.path.getctime(file)\n parsed_ctime = time.ctime(created_time)\n date_ctime = datetime.datetime.strptime(parsed_ctime, \"%a %b %d %H:%M:%S %Y\")\n # get plotid\n plotid = i.split('_')[0]\n complete = os.path.join(path,i)\n temp = pd.read_table(complete, sep=\"\\t\", low_memory=False)\n temp['PlotId'] = plotid\n df_dict.update({f\"df{count}\":temp})\n # print(f\"{count} added\")\n count+=1\n else:\n pass\n return pd.concat([d[1] for d in df_dict.items()],ignore_index=True)\n\n\n\ndef model_run_updater(batchpath, modelrunkey, source = None):\n \"\"\"\n 1. creates a table in postgres with supplied dataframe\n 2. appends data to postgres table\n \"\"\"\n d = db(\"aero\")\n df = txt_read(batchpath)\n if source!=None:\n df['Source'] = source\n else:\n pass\n df['ModelRunKey'] = modelrunkey\n\n if tablecheck('aero_runs'):\n print('aero_runs exists, skipping table creation')\n update_model(batchpath,modelrunkey)\n\n ingesterv2.main_ingest(df, \"aero_runs\", d.str,100000)\n else:\n print('creating aero_runs table..')\n table_create(df, \"aero_runs\", \"aero\")\n update_model(batchpath,modelrunkey)\n ingesterv2.main_ingest(df, \"aero_runs\", d.str,100000)\n\n\ndef model_run_create():\n pass\n\ntype_translate = {np.dtype('int64'):'int',\n 'Int64':'int',\n np.dtype(\"object\"):'text',\n np.dtype('datetime64[ns]'):'timestamp',\n np.dtype('bool'):'boolean',\n np.dtype('float64'):'float(5)',}\n\nfields_dict = {\n\"ModelRunKey\":pd.Series([],dtype='object'),\n\"Model\":pd.Series([],dtype='object'),\n\"LocationType\":pd.Series([],dtype='object'),\n\"SurfaceSoilSource\":pd.Series([],dtype='datetime64[ns]'),\n\"MeteorologicalSource\":pd.Series([],dtype='object'),\n\"ModelRunNotes\":pd.Series([],dtype='object'),\n}\n","repo_name":"krstphrrr/ingesterv2","sub_path":"src/projects/aero/aero.py","file_name":"aero.py","file_ext":"py","file_size_in_byte":3744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34699491499","text":"import telebot\nfrom generator import Generator\n\ngenerator = Generator.load('my_model')\n\nbot = telebot.TeleBot(\"TOKEN\")\n\nmarkup = telebot.types.ReplyKeyboardMarkup(resize_keyboard=True)\ngen_button = telebot.types.KeyboardButton('Generate')\nmarkup.add(gen_button)\n\n\n@bot.message_handler(commands=['start'])\ndef send_welcome(message):\n start_message = 'Hi there!\\nPress \\\"Generate\\\" button to get a startup name.'\n 
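# reply_markup attaches the custom keyboard, so the 'Generate' button\n    # shows up as soon as the user sends /start.\n    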
bot.send_message(message.chat.id, start_message, reply_markup=markup)\n\n\n@bot.message_handler(commands=['help'])\ndef send_help(message):\n    help_message = 'Press \\\"Generate\\\" button to get a startup name.'\n    bot.send_message(message.chat.id, help_message)\n\n\n@bot.message_handler(func=lambda message: message.text == 'Generate')\ndef answer_message(message):\n    bot.send_message(message.chat.id, generator.simulate(1))\n\n\nbot.polling()\n","repo_name":"egor-sergeev/SPbU-homework","sub_path":"Semester_5/computer_networks/bot/startupNameGeneratorBot.py","file_name":"startupNameGeneratorBot.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}\n{"seq_id":"19415226401","text":"# -*- coding: utf-8 -*-\r\nimport copy\r\nimport time\r\nimport datetime\r\nimport math\r\nimport random\r\nimport pandas as pd\r\nimport asyncio\r\nfrom alpha.utils import logger\r\nfrom six.moves import xrange, zip\r\nimport numpy as np\r\nimport strategies as ST\r\nfrom market.huobi import HuobiMarket\r\nfrom alpha.tasks import LoopRunTask, SingleTask\r\nfrom alpha.order import Order\r\nfrom alpha.asset import Asset\r\nfrom alpha.position import Position\r\nfrom alpha.quant import quant\r\n\r\n\r\nIS_CLOSE = False\r\n\r\n# Market that replays historical data for backtesting\r\nclass HuobiTestMarket(object):\r\n    def __init__(self, market_config):\r\n        super().__init__()\r\n        self.market_config = market_config\r\n        self.market = HuobiMarket(market_config)\r\n        self.trader = self.market.trader\r\n        self.now_timeline = int(time.time() - (2) * 24 * 3600)\r\n        # self.now_timeline = int(time.time() - 5.5 * 3600)\r\n        # self.now_timeline = 1615917738\r\n\r\n        self.init_timeline = self.now_timeline\r\n        self.klines_data = {}\r\n        self.asset_list = []\r\n        self.op_time = []\r\n        self.inited = False\r\n        self.level = int(market_config['level'])\r\n        self.face_value = self.market.face_value\r\n\r\n        SingleTask.run(self.InitMarket)\r\n        self.klines_idx = {}\r\n        self.last_tick_time = -1\r\n        self.last_minut_s = -1\r\n        self.last_tick_data = dict()\r\n        self.high_first = False\r\n\r\n        self.position = Position()\r\n        self.position.long_quantity = 0\r\n        self.position.short_quantity = 0\r\n        self.tot_fee = 0\r\n        self.free_asset = 100\r\n        self.deal_count = 0\r\n        self.tot_asset = self.free_asset\r\n        self.init_asset = self.free_asset\r\n\r\n        self.ask1_price = 0 # best ask price\r\n        self.bid1_price = 0 # best bid price\r\n\r\n    async def InitMarket(self):\r\n        the_start_time = self.now_timeline - 6 * 3600\r\n        for period, step in [('1min', 120000 - 60), ('5min', 600000 - 60 * 5), ('15min', 1800000 - 60 * 15), ]:\r\n            # for period, step in [('1min', 120000 - 60)]:\r\n            if period in self.klines_data:\r\n                continue\r\n\r\n            now_time = the_start_time\r\n            self.klines_data[period] = []\r\n            print(\"{} period data init started\".format(period), end='', flush=True)\r\n\r\n            while now_time + step + 1 < time.time():\r\n                data = await self.market.GetKLines(period, now_time, now_time + step + 1)\r\n                if not data:\r\n                    await asyncio.sleep(0.04)\r\n                    continue\r\n                if self.klines_data[period] and self.klines_data[period][-1]['id'] == data[0]['id']:\r\n                    data.remove(data[0])\r\n                self.klines_data[period].extend(data)\r\n                now_time += step\r\n                print(\"\\r{} period data init process: {:.2f}%\".format(period, 100 * (now_time - the_start_time) / (time.time() - the_start_time)), end='', flush=True)\r\n\r\n            while 1:\r\n                data = await self.market.GetKLines(period, now_time, time.time())\r\n                if not data:\r\n                    await asyncio.sleep(0.04)\r\n                    continue\r\n                if 
self.klines_data[period] and self.klines_data[period][-1]['id'] == data[0]['id']:\r\n                    data.remove(data[0])\r\n                self.klines_data[period].extend(data)\r\n                print(\"\\r{} period data init process: {:.2f}%, with {} data\".format(period, 100, len(self.klines_data[period])), flush=True)\r\n                break\r\n\r\n            for idx in xrange(len(self.klines_data[period])):\r\n                if self.klines_data[period][idx]['id'] >= self.now_timeline:\r\n                    self.last_tick_data[period] = copy.copy(self.klines_data[period][idx - 1])\r\n                    self.klines_idx[period] = idx - 1\r\n                    break\r\n\r\n        self.inited = True\r\n        self.UpdateTick()\r\n\r\n    def UpdateTick(self):\r\n        if self.now_timeline >= time.time():\r\n            self.CalResult()\r\n            quant.stop()\r\n            ST.IS_CLOSE = True\r\n            return\r\n        if self.now_timeline == self.init_timeline:\r\n            print(\"\\nhuobi test process: %.1f%%\" % (0, ), end='', flush=True)\r\n\r\n        # number of simulated intra-minute price updates per 1min candle;\r\n        # defined up here because it is also used outside the branch below\r\n        COUNT = 3\r\n        if self.last_tick_time != ST.strategies.tick_count:\r\n            # advance the simulated clock once per strategy tick\r\n            # self.now_timeline += 60\r\n            self.last_minut_s += 1\r\n            if self.last_minut_s >= COUNT:\r\n                self.last_minut_s = 0\r\n                self.now_timeline += 60 - COUNT + 1\r\n                self.high_first = random.choice([True, False])\r\n\r\n            self.last_tick_time = ST.strategies.tick_count\r\n            f = 100 * (self.now_timeline - self.init_timeline) / (time.time() - self.init_timeline)\r\n            print(\"\\rhuobi test process: %.1f%%\" % (f, ), end='', flush=True)\r\n\r\n        for period in self.klines_idx:\r\n            for idx in xrange(self.klines_idx[period] - 1, len(self.klines_data[period])):\r\n                if self.klines_data[period][idx]['id'] >= self.now_timeline:\r\n                    if idx - 1 != self.klines_idx[period]:\r\n                        self.klines_data[period][self.klines_idx[period]] = self.last_tick_data[period]\r\n                        self.klines_idx[period] = idx - 1\r\n                        self.last_tick_data[period] = copy.copy(self.klines_data[period][idx - 1])\r\n                        d = self.klines_data[period][idx - 1]\r\n                        d['close'] = d['high'] = d['low'] = d['open']\r\n                    break\r\n\r\n        min_data = self.last_tick_data['1min']\r\n        if self.last_minut_s == 0:\r\n            price = min_data['open']\r\n        elif self.last_minut_s == COUNT - 1:\r\n            price = min_data['close']\r\n        elif self.last_minut_s == COUNT - 2:\r\n            price = min_data['high'] if self.high_first else min_data['low']\r\n        elif self.last_minut_s == COUNT - 3:\r\n            price = min_data['low'] if self.high_first else min_data['high']\r\n        else:\r\n            price = random.uniform(min_data['low'], min_data['high'])\r\n        # price = random.uniform(min_data['low'], min_data['high'])\r\n\r\n        for period in self.klines_idx:\r\n            d = self.klines_data[period][self.klines_idx[period]]\r\n            if price > d['high']:\r\n                d['high'] = price\r\n            if price < d['low']:\r\n                d['low'] = price\r\n            d['close'] = price\r\n\r\n        self.ask1_price = price # best ask price\r\n        self.bid1_price = price - 0.001 # best bid price\r\n\r\n        self.UpdateTotAsset()\r\n        if self.last_minut_s == COUNT - 1:\r\n            dateArray = datetime.datetime.fromtimestamp(self.now_timeline)\r\n            otherStyleTime = dateArray.strftime(\"%Y-%m-%d %H:%M:%S\")\r\n            self.asset_list.append({'asset': self.tot_asset,\r\n                                    'price': price,\r\n                                    'time': otherStyleTime,\r\n                                    'op': self.op_time,})\r\n            self.op_time = []\r\n\r\n    async def GerOrders(self):\r\n        pass\r\n\r\n    async def GetPosition(self):\r\n        return self.position\r\n\r\n    def FetchFreeAsset(self):\r\n        return self.free_asset\r\n\r\n    def FetchTotAsset(self):\r\n        self.UpdateTotAsset()\r\n        return self.tot_asset\r\n\r\n    async def CheckOrderStatus(self, order_id):\r\n        # TODO: check whether the order was filled\r\n        pass\r\n\r\n    async def Buy(self, price, quantity):\r\n        price = float(price)\r\n        if self.free_asset < 0:\r\n            return\r\n        can_buy = 
math.floor(float(self.free_asset) * self.level * price // self.face_value)\r\n if can_buy == 0:\r\n print(\"Can't buy 3\")\r\n return\r\n quantity = int(quantity)\r\n\r\n if (quantity > 0 and price < self.ask1_price) or (quantity < 0 and price > self.bid1_price): \r\n return\r\n\r\n if abs(quantity) > can_buy:\r\n return\r\n\r\n logger.info(\"[OP Buy]\", price, quantity)\r\n self.op_time.append(('BUY', price))\r\n asset_diya = abs(quantity) / price / self.level * self.face_value\r\n\r\n fee = abs(quantity) * 0.0004 * self.face_value / price\r\n # print('quantity:', quantity, 'fee:', fee)\r\n self.tot_fee += fee\r\n self.free_asset -= asset_diya + fee\r\n if quantity > 0:\r\n self.position.long_quantity += int(quantity)\r\n self.position.long_avg_price = price\r\n else:\r\n self.position.short_quantity += int(abs(quantity))\r\n self.position.short_avg_price = price\r\n return quantity\r\n\r\n async def Sell(self, price, quantity):\r\n if quantity == 0:\r\n return\r\n if (quantity > 0 and price > self.bid1_price) or (quantity < 0 and price < self.ask1_price): \r\n return\r\n quantity = int(quantity)\r\n\r\n self.UpdateTotAsset()\r\n add_asset = 0\r\n if quantity > 0:\r\n if self.position.long_quantity < quantity:\r\n logger.error(\"Can't be!\")\r\n return\r\n self.position.long_quantity -= quantity\r\n buy_price = self.position.long_avg_price\r\n dire = 1\r\n add_asset = (1/buy_price - 1/price) * quantity * self.face_value\r\n if self.position.long_quantity:\r\n self.position.long_avg_price = 0\r\n else:\r\n if self.position.short_quantity < int(abs(quantity)):\r\n logger.error(\"Can't be!\")\r\n return\r\n self.position.short_quantity += quantity\r\n buy_price = self.position.short_avg_price\r\n dire = -1\r\n add_asset += (1/price - 1/buy_price) * -quantity * self.face_value\r\n if self.position.short_quantity:\r\n self.position.short_avg_price = 0\r\n\r\n logger.info(\"[OP Sell]\", price, quantity)\r\n self.deal_count += 1\r\n self.op_time.append((buy_price, price, dire, add_asset))\r\n fee = abs(quantity) * 0.0004 * self.face_value / price\r\n self.tot_fee += fee\r\n self.free_asset = self.tot_asset - fee\r\n return quantity\r\n\r\n def UpdateTotAsset(self):\r\n asset_recover = 0\r\n add_asset = 0\r\n price = self.ask1_price\r\n if self.position.long_quantity > 0:\r\n quantity = self.position.long_quantity\r\n buy_price = self.position.long_avg_price\r\n add_asset = (1/buy_price - 1/price) * quantity * self.face_value\r\n asset_recover = abs(quantity) / buy_price / self.level * self.face_value\r\n\r\n if self.position.short_quantity > 0:\r\n quantity = self.position.short_quantity\r\n buy_price = self.position.short_avg_price\r\n add_asset += (1/price - 1/buy_price) * quantity * self.face_value\r\n asset_recover += abs(quantity) / buy_price / self.level * self.face_value\r\n\r\n self.tot_asset = self.free_asset + asset_recover + add_asset\r\n\r\n def CalResult(self):\r\n self.UpdateTotAsset()\r\n init_array = datetime.datetime.fromtimestamp(self.init_timeline)\r\n init_time_str = init_array.strftime(\"%Y-%m-%d %H:%M:%S\")\r\n now_array = datetime.datetime.fromtimestamp(self.now_timeline)\r\n now_time_str = now_array.strftime(\"%Y-%m-%d %H:%M:%S\")\r\n f = 100 * (self.now_timeline - self.init_timeline) / (time.time() - self.init_timeline)\r\n\r\n print('\\nStart time: ', init_time_str, self.init_timeline)\r\n if f < 100:\r\n print('End time: ', now_array, 'rate:%.3f%%' % (f, ))\r\n print(\"left:\", self.tot_asset, 'fee:', self.tot_fee, 'deal count:', self.deal_count)\r\n save = 
pd.DataFrame(self.asset_list)\r\n        save.to_csv(\"test.csv\")\r\n        self.Draw()\r\n        return\r\n\r\n    def Draw(self):\r\n        return\r\n\r\n    async def GetRecentKLine(self, period, time_long):\r\n        self.UpdateTick()\r\n        last_idx = self.klines_idx[period]\r\n        ret = self.klines_data[period][last_idx - time_long + 1:last_idx + 1]\r\n        return ret\r\n\r\n    def Search(self, li, key, find_big=True):\r\n        # find_big is kept for call compatibility; the original two branches\r\n        # computed the same midpoint, so the dead branch was removed\r\n        le = 0\r\n        ri = len(li) - 1\r\n        while le < ri:\r\n            mid = (le + ri) // 2\r\n\r\n            if li[mid]['id'] == key:\r\n                return mid\r\n\r\n            if key < li[mid]['id']:\r\n                ri = mid - 1\r\n            else:\r\n                le = mid + 1\r\n        return le\r\n\r\n    async def GetKLines(self, period, from_time, to_time=None):\r\n        self.UpdateTick()\r\n        if self.now_timeline >= time.time():\r\n            return None\r\n        if to_time is None:\r\n            to_time = self.now_timeline\r\n\r\n        l1 = self.Search(self.klines_data[period], from_time)\r\n        l2 = self.Search(self.klines_data[period], to_time, False)\r\n        # return the candles covering [from_time, to_time]\r\n        return self.klines_data[period][l1:l2 + 1]\r\n\r\n    def time(self):\r\n        return self.now_timeline\r\n","repo_name":"yylogo/huobi_trade","sub_path":"market/huobi_test.py","file_name":"huobi_test.py","file_ext":"py","file_size_in_byte":12996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}\n{"seq_id":"3317507816","text":"import csv\nimport numpy as np\nimport numpy.random as random\nimport numpy.linalg as linalg\nimport matplotlib.pyplot as plt\nimport operator\nimport math\n\n\nfrom poly_fit_plot import plot_train_test_errors\nfrom poly_fit_plot import plot_function\nfrom poly_fit_plot import plot_function_and_data\nfrom poly_fit_plot import plot_function_data_and_approximation\n\nwith open('training_data.csv', 'r') as csvfile:\n    datareader = csv.reader(csvfile, delimiter=',')\n    header = next(datareader)\n    training_data = []\n    \n\n    for row in datareader:\n        row_of_floats = list(map(float, row))\n        training_data.append(row_of_floats)\n\n# data is of type list\ntraining_data_as_array = np.array(training_data)\n\n\n\nwith open('validation_data.csv', 'r') as csvfile1:\n    datareader = csv.reader(csvfile1, delimiter=',')\n    header = next(datareader)\n    validation_data = []\n    \n\n    for row in datareader:\n        row_of_floats = list(map(float, row))\n        validation_data.append(row_of_floats)\n\n# data is of type list\nvalidation_data_as_array = np.array(validation_data)\n\ndef expand_to_monomials(inputs, degree):\n    # create a list of all the inputs raised to each possible power\n    expanded_inputs = []\n    for i in range(degree+1):\n        expanded_inputs.append(inputs**i)\n    return np.array(expanded_inputs).transpose()\n\ndef create_prediction_function(degree, weights):\n    \"\"\"\n    This function creates and returns a prediction function based on a\n    feature mapping and some weights.\n\n    The returned prediction function takes a set of input values and returns\n    the predicted output for each.\n    \"\"\"\n    # here is a function that is created on the fly from the input feature\n    # mapping and weights\n    def prediction_function(xs):\n        expanded_xs = np.matrix(expand_to_monomials(xs, degree))\n        ys = expanded_xs*np.matrix(weights).reshape((len(weights),1))\n        return np.array(ys).flatten()\n    # we return the function reference (handle) itself. 
This can be used like\n # any other function\n return prediction_function \n\ndef least_squares_weights(processed_inputs, targets):\n \"\"\"\n This method returns the weights that give the best linear fit between\n the processed inputs and the targets.\n \"\"\"\n Phi = np.matrix(processed_inputs)\n targets = np.matrix(targets).reshape((len(targets),1))\n weights = linalg.inv(Phi.transpose()*Phi)*Phi.transpose()*targets\n return np.array(weights).flatten()\n\ndef regularised_least_squares_weights(\n processed_inputs, targets, reg_param):\n \"\"\"\n This method returns the weights that give the best linear fit between\n the processed inputs and the targets penalised by some regularisation term\n (reg_param)\n \"\"\"\n Phi = np.matrix(processed_inputs)\n targets = np.matrix(targets).reshape((len(targets),1))\n I = np.identity(Phi.shape[1])\n weights = linalg.inv(reg_param*I + Phi.transpose()*Phi)*Phi.transpose()*targets\n return np.array(weights).flatten() \n\n\ndef root_mean_squared_error(y_true, y_pred):\n \"\"\"\n Evaluate how closely predicted values (y_pred) match the true values\n (y_true, also known as targets)\n\n Parameters\n ----------\n y_true - the true targets\n y_pred - the predicted targets\n\n Returns\n -------\n mse - The root mean squared error between true and predicted target\n \"\"\"\n N = len(y_true)\n # be careful, square must be done element-wise (hence conversion\n # to np.array)\n mse = np.sum((np.array(y_true).flatten() - np.array(y_pred).flatten())**2)/N\n return np.sqrt(mse) \n\n\ndef train_and_test(\n degree, train_inputs, train_targets, test_inputs, test_targets,\n reg_param=None):\n \"\"\"\n Fits a polynomial of degree \"degree\" to your training data then evaluates\n train and test errors and plots the resulting curves\n\n Parameters\n ----------\n degree - the degree of polynomial to fit\n train_inputs - the training inputs\n train_targets - the training targets\n test_inputs - the test inputs\n test_targets - the test targets\n reg_param (optional) - the regularisation strength. If not provided then\n the non-regularised least squares method is used.\n\n Returns\n -------\n train_error - the training error for the approximation\n test_error - the test error for the approximation\n \"\"\"\n # convert both train and test inputs to monomial vectors\n processed_train_inputs = expand_to_monomials(train_inputs, degree)\n processed_test_inputs = expand_to_monomials(test_inputs, degree)\n # find the weights, using least squares or regularised least squares\n if reg_param is None:\n # use simple least squares approach\n weights = least_squares_weights(processed_train_inputs, train_targets)\n else:\n # use regularised least squares approach\n weights = regularised_least_squares_weights(\n processed_train_inputs, train_targets, reg_param)\n # create the prediction function\n trained_func = create_prediction_function(degree, weights)\n # get the train and test errors and return them\n train_error = root_mean_squared_error(train_targets, trained_func(train_inputs))\n test_error = root_mean_squared_error(test_targets, trained_func(test_inputs))\n return train_error, test_error\n\n\n\ndef evaluate_degree(reg_param, degree_sequence=[0,1,2,3,4,5,6,7,8,9,10,11]):\n \"\"\"\n Evaluates and plots test & train error (RMSE) for different degrees of\n polynomial fit to synthetic data.\n Allows one to essentially recreate Figure 1.5 from Bishop.\n\n Parameters\n ----------\n reg_param - the regularisation parameter if one is being used. 
Set to None\n        if conventional least squares fit needed\n    degree_sequence - specifies which degrees of polynomial to fit to the data\n    \"\"\"\n    # load train and test data (inputs are the first 11 columns, targets the last)\n    train_inputs = training_data_as_array[:,0:11]\n    train_targets = training_data_as_array[:,11:12]\n    test_inputs = validation_data_as_array[:,0:11]\n    test_targets = validation_data_as_array[:,11:12]\n    train_errors = []\n    test_errors = []\n    for degree in degree_sequence:\n        # for each degree fit the data then evaluate train/test error\n        train_error, test_error = train_and_test(\n            degree, train_inputs, train_targets, test_inputs,\n            test_targets, reg_param)\n        train_errors.append(train_error)\n        test_errors.append(test_error)\n    # plot the results\n    plot_train_test_errors(\"degree\", degree_sequence, train_errors, test_errors)\n    plt.show()\n\n\n\n\nevaluate_degree(None)\n","repo_name":"dr1012/Machine-Learning-group-C2","sub_path":"polynomial_regression.py","file_name":"polynomial_regression.py","file_ext":"py","file_size_in_byte":6666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}\n{"seq_id":"27623564941","text":"\"\"\"\nselecting subcomponents & disaggregated series of Component objects\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom edan.delims import (\n\tconcat_codes,\n\tcontains\n)\n\nfrom edan.utils.dtypes import iterable_not_string\n\n\ndef recursive_subcomponent(comp: Component, target: str):\n\t\"\"\"\n\trecursively search for subcomponent with code `target`\n\t\"\"\"\n\n\tif comp.code == target:\n\t\treturn comp\n\n\tfor sub in comp.subs:\n\t\tif contains(sub.code, target):\n\t\t\treturn recursive_subcomponent(sub, target)\n\n\ndef collect_elemental(comp: Component):\n\t\"\"\"\n\tsearch for and compile all the elemental subcomponents of `comp`\n\t\"\"\"\n\n\tif comp.elemental:\n\t\treturn []\n\n\telements = []\n\tdef _disagg_to_elements(obj):\n\t\tif obj.elemental:\n\t\t\telements.append(obj)\n\t\telse:\n\t\t\tfor sub in obj.subs:\n\t\t\t\t_disagg_to_elements(sub)\n\tfor sub in comp.subs:\n\t\t_disagg_to_elements(sub)\n\n\treturn elements\n\n\ndef collect_all_subcomponents(comp: Component):\n\t\"\"\"\n\tgather every subcomponent of `comp`\n\t\"\"\"\n\n\tif comp.elemental:\n\t\treturn []\n\n\tsubcomponents = []\n\tdef _disagg_all(obj):\n\t\tif obj.elemental:\n\t\t\tpass\n\t\telse:\n\t\t\tfor sub in obj.subs:\n\t\t\t\tsubcomponents.append(sub)\n\t\t\t\t_disagg_all(sub)\n\n\t_disagg_all(comp)\n\treturn subcomponents\n\n\nclass Disaggregator(object):\n\n\tdef __init__(\n\t\tself,\n\t\tcomponent: Component,\n\t\tsubcomponents: Union[str, Iterable[str]] = '',\n\t\tlevel: int = 0\n\t):\n\n\t\t\"\"\"\n\t\tclass for selecting subcomponents of a component that are further down the\n\t\tsubcomponent tree than just the Component objects immediately available in\n\t\tthe `subs` attribute. functionality for selecting based on `edan` code and\n\t\tlevel relative to the Component is provided\n\n\t\tParameters\n\t\t----------\n\t\tsubcomponents : str | Iterable[str] | bool ( = '' )\n\t\t\tan iterable of subcomponents to return. the elements of `subs` are\n\t\t\tassumed to be (relative or absolute) `edan` codes. if True, the\n\t\t\timmediate subcomponents are returned.\n\t\tlevel : int ( = 0 )\n\t\t\tthe level, relative to the current Component, of subcomponents that\n\t\t\tare to be returned. 
if a subcomponent has level `l`, with\n\t\t\tl < level + component.level, and that subcomponent is elemental, it is\n\t\t\tincluded\n\t\t\"\"\"\n\t\tself.disaggregates = []\n\t\tself.component = component\n\n\t\tif subcomponents and level:\n\t\t\traise ValueError(\"only one of 'subs' and 'level' can be provided\")\n\n\t\tif subcomponents:\n\n\t\t\tif iterable_not_string(subcomponents):\n\n\t\t\t\tfor code in subcomponents:\n\t\t\t\t\t# outrageously un-pythonic but can't be bothered to workaround yet\n\t\t\t\t\tnot_sub = True\n\n\t\t\t\t\t# concatenate later ids if `code` isn't an absolute edan code\n\t\t\t\t\tabs_code = concat_codes(component.code, code)\n\n\t\t\t\t\tfor sub in component.subs:\n\n\t\t\t\t\t\tif sub.code == abs_code:\n\t\t\t\t\t\t\t# `subcomponents` references an immediate subcomponent\n\t\t\t\t\t\t\tnot_sub = False\n\t\t\t\t\t\t\tself.disaggregates.append(sub)\n\n\t\t\t\t\t\telif contains(sub.code, abs_code):\n\t\t\t\t\t\t\t# references a subcomponent further down\n\t\t\t\t\t\t\tcomp = recursive_subcomponent(sub, abs_code)\n\t\t\t\t\t\t\tnot_sub = False\n\t\t\t\t\t\t\tself.disaggregates.append(comp)\n\n\t\t\t\t\tif not_sub:\n\t\t\t\t\t\traise KeyError(\n\t\t\t\t\t\t\tf\"{code} is not a subcomponent of {component.code}\"\n\t\t\t\t\t\t)\n\n\t\t\telif isinstance(subcomponents, str):\n\n\t\t\t\tif subcomponents == 'all':\n\t\t\t\t\tself.disaggregates = collect_all_subcomponents(component)\n\n\t\t\t\telif subcomponents == 'elements':\n\t\t\t\t\tself.disaggregates = collect_elemental(component)\n\n\t\t\t\telse:\n\t\t\t\t\tnot_sub = True\n\t\t\t\t\tabs_code = concat_codes(component.code, subcomponents)\n\n\t\t\t\t\tfor sub in component.subs:\n\n\t\t\t\t\t\tif sub.code == abs_code:\n\t\t\t\t\t\t\tnot_sub = False\n\t\t\t\t\t\t\tself.disaggregates.append(sub)\n\n\t\t\t\t\t\telif contains(sub.code, abs_code):\n\t\t\t\t\t\t\tcomp = recursive_subcomponent(sub, abs_code)\n\t\t\t\t\t\t\tnot_sub = False\n\t\t\t\t\t\t\tself.disaggregates.append(comp)\n\n\t\t\t\t\tif not_sub:\n\t\t\t\t\t\traise KeyError(\n\t\t\t\t\t\t\tf\"{subcomponents} is not a subcomponent of {component.code}\"\n\t\t\t\t\t\t)\n\n\t\t\telif isinstance(subcomponents, bool):\n\n\t\t\t\tif subcomponents:\n\t\t\t\t\tfor sub in self.component.subs:\n\t\t\t\t\t\tself.disaggregates.append(sub)\n\n\t\t\telse:\n\t\t\t\traise TypeError(\"'subs' must be a str, list of str, or bool\")\n\n\t\telif level:\n\t\t\tabs_level = level + component.level\n\t\t\tself.recursive_level(component, abs_level)\n\n\t\telif subcomponents == '':\n\t\t\tself.disaggregates = component.subs\n\n\tdef recursive_level(self, comp: Component, level: int):\n\t\tif comp.level < level:\n\t\t\tif comp.elemental:\n\t\t\t\tself.disaggregates.append(comp)\n\t\t\telse:\n\t\t\t\tfor sub in comp.subs:\n\t\t\t\t\tself.recursive_level(sub, level)\n\n\t\telif comp.level == level:\n\t\t\tself.disaggregates.append(comp)\n\n\tdef __iter__(self):\n\t\tfor sub in self.disaggregates:\n\t\t\tyield sub\n\n\tdef __len__(self):\n\t\treturn len(self.disaggregates)\n\n\tdef __str__(self):\n\t\treturn f\"Disaggregator({len(self)} Subs)\"\n","repo_name":"loganhotz/edan","sub_path":"edan/core/disaggregate.py","file_name":"disaggregate.py","file_ext":"py","file_size_in_byte":4651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"254450377","text":"#!/usr/bin/env python3\n\nfrom pytube import YouTube\nfrom pytube.cli import on_progress\nfrom termcolor import colored\nimport re\nfrom youtube.files import remove_files\nfrom 
youtube.mpeg import merge_mp4_audio_and_video, convert_audio_to_mp3\nfrom youtube.format import shorten_name\nfrom urllib.error import HTTPError\nfrom random import randrange\nfrom time import sleep\n\n\nPROFILES = {\n    'progressive': {\n        'intro_message': 'file in highest available progressive resolution...',\n        'params': {\n            'progressive': True,\n            'file_extension': 'mp4'\n        },\n        'order_by': 'resolution',\n        'out_message': 'Progressive MP4 file successfully downloaded!'\n    },\n    'video': {\n        'intro_message': '.mp4 video file in highest available resolution...',\n        'params': {\n            'progressive': False,\n            'only_video': True,\n            'file_extension': 'mp4'\n        },\n        'order_by': 'resolution',\n        'out_message': '.mp4 video file was successfully downloaded!'\n    },\n    'audio': {\n        'intro_message': 'audio file in highest available bitrate...',\n        'params': {\n            'progressive': False,\n            'only_audio': True,\n            'file_extension': 'mp4'\n        },\n        'order_by': 'abr',\n        'out_message': 'Audio track successfully downloaded...'\n    }\n}\n\n\ndef get_filename(url: str):\n    \"\"\"Validate that the URL actually exists and return the output filename.\n\n    Args:\n        url (str): some YouTube URL\n\n    Returns:\n        [str] or [None]: output filename or None if URL is not correct\n    \"\"\"\n    try:\n        yt = YouTube(url=url)\n        title = re.sub(r'[^\\w\\s-]', '', yt.title)  # remove all symbols\n        title = re.sub(r'\\s+', ' ', title)  # remove recurring spaces\n        publish_date = yt.publish_date.strftime('%Y-%m-%d')\n        slug = yt.video_id\n    except HTTPError:\n        print(colored(f'HTTP Error 404: url \"{url}\" not found!\\n', 'red'))\n        return None\n    except Exception as e:\n        print(colored(f'Error: some unexpected error occurred - \"{e}\"', 'red'))\n        return None\n    else:\n        return f'{publish_date} - {title} [{slug}]'\n\n\ndef list_all_streams(url):\n    return YouTube(url=url).streams\n\n\ndef list_streams(url: str, settings: dict):\n    try:\n        yt = YouTube(url=url).streams.\\\n            filter(**settings['params']).\\\n            order_by(settings['order_by']).\\\n            desc()\n    except Exception as err:\n        print(f'Some error occurred while listing streams: {err}')\n    else:\n        return list(yt)  # 'list' is required here to format the output when printing\n\n\ndef download(url: str, settings: dict, filename: str):\n    try:\n        yt = YouTube(url=url, on_progress_callback=on_progress).streams.\\\n            filter(**settings['params']).\\\n            order_by(settings['order_by']).\\\n            desc().\\\n            first()\n    except Exception as error:\n        print(f'Some error occurred while downloading: {error}')\n    else:\n        print(\n            colored(f'Downloading \"{shorten_name(yt.title)}\" '\n                    f'{settings[\"intro_message\"]}'))\n        yt.download(filename=f'{filename}.mp4', skip_existing=False,\n                    timeout=10, max_retries=5)\n        print(colored(f'\\n{settings[\"out_message\"]}', 'blue'))\n\n\ndef load_hq_video(url: str):\n    # Get output filename\n    filename = get_filename(url=url)\n\n    # Break execution if 'filename' return None\n    if filename is None:\n        return\n\n    # Remove old and temp files if exists, then download audio and video files\n    remove_files('audio.mp4', 'video.mp4', f'{filename}.mp4')\n    download(url=url, settings=PROFILES['audio'], filename='audio')\n    download(url=url, settings=PROFILES['video'], filename='video')\n\n    # Merge audio and video files\n    merge_mp4_audio_and_video(audio_file='audio', video_file='video',\n                              output_filename=filename)\n\n    # Print summary message\n    print(colored(f'Video was saved as \"{shorten_name(filename)}.mp4\"!',\n                  'green'))\n\n    # Remove temp files\n    remove_files('audio.mp4', 'video.mp4')\n\n    # Print empty line\n    print()\n\n\ndef load_hq_audio(url: str):\n    # Get output 
filename\n filename = get_filename(url=url)\n\n # Break execution if 'filename' return None\n if filename is None:\n return\n\n # Remove old and temp files if exists, then download mp4 audio file\n remove_files('audio.mp4', f'{filename}.mp3')\n download(url=url, settings=PROFILES['audio'], filename='audio')\n\n # Convert mp4 file into mp3 file\n convert_audio_to_mp3(audio_file='audio', output_filename=filename)\n\n # Print summary message\n print(colored(f'MP3 track was saved as \"{shorten_name(filename)}.mp3\"!',\n 'green'))\n\n # Remove temp file\n remove_files('audio.mp4')\n\n # Print empty line\n print()\n\n\ndef load_progressive(url: str):\n # Get output filename\n filename = get_filename(url=url)\n\n # Break execution if 'filename' return None\n if filename is None:\n return\n\n # Remove old file (not temp!) with the same name if it ever exist,\n # and download video in progressive format\n remove_files(f'{filename}.mp4')\n download(url=url, settings=PROFILES['progressive'], filename=filename)\n\n # Print summary message\n print(colored(f'Video was saved as \"{shorten_name(filename)}.mp4\"!',\n 'green'))\n\n # Print empty line\n print()\n\n\ndef make_pause(min: int, max: int):\n timeout = randrange(min, max)\n print(f'Making pause between requests for {timeout} seconds...\\n')\n sleep(timeout)\n\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"n8creator/youtube","sub_path":"youtube/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":5636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14127571237","text":"from llama_index.llms import Replicate\nfrom llama_index import set_global_tokenizer # set tokenizer to match LLM\nfrom transformers import AutoTokenizer\nfrom llama_index.embeddings import HuggingFaceEmbedding\nfrom llama_index import ServiceContext\nfrom llama_index import VectorStoreIndex, SimpleDirectoryReader\n\nquery_message = '''Using the data provided, write detailed study notes. \n Keep generating until you are finished summarizing the data.\n Ensure you use all the information provided. \n Make sure your response looks like a student wrote it during class. Limit each line in the response to 100 characters. \n Do not include information about the authors to keep it anonymous. 
'''\nllama2_7b_chat = \"meta/llama-2-7b-chat:8e6975e5ed6174911a6ff3d60540dfd4844201974602551e10e9e87ab143d81e\"\n\ndef llama_supernotes(folderpath):\n llm = Replicate(\n model=llama2_7b_chat,\n temperature=0.9,\n additional_kwargs={\"top_p\": 1, \"max_new_tokens\": 3000},\n )\n\n set_global_tokenizer(\n AutoTokenizer.from_pretrained(\"NousResearch/Llama-2-7b-chat-hf\").encode\n )\n\n embed_model = HuggingFaceEmbedding(model_name=\"BAAI/bge-small-en-v1.5\")\n service_context = ServiceContext.from_defaults(\n llm=llm, embed_model=embed_model\n )\n\n documents = SimpleDirectoryReader(folderpath).load_data()\n index = VectorStoreIndex.from_documents(\n documents, service_context=service_context\n )\n query_engine = index.as_query_engine()\n response = query_engine.query(query_message)\n if __name__ == \"__main__\":\n print(response.response)\n return response.response\n\nif __name__ == \"__main__\":\n llama_supernotes(\"docs\")\n\n","repo_name":"ishaan-arya/note-mesh-student","sub_path":"machine-learning/llm.py","file_name":"llm.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"47998429254","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom bailout.views import index, data, links, member_search, financial_services_committee, switchers, no_no, yes_yes, register, user_login, user_dashboard, rating_page, user_logout, members_by_user_state, user_ratings, analyze, order_by_pac, explain_variables, members_of_congress_list, member_of_congress_detail\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nurlpatterns = [\n url(r'^$', index),\n url(r'^data$', data),\n url(r'^links$', links),\n url(r'^member_search$', member_search),\n url(r'^financial_services_committee$', financial_services_committee),\n url(r'^switchers$', switchers),\n url(r'^no_no$', no_no),\n url(r'^yes_yes$', yes_yes),\n url(r'^register$', register),\n url(r'^login/$', user_login),\n url(r'^logout/$', user_logout),\n url(r'^dashboard$', user_dashboard),\n url(r'^dashboard/(?P\\d+)$', rating_page), #change to r'^/rate/...\n url(r'^dashboard/(?P\\w+)$', members_by_user_state),\n url(r'^ratings/$', user_ratings),\n url(r'^analyze/$', analyze),\n url(r'^order_by_pac/$', order_by_pac),\n url(r'^explain_variables$', explain_variables),\n url(r'^members_of_congress/$', members_of_congress_list),\n url(r'^member_of_congress/(?P[\\w\\s]+)/$', member_of_congress_detail),\n\n\n\n # url(r'^members/$', members_of_congress_list),\n # url(r'^api/(?P[0-9]+)/$', member_of_congress_detail),\n\n # url(r'^(?P[0-9]+)/$', views.detail, name='detail'),\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns)\n","repo_name":"joshuagendal/2008-Bank-Bailout-Data-Project","sub_path":"bailout/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"8994545692","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nObjWeights.py: \nweights applied \nto single objects\n\"\"\"\n\n#import fnmatch\n#import os\n#import sys\nfrom math import sqrt\nfrom array import array\n# logging\nimport logging\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.DEBUG)\n\n# ROOT\nimport ROOT\nimport metaroot\n\n# pyframe\nimport pyframe\n\n# pyutils\nimport rootutils\n\nimport mcutils\n\nGeV = 1000.0\n\n#------------------------------------------------------------------------------\nclass 
MuAllSF(pyframe.core.Algorithm):\n \"\"\"\n Single muon reco efficiency\n \"\"\"\n #__________________________________________________________________________\n def __init__(self, name=\"MuAllSF\",\n mu_index = None,\n #mu_level = None,\n key = None,\n scale = None,\n ):\n pyframe.core.Algorithm.__init__(self, name=name)\n self.mu_index = mu_index\n #self.mu_level = mu_level\n self.key = key\n self.scale = scale\n\n assert key, \"Must provide key for storing mu iso sf\"\n \n #_________________________________________________________________________\n def initialize(self): \n pass\n \"\"\"\n self.reco_levels = {\"Loose\":\"Loose\", \"Medium\":\"Loose\", \"Tight\":\"Loose\"}\n self.iso_levels = {\"Loose\":\"Loose\", \"Medium\":\"FixedCutLoose\", \"Tight\":\"FixedCutTightTrackOnly\"}\n self.ttva_levels = {\"Loose\": None, \"Medium\": None, \"Tight\": None}\n\n self.mu_levels = [\"Loose\", \"Medium\", \"Tight\"]\n if self.mu_level.startswith(\"Not\"):\n self.mu_levels.remove(self.mu_level.replace(\"Not\",\"\"))\n else:\n assert self.mu_level in self.mu_levels, \"ERROR: mu_level %s not recognised!!!\" % self.lead_mu_level\n self.mu_levels = [self.mu_level]\n \"\"\"\n \n #_________________________________________________________________________\n def execute(self, weight):\n sf=1.0\n if \"mc\" in self.sampletype: \n muons = self.store['muons']\n muon = muons[self.mu_index]\n \n if muon.isTruthMatchedToMuon:\n \n sf *= getattr(muon,\"_\".join([\"RecoEff\",\"SF\",\"Loose\"])).at(0)\n sf *= getattr(muon,\"_\".join([\"TTVAEff\",\"SF\"])).at(0)\n \n if getattr(muon,\"isIsolated_FixedCutTightTrackOnly\"):\n sf *= getattr(muon,\"_\".join([\"IsoEff\",\"SF\",\"FixedCutTightTrackOnly\"])).at(0)\n #elif getattr(muon,\"isIsolated_FixedCutLoose\"):\n # sf *= getattr(muon,\"_\".join([\"IsoEff\",\"SF\",\"FixedCutLoose\"])).at(0)\n elif getattr(muon,\"isIsolated_Loose\"):\n sf *= getattr(muon,\"_\".join([\"IsoEff\",\"SF\",\"Loose\"])).at(0)\n else: pass\n\n if self.scale: pass\n\n if self.key: \n self.store[self.key] = sf\n return True\n\n#------------------------------------------------------------------------------\nclass MuFakeFactorHist(pyframe.core.Algorithm):\n \"\"\"\n Applies the fake-factors to muon pairs\n \"\"\"\n #__________________________________________________________________________\n def __init__(self, name=\"MuFakeFactor\",config_file=None,mu_index=None,key=None,scale=None):\n pyframe.core.Algorithm.__init__(self,name=name)\n self.config_file = config_file\n self.mu_index = mu_index\n self.key = key\n self.scale = scale\n \n assert mu_index in [0,1], \"ERROR: mu_index must be in [0,1,2]\"\n assert config_file, \"Must provide config file!\"\n assert key, \"Must provide key for storing fakefactor\"\n #_________________________________________________________________________\n def initialize(self):\n f = ROOT.TFile.Open(self.config_file)\n assert f, \"Failed to open fake-factor config file: %s\"%(self.config_file)\n\n h_ff = f.Get(\"h_ff\")\n assert h_ff, \"Failed to get 'h_ff' from %s\"%(self.config_file)\n \n self.h_ff = h_ff.Clone()\n self.h_ff.SetDirectory(0)\n f.Close()\n #_________________________________________________________________________\n def execute(self, weight):\n muons = self.store['muons']\n mu = muons[self.mu_index]\n #if not self.sampletype == \"datadriven\": continue\n #if self.sampletype == \"mc\": continue\n pt_mu = mu.tlv.Pt()/GeV \n \n ff_mu = 1.0\n eff_mu = 0.0\n \n ibin_mu = self.h_ff.GetXaxis().FindBin(pt_mu) \n assert ibin_mu, \"ERROR: pt bin for lead mu not 
found!!!\"\n \n # error bars are symmetric\n #if self.mu_index == 0: \n # The previous line caused a bug in the \n # application of the fake-factors to the\n # validation region with di-muons triggers\n \n ff_mu = self.h_ff.GetBinContent(ibin_mu)\n eff_mu = self.h_ff.GetBinError(ibin_mu)\n \n if self.scale == 'up': \n ff_mu +=eff_mu\n if self.scale == 'dn': \n ff_mu -=eff_mu\n \n if self.key: \n self.store[self.key] = ff_mu\n\n return True\n\n#------------------------------------------------------------------------------\nclass MuFakeFactorGraph(pyframe.core.Algorithm):\n \"\"\"\n Applies the fake-factors to muon pairs\n \"\"\"\n #__________________________________________________________________________\n def __init__(self, name=\"MuFakeFactor\",config_file=None,mu_index=None,key=None,scale=None):\n pyframe.core.Algorithm.__init__(self,name=name)\n self.config_file = config_file\n self.mu_index = mu_index\n self.key = key\n self.scale = scale\n \n assert mu_index in [0,1], \"ERROR: mu_index must be in [0,1]\"\n assert config_file, \"Must provide config file!\"\n assert key, \"Must provide key for storing fakefactor\"\n #_________________________________________________________________________\n def initialize(self):\n f = ROOT.TFile.Open(self.config_file)\n assert f, \"Failed to open fake-factor config file: %s\"%(self.config_file)\n\n g_ff = f.Get(\"g_ff_stat_sys\")\n assert g_ff, \"Failed to get 'g_ff' from %s\"%(self.config_file)\n \n self.g_ff = g_ff.Clone()\n f.Close()\n #_________________________________________________________________________\n def execute(self, weight):\n muons = self.store['muons']\n mu = muons[self.mu_index]\n #if not self.sampletype == \"datadriven\": continue\n #if self.sampletype == \"mc\": continue\n pt_mu = mu.tlv.Pt()/GeV \n \n for ibin_mu in xrange(1,self.g_ff.GetN()):\n edlow = self.g_ff.GetX()[ibin_mu] - self.g_ff.GetEXlow()[ibin_mu]\n edhi = self.g_ff.GetX()[ibin_mu] + self.g_ff.GetEXhigh()[ibin_mu]\n if pt_mu>=edlow and pt_mu img1*alpha + img2*beta + gamma\ndst2 = cv2.addWeighted(img, 0.75, img2, 0.25, 0)\ncv2.imshow('imageadd2', dst2)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"Karma-Team/py-video-processing","sub_path":"examples/10_basic_operations_on_images.py","file_name":"10_basic_operations_on_images.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22744948661","text":"import click\nimport os\nimport time\nfrom decimal import Decimal\n\n# import watchtower\nimport datetime\nfrom src.common.common_helper import init_env, LOGGER, s3resource\nfrom src.common.Lineage import Lineage\nfrom src.creation.Creation import Creation\nfrom src.ingestion.Ingestion import Ingestion\nimport json\nfrom src.store.Store import Store\nfrom src.analize.Analize import Analize\n\n\ndef run_job_creation(aws_job_id: str, days) -> None:\n \"\"\"\n run the job ingestion process that access data from a weather API,\n insert it into a DynamoDB Database, and generate some queries\n over the data that are exported to a S3 bucket.\n\n aws_job_id: the job id to be executed\n days: the number of days of the data to be ingested from the weather API\n\n \"\"\"\n try:\n LOGGER.info(\"Creation Status Running\")\n start_time = datetime.datetime.now().time().strftime(\"%H:%M:%S\")\n with open(os.environ[\"ATTRIBUTES\"]) as json_attr:\n attr = json.load(json_attr)\n with open(os.environ[\"KEYSCHEMA\"]) as json_key_schema:\n key_schema = json.load(json_key_schema)\n 
with open(os.environ[\"PROVISION\"]) as json_provision:\n provisions = json.load(json_provision)\n create_table = Creation()\n\n create_table.process(\n table_name=os.environ[\"TABLE_LANDING\"],\n attributes=attr,\n schema=key_schema,\n provisions=provisions,\n )\n\n end_time = datetime.datetime.now().time().strftime(\"%H:%M:%S\")\n LOGGER.info(f\"CREATION FINISH SUCCESSFULLY AT {end_time}\")\n\n except Exception as e:\n LOGGER.info(\"Ingestion status Failed\")\n LOGGER.info(f\"{e}\")\n end_time = datetime.datetime.now().time().strftime(\"%H:%M:%S\")\n\n\ndef split_list(lst, n):\n for i in range(0, len(lst), n):\n yield lst[i : i + n]\n\n\ndef run_job_ingestion_process(aws_job_id: str, days) -> None:\n \"\"\"\n run the job ingestion process that access data from a weather API,\n insert it into a DynamoDB Database, and generate some queries\n over the data that are exported to a S3 bucket.\n\n aws_job_id: the job id to be executed\n days: the number of days of the data to be ingested from the weather API\n\n \"\"\"\n try:\n LOGGER.info(\"Ingestion Status Running\")\n start_time = datetime.datetime.now().time().strftime(\"%H:%M:%S\")\n ingestion = Ingestion()\n record_list, output_df = ingestion.process(days)\n chunk_list = list(split_list(record_list, 25))\n store_item = Store()\n for i, chunk in enumerate(chunk_list):\n store_item.store_transaction(\n item=chunk,\n job_id=aws_job_id + \"_\" + str(i),\n PK=\"timezoneloc\",\n table=os.environ[\"TABLE_LANDING\"],\n )\n analize = Analize()\n path = f\"s3://{os.environ['REFINED']+'/'+os.environ['PATH_01']+'/'}\"\n result_query = analize.process(\n df=output_df, sql_path=str(os.environ[\"QUERY_MAX_TEMP_LOC\"])\n )\n analize.export(\n df=result_query, s3=s3resource, path=path, partition_cols=[\"date\"]\n )\n result_query_02 = analize.process(\n df=output_df, sql_path=str(os.environ[\"QUERY_STATS_DAY\"])\n )\n path = f\"s3://{os.environ['REFINED']+'/'+os.environ['PATH_02']+'/'}\"\n analize.export(\n df=result_query_02,\n s3=s3resource,\n path=path,\n partition_cols=[\"locationtime\"],\n )\n end_time = datetime.datetime.now().time().strftime(\"%H:%M:%S\")\n LOGGER.info(f\"PROCESS END TO END FINISH SUCCESSFULLY AT {end_time}\")\n\n except Exception as e:\n LOGGER.info(\"END TO END status Failed\")\n LOGGER.error(f\"END TO END status Failed {e}\")\n LOGGER.info(f\"{e}\")\n end_time = datetime.datetime.now().time().strftime(\"%H:%M:%S\")\n\n\n@click.command()\n@click.option(\"--overwrite\", help=\"True, False\", required=False)\n@click.option(\"--days\", help=\"int value from 1 to n\", required=True)\n@click.option(\"--aws_job_id\", help=\"The current Job ID\")\n@click.option(\"--job\", help=\"creation_job, ingestion_process_job\", required=True)\n# @click.option(\"--job\", help=\"Job name \")\n\n\n@click.option(\n \"--env\",\n default=\"prod\",\n help=\"'dev' for development, 'prod' for production environment\",\n)\ndef main(job, overwrite, days, env, aws_job_id):\n start = time.time()\n # os.environ[\"OVERWRITE_DATA\"] = overwrite or \"False\"\n days = days or 5\n init_env(env)\n # Add Handler to cloudwatch logs\n # LOGGER.addHandler(\n # watchtower.CloudWatchLogHandler(log_group=os.environ[\"CLOUDWATCH_LOG_GROUP\"])\n # )\n LOGGER.info(f\"Job: {job} {aws_job_id}\")\n LOGGER.info(f\"Overwrite files: {overwrite}\")\n LOGGER.info(\"Execution Days: {0}\".format(days))\n jobname = \"-\".join([env, days, aws_job_id])\n os.environ[\"AWS_BATCH_JOB_ID\"] = aws_job_id\n LOGGER.info(f\"Starting a job: {jobname}\")\n # Start a job\n function_dict = {\n 
\"creation_job\": run_job_creation,\n \"ingestion_process_job\": run_job_ingestion_process,\n # \"processing_job\": run_job_processing,\n # \"analize_job\": run_job_analyzing,\n }\n # job_function = function_dict.get(\"creation_job\")\n # job_function(aws_job_id, days)\n job_function = function_dict.get(job)\n job_function(aws_job_id, days)\n\n\nif __name__ == \"__main__\":\n # allows to set the aws access key\n main(auto_envvar_prefix=\"X\")\n","repo_name":"wilisumo/weather_data_pipeline","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22422753913","text":"# USAGE\n# python config_image.py --template data/templatecnh.jpg --image data/testecnh.png\n\nimport numpy as np\nimport cv2\nimport argparse\nfrom scipy import ndimage\nimport pytesseract\nfrom align_images import align_images\n\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required=False,\n\thelp=\"Imagem que será alinhada com o template\")\nap.add_argument(\"-t\", \"--template\", required=True,\n\thelp=\"Template para fazer o alinhamento\")\nargs = vars(ap.parse_args())\n\n#imagem template\ntemplate_img = cv2.imread(args[\"template\"])\ninput_img = cv2.imread(args[\"image\"])\n\n\ndef ResizeIMG(image):\n \n #calculate the 50 percent of original dimensions\n width = 700\n (h, w) = image.shape[:2]\n r = width / float(w)\n dim = (width, int(h * r))\n # resize image\n image = cv2.resize(image, dim)\n\n return image\n\nresized_template = ResizeIMG(template_img)\nif input_img is not None:\n resized_image = ResizeIMG(input_img)\n final_image = align_images(resized_image, resized_template, debug=True) \n \nelse:\n final_image = resized_template \n\n\n# REGION OF INTEREST (ROI) SELECTION\n\n# initializing the list for storing the coordinates \ncoordinates = [] \n \n# Defining the event listener (callback function)\ndef shape_selection(event, x, y, flags, param): \n # making coordinates global\n global coordinates \n \n # Storing the (x1,y1) coordinates when left mouse button is pressed \n if event == cv2.EVENT_LBUTTONDOWN: \n coordinates = [(x, y)] \n \n # Storing the (x2,y2) coordinates when the left mouse button is released and make a rectangle on the selected region\n elif event == cv2.EVENT_LBUTTONUP: \n coordinates.append((x, y)) \n \n # Drawing a rectangle around the region of interest (roi)\n cv2.rectangle(image, coordinates[0], coordinates[1], (0,0,255), 2) \n cv2.imshow(\"image\", final_image) \n \n \n# load the image, clone it, and setup the mouse callback function \nimage = final_image\nimage_copy = image.copy()\ncv2.namedWindow(\"image\") \ncv2.setMouseCallback(\"image\", shape_selection) \n \n \n# keep looping until the 'q' key is pressed \nwhile True: \n # display the image and wait for a keypress \n cv2.imshow(\"image\", image) \n key = cv2.waitKey(1) & 0xFF\n \n if key==13: # If 'enter' is pressed, apply OCR\n break\n\n if key == ord(\"c\"): # Clear the selection when 'c' is pressed \n image = image_copy.copy() \n \nif len(coordinates) == 2: \n image_roi = image_copy[coordinates[0][1]:coordinates[1][1], \n coordinates[0][0]:coordinates[1][0]] \n cv2.imshow(\"Selected Region of Interest - Press any key to proceed\", image_roi) \n \n cv2.waitKey(0) \n \n# closing all open windows \ncv2.destroyAllWindows() \n 
\n\n#####################################################################################################\n# OPTICAL CHARACTER RECOGNITION (OCR) ON ROI\ncustom_config = r' -l por --oem 1'\ntext = pytesseract.image_to_string(image_roi,config=custom_config)\nprint(\"Text:\")\nprint(text)\nprint(\"Coordinates:\")\nprint(coordinates)","repo_name":"TgoPedrosa/Align-Image-And-Select-ROI","sub_path":"alignAndGetCoordinates/config_image.py","file_name":"config_image.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15728272050","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 1 14:56:00 2023\n\n@author: Ryan.Larson\n\"\"\"\n\n\nimport pandas as pd\nimport numpy as np\nfrom scipy import stats\nimport statsmodels.stats.api as sms\nimport resin_ttest_experiments as rtt\nimport datetime as dt\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom matplotlib.cbook import boxplot_stats\n\n\nkeys = [\"green\", \"orange\", \"pink\", \"red\", \"purple\", \"brown\"]\n\ndef step_1():\n file_list = [\"green.csv\", \"orange.csv\", \"pink.csv\", \"red.csv\", \"purple.csv\", \"brown.csv\"]\n \n mold_dfs = {key: None for key in keys}\n \n \n dropcols = [\"Leak Time\", \"Leak Count\", \"Parts Count\", \"Weekly Count\",\n \"Monthly Count\", \"Trash Count\", \"Lead\", \"Assistant 1\", \"Assistant 2\",\n \"Assistant 3\"]\n \n \n for file in file_list:\n color = file.replace(\".csv\",\"\")\n df = pd.read_csv(file)\n \n # Deal with time column\n df[\"time\"] = pd.to_datetime(df[\"time\"])\n df[\"Date\"] = pd.to_datetime(df[\"time\"]).dt.date\n df.drop(\"time\",axis=1,inplace=True)\n first_column = df.pop(\"Date\")\n df.insert(0,\"Date\", first_column)\n \n for col in dropcols:\n df = df.drop(col,axis=1)\n df = df.dropna(how=\"all\")\n for column in df.columns:\n df = df[df[column] != 0]\n \n mold_dfs[color] = df\n \n df.to_csv(\"{}2.csv\".format(color),index=False)\n # df = df.dropna(axis=0)\n # df.to_csv(bag, index=False)\n \n return mold_dfs\n \n\ndef step_2():\n \"\"\"\n \n\n Returns\n -------\n None.\n\n \"\"\"\n file_list = [\"green2.csv\", \"orange2.csv\", \"pink2.csv\", \"red2.csv\", \"purple2.csv\", \"brown2.csv\"]\n \n for i,file in enumerate(file_list):\n if i == 0:\n df_all = pd.read_csv(file)\n else:\n df = pd.read_csv(file)\n df_all = pd.concat([df_all, df], axis=0, ignore_index=True)\n \n df_all.dropna(axis=0,inplace=True)\n \n return df_all\n\n\ndef filter_saturated(df):\n df = df[df[\"Layup Time\"] != 276]\n df = df[df[\"Layup Time\"] != 275]\n df = df[df[\"Close Time\"] != 90]\n df = df[df[\"Resin Time\"] != 180]\n df.drop_duplicates(inplace=True)\n return df\n \n # mold_dfs = {key: None for key in keys}\n \n # for file in file_list:\n # color = file.replace(\"2.csv\",\"\")\n # df = pd.read_csv(file)\n # df = df[df[\"Layup Time\"] != 276]\n # df = df[df[\"Layup Time\"] != 275]\n # df = df[df[\"Close Time\"] != 90]\n # df = df[df[\"Resin Time\"] != 180]\n \n # # Remove duplicates\n # df.drop_duplicates(inplace=True)\n \n # mold_dfs[color] = df\n \n # df.to_csv(\"{}3.csv\".format(color),index=False)\n \n \n# alldata = pd.concat(frames)\n\n# feature_vals = list(alldata[\"Bag\"].unique())\n# n_feature_vals = len(feature_vals)\n\n# df_features = [alldata.where(alldata[\"Bag\"] == feature_val) for feature_val in feature_vals]\n# df_features = [df_feature.dropna(axis=0) for df_feature in df_features]\n\n\n# # rtt.oneway_anova(df_features, \"Cycle Time\")\n\n# 
rtt.find_stat_difference_2group(df_features[0], df_features[1], \"Resin Time\")\n# # rtt.find_stat_difference_2group(df_features[0], df_features[1], \"Cycle Time\")\n\n\nif __name__ == \"__main__\":\n # mold_dfs = step_1()\n df_all = step_2()\n \n df10 = df_all[df_all[\"Bag\"]==10.0]\n df11 = df_all[df_all[\"Bag\"]==11.0]\n df12 = df_all[df_all[\"Bag\"]==12.0]\n df13 = df_all[df_all[\"Bag\"]==13.0]\n df14 = df_all[df_all[\"Bag\"]==14.0]\n df15 = df_all[df_all[\"Bag\"]==15.0]\n df16 = df_all[df_all[\"Bag\"]==16.0]\n \n df10_filtered = filter_saturated(df10)\n df11_filtered = filter_saturated(df11)\n df12_filtered = filter_saturated(df12)\n df13_filtered = filter_saturated(df13)\n df14_filtered = filter_saturated(df14)\n df15_filtered = filter_saturated(df15)\n df16_filtered = filter_saturated(df16)\n df_all_filtered = filter_saturated(df_all)\n \n df10_no_outliers = df10_filtered.copy()\n df11_no_outliers = df11_filtered.copy()\n df12_no_outliers = df12_filtered.copy()\n df13_no_outliers = df13_filtered.copy()\n df14_no_outliers = df14_filtered.copy()\n df15_no_outliers = df15_filtered.copy()\n df16_no_outliers = df16_filtered.copy()\n \n ## Filter out outliers for each bag ##\n df_no_outliers_list = [df10_no_outliers, df11_no_outliers, df12_no_outliers, df13_no_outliers, df14_no_outliers, df15_no_outliers, df16_no_outliers]\n \n for i,df in enumerate(df_no_outliers_list):\n layup_stats = boxplot_stats(list(df[\"Layup Time\"]))[0]\n resin_stats = boxplot_stats(list(df[\"Resin Time\"]))[0]\n close_stats = boxplot_stats(list(df[\"Close Time\"]))[0]\n cycle_stats = boxplot_stats(list(df[\"Cycle Time\"]))[0]\n \n layup_conditions = [(df[\"Layup Time\"] > layup_stats[\"whishi\"]) | (df[\"Layup Time\"] < layup_stats[\"whislo\"])]\n close_conditions = [(df[\"Close Time\"] > close_stats[\"whishi\"]) | (df[\"Close Time\"] < close_stats[\"whislo\"])]\n resin_conditions = [(df[\"Resin Time\"] > resin_stats[\"whishi\"]) | (df[\"Resin Time\"] < resin_stats[\"whislo\"])]\n cycle_conditions = [(df[\"Cycle Time\"] > cycle_stats[\"whishi\"]) | (df[\"Cycle Time\"] < cycle_stats[\"whislo\"])]\n \n df[\"Layup Outlier\"] = np.transpose(np.where(layup_conditions, True, False))\n df[\"Close Outlier\"] = np.transpose(np.where(close_conditions, True, False))\n df[\"Resin Outlier\"] = np.transpose(np.where(resin_conditions, True, False))\n df[\"Cycle Outlier\"] = np.transpose(np.where(cycle_conditions, True, False))\n \n \n df_all_no_outliers = pd.concat([df10_no_outliers, df11_no_outliers])\n df_all_no_outliers = pd.concat([df_all_no_outliers, df12_no_outliers])\n df_all_no_outliers = pd.concat([df_all_no_outliers, df13_no_outliers])\n df_all_no_outliers = pd.concat([df_all_no_outliers, df14_no_outliers])\n df_all_no_outliers = pd.concat([df_all_no_outliers, df15_no_outliers])\n df_all_no_outliers = pd.concat([df_all_no_outliers, df16_no_outliers])\n \n df_all_layup_filtered = df_all_no_outliers[df_all_no_outliers[\"Layup Outlier\"] == False]\n df_all_close_filtered = df_all_no_outliers[df_all_no_outliers[\"Close Outlier\"] == False]\n df_all_resin_filtered = df_all_no_outliers[df_all_no_outliers[\"Resin Outlier\"] == False]\n df_all_cycle_filtered = df_all_no_outliers[df_all_no_outliers[\"Cycle Outlier\"] == False]\n \n sns.set(rc={\"figure.dpi\":300, \"figure.figsize\":(15.0, 8.27)})\n sns.set_style(\"whitegrid\")\n palette_str = \"Paired\"\n \n ### Bag Days as x\n # Layup Time\n plt.figure()\n sns.relplot(data=df_all_layup_filtered, x=\"Bag Days\", y=\"Layup Time\", hue=\"Bag\", palette=palette_str)\n 
plt.title(\"Age Effects on Layup Time\")\n \n # Close Time\n plt.figure()\n sns.relplot(data=df_all_close_filtered, x=\"Bag Days\", y=\"Close Time\", hue=\"Bag\", palette=palette_str)\n plt.title(\"Age Effects on Close Time\")\n \n # Resin Time\n plt.figure()\n sns.relplot(data=df_all_resin_filtered, x=\"Bag Days\", y=\"Resin Time\", hue=\"Bag\", palette=palette_str)\n plt.title(\"Age Effects on Resin Time\")\n \n # Cycle Time\n plt.figure()\n sns.relplot(data=df_all_cycle_filtered, x=\"Bag Days\", y=\"Cycle Time\", hue=\"Bag\", palette=palette_str)\n plt.title(\"Age Effects on Cycle Time\")\n \n ### Bag Cycles as x\n # Layup Time\n plt.figure()\n sns.relplot(data=df_all_layup_filtered, x=\"Bag Cycles\", y=\"Layup Time\", hue=\"Bag\", palette=palette_str)\n plt.title(\"Age Effects on Layup Time\")\n \n # Close Time\n plt.figure()\n sns.relplot(data=df_all_close_filtered, x=\"Bag Cycles\", y=\"Close Time\", hue=\"Bag\", palette=palette_str)\n plt.title(\"Age Effects on Close Time\")\n \n # Resin Time\n plt.figure()\n sns.relplot(data=df_all_resin_filtered, x=\"Bag Cycles\", y=\"Resin Time\", hue=\"Bag\", palette=palette_str)\n plt.title(\"Age Effects on Resin Time\")\n \n # Cycle Time\n plt.figure()\n sns.relplot(data=df_all_cycle_filtered, x=\"Bag Cycles\", y=\"Cycle Time\", hue=\"Bag\", palette=palette_str)\n plt.title(\"Age Effects on Cycle Time\")\n \n ### Boxplot by bag for overall differences\n # Layup Time\n plt.figure()\n ax = sns.boxplot(data=df_all_layup_filtered, x=\"Bag\", y=\"Layup Time\", palette=palette_str)\n medians = df_all_layup_filtered.groupby(['Bag'])['Layup Time'].median().values\n nobs = df_all_layup_filtered['Bag'].value_counts().values\n nobs = [str(x) for x in nobs.tolist()]\n nobs = [\"n: \" + i for i in nobs]\n pos = range(len(nobs))\n for tick,label in zip(pos,ax.get_xticklabels()):\n ax.text(pos[tick],\n medians[tick] + 0.04,\n nobs[tick],\n horizontalalignment='center',\n size='small',\n color='w',\n weight='semibold')\n \n # Close Time\n plt.figure()\n ax = sns.boxplot(data=df_all_close_filtered, x=\"Bag\", y=\"Close Time\", palette=palette_str)\n medians = df_all_close_filtered.groupby(['Bag'])['Close Time'].median().values\n nobs = df_all_close_filtered['Bag'].value_counts().values\n nobs = [str(x) for x in nobs.tolist()]\n nobs = [\"n: \" + i for i in nobs]\n pos = range(len(nobs))\n for tick,label in zip(pos,ax.get_xticklabels()):\n ax.text(pos[tick],\n medians[tick] + 0.04,\n nobs[tick],\n horizontalalignment='center',\n size='small',\n color='w',\n weight='semibold')\n \n # Resin Time\n plt.figure()\n ax = sns.boxplot(data=df_all_resin_filtered, x=\"Bag\", y=\"Resin Time\", palette=palette_str)\n medians = df_all_resin_filtered.groupby(['Bag'])['Resin Time'].median().values\n nobs = df_all_resin_filtered['Bag'].value_counts().values\n nobs = [str(x) for x in nobs.tolist()]\n nobs = [\"n: \" + i for i in nobs]\n pos = range(len(nobs))\n for tick,label in zip(pos,ax.get_xticklabels()):\n ax.text(pos[tick],\n medians[tick] + 0.04,\n nobs[tick],\n horizontalalignment='center',\n size='small',\n color='w',\n weight='semibold')\n \n # Cycle Time\n plt.figure()\n ax = sns.boxplot(data=df_all_cycle_filtered, x=\"Bag\", y=\"Cycle Time\", palette=palette_str)\n medians = df_all_cycle_filtered.groupby(['Bag'])['Cycle Time'].median().values\n nobs = df_all_cycle_filtered['Bag'].value_counts().values\n nobs = [str(x) for x in nobs.tolist()]\n nobs = [\"n: \" + i for i in nobs]\n pos = range(len(nobs))\n for tick,label in zip(pos,ax.get_xticklabels()):\n 
ax.text(pos[tick],\n medians[tick] + 0.04,\n nobs[tick],\n horizontalalignment='center',\n size='small',\n color='w',\n weight='semibold')\n \n ","repo_name":"rockwell-window-wells/random-projects","sub_path":"Silicone_Seals/bag_seal_cycle_analysis.py","file_name":"bag_seal_cycle_analysis.py","file_ext":"py","file_size_in_byte":10876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} {"seq_id":"24541430279","text":"import cv2\r\nimport numpy as np\r\n\r\nOriginal = cv2.imread(\"original.png\")\r\nChanged = cv2.imread(\"change.png\")\r\n# specify color boundaries\r\nP_lowerBoundary = [137, 73, 163]\r\nP_upperBoundary = [177, 113, 203]\r\n\r\nW_lowerBoundary = [220, 204, 182]\r\nW_upperBoundary = [255, 244, 222]\r\n\r\n# change data type to uint8 to be used with the bitwise_and function\r\nP_lowerBoundary = np.array(P_lowerBoundary, dtype = \"uint8\")\r\nP_upperBoundary = np.array(P_upperBoundary, dtype = \"uint8\")\r\n\r\nW_lowerBoundary = np.array(W_lowerBoundary, dtype = \"uint8\")\r\nW_upperBoundary = np.array(W_upperBoundary, dtype = \"uint8\")\r\n\r\n# Create Color Mask\r\nP_mask = cv2.inRange(Original, P_lowerBoundary, P_upperBoundary) \r\nW_mask = cv2.inRange(Original, W_lowerBoundary, W_upperBoundary)\r\n\r\n# Merge detected color with mask\r\nP_res = cv2.bitwise_and(Original, Original, mask = P_mask)\r\nW_res = cv2.bitwise_and(Original, Original, mask = W_mask)\r\n\r\ncv2.imshow('PINK', np.hstack([Original, P_res]))\r\ncv2.imshow('White', np.hstack([Original, W_res]))\r\n\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n","repo_name":"NadaAbbasMohamed/ROV-Competition","sub_path":"Task 1 - Color Detection Coral Reef Color Change/color detection_2.py","file_name":"color detection_2.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} {"seq_id":"74265362329","text":"import os\n\nfrom dotenv import load_dotenv\nfrom pathlib import Path\n\nload_dotenv(dotenv_path=Path(\".env\")) # Variables (passwords, IDs) from the .env file\n\n\nADMINS = os.environ.get(\"ADMINS\").split(\",\")\nBOT_API_KEY = os.environ.get(\"BOT_API_KEY\")\n\nSHOW_FREE = \"Когда свободное время\"\nABOUT_MY = \"Обо мне\"\n\nSHOW_ALL = \"Показать все записи\"\nADD_GIRL = \"Добавить запись\"\nDELETE_GIRL = \"Удалить запись\"\nADD_ADMIN = \"Добавить администратора\"\n\nBUSY = \"Занята\"\nFREE = \"Свободная\"\n\nACCOUNT_TYPE_ADMIN = 1\nACCOUNT_TYPE_GIRL = 2\n\n# So that only these names are exported when \"import *\" is used\n__all__ = (\n \"ADMINS\",\n \"BOT_API_KEY\",\n \"SHOW_FREE\",\n \"SHOW_ALL\",\n \"ADD_GIRL\",\n \"DELETE_GIRL\",\n \"ADD_ADMIN\",\n \"BUSY\",\n \"FREE\",\n \"ACCOUNT_TYPE_ADMIN\",\n \"ACCOUNT_TYPE_GIRL\",\n)\n","repo_name":"fier43/ManageBot","sub_path":"bot/bot/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} {"seq_id":"29460816112","text":"import cadquery as cq\nfrom . 
import Base\nfrom cadqueryhelper import shape\nfrom cqterrain import Ladder\n\nclass Ring(Base):\n def __init__(self):\n super().__init__()\n self.cut_diameter = 76\n self.diameter = self.cut_diameter + 10\n self.inset = 5\n self.height = 10\n\n self.render_ladders = True\n self.ladder_height = 71\n self.ladder_length = 25\n self.ladder_width = 10\n self.ladder_cut_padding = 1.5\n self.ladder_cut_chamfer = 2\n\n self.ring = None\n self.cut_ladders = None\n self.ladders = None\n self.cut_ring = None\n\n def __make_ring(self):\n ring = shape.cone(\n radius=self.diameter/2,\n radius_top=self.diameter/2-self.inset,\n height=self.height\n )\n\n cut_ring = (\n cq.Workplane(\"XY\")\n .cylinder(self.ladder_height, self.cut_diameter/2)\n )\n\n ring_slice = (cq.Workplane(\"XY\").box(10,.5,self.height))\n\n self.cut_ring = cut_ring.translate((0,0,self.ladder_height/2))\n self.ring = (\n ring.cut(cut_ring)\n .translate((0,0,self.height/2))\n .cut(ring_slice.translate((self.diameter/2-.1,0,self.height/2)))\n )\n\n def __make_cut_ladders(self):\n x_translate = self.cut_diameter/2+self.ladder_length/2+self.ladder_cut_padding\n cut_ladder = (\n cq.Workplane(\"XY\")\n .box(self.ladder_length,self.ladder_length,self.height)\n .faces(\"X or -X\")\n .edges(\"Z\")\n .chamfer(self.ladder_cut_chamfer)\n .translate((\n 0,\n x_translate,\n self.height/2\n ))\n )\n\n cut_ladders = (\n cq.Workplane(\"XY\")\n .union(cut_ladder)\n .union(cut_ladder.rotate((0,0,1),(0,0,0),180))\n )\n self.cut_ladders = cut_ladders\n\n def __make_ladder(self):\n bp = Ladder()\n bp.length = self.ladder_length\n bp.width = self.ladder_width\n bp.height = self.ladder_height\n bp.make()\n bp.rungs = bp.rungs.translate((0,self.ladder_width/4,0))\n\n ladder = bp.build()\n\n ladder = ladder.translate((\n 0,\n self.cut_diameter/2+.6,\n self.ladder_height/2\n )).cut(self.cut_ring)\n\n ladders = (\n cq.Workplane()\n .union(ladder)\n .union(ladder.rotate((0,0,1),(0,0,0),180))\n )\n\n #show_object(ladders)\n\n self.ladders = ladders\n\n def make(self):\n super().make()\n self.__make_ring()\n\n if self.render_ladders:\n self.__make_cut_ladders()\n self.__make_ladder()\n\n def build(self):\n super().build()\n scene = (\n cq.Workplane(\"XY\")\n .union(self.ring)\n )\n\n if self.render_ladders and self.ladders:\n scene = (\n scene\n .cut(self.cut_ladders)\n .add(self.ladders)\n )\n return scene\n","repo_name":"medicationforall/cqindustry","sub_path":"src/cqindustry/Ring.py","file_name":"Ring.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"19081093260","text":"import cs50\n\nwhile True:\n print(\"Choose the height (between 0 and 23): \", end=\"\")\n height = cs50.get_int()\n if(height>0 and height<23):\n break\nfor i in range(height):\n for x in range(height-i):\n print(\" \", end = \"\")\n for a in range(i+1):\n print(\"#\", end = \"\")\n print(\"\")\n","repo_name":"BATMOOSEMIKE/CS50-Psets","sub_path":"pset6/mario.py","file_name":"mario.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29305710713","text":"# Chapter 4: Containers and Advanced Controls\r\n# Recipe 8: Building a system tray application\r\n#\r\nimport urllib\r\nimport json\r\nimport wx\r\n\r\nID_GET_CITY = wx.NewId()\r\n\r\nclass WeatherTray(wx.TaskBarIcon):\r\n def __init__(self):\r\n super(WeatherTray, self).__init__()\r\n self.data = { 'desc' : \"Unknown\", 'temp' : \"??\" 
}\r\n self.UpdateData(\"London,UK\")\r\n self.Bind(wx.EVT_MENU, self.OnMenu)\r\n\r\n def UpdateData(self, city):\r\n src = \"http://api.openweathermap.org/data/2.5/weather?q=%s\"\r\n try:\r\n formatted = city.replace(' ', \"%20\")\r\n url = urllib.urlopen(src % formatted)\r\n j = json.load(url)\r\n\r\n weather = j['weather'][0]\r\n temp = j['main']['temp']\r\n self.data = dict()\r\n self.data['desc'] = weather['main']\r\n self.data['icon'] = weather['icon']\r\n c = float(temp) - 273.15\r\n self.data['temp'] = c\r\n \r\n self.city = city\r\n self.UpdateIcon()\r\n except:\r\n pass\r\n\r\n def UpdateIcon(self):\r\n img = None\r\n try:\r\n loc = \"http://openweathermap.org/img/w/%s.png\"\r\n url = urllib.urlopen(loc % self.data['icon'])\r\n img = wx.ImageFromStream(url, wx.BITMAP_TYPE_PNG)\r\n img = wx.BitmapFromImage(img)\r\n except:\r\n img = wx.Bitmap('errIcon.png')\r\n icon = wx.IconFromBitmap(img)\r\n self.SetIcon(icon)\r\n\r\n def CreatePopupMenu(self):\r\n menu = wx.Menu()\r\n \r\n data = (self.city, \r\n \"Weather: %s\" % self.data['desc'], \r\n \"Temp: %s C\" % self.data['temp'])\r\n for d in data:\r\n item = menu.Append(wx.ID_ANY, d)\r\n item.Enable(False)\r\n\r\n menu.AppendSeparator()\r\n menu.Append(ID_GET_CITY, \"Enter city name...\")\r\n menu.AppendSeparator()\r\n menu.Append(wx.ID_CLOSE)\r\n return menu\r\n\r\n def OnMenu(self, event):\r\n if event.Id == wx.ID_CLOSE:\r\n self.Destroy()\r\n elif event.Id == ID_GET_CITY:\r\n t = wx.GetTextFromUser(\"Enter City Name (City,Country):\", \r\n default_value=self.city)\r\n if t:\r\n self.UpdateData(t)\r\n else:\r\n event.Skip()\r\n\r\nclass WeatherTrayApp(wx.App):\r\n def OnInit(self):\r\n self._trayIcon = WeatherTray()\r\n return True\r\n\r\nif __name__ == \"__main__\":\r\n app = WeatherTrayApp(False)\r\n app.MainLoop()\r\n ","repo_name":"cubu/wxPython-Application-Development-Cookbook","sub_path":"Chapter 4/08/sysTrayApp.py","file_name":"sysTrayApp.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"11877015022","text":"import os\nimport pathlib\nimport pickle\nimport shutil\nimport time\nfrom functools import partial\nimport json \nimport fire\nimport numpy as np\nimport torch\nfrom google.protobuf import text_format\nfrom tensorboardX import SummaryWriter\nimport torchvision\nimport torchplus\nimport second.data.kitti_common as kitti\nfrom second.builder import target_assigner_builder, voxel_builder\nfrom second.data.preprocess_tr import merge_second_batch_tr\nfrom second.data.preprocess import merge_second_batch\nfrom second.data.preprocess_tr_vid import merge_second_batch_tr_vid\nfrom second.data.preprocess_tr_vid_spatio import merge_second_batch_tr_vid_spatio\nfrom second.protos import pipeline_pb2\nfrom second.pytorch.builder import (box_coder_builder, input_reader_builder_tr, input_reader_builder_tr_vid, input_reader_builder_tr_vid_spatio,\n lr_scheduler_builder, optimizer_builder,\n second_builder,\n second_2stage_builder,\n second_endtoend_builder,\n second_endtoend_builder_tr,\n second_endtoend_builder_tr_share,\n second_endtoend_builder_tr_share_freeze,\n second_endtoend_builder_tr_share_freeze_mmmot,\n second_endtoend_builder_tr_share_freeze_mmmot_ori,\n second_endtoend_builder_spatio)\nfrom second.utils.eval import get_coco_eval_result, get_official_eval_result\nfrom second.utils.progress_bar import ProgressBar\nfrom collections import OrderedDict\n# import torch.distributed as dist\n# from apex.parallel import 
DistributedDataParallel as DDP\n\nimport sys\nsys.path.append('./mmMOT')\nimport argparse\nimport logging\nimport os\nimport pprint\nimport time\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport yaml\nfrom easydict import EasyDict\nfrom kitti_devkit.evaluate_tracking import evaluate as evaluate_tr\nfrom tensorboardX import SummaryWriter\nfrom torch.utils.data import DataLoader\n# from models import model_entry\n# from tracking_model import TrackingModule\nfrom tracking_model_vid import TrackingModule\nfrom utils_tr.build_util import (build_augmentation, build_criterion, \n build_dataset, build_lr_scheduler, build_model,\n build_optim)\nfrom utils_tr.data_util import write_kitti_result\nfrom utils_tr.train_util import (AverageMeter, DistributedGivenIterationSampler,\n create_logger, load_state, save_checkpoint)\n\n\ndef validate(val_loader,\n net,\n step,\n config,\n result_path,\n part='train',\n fusion_list=None,\n fuse_prob=False):\n\n logger = logging.getLogger('global_logger')\n for i, (sequence) in enumerate(val_loader):\n logger.info('Test: [{}/{}]\\tSequence ID: KITTI-{}'.format(\n i, len(val_loader), sequence.name))\n seq_loader = DataLoader(\n sequence,\n batch_size=config.batch_size,\n shuffle=False,\n num_workers=config.workers,\n pin_memory=True)\n if len(seq_loader) == 0:\n net.eval_tr()\n logger.info('Empty Sequence ID: KITTI-{}, skip'.format(\n sequence.name))\n else:\n validate_seq(seq_loader, net, config)\n\n write_kitti_result(\n result_path,\n sequence.name,\n step,\n net.frames_id,\n net.frames_det,\n part=part)\n\n MOTA, MOTP, recall, prec, F1, fp, fn, id_switches = evaluate_tr(\n step, result_path, part=part)\n\n # net.train()\n return MOTA, MOTP, recall, prec, F1, fp, fn, id_switches\n\n\ndef validate_seq(val_loader,\n net,\n config,\n fusion_list=None,\n fuse_prob=False):\n batch_time = AverageMeter(0)\n\n # switch to evaluate mode\n net.eval_tr()\n\n logger = logging.getLogger('global_logger')\n end = time.time()\n\n with torch.no_grad():\n for i, (input, det_info, dets, det_split) in enumerate(val_loader):\n input = input.cuda()\n if len(det_info) > 0:\n for k, v in det_info.items():\n det_info[k] = det_info[k].cuda() if not isinstance(\n det_info[k], list) else det_info[k]\n\n # compute output\n aligned_ids, aligned_dets, frame_start = net.predict(\n input[0], det_info, dets, det_split)\n\n batch_time.update(time.time() - end)\n end = time.time()\n if i % config.print_freq == 0:\n logger.info(\n 'Test Frame: [{0}/{1}]\\tTime '\n '{batch_time.val:.3f} ({batch_time.avg:.3f})'.format(\n i, len(val_loader), batch_time=batch_time))\n\n\ndef _get_pos_neg_loss(cls_loss, labels):\n # cls_loss: [N, num_anchors, num_class]\n # labels: [N, num_anchors]\n batch_size = cls_loss.shape[0]\n if cls_loss.shape[-1] == 1 or len(cls_loss.shape) == 2:\n cls_pos_loss = (labels > 0).type_as(cls_loss) * cls_loss.view(\n batch_size, -1)\n cls_neg_loss = (labels == 0).type_as(cls_loss) * cls_loss.view(\n batch_size, -1)\n cls_pos_loss = cls_pos_loss.sum() / batch_size\n cls_neg_loss = cls_neg_loss.sum() / batch_size\n else:\n cls_pos_loss = cls_loss[..., 1:].sum() / batch_size\n cls_neg_loss = cls_loss[..., 0].sum() / batch_size\n return cls_pos_loss, cls_neg_loss\n\n\ndef _flat_nested_json_dict(json_dict, flatted, sep=\".\", start=\"\"):\n for k, v in json_dict.items():\n if isinstance(v, dict):\n _flat_nested_json_dict(v, flatted, sep, start + sep + k)\n else:\n flatted[start + sep + k] = v\n\n\ndef flat_nested_json_dict(json_dict, sep=\".\") -> dict:\n 
\"\"\"flat a nested json-like dict. this function make shadow copy.\n \"\"\"\n flatted = {}\n for k, v in json_dict.items():\n if isinstance(v, dict):\n _flat_nested_json_dict(v, flatted, sep, k)\n else:\n flatted[k] = v\n return flatted\n\n\ndef example_convert_to_torch(example, dtype=torch.float32,\n device=None) -> dict:\n device = device or torch.device(\"cuda:0\")\n example_torch = {}\n float_names = [\n \"voxels\", \"anchors\", \"reg_targets\", \"reg_weights\", \"bev_map\", \"rect\",\n \"Trv2c\", \"P2\", \"f_view\",\"idxs_norm\", \"p_voxels\", \"p_f_view\", \"p_idxs_norm\", 'box_id', 'p_box_id', 'gt_boxes', 'p_gt_boxes', 'boxes_2d', 'p_boxes_2d'\n ]\n\n for k, v in example.items():\n if k in float_names:\n example_torch[k] = torch.tensor(v, dtype=torch.float32, device=device).to(dtype)\n elif k in [\"coordinates\", \"labels\", \"num_points\", \"p_coordinates\", \"p_num_points\"]:\n example_torch[k] = torch.tensor(\n v, dtype=torch.int32, device=device)\n elif k in [\"anchors_mask\"]:\n example_torch[k] = torch.tensor(\n v, dtype=torch.uint8, device=device)\n else:\n example_torch[k] = v\n return example_torch\n\n\ndef train(config_path,\n model_dir,\n use_fusion=True,\n use_ft=False,\n use_second_stage=True,\n use_endtoend=True,\n result_path=None,\n create_folder=False,\n display_step=50,\n summary_step=5,\n local_rank=0,\n pickle_result=True,\n patchs=None):\n \"\"\"train a VoxelNet mod[el specified by a config file.\n \"\"\"\n ############ tracking\n config_tr_path = '/mnt/new_iou/second.pytorch/second/mmMOT/experiments/second/spatio_test/config.yaml'\n load_tr_path = '/mnt/new_iou/second.pytorch/second/mmMOT/experiments/second/spatio_test/results'\n with open(config_tr_path) as f:\n config_tr = yaml.load(f, Loader=yaml.FullLoader)\n\n result_path_tr = load_tr_path\n config_tr = EasyDict(config_tr['common'])\n config_tr.save_path = os.path.dirname(config_tr_path)\n\n # create model\n # model_tr = build_model(config_tr)\n # model_tr.cuda()\n\n # optimizer_tr = build_optim(model_tr, config_tr)\n\n criterion_tr = build_criterion(config_tr.loss)\n\n last_iter = -1\n best_mota = 0\n # if load_tr_path:\n # if False:\n # best_mota, last_iter = load_state(\n # load_tr_path, model_tr, optimizer=optimizer_tr)\n # else:\n # load_state(load_tr_path, model_tr)\n\n cudnn.benchmark = True\n\n # Data loading code\n train_transform, valid_transform = build_augmentation(config_tr.augmentation)\n\n # # train\n # train_dataset = build_dataset(\n # config_tr,\n # set_source='train',\n # evaluate=False,\n # train_transform=train_transform)\n # trainval_dataset = build_dataset(\n # config_tr,\n # set_source='train',\n # evaluate=True,\n # valid_transform=valid_transform)\n # val_dataset = build_dataset(\n # config_tr,\n # set_source='val',\n # evaluate=True,\n # valid_transform=valid_transform)\n\n # train_sampler = DistributedGivenIterationSampler(\n # train_dataset,\n # config_tr.lr_scheduler.max_iter,\n # config_tr.batch_size,\n # world_size=1,\n # rank=0,\n # last_iter=last_iter)\n\n # import pdb; pdb.set_trace()\n # train_loader = DataLoader(\n # train_dataset,\n # batch_size=config_tr.batch_size,\n # shuffle=False,\n # num_workers=config_tr.workers,\n # pin_memory=True)\n\n tb_logger = SummaryWriter(config_tr.save_path + '/events')\n logger = create_logger('global_logger', config_tr.save_path + '/log.txt')\n # logger.info('args: {}'.format(pprint.pformat(args)))\n logger.info('config: {}'.format(pprint.pformat(config_tr)))\n\n # tracking_module = TrackingModule(model_tr, criterion_tr,\n # 
config_tr.det_type)\n # tracking_module.model.train()\n #### tracking setup done\n\n if create_folder:\n if pathlib.Path(model_dir).exists():\n model_dir = torchplus.train.create_folder(model_dir)\n patchs = patchs or []\n model_dir = pathlib.Path(model_dir)\n model_dir.mkdir(parents=True, exist_ok=True)\n if result_path is None:\n result_path = model_dir / 'results'\n config_file_bkp = \"pipeline.config\"\n config = pipeline_pb2.TrainEvalPipelineConfig()\n with open(config_path, \"r\") as f:\n proto_str = f.read()\n text_format.Merge(proto_str, config)\n for patch in patchs:\n patch = \"config.\" + patch \n exec(patch)\n shutil.copyfile(config_path, str(model_dir / config_file_bkp))\n input_cfg = config.train_input_reader\n eval_input_cfg = config.eval_input_reader\n model_cfg = config.model.second\n train_cfg = config.train_config\n\n ######################\n # BUILD VOXEL GENERATOR\n ######################\n voxel_generator = voxel_builder.build(model_cfg.voxel_generator)\n ######################\n # BUILD TARGET ASSIGNER\n ######################\n bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]\n box_coder = box_coder_builder.build(model_cfg.box_coder)\n target_assigner_cfg = model_cfg.target_assigner\n target_assigner = target_assigner_builder.build(target_assigner_cfg,\n bv_range, box_coder)\n class_names = target_assigner.classes\n ######################\n # BUILD NET\n ######################\n center_limit_range = model_cfg.post_center_limit_range\n # if use_second_stage:\n # net = second_2stage_builder.build(model_cfg, voxel_generator, target_assigner)\n if use_endtoend:\n net = second_endtoend_builder_spatio.build(model_cfg, voxel_generator, target_assigner, criterion_tr, config_tr.det_type)\n else:\n net = second_builder.build(model_cfg, voxel_generator, target_assigner)\n net.cuda()\n print(\"num_trainable parameters:\", len(list(net.parameters())))\n\n for n, p in net.named_parameters():\n print(n, p.shape)\n # pth_name = './pre_weight/first_stage_gating_det/voxelnet-17013.tckpt'\n pth_name = './pre_weight/second_stage_gating_det/voxelnet-35000.tckpt'\n\n res_pre_weights = torch.load(pth_name)\n new_res_state_dict = OrderedDict()\n model_dict = net.state_dict()\n for k,v in res_pre_weights.items():\n if 'global_step' not in k:\n # if 'dir' not in k:\n new_res_state_dict[k] = v\n model_dict.update(new_res_state_dict)\n net.load_state_dict(model_dict)\n\n # for k, weight in dict(net.named_parameters()).items(): # lidar_conv, p_lidar_conv, fusion_module, w_det, w_link, appearance, point_net\n # if 'middle_feature_extractor' in '%s'%(k) or 'rpn' in '%s'%(k) or 'second_rpn' in '%s'%(k):\n # weight.requires_grad = False\n\n # BUILD OPTIMIZER\n #####################\n # we need global_step to create lr_scheduler, so restore net first.\n torchplus.train.try_restore_latest_checkpoints(model_dir, [net])\n gstep = net.get_global_step() - 1\n optimizer_cfg = train_cfg.optimizer\n if train_cfg.enable_mixed_precision:\n net.half()\n net.metrics_to_float()\n net.convert_norm_to_float(net)\n loss_scale = train_cfg.loss_scale_factor\n mixed_optimizer = optimizer_builder.build(optimizer_cfg, net, mixed=train_cfg.enable_mixed_precision, loss_scale=loss_scale)\n optimizer = mixed_optimizer\n\n # must restore optimizer AFTER using MixedPrecisionWrapper\n torchplus.train.try_restore_latest_checkpoints(model_dir,\n [mixed_optimizer])\n lr_scheduler = lr_scheduler_builder.build(optimizer_cfg, optimizer, train_cfg.steps)\n if train_cfg.enable_mixed_precision:\n float_dtype = torch.float16\n 
else:\n float_dtype = torch.float32\n ######################\n # PREPARE INPUT\n ######################\n # import pdb; pdb.set_trace()\n dataset = input_reader_builder_tr_vid_spatio.build(\n input_cfg,\n model_cfg,\n training=True,\n voxel_generator=voxel_generator,\n target_assigner=target_assigner,\n config_tr=config_tr,\n set_source='train',\n evaluate=False,\n train_transform=train_transform)\n eval_dataset = input_reader_builder_tr_vid_spatio.build(\n eval_input_cfg,\n model_cfg,\n training=False,\n voxel_generator=voxel_generator,\n target_assigner=target_assigner,\n config_tr=config_tr,\n set_source='val',\n evaluate=True,\n valid_transform=valid_transform)\n\n def _worker_init_fn(worker_id):\n time_seed = np.array(time.time(), dtype=np.int32)\n np.random.seed(time_seed + worker_id)\n print(f\"WORKER {worker_id} seed:\", np.random.get_state()[1][0])\n\n dataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=input_cfg.batch_size,\n shuffle=True,\n num_workers=input_cfg.num_workers,\n pin_memory=False,\n collate_fn=merge_second_batch_tr_vid_spatio,\n worker_init_fn=_worker_init_fn)\n\n eval_dataloader = torch.utils.data.DataLoader(\n eval_dataset,\n batch_size=eval_input_cfg.batch_size,\n shuffle=False,\n num_workers=eval_input_cfg.num_workers,\n pin_memory=False,\n collate_fn=merge_second_batch_tr_vid_spatio)\n \n data_iter = iter(dataloader)\n\n ######################\n # TRAINING\n ######################\n training_detail = []\n log_path = model_dir / 'log.txt'\n training_detail_path = model_dir / 'log.json'\n if training_detail_path.exists():\n with open(training_detail_path, 'r') as f:\n training_detail = json.load(f)\n logf = open(log_path, 'a')\n logf.write(proto_str)\n logf.write(\"\\n\")\n summary_dir = model_dir / 'summary'\n summary_dir.mkdir(parents=True, exist_ok=True)\n writer = SummaryWriter(str(summary_dir))\n\n total_step_elapsed = 0\n remain_steps = train_cfg.steps - net.get_global_step()\n t = time.time()\n ckpt_start_time = t\n\n total_loop = train_cfg.steps // train_cfg.steps_per_eval + 1\n clear_metrics_every_epoch = train_cfg.clear_metrics_every_epoch\n\n if train_cfg.steps % train_cfg.steps_per_eval == 0:\n total_loop -= 1\n mixed_optimizer.zero_grad()\n \n # optimizer_tr.zero_grad()\n logger = logging.getLogger('global_logger')\n best_mota = 0\n losses = AverageMeter(config_tr.print_freq)\n\n total_steps = train_cfg.steps\n total_loop = total_steps // len(dataloader)\n \n kkkk = 0\n for step in range(total_loop):\n for i, (example) in enumerate(dataloader):\n\n curr_step = 0 + i\n kkkk += 1\n lr_scheduler.step(net.get_global_step())\n\n example_torch = example_convert_to_torch(example, float_dtype)\n\n batch_size = example[\"anchors\"].shape[0]\n\n ret_dict = net(example_torch, train_param=True)\n\n cls_preds = ret_dict[\"cls_preds\"]\n loss = ret_dict[\"loss\"].mean()\n cls_loss_reduced = ret_dict[\"cls_loss_reduced\"].mean()\n loc_loss_reduced = ret_dict[\"loc_loss_reduced\"].mean()\n cls_pos_loss = ret_dict[\"cls_pos_loss\"]\n cls_neg_loss = ret_dict[\"cls_neg_loss\"]\n loc_loss = ret_dict[\"loc_loss\"]\n cls_loss = ret_dict[\"cls_loss\"]\n dir_loss_reduced = ret_dict[\"dir_loss_reduced\"]\n cared = ret_dict[\"cared\"]\n # loss_tr = ret_dict[\"loss_tr\"]\n\n if use_second_stage or use_endtoend:\n labels = ret_dict[\"labels\"]\n else:\n labels = example_torch[\"labels\"]\n if train_cfg.enable_mixed_precision:\n loss *= loss_scale\n\n try:\n loss.backward()\n except:\n abc = 1\n # import pdb; pdb.set_trace()\n # abc = 1\n # 
torch.nn.utils.clip_grad_norm_(net.parameters(), 10.0)\n # optimizer_tr.step()\n # optimizer_tr.zero_grad()\n mixed_optimizer.step()\n mixed_optimizer.zero_grad()\n net.update_global_step()\n net_metrics = net.update_metrics(cls_loss_reduced,\n loc_loss_reduced, cls_preds,\n labels, cared)\n\n step_time = (time.time() - t)\n t = time.time()\n metrics = {}\n num_pos = int((labels > 0)[0].float().sum().cpu().numpy())\n num_neg = int((labels == 0)[0].float().sum().cpu().numpy())\n if 'anchors_mask' not in example_torch:\n num_anchors = example_torch['anchors'].shape[1]\n else:\n num_anchors = int(example_torch['anchors_mask'][0].sum())\n global_step = net.get_global_step()\n # print(step)\n if global_step % display_step == 0:\n loc_loss_elem = [\n float(loc_loss[:, :, i].sum().detach().cpu().numpy() /\n batch_size) for i in range(loc_loss.shape[-1])\n ]\n metrics[\"type\"] = \"step_info\"\n metrics[\"step\"] = global_step\n metrics[\"steptime\"] = step_time\n metrics.update(net_metrics)\n metrics[\"loss\"] = {}\n metrics[\"loss\"][\"loc_elem\"] = loc_loss_elem\n metrics[\"loss\"][\"cls_pos_rt\"] = float(\n cls_pos_loss.detach().cpu().numpy())\n metrics[\"loss\"][\"cls_neg_rt\"] = float(\n cls_neg_loss.detach().cpu().numpy())\n if model_cfg.use_direction_classifier:\n metrics[\"loss\"][\"dir_rt\"] = float(\n dir_loss_reduced.detach().cpu().numpy())\n metrics[\"num_vox\"] = int(example_torch[\"voxels\"].shape[0])\n metrics[\"num_pos\"] = int(num_pos)\n metrics[\"num_neg\"] = int(num_neg)\n metrics[\"num_anchors\"] = int(num_anchors)\n metrics[\"lr\"] = float(\n optimizer.lr)\n\n metrics[\"image_idx\"] = example['image_idx'][0][7:]\n training_detail.append(metrics)\n flatted_metrics = flat_nested_json_dict(metrics)\n flatted_summarys = flat_nested_json_dict(metrics, \"/\")\n for k, v in flatted_summarys.items():\n if isinstance(v, (list, tuple)):\n v = {str(i): e for i, e in enumerate(v)}\n if type(v) != str and ('loc_elem' not in k):\n writer.add_scalars(k, v, global_step)\n else:\n if (type(v) != str) and ('loc_elem' not in k):\n writer.add_scalar(k, v, global_step)\n\n metrics_str_list = []\n for k, v in flatted_metrics.items():\n if isinstance(v, float):\n metrics_str_list.append(f\"{k}={v:.3}\")\n elif isinstance(v, (list, tuple)):\n if v and isinstance(v[0], float):\n v_str = ', '.join([f\"{e:.3}\" for e in v])\n metrics_str_list.append(f\"{k}=[{v_str}]\")\n else:\n metrics_str_list.append(f\"{k}={v}\")\n else:\n metrics_str_list.append(f\"{k}={v}\")\n log_str = ', '.join(metrics_str_list)\n print(log_str, file=logf)\n print(log_str)\n\n ckpt_elasped_time = time.time() - ckpt_start_time\n if ckpt_elasped_time > train_cfg.save_checkpoints_secs:\n torchplus.train.save_models(model_dir, [net, optimizer], net.get_global_step())\n\n ckpt_start_time = time.time()\n\n if kkkk > 0 and (kkkk) % config_tr.val_freq == 0:\n # if True:\n torchplus.train.save_models(model_dir, [net, optimizer], net.get_global_step())\n net.eval()\n result_path_step = result_path / f\"step_{net.get_global_step()}\"\n result_path_step.mkdir(parents=True, exist_ok=True)\n print(\"#################################\")\n print(\"#################################\", file=logf)\n print(\"# EVAL\")\n print(\"# EVAL\", file=logf)\n print(\"#################################\")\n print(\"#################################\", file=logf)\n print(\"Generate output labels...\")\n print(\"Generate output labels...\", file=logf)\n t = time.time()\n dt_annos = []\n prog_bar = ProgressBar()\n net.clear_timer()\n 
prog_bar.start((len(eval_dataset) + eval_input_cfg.batch_size - 1) // eval_input_cfg.batch_size)\n for example in iter(eval_dataloader):\n example = example_convert_to_torch(example, float_dtype)\n if pickle_result:\n results = predict_kitti_to_anno(\n net, example, class_names, center_limit_range,\n model_cfg.lidar_input)\n dt_annos += results\n\n else:\n _predict_kitti_to_file(net, example, result_path_step,\n class_names, center_limit_range,\n model_cfg.lidar_input)\n\n prog_bar.print_bar()\n\n sec_per_ex = len(eval_dataset) / (time.time() - t)\n print(f'generate label finished({sec_per_ex:.2f}/s). start eval:')\n print(f'generate label finished({sec_per_ex:.2f}/s). start eval:',file=logf)\n gt_annos = [\n info[\"annos\"] for info in eval_dataset.dataset.kitti_infos\n ]\n if not pickle_result:\n dt_annos = kitti.get_label_annos(result_path_step)\n # result = get_official_eval_result_v2(gt_annos, dt_annos, class_names)\n # print(json.dumps(result, indent=2), file=logf)\n result = get_official_eval_result(gt_annos, dt_annos, class_names)\n print(result, file=logf)\n print(result)\n result_1 = result.split(\"\\n\")[:5]\n result_2 = result.split(\"\\n\")[10:15]\n result_3 = result.split(\"\\n\")[20:25]\n emh = ['0_easy', '1_mod', '2_hard']\n result_save = result_1\n for i in range(len(result_save)-1):\n save_targ = result_save[i+1]\n name_val = save_targ.split(':')[0].split(' ')[0]\n value_val = save_targ.split(':')[1:]\n for ev in range(3):\n each_val = value_val[0].split(',')[ev]\n merge_txt = 'AP_kitti/car_70/' + name_val+'/'+emh[ev]\n try:\n writer.add_scalar(merge_txt, float(each_val), global_step)\n except:\n abc=1\n import pdb; pdb.set_trace()\n abc=1\n if pickle_result:\n with open(result_path_step / \"result.pkl\", 'wb') as f:\n pickle.dump(dt_annos, f)\n writer.add_text('eval_result', result, global_step)\n\n logger.info('Evaluation on validation set:')\n # MOTA, MOTP, recall, prec, F1, fp, fn, id_switches = validate(\n # val_dataset,\n # net,\n # str(0 + 1),\n # config_tr,\n # result_path_tr,\n # part='val')\n # print(MOTA, MOTP, recall, prec, F1, fp, fn, id_switches)\n\n # curr_step = step\n # if tb_logger is not None:\n # tb_logger.add_scalar('prec', prec, curr_step)\n # tb_logger.add_scalar('recall', recall, curr_step)\n # tb_logger.add_scalar('mota', MOTA, curr_step)\n # tb_logger.add_scalar('motp', MOTP, curr_step)\n # tb_logger.add_scalar('fp', fp, curr_step)\n # tb_logger.add_scalar('fn', fn, curr_step)\n # tb_logger.add_scalar('f1', F1, curr_step)\n # tb_logger.add_scalar('id_switches', id_switches, curr_step)\n # if lr_scheduler is not None:\n # tb_logger.add_scalar('lr', current_lr, curr_step)\n\n # is_best = MOTA > best_mota\n # best_mota = max(MOTA, best_mota)\n # print(best_mota)\n\n # import pdb; pdb.set_trace()\n # save_checkpoint(\n # { 'step': net.get_global_step(),\n # 'score_arch': config_tr.model.score_arch,\n # 'appear_arch': config_tr.model.appear_arch,\n # 'best_mota': best_mota,\n # 'state_dict': tracking_module.model.state_dict(),\n # 'optimizer': tracking_module.optimizer.state_dict(),\n # }, is_best, config_tr.save_path + '/ckpt')\n\n # net.train()\n\n # save model before exit\n torchplus.train.save_models(model_dir, [net, optimizer],\n net.get_global_step())\n logf.close()\n\n\ndef _predict_kitti_to_file(net,\n example,\n result_save_path,\n class_names,\n center_limit_range=None,\n lidar_input=False):\n batch_image_shape = example['image_shape']\n batch_imgidx = example['image_idx']\n predictions_dicts, assign_det, assign_link, assign_new, assign_end 
= net(example)\n # t = time.time()\n for i, preds_dict in enumerate(predictions_dicts):\n image_shape = batch_image_shape[i]\n img_idx = preds_dict[\"image_idx\"][7:]\n if preds_dict[\"bbox\"] is not None or preds_dict[\"bbox\"].size.numel():\n box_2d_preds = preds_dict[\"bbox\"].data.cpu().numpy()\n box_preds = preds_dict[\"box3d_camera\"].data.cpu().numpy()\n scores = preds_dict[\"scores\"].data.cpu().numpy()\n box_preds_lidar = preds_dict[\"box3d_lidar\"].data.cpu().numpy()\n # write pred to file\n box_preds = box_preds[:, [0, 1, 2, 4, 5, 3,\n 6]] # lhw->hwl(label file format)\n label_preds = preds_dict[\"label_preds\"].data.cpu().numpy()\n # label_preds = np.zeros([box_2d_preds.shape[0]], dtype=np.int32)\n result_lines = []\n for box, box_lidar, bbox, score, label in zip(\n box_preds, box_preds_lidar, box_2d_preds, scores,\n label_preds):\n if not lidar_input:\n if bbox[0] > image_shape[1] or bbox[1] > image_shape[0]:\n continue\n if bbox[2] < 0 or bbox[3] < 0:\n continue\n # print(img_shape)\n if center_limit_range is not None:\n limit_range = np.array(center_limit_range)\n if (np.any(box_lidar[:3] < limit_range[:3])\n or np.any(box_lidar[:3] > limit_range[3:])):\n continue\n bbox[2:] = np.minimum(bbox[2:], image_shape[::-1])\n bbox[:2] = np.maximum(bbox[:2], [0, 0])\n result_dict = {\n 'name': class_names[int(label)],\n 'alpha': -np.arctan2(-box_lidar[1], box_lidar[0]) + box[6],\n 'bbox': bbox,\n 'location': box[:3],\n 'dimensions': box[3:6],\n 'rotation_y': box[6],\n 'score': score,\n }\n result_line = kitti.kitti_result_line(result_dict)\n result_lines.append(result_line)\n else:\n result_lines = []\n result_file = f\"{result_save_path}/{kitti.get_image_index_str(img_idx)}.txt\"\n result_str = '\\n'.join(result_lines)\n with open(result_file, 'w') as f:\n f.write(result_str)\n\n\ndef predict_kitti_to_anno(net,\n example,\n class_names,\n center_limit_range=None,\n lidar_input=False,\n global_set=None):\n batch_image_shape = example['image_shape']\n batch_imgidx = example['image_idx']\n predictions_dicts = net(example, False)\n # t = time.time()\n annos = []\n for i, preds_dict in enumerate(predictions_dicts):\n image_shape = batch_image_shape[i]\n img_idx = preds_dict[\"image_idx\"][7:]\n if preds_dict[\"bbox\"] is not None or preds_dict[\"bbox\"].size.numel() != 0:\n box_2d_preds = preds_dict[\"bbox\"].detach().cpu().numpy()\n box_preds = preds_dict[\"box3d_camera\"].detach().cpu().numpy()\n scores = preds_dict[\"scores\"].detach().cpu().numpy()\n box_preds_lidar = preds_dict[\"box3d_lidar\"].detach().cpu().numpy()\n # write pred to file\n label_preds = preds_dict[\"label_preds\"].detach().cpu().numpy()\n # label_preds = np.zeros([box_2d_preds.shape[0]], dtype=np.int32)\n anno = kitti.get_start_result_anno()\n num_example = 0\n for box, box_lidar, bbox, score, label in zip(\n box_preds, box_preds_lidar, box_2d_preds, scores,\n label_preds):\n if not lidar_input:\n if bbox[0] > image_shape[1] or bbox[1] > image_shape[0]:\n continue\n if bbox[2] < 0 or bbox[3] < 0:\n continue\n # print(img_shape)\n if center_limit_range is not None:\n limit_range = np.array(center_limit_range)\n if (np.any(box_lidar[:3] < limit_range[:3])\n or np.any(box_lidar[:3] > limit_range[3:])):\n continue\n bbox[2:] = np.minimum(bbox[2:], image_shape[::-1])\n bbox[:2] = np.maximum(bbox[:2], [0, 0])\n anno[\"name\"].append(class_names[int(label)])\n anno[\"truncated\"].append(0.0)\n anno[\"occluded\"].append(0)\n anno[\"alpha\"].append(-np.arctan2(-box_lidar[1], box_lidar[0]) +\n box[6])\n 
anno[\"bbox\"].append(bbox)\n anno[\"dimensions\"].append(box[3:6])\n anno[\"location\"].append(box[:3])\n anno[\"rotation_y\"].append(box[6])\n if global_set is not None:\n for i in range(100000):\n if score in global_set:\n score -= 1 / 100000\n else:\n global_set.add(score)\n break\n anno[\"score\"].append(score)\n\n num_example += 1\n if num_example != 0:\n anno = {n: np.stack(v) for n, v in anno.items()}\n annos.append(anno)\n else:\n annos.append(kitti.empty_result_anno())\n else:\n annos.append(kitti.empty_result_anno())\n num_example = annos[-1][\"name\"].shape[0]\n # import pdb; pdb.set_trace()\n annos[-1][\"image_idx\"] = np.array(\n [img_idx] * num_example, dtype=np.int64)\n return annos\n\ndef evaluate(config_path,\n model_dir,\n use_second_stage=False,\n use_endtoend=False,\n result_path=None,\n predict_test=False,\n ckpt_path=None,\n ref_detfile=None,\n pickle_result=True,\n measure_time=False,\n batch_size=None):\n model_dir = pathlib.Path(model_dir)\n if predict_test:\n result_name = 'predict_test_0095'\n else:\n result_name = 'eval_results'\n if result_path is None:\n result_path = model_dir / result_name\n else:\n result_path = pathlib.Path(result_path)\n config = pipeline_pb2.TrainEvalPipelineConfig()\n with open(config_path, \"r\") as f:\n proto_str = f.read()\n text_format.Merge(proto_str, config)\n\n input_cfg = config.eval_input_reader\n model_cfg = config.model.second\n train_cfg = config.train_config\n \n center_limit_range = model_cfg.post_center_limit_range\n ######################\n # BUILD VOXEL GENERATOR\n ######################\n voxel_generator = voxel_builder.build(model_cfg.voxel_generator)\n bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]\n box_coder = box_coder_builder.build(model_cfg.box_coder)\n target_assigner_cfg = model_cfg.target_assigner\n target_assigner = target_assigner_builder.build(target_assigner_cfg,\n bv_range, box_coder)\n class_names = target_assigner.classes\n if use_second_stage: \n net = second_2stage_builder.build(model_cfg, voxel_generator, target_assigner, measure_time=measure_time)\n elif use_endtoend:\n net = second_endtoend_builder.build(model_cfg, voxel_generator, target_assigner, measure_time=measure_time)\n else:\n net = second_builder.build(model_cfg, voxel_generator, target_assigner, measure_time=measure_time)\n net.cuda()\n #########################################\n # net = torch.nn.DataParallel(net)\n #########################################\n if ckpt_path is None:\n torchplus.train.try_restore_latest_checkpoints(model_dir, [net])\n else:\n torchplus.train.restore(ckpt_path, net)\n if train_cfg.enable_mixed_precision:\n net.half()\n print(\"half inference!\")\n net.metrics_to_float()\n net.convert_norm_to_float(net)\n batch_size = batch_size or input_cfg.batch_size\n eval_dataset = input_reader_builder_tr.build(\n input_cfg,\n model_cfg,\n training=False,\n voxel_generator=voxel_generator,\n target_assigner=target_assigner)\n eval_dataloader = torch.utils.data.DataLoader(\n eval_dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=0,# input_cfg.num_workers,\n pin_memory=False,\n collate_fn=merge_second_batch)\n\n if train_cfg.enable_mixed_precision:\n float_dtype = torch.float16\n else:\n float_dtype = torch.float32\n\n net.eval()\n result_path_step = result_path / f\"step_{net.get_global_step()}\"\n result_path_step.mkdir(parents=True, exist_ok=True)\n t = time.time()\n dt_annos = []\n global_set = None\n print(\"Generate output labels...\")\n bar = ProgressBar()\n bar.start((len(eval_dataset) + 
batch_size - 1) // batch_size)\n prep_example_times = []\n prep_times = []\n t2 = time.time()\n for example in iter(eval_dataloader):\n if measure_time:\n prep_times.append(time.time() - t2)\n t1 = time.time()\n torch.cuda.synchronize()\n example = example_convert_to_torch(example, float_dtype)\n if measure_time:\n torch.cuda.synchronize()\n prep_example_times.append(time.time() - t1)\n\n if pickle_result:\n dt_annos += predict_kitti_to_anno(\n net, example, class_names, center_limit_range,\n model_cfg.lidar_input, global_set)\n else:\n _predict_kitti_to_file(net, example, result_path_step, class_names,\n center_limit_range, model_cfg.lidar_input)\n # print(json.dumps(net.middle_feature_extractor.middle_conv.sparity_dict))\n bar.print_bar()\n if measure_time:\n t2 = time.time()\n\n sec_per_example = len(eval_dataset) / (time.time() - t)\n print(f'generate label finished({sec_per_example:.2f}/s). start eval:')\n if measure_time:\n print(f\"avg example to torch time: {np.mean(prep_example_times) * 1000:.3f} ms\")\n print(f\"avg prep time: {np.mean(prep_times) * 1000:.3f} ms\")\n for name, val in net.get_avg_time_dict().items():\n print(f\"avg {name} time = {val * 1000:.3f} ms\")\n if not predict_test:\n gt_annos = [info[\"annos\"] for info in eval_dataset.dataset.kitti_infos]\n img_idx = [info[\"image_idx\"] for info in eval_dataset.dataset.kitti_infos]\n if not pickle_result:\n dt_annos = kitti.get_label_annos(result_path_step)\n result = get_official_eval_result(gt_annos, dt_annos, class_names)\n # print(json.dumps(result, indent=2))\n print(result)\n result = get_coco_eval_result(gt_annos, dt_annos, class_names)\n print(result)\n if pickle_result:\n with open(result_path_step / \"result.pkl\", 'wb') as f:\n pickle.dump(dt_annos, f)\n # annos to txt file\n if True:\n os.makedirs(str(result_path_step) + '/txt', exist_ok=True)\n for i in range(len(dt_annos)):\n dt_annos[i]['dimensions'] = dt_annos[i]['dimensions'][:, [1, 2, 0]]\n result_lines = kitti.annos_to_kitti_label(dt_annos[i])\n image_idx = img_idx[i]\n with open(str(result_path_step) + '/txt/%06d.txt' % image_idx, 'w') as f:\n for result_line in result_lines:\n f.write(result_line + '\\n')\n abcd = 1\n else:\n os.makedirs(str(result_path_step) + '/txt', exist_ok=True)\n img_idx = [info[\"image_idx\"] for info in eval_dataset.dataset.kitti_infos]\n for i in range(len(dt_annos)):\n dt_annos[i]['dimensions'] = dt_annos[i]['dimensions'][:, [1, 2, 0]]\n result_lines = kitti.annos_to_kitti_label(dt_annos[i])\n image_idx = img_idx[i]\n with open(str(result_path_step) + '/txt/%06d.txt' % image_idx, 'w') as f:\n for result_line in result_lines:\n f.write(result_line + '\\n')\n\n\ndef save_config(config_path, save_path):\n config = pipeline_pb2.TrainEvalPipelineConfig()\n with open(config_path, \"r\") as f:\n proto_str = f.read()\n text_format.Merge(proto_str, config)\n ret = text_format.MessageToString(config, indent=2)\n with open(save_path, 'w') as f:\n f.write(ret)\n\ndef assign_det_id(self, assign_det, assign_link, assign_new, assign_end,\n det_split, dets):\n det_start_idx = 0\n det_ids = []\n already_used_id = []\n fake_ids = []\n dets_out = []\n\n for i in range(len(det_split)):\n frame_id = []\n det_curr_num = det_split[i].item()\n fake_id = []\n det_out = get_start_gt_anno()\n for j in range(det_curr_num):\n curr_det_idx = det_start_idx + j\n # check w_det\n if assign_det[curr_det_idx] != 1:\n fake_id.append(-1)\n continue\n else:\n # det_out.append(dets[i][j])\n det_out['name'].append(dets[i]['name'][:, j])\n 
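# The remaining KITTI fields of this matched detection are copied over one by one.\n 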
det_out['truncated'].append(dets[i]['truncated'][:, j])\n det_out['occluded'].append(dets[i]['occluded'][:, j])\n det_out['alpha'].append(dets[i]['alpha'][:, j])\n det_out['bbox'].append(dets[i]['bbox'][:, j])\n det_out['dimensions'].append(dets[i]['dimensions'][:, j])\n det_out['location'].append(dets[i]['location'][:, j])\n det_out['rotation_y'].append(dets[i]['rotation_y'][:, j])\n\n # w_det=1, check whether a new det\n if i == 0:\n if len(already_used_id) == 0:\n frame_id.append(0)\n fake_id.append(0)\n already_used_id.append(0)\n det_out['id'].append(torch.Tensor([0]).long())\n else:\n new_id = already_used_id[-1] + 1\n frame_id.append(new_id)\n fake_id.append(new_id)\n already_used_id.append(new_id)\n det_out['id'].append(torch.Tensor([new_id]).long())\n continue\n elif assign_new[curr_det_idx] == 1:\n new_id = already_used_id[-1] + 1 if len(\n already_used_id) > 0 else 0\n frame_id.append(new_id)\n fake_id.append(new_id)\n already_used_id.append(new_id)\n det_out['id'].append(torch.Tensor([new_id]).long())\n else:\n # look prev\n det_prev_num = det_split[i - 1]\n for k in range(det_prev_num):\n if assign_link[i - 1][0][k][j] == 1:\n prev_id = fake_ids[-1][k]\n frame_id.append(prev_id)\n fake_id.append(prev_id)\n det_out['id'].append(\n torch.Tensor([prev_id]).long())\n break\n\n assert len(fake_id) == det_curr_num\n fake_ids.append(fake_id)\n det_ids.append(np.array(frame_id))\n for k, v in det_out.items():\n if len(det_out[k]) == 0:\n det_out[k] = torch.Tensor([])\n else:\n det_out[k] = torch.cat(v, dim=0)\n det_out['frame_idx'] = dets[i]['frame_idx']\n dets_out.append(det_out)\n det_start_idx += det_curr_num\n return det_ids, dets_out\n\ndef align_id(self, dets_ids, dets_out):\n frame_start = 0\n if len(self.used_id) == 0:\n # Start of a sequence\n self.used_id += dets_ids\n self.frames_id += dets_ids\n self.frames_det += dets_out\n max_id = 0\n for i in range(len(dets_ids)):\n if dets_out[i]['id'].size(0) == 0:\n continue\n max_id = np.maximum(np.max(dets_ids[i]), max_id)\n self.last_id = np.maximum(self.last_id, max_id)\n return dets_ids, dets_out, frame_start\n elif self.frames_det[-1]['frame_idx'] != dets_out[0]['frame_idx']:\n # in case the sequence is not continuous\n aligned_ids = []\n aligned_dets = []\n max_id = 0\n id_offset = self.last_id + 1\n for i in range(len(dets_ids)):\n if dets_out[i]['id'].size(0) == 0:\n aligned_ids.append([])\n continue\n new_id = dets_ids[i] + id_offset\n max_id = np.maximum(np.max(new_id), max_id)\n aligned_ids.append(new_id)\n dets_out[i]['id'] += id_offset\n aligned_dets += dets_out\n self.last_id = np.maximum(self.last_id, max_id)\n self.frames_id += aligned_ids\n self.frames_det += aligned_dets\n return aligned_ids, aligned_dets, frame_start\n else:\n # the first frame of current dets\n # and the last frame of last dets is the same\n frame_start = 1\n aligned_ids = []\n aligned_dets = []\n max_id = 0\n id_pairs = {}\n \"\"\"\n assert len(dets_ids[0])== len(self.frames_id[-1])\n \"\"\"\n # Calculate Id pairs\n for i in range(len(dets_ids[0])):\n # Use minimum because because sometimes\n # they are not totally the same\n has_match = False\n for j in range(len(self.frames_id[-1])):\n if ((self.det_type == '3D'\n and torch.sum(dets_out[0]['location'][i] !=\n self.frames_det[-1]['location'][j]) == 0\n and torch.sum(dets_out[0]['bbox'][i] !=\n self.frames_det[-1]['bbox'][j]) == 0)\n or (self.det_type == '2D' and torch.sum(\n dets_out[0]['bbox'][i] != self.frames_det[-1]\n ['bbox'][j]) == 0)): # noqa\n\n id_pairs[dets_ids[0][i]] = 
self.frames_id[-1][j]\n has_match = True\n break\n if not has_match:\n id_pairs[dets_ids[0][i]] = self.last_id + 1\n self.last_id += 1\n if len([v for k, v in id_pairs.items()]) != len(\n set([v for k, v in id_pairs.items()])):\n print(\"ID pairs has duplicates!!!\")\n print(id_pairs)\n print(dets_ids)\n print(dets_out[0])\n print(self.frames_id[-1])\n print(self.frames_det[-1])\n\n for i in range(1, len(dets_ids)):\n if dets_out[i]['id'].size(0) == 0:\n aligned_ids.append([])\n continue\n new_id = dets_ids[i].copy()\n for j in range(len(dets_ids[i])):\n if dets_ids[i][j] in id_pairs.keys():\n new_id[j] = id_pairs[dets_ids[i][j]]\n else:\n new_id[j] = self.last_id + 1\n id_pairs[dets_ids[i][j]] = new_id[j]\n self.last_id += 1\n if len(new_id) != len(\n set(new_id)): # check whether there is duplicate\n print('have duplicates!!!')\n print(id_pairs)\n print(new_id)\n print(dets_ids)\n print(dets_out)\n print(self.frames_id[-1])\n print(self.frames_det[-1])\n import pdb\n pdb.set_trace()\n\n max_id = np.maximum(np.max(new_id), max_id)\n self.last_id = np.maximum(self.last_id, max_id)\n aligned_ids.append(new_id)\n dets_out[i]['id'] = torch.Tensor(new_id).long()\n # TODO: This only support check for 2 frame case\n if dets_out[1]['id'].size(0) != 0:\n aligned_dets += dets_out[1:]\n self.frames_id += aligned_ids\n self.frames_det += aligned_dets\n return aligned_ids, aligned_dets, frame_start\n\nif __name__ == '__main__':\n fire.Fire()\n","repo_name":"HYjhkoh/3dobject_detection_temporal","sub_path":"second/pytorch/train_2st_spatio.py","file_name":"train_2st_spatio.py","file_ext":"py","file_size_in_byte":48303,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"40478173877","text":"from django.db.models import Q\nfrom message_control.models import MatchEX\nfrom exchange_control.models import Exchange, SelectedEx\ndef setDone(instance):\n if instance.case_match != None:\n # add case\n user_case = Exchange.objects.get(id=instance.case_match.id)\n user_case.status = instance.status\n user_case.save()\n print(user_case.id, user_case.author.id)\n print(instance.id, instance.author.id)\n matching = MatchEX.objects.filter(\n Q(author=user_case.author.id, user=instance.author.id) | Q(author=instance.author.id, user=user_case.author.id)\n )\n print(matching)\n for m in matching:\n m.case_match.remove(instance)\n m.save()\n print('remove case')\n if instance.status == 'Wait':\n instance.case_match = None\n instance.save()\n user_case.case_match = None\n user_case.save()\n else:\n pass\n else:\n # select case\n selected = SelectedEx.objects.filter(caseEx=instance.id)\n print(selected)\n \n for i in selected:\n matching = MatchEX.objects.filter(\n Q(author=i.user.id, user=instance.author.id) | Q(author=instance.author.id, user=i.user.id)\n )\n print(matching)\n for m in matching:\n m.case_match.remove(instance)\n m.save()\n print('remove case')\n\n sel = SelectedEx.objects.get(id=i.id)\n sel.delete()\n print('delete')\n \n","repo_name":"ChanakanD/deal","sub_path":"exchange_control/setDone.py","file_name":"setDone.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8135940728","text":"# 1. deque\nfrom collections import deque\na = deque()\na.append()\na.appendleft()\na.pop()\na.popleft()\n\n# 2. 
heapq\nfrom heapq import heappop, heappush\na = []\nheappush(a, 1)\nheappop(a)\n\n# sys\nimport sys\ninput = sys.stdin.readline\nsys.setrecursionlimit(10**6)\nsys.maxsize\n\n# misc\narr = []\n\"abc\".replace(\"a\", \"b\")\narr[::-1]\ns1 = set()\ns2 = set()\ns1.intersection(s2)\ns1.union(s2)\n\n# matrix transpose\nA = [[1,2,3], [4,5,6]]\nB = [list(x) for x in zip(*A)]","repo_name":"5zo-s-magician/CodingTest","sub_path":"KHJ/시험전정리.py","file_name":"시험전정리.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41722745250","text":"import graph\nimport parser\nimport sys\n\nclass Cli():\n\t"""\n\trepresents the command line interface\n\t"""\t\n\t\n\tdef __init__(self, arguments):\n\t\tips = str(arguments.ips).split(';')\n\t\tself.ip1 = ips[0]\n\t\tself.ip2 = ips[1]\n\t\tself.colored = arguments.colored\n\t\tself.trainingTrace = arguments.trainingTrace\n\t\tself.testingTrace = arguments.testingTrace\n\t\tself.bisimulation = int(arguments.bisimulation)\n\t\tself.outputXml = arguments.outputxml\n\n\t\tpdffilename = arguments.outputpdf.rstrip('pdf').rstrip('.')\n\t\tself.outputPdf = pdffilename\n\t\t\n\t\tif not self.testingTrace == "":\n\t\t\tself.colored = False\n\t\t\n\tdef run(self):\n\t\tif self.testingTrace == "":\n\t\t\tdtmc = self.createDTMC(self.trainingTrace, int(self.bisimulation))\n\t\t\t\n\t\telse:\n\t\t\ttrainingDtmc = self.createDTMC(self.trainingTrace, self.bisimulation)\n\t\t\ttestingDtmc = self.createDTMC(self.testingTrace, self.bisimulation)\n\t\t\ttestingDtmc.validate(trainingDtmc)\n\t\t\tdtmc = testingDtmc\n\t\t\t\n\t\tif not self.outputPdf == "":\n\t\t\tself.createPdf(dtmc, self.outputPdf)\n\t\t\t\n\t\tif not self.outputXml == "":\n\t\t\tself.createXml(dtmc, self.outputXml)\n\t\t\t\n\t\treturn 0\n\t\n\tdef createDTMC(self, trace, bisimulation):\n\t\tCli.cprint('creating DTMC...')\n\t\tiec104Parser = parser.PcapParser(trace, self.ip1, self.ip2, bisimulation)\n\t\tdtmc = iec104Parser.parsePcap()\n\t\tCli.cprintnl('done')\n\t\treturn dtmc\n\t\n\tdef createPdf(self, dtmc, filename):\n\t\tCli.cprint('generating pdf file(' + filename + '.pdf)...')\n\t\tg = graph.Graph(dtmc)\n\t\tg.generate_graph(filename, self.colored)\n\t\tCli.cprintnl('done')\n\t\t\n\tdef createXml(self, dtmc, filename):\n\t\tCli.cprint('generating xml file(' + filename + ')...')\n\t\txmlwriter = graph.XMLWriter(dtmc)\n\t\txmlwriter.createXml(filename)\n\t\tCli.cprintnl('done')\n\t\n\tdef cprint(msg):\n\t\tprint(msg, end='')\n\t\tsys.stdout.flush()\n\t\n\tdef cprintnl(msg):\n\t\tprint(msg)\n\t\tsys.stdout.flush()\n\t","repo_name":"jjchromik/intravis","sub_path":"cli/cmdInterface.py","file_name":"cmdInterface.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73514226329","text":"from run_dir_testing import test_and_analyze\nfrom run_dir_testing import mutationParas\nfrom get_impls_util import get_std_impls\n\n\nif __name__ == '__main__':\n impls = get_std_impls()\n tested_dir = '/host_data/v18'\n result_base_dir = '/host_data/v18_330_rerun'\n tested_dir = '/home/spec/extract_document/generated_tcs/v19/tcs'\n result_base_dir = '/host_data/v19_330_rerun'\n result_base_dir = '/host_data/v19_330_rerun_v2'\n paras = mutationParas.get_paras_with_mutation(tested_dir, result_base_dir, one_tc_limit=30, mutate_num=3, mutate_prob=1, impls=impls)\n test_and_analyze(result_base_dir, paras, 
impls=impls)\n","repo_name":"erxiaozhou/cp912_runtime_tester","sub_path":"run_dir_std_testing_main.py","file_name":"run_dir_std_testing_main.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34247988617","text":"from sqlalchemy import create_engine, Column, String, Integer\nfrom sqlalchemy import select, func, and_, or_, between, union\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\n\n\nengine = create_engine(\"mysql+pymysql://elko:elko@10.10.64.201/elko\", echo = False)\n\nBase = declarative_base()\n\nclass City(Base):\n\n __tablename__= \"city\"\n\n #id\n id = Column(Integer, primary_key=True)\n # name\n name = Column(String(10))\n # age\n population = Column(Integer)\n\n\n\n def getName(self):\n return \"Wonderful \"+self.name\n\n def getPopulation(self):\n return self.population\n\n\nBase.metadata.create_all(engine)\n\nKyiv = City(id =1, name = \"Kyiv\", population = 3700000)\n\nsession = sessionmaker(engine)\nopen_session = session()\n\n#add info to table\n\n# open_session.add_all([\n# # City( name = \"Kyiv\", population = 3700000),\n# # City( name = \"Kharkiv\", population = 1400000)\n# City( name = \"Lviv\", population = 721000)\n# ])\nopen_session.commit()\n\n# print(Kyiv.getName())\n# print(Kyiv.getPopulation())\n\n#select all cities with their populations\n\ncities = open_session.query(City).all()\nfor city in cities:\n print(city.getName(), \"population is \", city.getPopulation())\n\n#select first element of table\n\nfirst_city = open_session.query(City).first()\nprint(first_city.name, \"is first city on DB\")\n\ncity_id = open_session.query(City).get(6)\nprint(\"Second city on DB is \", city_id.name)\n\n\n#update info (содержит ошибки)\n\n# lviv = open_session.query(City).get(7)\n# lviv.population = lviv.getPopulation()+20000\n#\n# open_session.commit()\n#\n# for lviv in open_session.query(City).all():\n# lviv.population = lviv.getPopulation()+20000\n#\n# open_session.commit()\n\n#select with filter\n\ncities = open_session.query(City).filter(and_(City.population>1000000, City.population<2000000))\nfor city in cities:\n print(city.population)\n\n\n#delete info from table\n\n# open_session.query(City).filter(and_(City.population>3000000)).delete()\n\n","repo_name":"JohnKosten/lessons_py","sub_path":"orm_classes.py","file_name":"orm_classes.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1257750606","text":"from django.contrib.auth.models import User,Group\nfrom django import forms\nfrom basicinformation.models import *\nfrom basicinformation.tasks import *\nfrom .models import *\n\nclass CreateTimeTableForm(forms.ModelForm):\n class Meta:\n model = TimeTable\n fields = [\n 'date',\n 'timeStart',\n 'timeEnd',\n 'batch',\n 'sub',\n 'note',\n\n ]\n read_only_fields = ('created')\n","repo_name":"prashantspandey/BodhiAI","sub_path":"QuestionsAndPapers/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18549280344","text":"pNums = [1]\npossibleAns = []\ncount = 2\nwhile len(possibleAns) == 0:\n pNums.append(count*(3*count -1)/2)\n for num in range(0, len(pNums)-2):\n checkSum = pNums[num] + pNums[len(pNums)-1]\n c = checkSum *2\n if int((1+(1+12*c)**.5)/6) == (1+(1+12*c)**.5)/6 or 
int((1-(1+12*c)**.5)/6) == (1-(1+12*c)**.5)/6:\n checkSum = pNums[len(pNums)-1] -pNums[num]\n c = checkSum *2\n if int((1+(1+12*c)**.5)/6) == (1+(1+12*c)**.5)/6 or int((1-(1+12*c)**.5)/6) == (1-(1+12*c)**.5)/6:\n possibleAns.append(checkSum)\n else:\n continue\n else: \n continue\n count+=1\nprint(possibleAns)","repo_name":"ansaws/Euler","sub_path":"problem_44.py","file_name":"problem_44.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20314321154","text":"#!/usr/bin/env python3\n\nimport discord\nfrom discord.ext.commands import Bot\nfrom discord.ext import commands\nimport asyncio\nimport time\nimport io\nimport json\n\n\nClient = discord.Client()\nclient = commands.Bot(command_prefix = \"$\")\n\n\n@client.event\nasync def on_ready():\n print(\"Bot is online and connected to Discord\")\n\n\n@client.event\nasync def on_message(message):\n\tif message.content.upper().startswith('$'):\n\t\targs = message.content.split(\" \")\n\t\ttry:\n\t\t\tprint(args[0] + args[1])\n\t\t\tcoinShort, coinAmount, coinLong, coinPriceUSD, coinPriceBTC, coinValueUSD, coinValueBTC = getPrice(args)\n\t\t\tembed = discord.Embed(title=coinLong + ' (' + coinShort + ')', description= str(coinAmount) + ' ' + str(coinLong) + ' is worth: $' + str(coinValueUSD) + ' or ₿' + str(coinValueBTC), color=0x00ff00)\n\t\t\tawait client.send_message(message.channel, embed=embed)\n\t\texcept Exception:\n\t\t\tcoinLong, coinShort, coinRank, coinMarketcap, coinPriceUSD, coinPriceBTC, coinVolume, coinChange1h, coinChange1d, coinChange7d = getTicker(str(message.content).replace('$','').upper())\n\t\t\tembed = discord.Embed(title=coinLong + ' (' + coinShort + ')', description= '**Rank: **' + str(coinRank) + '\\n' + '**Marketcap: **' + str(coinMarketcap) + '\\n' + '**Volume 24H: **' + str(coinVolume) + '\\n\\n' + '**Price USD: **' + str(coinPriceUSD) + '\\n' + '**Price BTC: **' + str(coinPriceBTC) + '\\n\\n' + '**Change 1 hour: **' + str(coinChange1h) + '\\n' + '**Change 1 day: **' + str(coinChange1d) + '\\n' + '**Change 7 days: **' + str(coinChange7d), color=0x00ff00)\n\t\t\tawait client.send_message(message.channel, embed=embed)\n\ndef getTicker(coin):\n\twith io.open('/home/ExchangeData/APIData.json', 'r', encoding='utf8') as outfile:\n\t\toutfileRead = outfile.read() \n\t\tdict = json.loads(outfileRead)\n\t\ttry:\n\t\t\tcoinLong = dict[coin]['name'].replace('-', ' ')\n\t\t\tcoinShort = dict[coin]['shortname']\n\t\t\tcoinRank = str('#' + str(dict[coin]['rank'])) if dict[coin]['rank'] else '?'\n\t\t\tcoinMarketcap = str('$' + str(\"{:,}\".format(round(float(dict[coin]['marketcap']),0)))).replace('.0','') if dict[coin]['marketcap'] else '?'\n\t\t\tcoinPriceUSD = '$' + dict[coin]['average_price_USD']\n\t\t\tcoinPriceBTC = '₿' + str(round(float(dict[coin]['average_price_BTC']),5)) if float(dict[coin]['average_price_BTC']) > 0.01 else '₿' + str(round(float(dict[coin]['average_price_BTC']),9))\n\t\t\tcoinVolume = '$' + str(\"{:,}\".format(round(float(dict[coin]['total_volume']),2)))\n\t\t\tcoinChange1h = str(round(float(dict[coin]['percent_change_1h_USD']),2))+ '%' if dict[coin]['percent_change_1h_USD'] else '?'\n\t\t\tcoinChange1d = str(round(float(dict[coin]['percent_change_24h_USD']),2))+ '%' if dict[coin]['percent_change_24h_USD'] else '?'\n\t\t\tcoinChange7d = str(round(float(dict[coin]['percent_change_7d_USD']),2))+ '%' if dict[coin]['percent_change_7d_USD'] else '?'\n\t\t\tif coinChange1h != '?':\n\t\t\t\tcoinChange1h = str('▼ ' + 
coinChange1h) if '-' in dict[coin]['percent_change_1h_USD'] else str('▲ ' + coinChange1h)\n\t\t\tif coinChange1d != '?':\n\t\t\t\tcoinChange1d = str('▼ ' + coinChange1d) if '-' in dict[coin]['percent_change_24h_USD'] else str('▲ ' + coinChange1d)\n\t\t\tif coinChange7d != '?':\n\t\t\t\tcoinChange7d = str('▼ ' + coinChange7d) if '-' in dict[coin]['percent_change_7d_USD'] else str('▲ ' + coinChange7d)\n\t\t\treturn coinLong, coinShort, coinRank, coinMarketcap, coinPriceUSD, coinPriceBTC, coinVolume, coinChange1h, coinChange1d, coinChange7d\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\tmessageToSend = 'Unknown Coin'\n\t\t\treturn messageToSend\n\t\t\ndef getPrice(args):\n\tcoinShort = str(args[0]).replace('$','').upper()\n\tcoinAmount = float(args[1])\n\twith io.open('/home/ExchangeData/APIData.json', 'r', encoding='utf8') as outfile:\n\t\toutfileRead = outfile.read() \n\t\tdict = json.loads(outfileRead)\n\t\tcoinLong = dict[coinShort]['name'].replace('-', ' ')\n\t\tcoinPriceUSD = float(dict[coinShort]['average_price_USD'])\n\t\tcoinPriceBTC = float(dict[coinShort]['average_price_BTC'])\n\tcoinValueUSD = str(round((coinPriceUSD * coinAmount),2))\n\tcoinValueBTC = str(coinPriceBTC * coinAmount)\n\treturn coinShort, coinAmount, coinLong, coinPriceUSD, coinPriceBTC, coinValueUSD, coinValueBTC\n\t\t\n\t\n\nclient.run(\"NDcwMjAwNTExMjk2MTc2MTI4.DjS0EA.JJOk9QeHquRyO95nlaMrz-RCCkI\") #Replace token with your bots token\n\t","repo_name":"Eddie-The-Eagle/Cryptotracker","sub_path":"DiscordBot/DiscordBot.py","file_name":"DiscordBot.py","file_ext":"py","file_size_in_byte":4224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1219552527","text":"from queue import LifoQueue\nfrom typing import Dict, List, Tuple\n\nlines: List[str] = []\nwith open(\"input.txt\", mode=\"rt\") as inputfile:\n lines = [i.strip() for i in inputfile.readlines()]\n\nmatching: Dict[str, str] = {\"{\": \"}\", \"[\": \"]\", \"<\": \">\", \"(\": \")\"}\nillegal: Dict[str, int] = {\")\": 3, \"]\": 57, \"}\": 1197, \">\": 25137}\nsyntax_errors: List[Tuple[str, str, str]] = []\n\nillegal_sum: int = 0\nfor line_idx, line in enumerate(lines):\n q: LifoQueue[str] = LifoQueue()\n has_error: bool = False\n for c_idx, c in enumerate(line):\n if c in matching.keys():\n q.put_nowait(c)\n else:\n r = q.get_nowait()\n if c != matching[r]:\n has_error = True\n illegal_sum += illegal[c]\n se = (f\"SYNTAX ERROR [{line_idx}] at [{c_idx}]:\", f\"expected {matching[r]} but found {c} instead\", line)\n syntax_errors.append(se)\n\n # remaining_qsize = q.qsize()\n # remainder: str = \"\"\n # while True:\n # try:\n # remainder += q.get_nowait()\n # except Empty:\n # break\n # if not has_error:\n # print(f\"line [{line_idx}] has [{remaining_qsize}] elements remaining: {remainder}\")\n\nprint(illegal_sum)\n\nwith open(\"result.txt\", mode=\"wt\") as result:\n result.write(f\"syntax error score: {illegal_sum}\")\n\nwith open(\"output.txt\", mode=\"wt\") as outfile:\n for se in syntax_errors:\n outfile.write(\" \".join(se))\n outfile.write(\"\\n\")\n","repo_name":"seldonPlan/advent_of_code","sub_path":"2021/10/a/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11458725479","text":"from bs4 import BeautifulSoup\nimport requests\n\n\nheaders = {'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) 
Chrome/73.0.3683.86 Safari/537.36'}\ndata = requests.get('https://movie.naver.com/movie/sdb/rank/rmovie.nhn?sel=pnt&date=20190909',headers=headers)\n\nsoup = BeautifulSoup(data.text, 'html.parser')\n\n# select를 이용해서, tr들을 불러오기\nmovies = soup.select('#old_content > table > tbody > tr')\n\n# soup.select('input[type=\"checkbox\"]')\n\n# movies (tr들) 의 반복문을 돌리기\n\nnum = 0\nfor movie in movies:\n # movie 안에 a 가 있으면,\n\n a_tag = movie.select_one('td.title > div > a')\n rank = movie.select_one('td.point')\n\n if a_tag is not None:\n # a의 text를 찍어본다.\n num += 1\n # print(a, end=' ')\n print(num, a_tag.text, rank.text)\n\n\n\n","repo_name":"hanrimJO/sparta","sub_path":"crawling.py","file_name":"crawling.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19336793897","text":"import sys\r\nimport os\r\nfrom CompilationEngine import *\r\nfrom JackTokenizer import *\r\n#from SymbolTable import *\r\nfrom SymbolTable import SymbolTable\r\n\r\n# this class is the main class which handles files and directory input\r\nclass JackAnalyzer:\r\n Xml_code = \"\"\r\n\r\n def Main(self):\r\n file_name = sys.argv[1]\r\n if os.path.isdir(file_name):\r\n files = os.listdir(file_name)\r\n for file in files:\r\n if \".jack\" in file:\r\n abspath = os.path.join(file_name,file)\r\n with open(abspath, 'r') as current_file:\r\n tokenizer = JackTokenizer(current_file)\r\n\r\n with open(abspath.replace(\".jack\", \".vm\"),'a') as vmFile:\r\n engine = CompilationEngine(tokenizer,vmFile)\r\n engine.compileClass() # at this moment only vmWriter deal with output file\r\n #XMLfile.write(\"\\n\".join(Xml_code))\r\n\r\n # if the file name is rather a normal file\r\n else:\r\n with open(file_name, 'r') as current_file:\r\n tokenizer = JackTokenizer(current_file)\r\n symbolTable = SymbolTable()\r\n with open(file_name.replace(\".jack\", \".vm\"), 'a') as vmFile:\r\n engine = CompilationEngine(tokenizer, vmFile)\r\n engine.compileClass()\r\n #XMLfile.write(\"\\n\".join(Xml_code))\r\n\r\n\r\n# starter\r\nif __name__ == '__main__':\r\n starter = JackAnalyzer()\r\n starter.Main()","repo_name":"Harelyac/Python-PROJECTS","sub_path":"Compiler/JackCompiler.py","file_name":"JackCompiler.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35191051419","text":"from ..models import Dictionary\n\n\ndef add_new_word(word, meaning, synonyms, antonyms, usage):\n try:\n dictionary_object = Dictionary(word=word, meaning=meaning, antonyms=antonyms, synonyms=synonyms, usage=usage)\n dictionary_object.save()\n return {'message': \"Word has added to dictionary, Thank You to increase number of words.\"}\n except Exception as exception:\n print(exception)\n return {'message': \"something went wrong, please try again after sometime\"}\n\n\ndef get_all_words():\n try:\n dictionary_object = Dictionary.objects.all()\n words_list = []\n for words in dictionary_object.iterator():\n word_dictionary = {'word': words.word, 'meaning': words.meaning, 'synonyms': words.synonyms,\n 'antonyms': words.antonyms, 'usage': words.usage}\n words_list.append(word_dictionary)\n return words_list\n except Exception as exception:\n print(exception)\n return [{'word': \"\", 'meaning': \"\", 'synonyms': \"\", 'antonyms': \"\", 'usage': \"\"}]\n\n\ndef get_words_by_matching(search_sting):\n try:\n dictionary_object = 
Dictionary.objects.filter(word__icontains=search_sting).filter(\n word__istartswith=search_sting)\n words_list = []\n for words in dictionary_object.iterator():\n word_dictionary = {'word': words.word, 'meaning': words.meaning, 'synonyms': words.synonyms,\n 'antonyms': words.antonyms, 'usage': words.usage}\n words_list.append(word_dictionary)\n print(words_list)\n return words_list\n except Exception as exception:\n print(exception)\n return [{'word': \"\", 'meaning': \"\", 'synonyms': \"\", 'antonyms': \"\", 'usage': \"\"}]\n","repo_name":"yadav-subodh/English_dictionary_django_application","sub_path":"dictionaryapp/resources/dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"13290865572","text":"class Node(object):\n def __init__(self, data):\n self.data = data\n self.nextNode = None\n\n\nclass IntersectionPointOfLinkedList(object):\n\n def getIntersectionNode(self, headA: Node, headB: Node):\n listA, listB = headA,headB\n lenA, lenB = 0,0\n\n while listA is not None:\n lenA += 1\n listA = listA.nextNode\n\n while listB is not None:\n lenB += 1\n listB = listB.nextNode\n\n listA, listB = headA,headB\n\n if lenA > lenB:\n for i in range(lenA - lenB):\n listA = listA.nextNode\n\n elif lenB > lenA:\n for i in range(lenB-lenA):\n listB = listB.nextNode\n\n while listA != listB:\n listA = listA.nextNode\n listB = listB.nextNode\n return listA\n\n\n\n\n\n\n\n\n","repo_name":"vaisakhsrinivas/Python","sub_path":"IntersectionPointOfLinkedList.py","file_name":"IntersectionPointOfLinkedList.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13638618006","text":"import random\nfrom turtle import Screen,Turtle\nfrom playernames import name\nfrom tkinter import messagebox\n\ndef players(color):\n player_name = Turtle()\n player_name.color(color)\n player_name.shape(\"turtle\")\n player_name.penup()\n return player_name\n\n\nlist_color = [\"red\", \"yellow\", \"blue\", \"orange\", \"green\", \"brown\", \"gray\"]\n\nrace = []\nscreen = Screen()\nscreen.bgpic(\"image/Asset 3.png\")\nfor x in list_color:\n race.append(players(x))\ny = -90\nfor x in race:\n x.goto(-270,y)\n y+=30\n\nplay =True\nwhile play:\n rr = random.choice(race)\n rr.forward(5)\n for x in race:\n if x.position() >= (237,x.ycor()):\n messagebox.showinfo(\"game result\",f\"The winner is {x.color()[0].upper()}\")\n play = False\n\nscreen.exitonclick()","repo_name":"FJacobb/day_19_turtle_game_and_act_box","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34842139589","text":"from micropython import alloc_emergency_exception_buf, mem_info\nalloc_emergency_exception_buf(400)\n\n\nfrom math import ceil\nfrom time import sleep\nfrom pyb import Timer, DAC, UART\nfrom machine import Pin, ADC\nfrom music_playa import MusicPlaya, MusicPlayaMulti, EOSong\nfrom song_lister import lister\n\nfrom gc import collect, mem_alloc, mem_free\n\n\ntim = Timer(8)\ndac_right = DAC(Pin('X6'))\nadc_volume = ADC(Pin('X1'))\nuart = UART(2, 115200)\n\nnext_song_pin = Pin('Y1', Pin.IN, Pin.PULL_UP)\nplay_pause_pin = Pin('Y2', Pin.IN, Pin.PULL_UP)\nprev_song_pin = Pin('Y3', Pin.IN, Pin.PULL_UP)\nvolume_pin = Pin('X9', Pin.IN, Pin.PULL_UP)\nvolume_reset_pin = Pin('Y8', Pin.IN, 
Pin.PULL_UP)\nspeed_pin = Pin('X11', Pin.IN, Pin.PULL_UP)\nspeed_reset_pin = Pin('X10', Pin.IN, Pin.PULL_UP)\n\n\ncollect()\nplayer = MusicPlaya(tim, dac_right, speed=1)\nsongs = lister()\n\n\ndef size_to_bytes(num):\n tot_bits = len(bin(num)) - 2\n if tot_bits == 0: return b'\\x80'\n tot_bytes = ceil(tot_bits / 7)\n res = bytearray(tot_bytes)\n for i in range(tot_bytes):\n res[tot_bytes - i - 1] = (num & (0b1111111 << (7*i))) >> (7*i)\n res[tot_bytes - 1] |= 0b10000000\n return res\n\nexceptions_list = []\ndef main():\n sleep(3)\n while True:\n # check if stereo or mono\n \n playing = True\n song, song_size = songs.current()\n try:\n uart.write(b'\\x01')\n uart.write(song[:-4])\n uart.write(b'\\x03')\n uart.write(size_to_bytes(song_size // MusicPlaya.BUFFER_SIZE))\n collect()\n with open(song, 'rb') as song:\n song.seek(0, 0)\n while True:\n if playing:\n player.play(song)\n uart.write(b'\\x06')\n \n if play_pause_pin() == 0:\n playing = not playing\n sleep(0.5)\n if next_song_pin() == 0:\n playing = False\n sleep(0.5)\n raise EOSong(EOSong.NEXT)\n if prev_song_pin() == 0:\n playing = False\n sleep(0.5)\n raise EOSong(EOSong.PREV)\n \n if volume_pin() == 0:\n volume = adc_volume.read_u16() / 6554 + 1\n player.vol = volume\n if volume_reset_pin() == 0:\n player.vol = 1\n \n if speed_pin() == 0:\n speed = adc_volume.read_u16() / 655 + 1\n player.speed = speed\n if speed_reset_pin() == 0:\n player.speed = 1\n except EOSong as command:\n if command.op == EOSong.PREV:\n songs.prev()\n else:\n songs.next()\n except Exception as exc:\n exceptions_list.append(exc)\n print(exceptions_list)\n sleep(25)\n songs.next()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"gieddy660/MusicPlaya","sub_path":"pyboard/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"23404181499","text":"str=input(\"请输入两个正整数:\")\na,b=str.split(' ',1)\nA=eval(a)\nB=eval(b)\nlst=[A,B]\nsorted(lst)\nm=lst[0]\nn=lst[1]\nl=n%m\nwhile l!=0:\n n=m\n m=l\n l=n%m\nelse:\n print(\"最大公约数为:\",m)\nx=int(A*B/m)\nprint(\"最小公倍数为:\",x)\n","repo_name":"fanxueyingsyj/bbb","sub_path":"实验题/实验三/实验三第二题.py","file_name":"实验三第二题.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33050462491","text":"import os\nimport genai_core.types\nimport genai_core.upload\nimport genai_core.documents\nfrom pydantic import BaseModel\nfrom aws_lambda_powertools import Logger, Tracer\nfrom aws_lambda_powertools.event_handler.api_gateway import Router\n\ntracer = Tracer()\nrouter = Router()\nlogger = Logger()\n\n\nclass FileUploadRequest(BaseModel):\n fileName: str\n\n\nclass TextDocumentRequest(BaseModel):\n title: str\n content: str\n\n\nclass QnADocumentRequest(BaseModel):\n question: str\n answer: str\n\n\nclass WebsiteDocumentRequest(BaseModel):\n sitemap: bool\n address: str\n followLinks: bool\n limit: int\n\n\nallowed_extensions = set(\n [\n \".csv\",\n \".doc\",\n \".docx\",\n \".epub\",\n \".odt\",\n \".pdf\",\n \".ppt\",\n \".pptx\",\n \".tsv\",\n \".xlsx\",\n \".eml\",\n \".html\",\n \".json\",\n \".md\",\n \".msg\",\n \".rst\",\n \".rtf\",\n \".txt\",\n \".xml\",\n ]\n)\n\n\n@router.post(\"/workspaces//documents/file-upload\")\n@tracer.capture_method\ndef file_upload(workspace_id: str):\n data: dict = router.current_event.json_body\n request = FileUploadRequest(**data)\n\n _, extension = 
os.path.splitext(request.fileName)\n if extension not in allowed_extensions:\n raise genai_core.types.CommonError(\"Invalid file extension\")\n\n result = genai_core.upload.generate_presigned_post(workspace_id, request.fileName)\n\n return {\"ok\": True, \"data\": result}\n\n\n@router.get(\"/workspaces//documents/\")\n@tracer.capture_method\ndef get_documents(workspace_id: str, document_type: str):\n query_string = router.current_event.query_string_parameters or {}\n last_document_id = query_string.get(\"lastDocumentId\", None)\n\n result = genai_core.documents.list_documents(\n workspace_id, document_type, last_document_id\n )\n\n return {\n \"ok\": True,\n \"data\": {\n \"items\": [_convert_document(item) for item in result[\"items\"]],\n \"lastDocumentId\": result[\"last_document_id\"],\n },\n }\n\n\n@router.post(\"/workspaces//documents/\")\n@tracer.capture_method\ndef add_document(workspace_id: str, document_type: str):\n data: dict = router.current_event.json_body\n\n if document_type == \"text\":\n request = TextDocumentRequest(**data)\n request.title = request.title.strip()[:1000]\n result = genai_core.documents.create_document(\n workspace_id=workspace_id,\n document_type=document_type,\n title=request.title,\n content=request.content,\n )\n\n return {\n \"ok\": True,\n \"data\": {\n \"workspaceId\": result[\"workspace_id\"],\n \"documentId\": result[\"document_id\"],\n },\n }\n elif document_type == \"qna\":\n request = QnADocumentRequest(**data)\n request.question = request.question.strip()[:1000]\n request.answer = request.answer.strip()[:1000]\n result = genai_core.documents.create_document(\n workspace_id=workspace_id,\n document_type=document_type,\n title=request.question,\n content=request.question,\n content_complement=request.answer,\n )\n\n return {\n \"ok\": True,\n \"data\": {\n \"workspaceId\": result[\"workspace_id\"],\n \"documentId\": result[\"document_id\"],\n },\n }\n elif document_type == \"website\":\n request = WebsiteDocumentRequest(**data)\n request.address = request.address.strip()[:10000]\n document_sub_type = \"sitemap\" if request.sitemap else None\n request.limit = min(max(request.limit, 1), 1000)\n\n result = genai_core.documents.create_document(\n workspace_id=workspace_id,\n document_type=document_type,\n document_sub_type=document_sub_type,\n path=request.address,\n crawler_properties={\n \"follow_links\": request.followLinks,\n \"limit\": request.limit,\n },\n )\n\n return {\n \"ok\": True,\n \"data\": {\n \"workspaceId\": result[\"workspace_id\"],\n \"documentId\": result[\"document_id\"],\n },\n }\n\n\ndef _convert_document(document: dict):\n return {\n \"id\": document[\"document_id\"],\n \"type\": document[\"document_type\"],\n \"subType\": document[\"document_sub_type\"],\n \"status\": document[\"status\"],\n \"title\": document[\"title\"],\n \"path\": document[\"path\"],\n \"sizeInBytes\": document[\"size_in_bytes\"],\n \"vectors\": document[\"vectors\"],\n \"subDocuments\": document[\"sub_documents\"],\n \"errors\": document[\"errors\"],\n \"createdAt\": document[\"created_at\"],\n \"updatedAt\": document[\"updated_at\"],\n }\n","repo_name":"donhbk/aws-genai-llm-chatbot","sub_path":"lib/chatbot-api/functions/api-handler/routes/documents.py","file_name":"documents.py","file_ext":"py","file_size_in_byte":4868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31039889235","text":"\n\"\"\"\nClasses:\n Layer: Implementation of a layer of neurons\n Network: Implementation of a 
feedforward neural network\n \nUsage:\n\n l1 = Layer(2, 3) # Create a layer with 2 inputs and 3 outputs\n n1 = Network([4, 3, 2]) # Create a network with 2 input, 3 hidden, and 1 output neurons\n n2 = Network([100, 10, 5, 10]) # Create a network with 2 hidden layers\n\n output = n1.ff([0.5, 0.8, 0.1, 0.9]) # Forward pass\n error = n1.bp([0.4, -0.7]) # Backward pass, returns the error at the 4 input neurons\n \n etc. (see the examples at the end of the file)\n \nBoth contain methods to do a forward pass and a backward pass.\nLayers and Networks can be chained together.\n\n\"\"\"\n\n__author__ = \"Panagiotis Thomaidis\"\n\nfrom math import exp\nfrom random import random\n\n# activation functions\nlinear = lambda x: x\nbinary = lambda x: 1 if (x>=0) else 0\nrectif = lambda x: x if (x>=0) else 0\nsigmoid = lambda x: 1/(1 + exp(-x))\nstoch = lambda x: 1 if ((1/(1 + exp(-x)))>random()) else 0\n\nclass Layer:\n \"\"\"Implementation of a layer of neurons\n \n Params:\n nx: the number of input neurons\n ny: the number of output neurons\n w[][]: the weight of the layer\n b[]: the bias terms of the output neurons\n dw[][]: the calculated weight change\n db[]: the calculated bias change\n x[]: the latest input vector\n y[]: the latest output vector\n ex[]: the latest output error vector\n ey[]: the latest input error vector\n \n \"\"\"\n \n def __init__(self, inno, outno):\n \"\"\"Constructor\n \n Args:\n inno: the number of input neurons\n outno: the number of output neurons\n \n \"\"\"\n self.nx = inno # number of inputs\n self.ny = outno # number of outputs\n self.w = [[random()*2-1 for j in range(self.ny)] for i in range(self.nx)] # weights \n self.b = [random()*2-1 for j in range(self.ny)] # bias terms\n self.dw = [[0]*self.ny for i in range(self.nx)] # weight change \n self.db = [0]*self.ny # bias change\n self.x = [0]*self.nx # last input vector\n self.y = [0]*self.ny # last output vector\n self.ex = [0]*self.nx # last error at input\n self.ey = [0]*self.ny # last error at output\n\n def ff(self, input):\n \"\"\"Feedforward pass\n \n Args:\n input: activation values of the input neurons\n Returns:\n the activation of the neurons of this layer\n \n \"\"\"\n self.x = input\n for j in range(self.ny):\n self.y[j] = self.b[j]\n for i in range(self.nx):\n self.y[j] += self.x[i]*self.w[i][j]\n self.y[j] = sigmoid(self.y[j])\n return self.y\n\n def bp(self, error):\n \"\"\"Back-propagate the error and update the weights\n \n Args:\n error: the error of the output neurons\n Returns:\n the error of the input neurons\n \n \"\"\"\n self.ey = error\n self.ex = [0]*self.nx\n for j in range(self.ny):\n dEdz = self.y[j]*(1-self.y[j])*self.ey[j] # dE/dz\n for i in range(self.nx):\n self.ex[i] += dEdz*self.w[i][j] # back propagated error derivative\n self.dw[i][j] = dEdz*self.x[i] # weight update\n self.w[i][j] += self.dw[i][j]\n self.db[j] = dEdz # bias update\n self.b[j] += self.db[j]\n \n return self.ex\n \nclass Network:\n \"\"\"Implementation of a feedforward neural network\n \n Params:\n ls[]: the layers that constitute the network\n \n \"\"\"\n\n def __init__(self, ns):\n \"\"\"Constructor\n \n Args:\n ns[]: the number of neurons per layer\n \n \"\"\"\n self.ls = [0]*(len(ns)-1)\n for i in range(len(ns)-1):\n self.ls[i] = Layer(ns[i], ns[i+1])\n \n # Feed Forward\n def ff(self, input):\n \"\"\"Feedforward pass\n \n Args:\n input: activation values of the input layer\n Returns:\n the activation of the neurons of the output layer\n \n \"\"\"\n output = input\n for l in self.ls:\n output = l.ff(output)\n 
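# After the loop, 'output' holds the activations of the final (output) layer.\n 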
return output\n\n def bp(self, error):\n \"\"\"Back-propagate the error and update the weights\n \n Args:\n error: the error of the output layer\n Returns:\n the error of the input layer\n \n \"\"\"\n errin = error\n for l in reversed(self.ls):\n errin = l.bp(errin)\n return errin\n \nif __name__ == \"__main__\":\n \n # targets and vectors\n targets = [[0], [1], [1], [0]]\n vectors = [[0, 0], [0, 1], [1, 0], [1, 1]]\n \n # Using the Layers\n print(\"=== Layers ===\")\n l1 = Layer(2, 3)\n l2 = Layer(3, 1)\n\n for k in range(2000):\n for i in range(len(vectors)):\n out = l1.ff(vectors[i])\n out2 = l2.ff(out)\n diff = [targets[i][j]-out2[j] for j in range(len(out2))] # Error derivative: -(target - output)\n err2 = l2.bp(diff)\n err = l1.bp(err2)\n \n # Print the network outputs after training\n for i in range(len(vectors)):\n out = l1.ff(vectors[i])\n out2 = l2.ff(out)\n print(out2)\n \n # Using the Network\n print(\"=== Network ===\")\n n1 = Network([2, 3, 1])\n for k in range(2000):\n for i in range(len(vectors)):\n out = n1.ff(vectors[i])\n diff = [targets[i][j]-out[j] for j in range(len(out))]\n err = n1.bp(diff)\n\n # Print the network outputs after training \n for i in range(len(vectors)):\n out = n1.ff(vectors[i])\n print(out)\n","repo_name":"pthomaid/machinelearning","sub_path":"backpropagation.py","file_name":"backpropagation.py","file_ext":"py","file_size_in_byte":5883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70800511127","text":"import re\nimport os\nimport time\nimport joblib\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport typing as t\nimport numpy as np\nimport lightgbm as lgb\nimport category_encoders as ce\nfrom sklearn.preprocessing import LabelEncoder\nfrom datetime import datetime\nfrom itertools import product\n\nDATA_DIR = \"src/sample_data/Kaggle/predict_future_price\"\nSALES_TRAIN_PATH = f\"{DATA_DIR}/sales_train.csv\"\nITEMS_PATH = f\"{DATA_DIR}/items.csv\"\nITEM_CATEGORIES_PATH = f\"{DATA_DIR}/item_categories.csv\"\nPRED_PRICE_PATH = f\"{DATA_DIR}/pred_price.csv\"\nSHOPS_PATH = f\"{DATA_DIR}/shops.csv\"\nTEST_PATH = f\"{DATA_DIR}/test.csv\"\n\ndef name_correction(x):\n x = x.lower()\n x = x.partition('[')[0]\n x = x.partition('(')[0]\n x = re.sub('[^A-Za-z0-9А-Яа-я]+', ' ', x)\n x = x.replace(' ', ' ')\n x = x.strip()\n return x\n\ndef preprocessing_shops(shops):\n # shopsの前処理\n shops.loc[ shops.shop_name == 'Сергиев Посад ТЦ \"7Я\"',\"shop_name\" ] = 'СергиевПосад ТЦ \"7Я\"'\n shops[\"city\"] = shops.shop_name.str.split(\" \").map( lambda x: x[0] )\n shops[\"category\"] = shops.shop_name.str.split(\" \").map( lambda x: x[1] )\n shops.loc[shops.city == \"!Якутск\", \"city\"] = \"Якутск\"\n category = [] # 登場回数の少ないカテゴリは\"etc\"とする\n for cat in shops.category.unique():\n if len(shops[shops.category == cat]) > 4:\n category.append(cat)\n shops.category = shops.category.apply( lambda x: x if (x in category) else \"etc\" ) # 母数の多いカテゴリはそのまま、それ以外を「etc(その他)」としている。\n shops[\"category\"] = LabelEncoder().fit_transform(shops.category) # categoryとcityのカテゴリ変数をencording\n shops[\"city\"] = LabelEncoder().fit_transform(shops.city)\n shops = shops.drop(\"shop_name\", axis=1)\n return shops\n\ndef preprocessing_item_category(item_categories):\n # item_categoryの前処理\n item_categories[\"type_code\"] = item_categories.item_category_name.apply(lambda x: x.split()[0]).astype(str) # 文字列の\" \"で区切られている部分の先頭の文字列を取得する。\n item_categories.loc[(item_categories.type_code == \"Игровые\") | (item_categories.type_code == 
\"Аксессуары\"), \"category\"] = \"Игры\"\n category = [] # 登場回数の少ないカテゴリは\"etc\"とする\n for cat in item_categories.type_code.unique():\n if len(item_categories[item_categories.type_code == cat]) > 4:\n category.append(cat)\n item_categories.type_code = item_categories.type_code.apply(lambda x: x if (x in category) else \"etc\")\n item_categories[\"type_code\"] = LabelEncoder().fit_transform(item_categories.type_code)\n item_categories[\"split\"] = item_categories.item_category_name.apply(lambda x: x.split(\"-\"))\n item_categories[\"subtype\"] = item_categories.split.apply(lambda x: x[1].strip() if len(x) > 1 else x[0].strip())\n item_categories[\"subtype_code\"] = LabelEncoder().fit_transform(item_categories.subtype)\n item_categories = item_categories.loc[:, [\"item_category_id\", \"type_code\", \"subtype_code\"]]\n return item_categories\n\ndef preprocessing_items(items):\n # itemsの前処理\n items[\"name1\"], items[\"name2\"] = items.item_name.str.split(\"[\", 1).str\n items[\"name1\"], items[\"name3\"] = items.item_name.str.split(\"(\", 1).str\n items[\"name2\"] = items.name2.str.replace('[^A-Za-z0-9А-Яа-я]+', \" \").str.lower()\n items[\"name3\"] = items.name3.str.replace('[^A-Za-z0-9А-Яа-я]+', \" \").str.lower()\n items = items.fillna('0')\n items[\"item_name\"] = items[\"item_name\"].apply(lambda x: name_correction(x))\n items.name2 = items.name2.apply( lambda x: x[:-1] if x !=\"0\" else \"0\")\n items[\"type\"] = items.name2.apply(lambda x: x[0:8] if x.split(\" \")[0] == \"xbox\" else x.split(\" \")[0] )\n items.loc[(items.type == \"x360\") | (items.type == \"xbox360\") | (items.type == \"xbox 360\") ,\"type\"] = \"xbox 360\"\n items.loc[ items.type == \"\", \"type\"] = \"mac\"\n items.type = items.type.apply( lambda x: x.replace(\" \", \"\") )\n items.loc[ (items.type == 'pc' )| (items.type == 'pс') | (items.type == \"pc\"), \"type\" ] = \"pc\"\n items.loc[ items.type == 'рs3' , \"type\"] = \"ps3\"\n remove_cols = []\n for name, value in items[\"type\"].value_counts().items():\n if value < 40:\n remove_cols.append(name) \n else:\n pass\n items.name2 = items.name2.apply(lambda x: \"etc\" if (x in remove_cols) else x)\n items = items.drop([\"type\"], axis = 1)\n items.name2 = LabelEncoder().fit_transform(items.name2)\n items.name3 = LabelEncoder().fit_transform(items.name3)\n items.drop([\"item_name\", \"name1\"],axis = 1, inplace= True)\n return items\n\n\n\ndef preprocessing_train_test(train, test):\n train = train[train.item_price > 0].reset_index(drop = True)\n train.loc[train.item_cnt_day < 1, \"item_cnt_day\"] = 0\n\n train.loc[train.shop_id == 0, \"shop_id\"] = 57\n test.loc[test.shop_id == 0 , \"shop_id\"] = 57\n train.loc[train.shop_id == 1, \"shop_id\"] = 58\n test.loc[test.shop_id == 1 , \"shop_id\"] = 58\n train.loc[train.shop_id == 11, \"shop_id\"] = 10\n test.loc[test.shop_id == 11, \"shop_id\"] = 10\n train.loc[train.shop_id == 40, \"shop_id\"] = 39\n test.loc[test.shop_id == 40, \"shop_id\"] = 39\n train[\"revenue\"] = train[\"item_cnt_day\"] * train[\"item_price\"]\n test[\"date_block_num\"] = 34 # 0~33までを学習データとし用い、34(ひと月分)の売り上げを予測する\n test = test.apply(lambda x: x.astype(np.int16))\n\n return train, test\n\n\ndef gen_lag_feature(matrix, lags, cols):\n pre_cols = [\"shop_id\", \"item_id\"]\n for col in cols:\n _df = matrix.loc[:, [*pre_cols, col]]\n for lag in lags:\n matrix[f\"{col}_lag_{lag}\"] = _df.groupby(pre_cols)[col].shift(lag)\n return matrix\n\n\ndef get_model(train_dataset: t.Any, valid_dataset: t.Any) -> t.Any:\n params = {\n \"objective\": \"regression\",\n 
\"boosting_type\": \"gbdt\",\n 'metric' : {'rmse'},\n 'num_leaves' : 200,\n 'min_data_in_leaf': 1000,\n 'num_iterations' : 10000,\n 'learning_rate' : 0.1,\n 'feature_fraction' : 0.8,\n }\n model = lgb.train(\n params=params,\n train_set=train_dataset,\n valid_sets=valid_dataset,\n early_stopping_rounds=10,\n )\n return model\n\n\n\ndef main():\n train = pd.read_csv(SALES_TRAIN_PATH)\n items = pd.read_csv(ITEMS_PATH)\n item_categories = pd.read_csv(ITEM_CATEGORIES_PATH)\n pred_price = pd.read_csv(PRED_PRICE_PATH)\n shops = pd.read_csv(SHOPS_PATH)\n test = pd.read_csv(TEST_PATH)\n\n train, test = preprocessing_train_test(train, test)\n shops = preprocessing_shops(shops)\n item_categories = preprocessing_item_category(item_categories)\n items = preprocessing_items(items)\n\n matrix = []\n cols = [\"date_block_num\", \"shop_id\", \"item_id\"]\n for i in range(34): # date_block_num(1月分)が0~33まで存在する。\n sales = train[train[\"date_block_num\"] == i] # 1月毎の売り上げを抽出\n sales_matrix = np.array(list(product([i], sales.shop_id.unique(), sales.item_id.unique())), dtype = np.int16) # 月、item_id、shop_idの組み合わせ(直積)を算出\n matrix.append(sales_matrix)\n matrix = pd.DataFrame(np.vstack(matrix), columns=cols).sort_values(cols) # 月、item_id、shop_idの組み合わせをdataframeにしたもの\n matrix = matrix.apply(lambda x: x.astype(np.int16))\n\n group = train.groupby([\"date_block_num\", \"shop_id\", \"item_id\"]).agg({\"item_cnt_day\": [\"sum\"]}) # [\"date_block_num\", \"shop_id\", \"item_id\"]の組み合わせごとの総売り上げ数\n group.columns = [\"item_cnt_month\"]\n group = group.reset_index()\n merged_matrix = pd.merge(matrix, group, how=\"left\", on=cols)\n merged_matrix[\"item_cnt_month\"] = merged_matrix[\"item_cnt_month\"].fillna(0).clip(0, 20) # その月で売り上げのなかったitemは0で置換し、最小値を0、最大値を20として外れ値を除去\n\n submit_ids = test.loc[:, \"ID\"]\n merged_matrix = pd.concat([merged_matrix, test.drop(\"ID\", axis=1)], axis=0).fillna(0).reset_index(drop=True)\n\n # 与えられたデータフレームのマージ\n merged_items = pd.merge(\n items,\n item_categories,\n on=\"item_category_id\",\n how=\"left\"\n )\n merged_matrix = pd.merge(\n merged_matrix,\n merged_items,\n on=\"item_id\",\n how=\"left\"\n )\n merged_matrix = pd.merge(\n merged_matrix,\n shops,\n on=\"shop_id\",\n how=\"left\"\n )\n merged_matrix = merged_matrix.apply(lambda x: x.astype(np.int16))\n merged_matrix = gen_lag_feature(merged_matrix, [1,2,3], [\"item_cnt_month\"]) # 1,2,3か月前の\"item_cnt_month\"を特徴量に追加\n\n # 各月における全アイテムの売り上げ平均数を特徴量に追加\n group = merged_matrix.groupby(\"date_block_num\").agg({\"item_cnt_month\" : \"mean\"}) \n group.columns = [\"date_avg_item_cnt\"] \n group.reset_index(inplace = True) # col: date_block_num, date_avg_item_cnt とする\n merged_matrix = pd.merge(merged_matrix, group, how=\"left\", on=\"date_block_num\")\n merged_matrix.date_avg_item_cnt = merged_matrix[\"date_avg_item_cnt\"].astype(np.float16)\n merged_matrix = gen_lag_feature(merged_matrix, [1], [\"date_avg_item_cnt\"]) # 1か月前の\"date_avg_item_cnt\"を特徴量に追加\n\n # 各月における、アイテム毎の売り上げ平均数を特徴量に追加\n group = merged_matrix.groupby([\"date_block_num\", \"item_id\"]).agg({\"item_cnt_month\" : \"mean\"}) \n group.columns = [\"date_item_avg_item_cnt\"] \n group.reset_index(inplace = True) # col: date_block_num, item_id, date_item_avg_item_cnt とする\n merged_matrix = pd.merge(merged_matrix, group, how=\"left\", on=[\"date_block_num\", \"item_id\"])\n merged_matrix.date_avg_item_cnt = merged_matrix[\"date_item_avg_item_cnt\"].astype(np.float16)\n merged_matrix = gen_lag_feature(merged_matrix, [1,2,3], [\"date_item_avg_item_cnt\"])\n\n # 各月におけるアイテムごとの平均価格を特徴量として追加\n 
group = train.groupby([\"date_block_num\", \"item_id\"]).agg({\"item_price\": \"mean\"})\n group.columns = [\"date_item_avg_item_price\"]\n group.reset_index(inplace = True)\n merged_matrix = pd.merge(merged_matrix, group, how=\"left\", on=[\"date_block_num\", \"item_id\"])\n merged_matrix.date_item_avg_item_price = merged_matrix[\"date_item_avg_item_price\"].astype(np.float16)\n merged_matrix = gen_lag_feature(merged_matrix, [1,2,3], [\"date_item_avg_item_price\"])\n\n # 月と日付の特徴量も追加\n merged_matrix[\"month\"] = merged_matrix[\"date_block_num\"] % 12\n days = pd.Series([31,28,31,30,31,30,31,31,30,31,30,31])\n merged_matrix[\"days\"] = merged_matrix[\"month\"].map(days).astype(np.int16)\n\n dataset = merged_matrix[merged_matrix[\"date_block_num\"] > 3] # lag情報付与により3月分はNanになっている為除去\n\n # date_block_num が1~32 のものを学習に、33のものを評価用に、34のものを検証用に用いる\n train_x = dataset[dataset.date_block_num < 33].drop(['item_cnt_month'], axis=1).reset_index(drop=True)\n train_y = dataset[dataset.date_block_num < 33]['item_cnt_month'].reset_index(drop=True)\n valid_x = dataset[dataset.date_block_num == 33].drop(['item_cnt_month'], axis=1).reset_index(drop=True)\n valid_y = dataset[dataset.date_block_num == 33]['item_cnt_month'].reset_index(drop=True)\n test_x = dataset[dataset.date_block_num == 34].drop(['item_cnt_month'], axis=1).reset_index(drop=True)\n\n train_dataset = lgb.Dataset(train_x, train_y)\n valid_dataset = lgb.Dataset(valid_x, valid_y, reference=train_dataset)\n\n if not os.path.isfile(f\"{DATA_DIR}/lgb_model.pkl\"):\n model = get_model(train_dataset, valid_dataset)\n joblib.dump(model, f\"{DATA_DIR}/lgb_model.pkl\")\n else:\n model = joblib.load(f\"{DATA_DIR}/lgb_model.pkl\")\n \n ids = test_x.index\n y_pred = model.predict(test_x).clip(0, 20) # test_x に対して予測し、予測値の範囲を(0,20)に設定\n print(y_pred)\n submission = pd.DataFrame(\n {\n \"ID\": range(len(test_x)),\n \"item_cnt_month\": y_pred\n }\n )\n print(submission)\n submission.to_csv(f\"{DATA_DIR}/submission_1.csv\", index=False)\n\n\n\nif __name__ == \"__main__\":\n # 2015年11月の売り上げを予測する。\n main()","repo_name":"ueda-hiroyuki/machine_learning","sub_path":"app/src/python_file/kaggle/predict_future_price/predict_future_price.py","file_name":"predict_future_price.py","file_ext":"py","file_size_in_byte":12396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4442452280","text":"import sys, collections\nsys.stdin = open(\"force2048_input.txt\")\n\ntc = int(input())\nfor T in range(1, tc+1):\n N = int(input())\n arr = list(map(int, input().split()))\n chk = [0] * 12 # 2의 11승 = 2048\n nums = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048]\n\n if 2048 in arr:\n print('YES')\n else:\n for num in arr:\n if num <= 2048:\n # print(nums.index(num))\n chk[nums.index(num)] += 1 # 몇승인지 그 값을 올려주자.\n # print(chk)\n for i in range(11):\n chk[i + 1] += chk[i] // 2\n if chk[-1] > 0:\n print('YES')\n else:\n print('NO')","repo_name":"yoonwoo123/Algorithm","sub_path":"백준모음/2048.py","file_name":"2048.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30635746244","text":"import datetime\nfrom unittest import TestCase\n\nfrom kskm.common.data import AlgorithmDNSSEC, Key, Signature, Signer, TypeDNSSEC\nfrom kskm.common.parse_utils import (\n duration_to_timedelta,\n keys_from_dict,\n signature_from_dict,\n)\nfrom kskm.ksr.parse_utils import signers_from_list\n\n\nclass Test_duration_to_timedelta(TestCase):\n def 
def test_duration_to_timedelta_empty(self):\n        \"\"\" Test empty input \"\"\"\n        td = duration_to_timedelta(\"\")\n        self.assertEqual(td.total_seconds(), 0)\n\n    def test_duration_to_timedelta_basic(self):\n        \"\"\" Test the most basic case \"\"\"\n        td = duration_to_timedelta(\"P1D\")\n        self.assertEqual(td.total_seconds(), 86400)\n\n    def test_duration_to_timedelta_day_hour(self):\n        \"\"\" Test hour \"\"\"\n        td = duration_to_timedelta(\"P1H\")\n        self.assertEqual(td.total_seconds(), 3600)\n\n    def test_duration_to_timedelta_day_minute(self):\n        \"\"\" Test both day and minute \"\"\"\n        td = duration_to_timedelta(\"P1DT1M\")\n        self.assertEqual(td.total_seconds(), 86460)\n\n    def test_duration_to_timedelta_day_second(self):\n        \"\"\" Test day and second \"\"\"\n        td = duration_to_timedelta(\"P1D1\")\n        self.assertEqual(td.total_seconds(), 86401)\n\n    def test_duration_to_timedelta_second(self):\n        \"\"\" Test second \"\"\"\n        td = duration_to_timedelta(\"P11S\")\n        self.assertEqual(td.total_seconds(), 11)\n\n    def test_duration_to_timedelta_week(self):\n        \"\"\" Test week \"\"\"\n        td = duration_to_timedelta(\"P1W\")\n        self.assertEqual(td.total_seconds(), 86400 * 7)\n\n    def test_bogus(self):\n        \"\"\" Test totally bogus duration \"\"\"\n        with self.assertRaises(ValueError):\n            duration_to_timedelta(\"foo\")\n\n    def test_invalid(self):\n        \"\"\" Test invalid duration \"\"\"\n        with self.assertRaises(ValueError):\n            duration_to_timedelta(\"Pfoo\")\n\n\nclass Test_signers_from_list(TestCase):\n    def test_basic(self):\n        \"\"\" Test basic KSR Signer parsing \"\"\"\n        data = [\n            {\"attrs\": {\"keyIdentifier\": \"KC00020\"}, \"value\": \"\"},\n            {\"attrs\": {\"keyIdentifier\": \"KC00094\"}, \"value\": \"\"},\n        ]\n        out = signers_from_list(data)\n        self.assertEqual(\n            out, {Signer(key_identifier=\"KC00020\"), Signer(key_identifier=\"KC00094\")}\n        )\n\n    def test_no_signer(self):\n        \"\"\" Test that KSR Signer is optional \"\"\"\n        self.assertIsNone(signers_from_list([]))\n\n\nclass Test_keys_from_list(TestCase):\n    def test_basic(self):\n        \"\"\" Test basic KSR Key parsing \"\"\"\n        data = [\n            {\n                \"attrs\": {\"keyIdentifier\": \"ZSK-24315\", \"keyTag\": \"24315\"},\n                \"value\": {\n                    \"Algorithm\": \"5\",\n                    \"Flags\": \"256\",\n                    \"Protocol\": \"3\",\n                    \"PublicKey\": \"A...\",\n                    \"TTL\": 1978,\n                },\n            }\n        ]\n        out = keys_from_dict(data)\n        expected = {\n            Key(\n                key_identifier=\"ZSK-24315\",\n                key_tag=24315,\n                ttl=1978,\n                flags=256,\n                protocol=3,\n                algorithm=AlgorithmDNSSEC.RSASHA1,\n                public_key=b\"A...\",\n            )\n        }\n        self.assertEqual(out, expected)\n\n    def test_with_ttl(self):\n        \"\"\" Test Key with TTL \"\"\"\n        data = [\n            {\n                \"attrs\": {\"keyIdentifier\": \"ZSK-24315\", \"keyTag\": \"24315\"},\n                \"value\": {\n                    \"Algorithm\": \"5\",\n                    \"Flags\": \"256\",\n                    \"Protocol\": \"3\",\n                    \"PublicKey\": \"A...\",\n                    \"TTL\": \"1978\",\n                },\n            }\n        ]\n        out = keys_from_dict(data)\n        expected = {\n            Key(\n                key_identifier=\"ZSK-24315\",\n                key_tag=24315,\n                ttl=1978,\n                flags=256,\n                protocol=3,\n                algorithm=AlgorithmDNSSEC.RSASHA1,\n                public_key=b\"A...\",\n            )\n        }\n        self.assertEqual(out, expected)\n\n    def test_ecdsa_key(self):\n        \"\"\" Test loading an ECDSA key \"\"\"\n        public_key = r\"BGuqYyOGr0p/uKXm0MmP4Cuiml/a8FCPRDLerVyBS4jHmJlKTJmYk/nCbOp936DSh5SMu6+2WYJUI6K5AYfXbTE=\"\n        data = [\n            {\n                \"attrs\": {\"keyIdentifier\": \"EC1\", \"keyTag\": \"0\"},\n                \"value\": {\n                    \"Algorithm\": AlgorithmDNSSEC.ECDSAP256SHA256.value,\n                    \"Flags\": \"256\",\n                    \"Protocol\": \"3\",\n                    \"PublicKey\": public_key,\n                    \"TTL\": \"1978\",\n                },\n            }\n        ]\n        out = keys_from_dict(data)\n        
expected = {\n            Key(\n                key_identifier=\"EC1\",\n                key_tag=0,\n                ttl=1978,\n                flags=256,\n                protocol=3,\n                algorithm=AlgorithmDNSSEC.ECDSAP256SHA256,\n                public_key=public_key.encode(),\n            )\n        }\n        self.assertEqual(out, expected)\n\n        # now change the algorithm and verify that the discrepancy between curve point size and algorithm is detected\n        data[0][\"value\"][\"Algorithm\"] = AlgorithmDNSSEC.ECDSAP384SHA384.value\n        with self.assertRaises(ValueError) as exc:\n            keys_from_dict(data)\n        self.assertEqual(\n            \"Unexpected ECDSA key length 256 for algorithm AlgorithmDNSSEC.ECDSAP384SHA384\",\n            str(exc.exception),\n        )\n\n\nclass Test_signature_from_dict(TestCase):\n    def test_basic(self):\n        \"\"\" Test basic KSR Signature parsing \"\"\"\n        sig = {\n            \"attrs\": {\"keyIdentifier\": \"ZSK-24315\"},\n            \"value\": {\n                \"Algorithm\": \"5\",\n                \"KeyTag\": \"24315\",\n                \"Labels\": \"0\",\n                \"OriginalTTL\": \"3600\",\n                \"SignatureData\": \"SIG...\",\n                \"SignatureExpiration\": \"2009-09-24T18:22:41Z\",\n                \"SignatureInception\": \"2009-08-25T18:22:41Z\",\n                \"SignersName\": \".\",\n                \"TypeCovered\": \"DNSKEY\",\n                \"TTL\": 1234,\n            },\n        }\n        out = signature_from_dict(sig)\n        utc = datetime.timezone.utc\n        expected = {\n            Signature(\n                key_identifier=\"ZSK-24315\",\n                ttl=1234,\n                type_covered=TypeDNSSEC.DNSKEY,\n                algorithm=AlgorithmDNSSEC.RSASHA1,\n                labels=0,\n                original_ttl=3600,\n                signature_expiration=datetime.datetime(\n                    2009, 9, 24, 18, 22, 41, tzinfo=utc\n                ),\n                signature_inception=datetime.datetime(\n                    2009, 8, 25, 18, 22, 41, tzinfo=utc\n                ),\n                key_tag=24315,\n                signers_name=\".\",\n                signature_data=b\"SIG...\",\n            )\n        }\n        self.assertEqual(out, expected)\n","repo_name":"iana-org/dnssec-keytools","sub_path":"src/kskm/ksr/tests/test_parse_utils.py","file_name":"test_parse_utils.py","file_ext":"py","file_size_in_byte":6950,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"32286058637","text":"import json\nimport sys\nfrom django.db import transaction\nfrom django.core.management.base import BaseCommand\n\nfrom catalog.models import Location, PublicPlace, SocialInfo, WorkingSchedule, PhoneContact\n\n\nclass Command(BaseCommand):\n    help = 'Load datasets of ...'\n\n    @transaction.atomic\n    def get_data(self):\n        with open('data.txt', mode='r', encoding='utf-8') as json_file:\n            data = json.load(json_file)\n            for obj in data:\n                public_place, _isCreated = PublicPlace.objects.get_or_create(\n                    name=obj['name'],\n                    city_id=obj['city_id'],\n                    country_id=obj['country_id'],\n                    category_id=obj['category_id']\n                )\n\n                location, _isCreated = Location.objects.get_or_create(\n                    public_place=public_place,\n                    address=obj['address']\n                )\n                # social\n                if obj['socials']['facebook']:\n                    SocialInfo.objects.get_or_create(\n                        public_place=public_place,\n                        name_social=\"Facebook\",\n                        link=obj['socials']['facebook']\n                    )\n\n                if obj['socials']['instagram']:\n                    SocialInfo.objects.get_or_create(\n                        public_place=public_place,\n                        name_social=\"Instagram\",\n                        link=obj['socials']['instagram']\n                    )\n\n                # schedule\n                schedule_object = obj['schedule']\n                days_names = [('Mon', 'Monday'), ('Tue', 'Tuesday'), ('Wed', 'Wednesday'), ('Thu', 'Thursday'),\n                              ('Fri', 'Friday'), ('Sat', 'Saturday'), ('Sun', 'Sunday')]\n                if schedule_object:\n                    for day_tuple_item in days_names:\n                        day_name_short = day_tuple_item[0]\n                        day_name_long = day_tuple_item[1]\n\n                        schedule_in_mon_from = schedule_object[day_name_short][0][0:5]\n                        schedule_in_mon_to = schedule_object[day_name_short][0][8:]\n\n                        
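# The raw schedule strings are assumed to look like \"HH:MM - HH:MM\", which is\n                        # why [0:5] takes the opening time and [8:] the closing time (the break\n                        # value below is sliced the same way).\n                        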
break_in_mon = schedule_object[day_name_short][1]\n\n                        if 'Без перерви' == break_in_mon: # 'Без перерви' is Ukrainian for \"no break\"\n                            break_from = '00:00'\n                            break_to = '00:00'\n                        else:\n                            break_from = schedule_object[day_name_short][1][0:5]\n                            break_to = schedule_object[day_name_short][1][8:]\n\n                        WorkingSchedule.objects.get_or_create(\n                            location=location,\n                            day=day_name_long,\n                            work_time_from=schedule_in_mon_from,\n                            work_time_to=schedule_in_mon_to,\n                            break_time_from=break_from,\n                            break_time_to=break_to\n                        )\n\n                # phone\n                for phone in obj['phones']:\n                    PhoneContact.objects.get_or_create(\n                        location=location,\n                        phone=phone\n                    )\n\n    def handle(self, *args, **options):\n        self.get_data()\n        self.stdout.write(self.style.SUCCESS('Script successfully finished!'))\n","repo_name":"andreea0008/cc_server_side_python","sub_path":"catalog/management/commands/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2883772216","text":"# coding=utf-8\n\nimport pandas as pd\nimport numpy as np\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\n\nfrom matplotlib import pyplot as plt\n# from matplotlib import font_manager\n\n# my_font = font_manager.FontProperties(fname=\"/usr/share/fonts/truetype/arphic/uming.ttc\") # /usr/share/fonts/truetype/arphic/ukai.ttc\")\n\nfile_path = \"./BeijingPM20100101_20151231.csv\"\ndf = pd.read_csv(file_path)\nprint(df.head(1))\n\n# combine the separate year/month/day/hour fields into a pandas period index\nperiod = pd.PeriodIndex(year=df[\"year\"],month=df[\"month\"], day=df[\"day\"],hour=df[\"hour\"], freq=\"H\")\n# print(period)\n# print(type(period))\ndf[\"datetime\"] = period\nprint(df.head(5))\n\ndf.set_index(\"datetime\", inplace=True)\ndf = df.resample(\"7D\").mean()\n# drop missing data\n# print(df[\"PM_US Post\"])\ndata = df[\"PM_US Post\"].dropna()\ndata_china = df[\"PM_Dongsi\"]\n\n# plot\n_x = data.index\n_x = [i.strftime(\"%Y-%m-%d\") for i in _x]\n_x_china = [i.strftime(\"%Y-%m-%d\") for i in data_china.index]\n_y = data.values\n_y_china = data_china.values\n\nplt.figure(figsize=(20, 8), dpi=80)\nplt.plot(range(len(_x)), _y, label=\"US_POST\")\nplt.plot(range(len(_x)), _y_china, label=\"CN_POST\")\nplt.xticks(range(len(_x))[::5], _x[::5], rotation=90)\nplt.legend(loc=\"best\")\nplt.grid(alpha=0.4, linestyle=\":\")\nplt.show()\n\n","repo_name":"00lab/AI-Lab","sub_path":"21数据分析/21源码/28_合并时间_PM2.5案例.py","file_name":"28_合并时间_PM2.5案例.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"3916689097","text":"import json\nfrom abc import ABCMeta, abstractmethod\nfrom typing import Callable, List, Union\nfrom urllib.parse import parse_qsl, urlencode\n\nfrom jsonpath_ng import parse\n\nfrom lxml import etree\n\n\nclass TransformerError(Exception):\n    pass\n\n\nclass PayloadTransformer(metaclass=ABCMeta):\n    @abstractmethod\n    def transform(\n        self,\n        payload: Union[str, bytes],\n        transformer_array: List[str],\n        operation: Callable,\n    ) -> Union[str, bytes]:\n        pass\n\n\nclass JsonTransformer(PayloadTransformer):\n    def transform(\n        self,\n        payload: Union[str, bytes],\n        transformer_array: List[str],\n        operation: Callable,\n    ):\n        payload_json = json.loads(payload)\n        for expression in transformer_array:\n            json_expr = parse(expression)\n            for match in json_expr.find(payload_json):\n                json_expr.update(payload_json, operation(match.value))\n        return json.dumps(payload_json)\n\n\n
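# Illustrative (hypothetical) use of JsonTransformer above: with payload\n# '{\"card\": \"4111\"}', transformer_array ['$.card'] and operation\n# lambda v: 'tok_' + v, transform() returns '{\"card\": \"tok_4111\"}';\n# jsonpath-ng finds the matching nodes and update() rewrites them in place.\n\n\n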
class FormDataTransformer(PayloadTransformer):\n    def transform(\n        self,\n        payload: Union[str, bytes],\n        transformer_array: List[str],\n        operation: Callable,\n    ) -> str:\n        result = []\n        target_fields = set(transformer_array)\n        if isinstance(payload, bytes):\n            payload = payload.decode()\n\n        for name, value in parse_qsl(payload, keep_blank_values=True):\n            if value and name in target_fields:\n                value = operation(value)\n            result.append((name, value))\n\n        return urlencode(result)\n\n\nclass XMLTransformer(PayloadTransformer):\n    def transform(\n        self,\n        payload: Union[str, bytes],\n        transformer_array: List[str],\n        operation: Callable,\n    ) -> str:\n        try:\n            root = etree.fromstring(payload)\n        except etree.XMLSyntaxError as exc:\n            raise TransformerError(f'Invalid XML payload: {exc}.') from exc\n\n        has_matches = False\n        for expr in transformer_array:\n            try:\n                for element in root.xpath(expr):\n                    has_matches = True\n                    self._transform(operation, element)\n\n            except etree.XPathEvalError as exc:\n                raise TransformerError(\n                    f'Invalid XPath expression {expr}: {exc}.'\n                ) from exc\n\n        if has_matches:\n            return etree.tostring(root, encoding='utf-8').decode('utf-8')\n\n        return payload\n\n    def _transform(self, operation: Callable, element: etree.ElementBase):\n        value = ''.join(element.itertext())\n        if not value:\n            return\n        element.clear()\n        element.text = operation(value)\n\n\ntransformer_map = {\n    'FORM_FIELD': FormDataTransformer(),\n    'JSON_PATH': JsonTransformer(),\n    'XPATH': XMLTransformer(),\n}\n","repo_name":"mnimmny/vgs-satellite","sub_path":"satellite/vault/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":2808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"20389006397","text":"\"\"\"\n    Product of Array Except Self\n\nSolution\nGiven an integer array nums, return an array answer such that answer[i] is equal to the product of all the elements of nums except nums[i].\n\nThe product of any prefix or suffix of nums is guaranteed to fit in a 32-bit integer.\n\n \n\nExample 1:\n\nInput: nums = [1,2,3,4]\nOutput: [24,12,8,6]\nExample 2:\n\nInput: nums = [-1,1,0,-3,3]\nOutput: [0,0,9,0,0]\n \n\nConstraints:\n\n2 <= nums.length <= 10^5\n-30 <= nums[i] <= 30\nThe product of any prefix or suffix of nums is guaranteed to fit in a 32-bit integer.\n \n\nFollow up:\n\nCould you solve it in O(n) time complexity and without using division?\nCould you solve it with O(1) constant space complexity? 
(The output array does not count as extra space for space complexity analysis.)\n\"\"\"\n\ndef product_except_self(nums):\n    p = 1\n    n = len(nums)\n    output = []\n    for i in range(0,n):\n        output.append(p)\n        p = p * nums[i]\n    p = 1\n    for i in range(n-1,-1,-1):\n        output[i] = output[i] * p\n        print(\"output[i] = \" + str(output[i]))\n        p = p * nums[i]\n        print(\"p = \" + str(p))\n        print(\"----------------------\")\n    return output\n    \nif __name__ == \"__main__\":\n    product_except_self([1,2,3,4])","repo_name":"DenysTT/leetcode","sub_path":"medium/product_of_array_except_self.py","file_name":"product_of_array_except_self.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30868441916","text":"# Import flask from the Flask library\nfrom flask import Flask, render_template, request, session\nfrom flask_session import Session\n\n# Import the translation function\nfrom translate import detect, translate\n\n# Import the dpla search\nfrom dpla import dpla\n\n# Use CSV reader to read in Amazon Listings\nimport csv\n\n# Create a Flask object and pass in __name__\napp = Flask(__name__)\n\n# Configure session\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = 'filesystem'\nSession(app)\n\nDATABASE = [\n    \"All\", \"Dpla\", \"Amazon Listings\", \"Europeana\"\n]\n\nLANGUAGES = {\n    'ar': 'Arabic',\n    'cs': 'Czech',\n    'de': 'German',\n    'el': 'Greek',\n    'es': 'Spanish',\n    'fr': 'French',\n    'ga': 'Irish',\n    'he': 'Hebrew',\n    'it': 'Italian',\n    'ja': 'Japanese',\n    'ko': 'Korean',\n    'ms': 'Malay',\n    'ru': 'Russian',\n    'sw': 'Swahili',\n    'ur': 'Urdu',\n    'zh-CN': 'Chinese'\n}\n\n# Set the url ending, this is a root path\n@app.route('/')\ndef greet():\n    return render_template(\"greet.html\")\n\n@app.route('/select')\ndef select():\n    return render_template(\"select.html\", database=DATABASE)\n\n@app.route('/language')\ndef language():\n    name = request.args.get(\"name\")\n    database = request.args.get(\"database\")\n    session[\"name\"] = name\n    session[\"database\"] = database\n\n    detected_language = detect(name).lang\n    output = []\n\n    for key in LANGUAGES:\n        temp = dict()\n        temp['language'] = LANGUAGES[key]\n        temp['text'] = translate(dest=key, string=name).text\n        output.append(temp)\n\n    return render_template(\"language.html\", name=name, database=database,\n                           langauge=detected_language, output=output) # the template still expects the misspelled 'langauge' key\n\n@app.route('/result')\ndef result():\n    name = session.get(\"name\")\n    database = session.get(\"database\")\n\n    dpla_data = dpla(tosearch=name)\n    return render_template(\"result.html\", name=name, database=database, dpla_data=dpla_data)\n\n@app.route('/amazon')\ndef amazon():\n    with open(\"images.txt\", 'r') as f:\n        content_list = f.readlines()\n    content_list = content_list[0].replace(\"[\", \"\").replace(\"]\",\"\").\\\n        replace(\"src=\", \"\").replace('\"', '').replace(\"'\",\"\").split(\",\")\n    return render_template('amazon.html', dress=content_list)\n\nif __name__ == '__main__':\n    app.run()\n\n\n\n\n\n\n","repo_name":"Towerhint/is310_final_project","sub_path":"Project/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36816968039","text":"#!/usr/bin/env python3\r\nimport sys\r\nimport os\r\nimport argparse\r\nfrom collections import defaultdict\r\n\r\n
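# Note: DirectoryParser below measures depth as the number of os.sep-separated\r\n# components in the visited path string, so --maxdepth is relative to the path\r\n# you pass in (an absolute path starts \"deeper\" than a relative one).\r\n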
def ArgumentParser():\r\n    parser = argparse.ArgumentParser(\r\n        description='Count the number of files in a given directory and its subdirectories')\r\n    parser.add_argument(\"--dir\",\"-d\", help='give a directory', required=True)\r\n    parser.add_argument(\"--out\",\"-o\",help=\"output name\")\r\n    parser.add_argument(\"--maxdepth\",\"-m\",default=999,type=int,help=\"max depth directory query\")\r\n    args = parser.parse_args()\r\n    return args\r\n\r\ndef DirectoryParser(path,maxdepth):\r\n    out = defaultdict(lambda : 0)\r\n    for root, dirs, files in os.walk(path):\r\n        n_depth = len(root.split(os.sep))\r\n        if n_depth > maxdepth: continue\r\n        nFiles = len(files)\r\n        out[root] += nFiles\r\n        out[path] += nFiles\r\n        print(f\"{root}\\t{nFiles}\")\r\n    return out\r\n\r\ndef output(dat, file_name):\r\n    with open(file_name,'w') as fh:\r\n        for key in sorted(dat.keys()):\r\n            fh.write(f\"{key}\\t{dat[key]}\\n\")\r\n\r\ndef main():\r\n    args = ArgumentParser()\r\n    \r\n    data = DirectoryParser(args.dir,args.maxdepth)\r\n    output(data,args.out)\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n    \r\n","repo_name":"zhunshi/ztool","sub_path":"bin/FileNumberStat.py","file_name":"FileNumberStat.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22837100078","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport unittest\nfrom cosyai.dataset import RandSet\nfrom cosyai.util import Config\nfrom cosyai.model import Model\n\n\nclass TestModel(unittest.TestCase):\n    def setUp(self):\n        self.conf_dict = {\n            \"task\": \"regression\",\n            \"name\": \"DemoTrain\",\n            \"backend\": \"paddle\",\n            \"dataset\": {\n                \"data_type\": \"random\",\n                \"input_dim\": 100,\n                \"output_dim\": 1,\n                \"dataset_size\": 1000\n            },\n            \"model\": {\n                \"net\": \"DNN\",\n                \"input_size\": 100,\n                \"output_size\": 1\n            }\n        }\n        conf = Config(self.conf_dict)\n\n        ds = RandSet(conf.dataset)\n        self.X, self.y = ds.train_set.data\n        self.tX, self.ty = ds.test_set.data\n\n    def test_model(self):\n        conf = Config(self.conf_dict)\n        model = Model(conf.model)\n        y = model(self.X)\n        self.assertListEqual(y.shape, [700, 1])\n","repo_name":"ChipSum-Group/CosyAI","sub_path":"tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26460325638","text":"#!/usr/bin/env python\nimport subprocess\nimport os\nimport io\n\n\nn = 5\nresults = []\nchecked_results = []\nmaple = float(\"4.545447652E6\")\nmin_time = float('inf')\n\nfor i in range(0, n, 1):\n    result = []\n    print(i)\n    proc = subprocess.Popen([os.getcwd() + \"/a.out\"], stdout=subprocess.PIPE)\n    for line in io.TextIOWrapper(proc.stdout, encoding=\"utf-8\"):\n        result.append(line)\n    res = result[0].strip()[8:]\n    time = result[2].strip()[6:]\n    results.append((res, time))\n\nfor el in results:\n    if (abs(maple - float(el[0])) / maple) < 0.01:\n        checked_results.append(el)\n        time = float(el[1])\n        if time < min_time:\n            min_time = time\n\nprint(\"checked results = \", checked_results)\nprint(\"min_time = \", min_time)\n","repo_name":"KushnirDmytro/Lab2Check","sub_path":"Labs/Petruk___Romanjuk/integral_concurrency/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14249347543","text":"# from random import randint\n\n# number = 0\n# while number != 5:\n#     number = randint(1,5)\n#     print(number)\n\n# for char in \"Hello\":\n#     print(char)\n\n# for i in range(1, 8):\n#     print(i)\n\n
\"Coffee\":\n# print(\"To much\")\n\n# for i in range(8, 0, -2):\n# print(i)\n# x = 0\n# # for i in range(11, 20, 2):\n# # x = x + i\n# # print(i)\n# # print(x)\n\n# for i in range(11,21,2):\n# x += i\n# print(x)\n\n\n#CLEAN UP YOUR ROOM!!!\n\n# times = int(input(\"How many times do I have to tell you? \"))\n\n# this_many = 0\n\n# while this_many < times:\n# print(\"Clean your room!\")\n# this_many += 1\n\n# Lucky number\n\n# for num in range(1,21):\n# if num % 2 == 0:\n# print( str(num) + \" Is Even\")\n# if num % 2 != 0:\n# print( str(num) + \" Is Odd\")\n# if num % 13 == 0:\n# print( str(num) + \" Unlucky number\" )\n# if num == 4:\n# print( str(num) + \" Unlucky number\")\n\n# for i in range(0,9):\n# print (\"\\U0001f600\" * i)\n# user_input = input(\"Hey how's it going? \")\nuser_input = None\nwhile user_input != \"Stop copying me!!!\":\n user_input = input(\"Hey how's it going? \")\n if user_input == \"Stop copying me!!!\":\n break\n else:\n print(user_input)\n\n","repo_name":"Ryoung27/Python-3-Colt-Steele-Course","sub_path":"done/loop/loops.py","file_name":"loops.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27579338648","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 31 13:38:33 2020\n\n@author: guill\n\"\"\"\n\nimport NAUTIAETL as nt\nimport NAUTIAFIXCSV as nfv\n\ndef serv(HouseHold,Entities):\n #S_HealthCenterService #información de plano\n \n S_MedicineAccess = nfv.dfFix(HouseHold,\"health_001:Healthcare\",\"Economy:FamilyHead\")\n S_MedicineAccess = S_MedicineAccess.isin([\"yes\"])\n nt.mkCSV(S_MedicineAccess,\"S_MedicineAccess.csv\")\n \n S_DataAccess = nfv.dfFix(Entities,\"Data_Access\",\"Antenna\")\n S_DataAccess = nt.separateValues(S_DataAccess)\n nt.mkCSV(S_DataAccess,\"S_DataAccess.csv\")\n \n S_RepeaterAntena = nfv.dfFix(Entities,\"Antenna\",\"meta:instanceID\")\n nt.mkCSV(S_RepeaterAntena,\"S_RepeaterAntena.csv\")\n \n S_NoEducationCause = nfv.dfFix(Entities,\"Education_Issues\",\"Data_Access\")\n S_NoEducationCause = nt.separateValues(S_NoEducationCause)\n nt.mkCSV(S_NoEducationCause,\"S_NoEducationCause.csv\")","repo_name":"guillermosanchezg/NAUTIA","sub_path":"DesarrolloPy/serv.py","file_name":"serv.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8048937890","text":"# -*- coding: utf-8 -*-\nfrom openerp.osv import osv,fields\nfrom openerp import SUPERUSER_ID\nfrom openerp.tools.translate import _\n\nclass res_partner(osv.Model):\n _inherit = \"res.partner\"\n\n _columns = {\n 'site_customer': fields.boolean('Сайт')\n }\n def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):\n if context and context.has_key('hacked_global_search'):\n #args = [['customer', '=', 1], '|', '|', ['name', 'ilike', u'\\u0444'], ['parent_id', 'ilike', u'\\u0444'], ['ref', '=', u'\\u0444']]\n\n check = {'name':False, 'parent_id':False, 'ref':False}\n for a in args:\n if type(a) == list and len(a)==3 and (a[0] in check):\n try:\n check[a[0]] = len(a[2]) >= 3\n except:\n pass\n do_global_search = True\n for c in check:\n do_global_search = do_global_search and check[c]\n if do_global_search:\n user = SUPERUSER_ID\n else:\n del context['hacked_global_search']\n res = super(res_partner, self).search(cr, user, args, offset, limit, order, context, count)\n return res\n\n def read(self, cr, user, ids, fields=None, context=None, 
def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):\n        if context and context.has_key('hacked_global_search'):\n            user = SUPERUSER_ID\n            #del context['hacked_global_search']\n        res = super(res_partner, self).read(cr, user, ids, fields, context, load)\n        return res\n\nclass control_site_customer(osv.TransientModel):\n    _name = 'security_sarrz.control_site_customer'\n    _columns = {\n        #'value': fields.selection([('mark', 'Mark'), ('unmark', 'Unmark'),], 'New value of the Site field'),\n    }\n    def _do(self, cr, uid, ids, value, context=None):\n        active_ids = context.get('active_ids') or []\n        cr.execute(\"\"\"UPDATE res_partner\n                      set site_customer=%s\n                      WHERE id IN %s\n                      \"\"\", (value,tuple(active_ids)))\n\n    def mark(self, cr, uid, ids, context=None):\n        self._do(cr, uid, ids, True, context)\n    def unmark(self, cr, uid, ids, context=None):\n        self._do(cr, uid, ids, False, context)\n","repo_name":"ShaheenHossain/google_drive_backup_13","sub_path":"security_sarrz/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18520061561","text":"import time\nfrom datetime import datetime\nfrom functools import wraps\n\nfrom bitarray import bitarray\nfrom pybloom_live import BloomFilter\nimport warnings\n\nimport pandas as pd\nimport ast\nimport dask.dataframe as dd\nfrom dask import delayed\n\nimport psutil\nfrom dateutil.relativedelta import relativedelta\n\n\nwarnings.filterwarnings(\"ignore\", category=Warning)\n\n# Function to calculate the optimal number of partitions\ndef calculate_partitions():\n    # Get the available system resources\n    cpu_cores = psutil.cpu_count(logical=False)\n    memory = psutil.virtual_memory().total\n\n    # Get the size of your dataset (replace with your actual dataset size)\n    dataset_size = 1000000  # Example dataset size\n\n    # Calculate the desired partition size based on system resources\n    partition_size = 100000  # Example desired partition size\n\n    # Calculate the optimal number of partitions\n    num_partitions = min(cpu_cores, max(1, dataset_size // partition_size))\n\n    return num_partitions\n\n\ndef udf_reformat_to_iso(string: str):\n    splits = string.replace(' ', '').split(',')\n\n    if len(splits) < 6:\n        splits += ['00' for _ in range(0, 6 - len(splits))]\n\n    year, month, day, hour, minute, second = splits[0], splits[1], splits[2], splits[3], splits[4], splits[5]\n\n    if len(month) != 2:\n        month = '0' + month\n\n    if len(day) != 2:\n        day = '0' + day\n\n    if len(hour) != 2:\n        hour = '0' + hour\n\n    if len(minute) != 2:\n        minute = '0' + minute\n\n    if len(second) != 2:\n        second = '0' + second\n\n    return f\"{year}-{month}-{day}T{hour}:{minute}:{second}\"\n\n\ndef redis_to_pandas(data) -> pd.DataFrame:\n    df = pd.DataFrame().from_dict(data, orient=\"index\", columns=['raw_data'])\n    df.sort_index(inplace=True)\n    index_df = df.index\n\n    # Convert the string to a dictionary while preserving the datetime object\n    df = pd.DataFrame(df[\"raw_data\"].apply(\n        lambda x: ast.literal_eval(x.replace('datetime.datetime', '').replace(\"(\", '\"').replace(\")\", '\"'))).tolist())\n    df.index = index_df\n\n    df[\"timestamp\"] = df[\"timestamp\"].apply(udf_reformat_to_iso)\n    df['timestamp'] = pd.to_datetime(df['timestamp'])\n    df.reset_index(drop=False, inplace=True, names='counter')\n    return df\n\n\n
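# NOTE: the redefinition of redis_to_pandas just below shadows the DataFrame-\n# from-dict version above, so only the list-comprehension variant is live at\n# import time; the first version reads like an earlier iteration kept around.\n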
def redis_to_pandas(data) -> pd.DataFrame:\n    index_df = list(data.keys())\n    values = [ast.literal_eval(x.replace('datetime.datetime', '').replace(\"(\", '\"').replace(\")\", '\"'))\n              for x in data.values()]\n\n    df = pd.DataFrame(values, index=index_df)\n    df[\"timestamp\"] = df[\"timestamp\"].apply(udf_reformat_to_iso)\n    df['timestamp'] = pd.to_datetime(df['timestamp'])\n    df.reset_index(drop=False, inplace=True, names='counter')\n\n    return df\n\n\ndef sql_to_pandas(data) -> pd.DataFrame:\n    df = pd.DataFrame(data, columns=['counter', 'user_id', 'timestamp'])\n    df['user_id'] = df['user_id'].astype('int64')\n    df['timestamp'] = pd.to_datetime(df['timestamp'])\n    return df\n\n\ndef timeit(func):\n    @wraps(func)\n    def timeit_wrapper(*args, **kwargs):\n        start_time = time.perf_counter()\n        result = func(*args, **kwargs)\n        end_time = time.perf_counter()\n        total_time = end_time - start_time\n        print(f'Function {func.__name__} Took {total_time:.4f} seconds')\n\n        return result\n\n    return timeit_wrapper\n\n\nclass CustomJoinPipelines:\n\n    def __init__(self):\n        self.capacity = 0\n\n    @delayed\n    def perform_join(self, block1, block2, join_key):\n        # Perform the join operation\n        join_result = dd.merge(block1, block2, on=join_key, how='inner')\n        return join_result\n\n    @timeit\n    def normal_join(self, df1, df2, join_key):\n        # Assuming df1 and df2 are Pandas DataFrames\n        timestamp_constraint = datetime.now() - relativedelta(years=2)\n\n        # Apply the timestamp constraint and select columns\n        filtered_df1 = df1[df1['timestamp'] >= timestamp_constraint][['user_id', 'timestamp']]\n        filtered_df2 = df2[df2['timestamp'] >= timestamp_constraint][['user_id', 'timestamp']]\n\n        # Perform the join operation\n        final_result = filtered_df1.merge(filtered_df2, on='user_id', how='inner')\n\n        return final_result\n\n    @timeit\n    def pipelined_hash_join(self, df1, df2, join_key, npartitions):\n        print(f\"The number of partitions calculated {npartitions}\")\n\n        df1 = dd.from_pandas(df1, npartitions=npartitions)\n        df2 = dd.from_pandas(df2, npartitions=npartitions)\n\n        df1['hash_value'] = df1['user_id'].apply(lambda x: x % npartitions)\n        df2['hash_value'] = df2['user_id'].apply(lambda x: x % npartitions)\n\n        # Set \"hash_value\" column as the index\n        df1 = df1.set_index('hash_value')\n        df2 = df2.set_index('hash_value')\n\n        # Repartition the DataFrame based on the index\n        blocks_df1 = df1.repartition(npartitions=npartitions)\n        blocks_df2 = df2.repartition(npartitions=npartitions)\n\n        timestamp_constraint = datetime.now() - relativedelta(years=2)\n\n        # Concatenate the join results\n        final_result = [\n            self.perform_join(\n                block1[block1['timestamp'] >= timestamp_constraint][['user_id', 'timestamp']],\n                block2[block2['timestamp'] >= timestamp_constraint][['user_id', 'timestamp']],\n                join_key\n            )\n            for block1, block2 in zip(blocks_df1.partitions, blocks_df2.partitions)\n        ]\n        # Compute and display the final result\n        final_result = dd.compute(*final_result, num_workers=4)\n\n        return final_result\n\n\n    @timeit\n    def semi_join(self, df1, df2, join_key, npartitions):\n\n        df1 = dd.from_pandas(df1, npartitions=npartitions)\n        df2 = dd.from_pandas(df2, npartitions=npartitions)\n\n        timestamp_constraint = datetime.now() - relativedelta(years=2)\n\n        df1 = df1.reset_index(drop=True)\n        df1 = df1.drop(columns='counter')\n\n        df2 = df2.reset_index(drop=True)\n        df2 = df2.drop(columns='counter')\n\n        # Apply the timestamp constraint and select columns\n        df1 = df1[df1['timestamp'] >= timestamp_constraint][['user_id', 'timestamp']]\n        df2 = df2[df2['timestamp'] >= timestamp_constraint][['user_id', 'timestamp']]\n\n        df1 = df1.set_index(join_key).repartition(npartitions=npartitions)\n        
df2 = df2.set_index(join_key).repartition(npartitions=npartitions)\n\n        semi_join_result = dd.merge(df1, df2, left_index=True, right_index=True, how='inner')\n        semi_join_result = semi_join_result.dropna().compute()\n\n        semi_join_result.reset_index(drop=False, inplace=True)\n\n        return semi_join_result\n\n\n    def create_bloom_filter(self, partition):\n        bloom_filter = BloomFilter(capacity=self.capacity, error_rate=0.1)\n        partition.loc[:, 'user_id'] = partition['user_id'].astype(\"string\")\n        partition['user_id'].apply(bloom_filter.add)\n        return bloom_filter\n\n    def merge_bloom_filters(self, bloom_filters):\n        bit_arrays = [pd.Series(bloomf.bitarray) for bloomf in bloom_filters.compute()]\n\n        # Perform union using a loop\n        union_bit_array = bit_arrays[0]\n        for bit_array in bit_arrays[1:]:\n            union_bit_array |= bit_array\n\n        final_bloom_filter = BloomFilter(capacity=self.capacity, error_rate=0.1)\n        final_bloom_filter.bitarray = bitarray(union_bit_array.astype(bool).tolist())\n\n        return final_bloom_filter\n\n    @timeit\n    def intersection_bloom_filter_join(self, df1, df2, join_key, npartitions):\n        start = time.time()\n\n        df1[join_key] = df1[join_key].astype('string')\n        df2[join_key] = df2[join_key].astype('string')\n\n        df1 = dd.from_pandas(df1, npartitions=npartitions)\n        df2 = dd.from_pandas(df2, npartitions=npartitions)\n\n        self.capacity = max([df1['user_id'].compute().unique().shape[0], df2['user_id'].compute().unique().shape[0]])\n\n        bloom_filter1 = df1.map_partitions(self.create_bloom_filter, meta=pd.DataFrame(columns=df1.columns))\n        bloom_filter2 = df2.map_partitions(self.create_bloom_filter, meta=pd.DataFrame(columns=df2.columns))\n\n        merged_bloom_filters = self.merge_bloom_filters(bloom_filter1).intersection(\n            self.merge_bloom_filters(bloom_filter2))\n\n        print(f\"total time to build the filter {time.time() - start}\")\n\n        timestamp_constraint = datetime.now() - relativedelta(years=2)\n\n        df1 = \\\n            df1[(df1[join_key].apply(lambda x: x in merged_bloom_filters)) & (df1['timestamp'] >= timestamp_constraint)][\n                [join_key, 'timestamp']]\n        df2 = \\\n            df2[(df2[join_key].apply(lambda x: x in merged_bloom_filters)) & (df2['timestamp'] >= timestamp_constraint)][\n                [join_key, 'timestamp']]\n\n        df1 = df1.compute()\n        df2 = df2.compute()\n\n        final_result = pd.merge(df1, df2, on=join_key, how='inner')\n\n        return final_result\n","repo_name":"kostasrazgkelis/DDPassigment","sub_path":"test_directory/test_join_methods.py","file_name":"test_join_methods.py","file_ext":"py","file_size_in_byte":8888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4028660432","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"Module for providing random data for web-app's entities.\"\"\"\n__author__ = 'AleksNeStu'\n__copyright__ = \"The GNU General Public License v3.0\"\n\nimport functools\nimport random\nimport string\n\nfrom constants import data, repeat\n\n\nclass RandomData(object):\n    \"\"\"Class that provides random data to generate web-app objects.\"\"\"\n\n    @staticmethod\n    def common_part(prefix, maxlen=repeat.RANDOM_DATA):\n        \"\"\"Random generator of the common parts.\"\"\"\n        symbols = string.ascii_letters + string.digits + string.punctuation\n        return prefix + \"\".join(\n            [random.choice(symbols) for _ in range(random.randint(1, maxlen))])\n\n    
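# Shape of typical outputs (values are random): common_part(\"usr_\") might give\n    # \"usr_a$7Q\", email_part() something like \"dosFS@gmail.com\", and phone()\n    # \"+1-844-751-8951\"; lengths vary with the repeat.* constants.\n    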
@staticmethod\n    def email_part(domain=data.CONTACT_EMAIL_DOMAIN,\n                   maxlen=repeat.RANDOM_EMAIL):\n        \"\"\"Random generator of the email parts.\n        Example:\n            dosFS@gmail.com.\n        \"\"\"\n        symbols = string.ascii_letters + string.digits\n        return \"\".join([random.choice(symbols) for _ in\n                        range(random.randint(1, maxlen))]) + domain\n\n    @staticmethod\n    def phone(code=data.PHONE_CODE):\n        \"\"\"Random generator of the phone number (default for USA: code=1)\n        Example:\n            +1-844-751-8951\n        \"\"\"\n        d = functools.partial(random.randint, 0, 9)\n        phone = lambda: \"+{}-{}{}{}-{}{}{}-{}{}{}{}\".format(\n            code, d(), d(), d(), d(), d(), d(), d(), d(), d(), d())\n        return phone()","repo_name":"AleksNeStu/Testing_Automation_Framework__web-app","sub_path":"framework/generator/random_data.py","file_name":"random_data.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"18761424420","text":"#-*- coding:utf-8 -*-\nimport logging\nfrom logging.handlers import RotatingFileHandler\n# define a RotatingFileHandler: keep at most 5 backup log files, each capped at maxBytes (0.1 MB here)\nRthandler = RotatingFileHandler('myapp.log', maxBytes=0.1*1024*1024,backupCount=5)\nRthandler.setLevel(logging.INFO)\nformatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\nRthandler.setFormatter(formatter)\nlogging.getLogger('').addHandler(Rthandler)\n\nlogging.debug('debug log message')\nlogging.info('info log message')\nlogging.warning('warning log message')\nlogging.error('error log message') \nlogging.critical('critical log message') \n","repo_name":"iweimingliang/script","sub_path":"python/logging/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42322507730","text":"import numpy as np\n\nclass TimelineMap:\n    data_points = []\n    events = {} # Dictionary like: {\"-5145503496319726845\": 1, \"-586275386119677184\": 2, ...}\n    events_count = {}\n    interval = [float(\"inf\"), float(\"-inf\")]\n\n    def __init__(self, slice_num=1000):\n        self.slice_num = slice_num\n        self.id_factory = 1\n\n    '''\n    event_id: identical events (by our definition) must share the same event_id\n    '''\n    def insert(self, event_id, ts):\n        if not event_id in self.events:\n            self.events[event_id] = self.id_factory\n            self.events_count[self.id_factory] = 0\n            self.id_factory += 1\n\n        event_id = self.events[event_id]\n        datapoint = [event_id, ts]\n        self.data_points.append(datapoint)\n        self.events_count[event_id] += 1\n\n        if ts > self.interval[1]:\n            self.interval[1] = ts\n        if ts < self.interval[0]:\n            self.interval[0] = ts\n\n    def get_event_num(self):\n        return len(self.events)\n\n    def get_event_count(self, event_id):\n        if event_id in self.events:\n            event_id = self.events[event_id]\n        else:\n            return -1\n\n        count = self.events_count[event_id]\n        return count\n\n    '''\n    Extract the events named by event_id from the timeline map and build a\n    timeline of length slice_num; the number of occurrences is capped at\n    max_dp_num.\n    :return time_map: the generated timeline map\n            precision: the time resolution of the timeline\n            max_dp_num: the number of events the timeline actually contains\n    '''\n    def resolve(self, slice_num=0, max_dp_num=0, event_id=[]):\n        if slice_num == 0:\n            slice_num = self.slice_num\n        if max_dp_num == 0:\n            max_dp_num = int(self.slice_num / 2)\n\n        data_points = []\n        interval = [float(\"inf\"), float(\"-inf\")]\n\n        # if the caller names specific event_ids, keep only those events in the map\n        if len(event_id) != 0:\n            new_id = []\n            for val in event_id:\n                new_id.append(self.events[val])\n            event_id = new_id\n            for point in self.data_points:\n                if point[0] in event_id:\n                    data_points.append(point)\n                    if interval[0] > point[1]:\n                        interval[0] = point[1]\n                    if interval[1] < point[1]:\n                        interval[1] = point[1]\n        else:\n            data_points = self.data_points.copy()\n            interval = self.interval.copy()\n\n        # sort the selected data points\n        
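# (Sorting first is what lets the truncation below keep the max_dp_num\n        # earliest points: everything past index max_dp_num - 1 is discarded and\n        # the interval end is pulled in accordingly.)\n        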
data_points.sort(key=lambda x: x[1]) # Sort by timestamp\n\n        # Truncate\n        if len(data_points) > max_dp_num:\n            del data_points[max_dp_num:]\n            interval[1] = data_points[max_dp_num-1][1]\n        else:\n            max_dp_num = len(data_points)\n\n        # Calculate precision\n        precision = (interval[1] - interval[0]) / (slice_num - 1)\n        if precision == 0.0:\n            precision = 0.001\n        element_num = slice_num\n        offset = interval[0]\n\n        # Construct output timeMap\n        time_map = np.zeros(element_num)\n        for point in data_points:\n            ts = point[1]\n            index = int((ts - offset) / precision)\n            time_map[index] += 1.0\n\n        return time_map, precision, max_dp_num\n","repo_name":"YanbingChen/GraduateProject","sub_path":"src/timeline_mapping.py","file_name":"timeline_mapping.py","file_ext":"py","file_size_in_byte":3342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11259385810","text":"#! -*- coding:utf-8 -*-\nfrom django.dispatch import Signal\nfrom django.db.models.signals import post_save\nfrom django.core.signals import request_finished\nfrom groups.models import Group, Topic, Reply, Applicant\nfrom sys_notification.models import Notification\n\n# signal for group operations\ngroup_notify = Signal(providing_args=[\"instance\", \"args\", \"kwargs\"])\n\n# signal for topic operations\ntopic_notify = Signal(providing_args=[\"instance\", \"args\", \"kwargs\"])\n\n# signal for friend operations\nfriend_notify = Signal(providing_args=[\"instance\", \"args\", \"kwargs\"])\n\n# mark notifications as clicked\nset_notity_clicked = Signal(providing_args=[\"request\", \"no_type\", \"args\", \"kwargs\"])\n\n\ndef group_action(sender, instance, *args, **kwargs):\n    obj = instance\n    if obj.status != 'processing':\n        notify = Notification(no_type='group', group_action=obj.status, to_user=obj.applicant, group=obj.group,\n                              applicant=obj)\n        notify.save()\n\ngroup_notify.connect(group_action, dispatch_uid='create_group_notify')\n\n\ndef topic_action(sender, instance, *args, **kwargs):\n    obj = instance\n    if obj.reply: # is this a reply to a reply?\n        notify = Notification(no_type='topic', topic_action='re_reply', to_user=obj.reply.creator, reply=obj.reply,\n                              topic=obj.topic, )\n        notify.save()\n    else:\n        notify = Notification(no_type='topic', topic_action='re_topic', to_user=obj.topic.creator, topic=obj.topic)\n        notify.save()\n\ntopic_notify.connect(topic_action, dispatch_uid='create_topic_notify')\n\n\ndef friend_action(sender, instance, *args, **kwargs):\n    obj = instance\n    # follow action\n    notify = Notification(no_type='friend', friend_action='follow', to_user=obj.to_user, follower=obj.from_user)\n    notify.save()\nfriend_notify.connect(friend_action, dispatch_uid='follow_notify')\n\n\ndef set_notification_clicked(sender, request, no_type, **kwargs):\n    \"\"\"\n    After the corresponding page has been visited, mark the notifications as clicked.\n    @fanlintao\n    \"\"\"\n    notify_qs = Notification.objects.filter(to_user=request.user, no_type=no_type, click='unclick')\n    notify_qs.update(click='clicked')\n\nset_notity_clicked.connect(set_notification_clicked, dispatch_uid='set_notification_clicked')\n\n\n\n","repo_name":"mutoulbj/BOHOO","sub_path":"sys_notification/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"31"} +{"seq_id":"3411079387","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass BigramLanguageModel(nn.Module):\n    def __init__(self, vocab_size):\n        super().__init__()\n        self.embedding = nn.Embedding(vocab_size, vocab_size)\n\n    
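# The (vocab_size x vocab_size) embedding doubles as a learned bigram table:\n    # row i holds the unnormalized next-token logits given current token i.\n    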
def forward(self, x, targets=None):\n        # logits: b*t => b*t*c\n        logits = self.embedding(x) \n\n        if targets is not None:\n            # cross-entropy for predicted distribution\n            # (in this case, embedding layer actually represents trainable statistics\n            # for next character prediction, i.e. each row is log p(x_i+1 | x_i))\n            # and targets is essentially array of next tokens per each\n            b, t, c = logits.shape\n            logits = logits.view(b*t, c)\n            targets = torch.flatten(targets)\n            loss = F.cross_entropy(logits, targets)\n            return logits, loss\n        else:\n            return logits, None\n    \n    def generate(self, idx, max_new_tokens):\n        for _ in range(max_new_tokens):\n            logits, _ = self(idx)\n            # last logit in each batch\n            logits = logits[:, -1, :]\n            # turn logits into probability\n            probs = F.softmax(logits, dim=1)\n            # sample next token idx per batch\n            idx_next = torch.multinomial(probs, num_samples=1)\n            # concatenate predicted idx to sequence, repeat\n            idx = torch.cat((idx, idx_next), dim=1) \n        return idx\n","repo_name":"shredder67/gpt-playground","sub_path":"src/bigram_model.py","file_name":"bigram_model.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28145258082","text":"import secrets\nimport string\nimport sys\n\ndef main(iters=20):\n    try:\n        rondas = int(iters)\n        final = string.ascii_letters + string.punctuation.replace(\"\\'\", \"\").replace('\"', '')\n        key = \"\"\n        for i in range(rondas):\n            key += secrets.choice(final)\n        print(key)\n        return key\n    except Exception as e:\n        raise ValueError(\"Second parameter must be an integer.\")\n\nif __name__ == '__main__':\n    args = sys.argv[1:]\n    if len(args) == 1:\n        main(args[0])\n    elif len(args) == 0:\n        main()\n    else:\n        print(\"\\nGenerate random secure key.\")\n        print(f'\\n. {sys.argv[0]} [string] [integer]\\n')\n        print(\"Options:\\n1) String\\n2) Iterations - Integer\\n\")\n","repo_name":"kurotom/littleKeysGenerator","sub_path":"generateRandomKeys.py","file_name":"generateRandomKeys.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"312086402","text":"li=''\r\nd=[\"'\",'.',\",\",';',':']\r\nfor i in range(int(input())):\r\n    l=input()\r\n    li=li+' '+l\r\nk=li.lower()\r\n\r\nk=sorted(set(k.split()))\r\nprint(len(k))\r\n#print(k)\r\ns=' '.join(k)\r\nfor i in d:\r\n    s=s.replace(i,\" \")\r\ns=s.split()\r\nfor i in s:\r\n    print(i)\r\n\r\n\r\n\r\n","repo_name":"rakesh1309/Python_program","sub_path":"word_list.py","file_name":"word_list.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70260862490","text":"class Solution:\n    def findNumbers(self, nums: List[int]) -> int:\n        num_len = [len(str(i)) for i in nums]\n        count = 0\n        \n        for i in num_len:\n            if i % 2 == 0:\n                count += 1\n        \n        return count\n\n        # equivalent one-liner:\n        # return sum([len(str(i)) % 2 == 0 for i in nums])","repo_name":"ZacharyVillarreal/Leetcode","sub_path":"Python/Easy/find-numbers-with-even-number-of-digits.py","file_name":"find-numbers-with-even-number-of-digits.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74800737689","text":"from nextcord.ext import commands\nfrom nextcord import User\nimport re\nfrom cogs.utils import monetaryConversions\nfrom utils.customCogChecks import is_public, has_wallet\nfrom cogs.utils.systemMessaages import CustomMessages\n\ncustom_messages = 
CustomMessages()\nCONST_STELLAR_EMOJI = '<:stelaremoji:684676687425961994>'\nCONST_TX_ERROR_TITLE = \":exclamation: __Transaction Error__ :exclamation: \"\n\n\ndef process_message(message):\n    \"\"\"\n    Filter message so it is not too long for transaction report\n    \"\"\"\n    if message:\n        if len(message) > 100:\n            message = message[:98] + '...'\n    else:\n        message = 'None'\n\n    return message\n\n\nclass TransactionCommands(commands.Cog):\n    \"\"\"\n    Class handling off-chain discord transactions\n    \"\"\"\n\n    def __init__(self, bot):\n        self.bot = bot\n        self.backoffice = bot.backoffice\n\n    def build_stats(self, transaction_data: dict, tx_type: str):\n        \"\"\"\n        Process data according to the type of transaction\n        \"\"\"\n        if tx_type == \"public\":\n            processed = {\"globalBot\": {\"totalTx\": 1,\n                                       'totalMoved': transaction_data[\"amount\"],\n                                       \"totalPublicCount\": 1,\n                                       \"totalPublicMoved\": transaction_data[\"amount\"]},\n                         \"senderStats\": {f\"{transaction_data['ticker']}.publicTxSendCount\": 1,\n                                         f\"{transaction_data['ticker']}.publicSent\": transaction_data[\"amount\"],\n                                         },\n                         \"recipientStats\": {f\"{transaction_data['ticker']}.publicTxReceivedCount\": 1,\n                                            f\"{transaction_data['ticker']}.publicReceived\": transaction_data[\n                                                \"amount\"],\n                                            },\n                         \"guildStats\": {\n                             f'{transaction_data[\"ticker\"]}.publicCount': 1,\n                             f\"{transaction_data['ticker']}.txCount\": 1,\n                             f\"{transaction_data['ticker']}.volume\": transaction_data[\"amount\"]\n                         }\n                         }\n\n        elif tx_type == 'private':\n            processed = {\"globalBot\": {\"totalTx\": 1,\n                                       'totalMoved': transaction_data[\"amount\"],\n                                       \"totalPrivateCount\": 1,\n                                       \"totalPrivateMoved\": transaction_data[\"amount\"]},\n                         \"senderStats\": {f\"{transaction_data['ticker']}.privateTxSendCount\": 1,\n                                         f\"{transaction_data['ticker']}.privateSent\": transaction_data[\"amount\"],\n                                         },\n                         \"recipientStats\": {f\"{transaction_data['ticker']}.privateTxReceivedCount\": 1,\n                                            f\"{transaction_data['ticker']}.privateReceived\": transaction_data[\n                                                \"amount\"],\n                                            },\n                         \"guildStats\": {\n                             f'{transaction_data[\"ticker\"]}.privateCount': 1,\n                             f\"{transaction_data['ticker']}.txCount\": 1,\n                             f\"{transaction_data['ticker']}.volume\": transaction_data[\"amount\"]\n                         }\n                         }\n\n        return processed\n\n    async def update_stats(self, ctx, transaction_data: dict, tx_type: str):\n        \"\"\"\n        Update all required stats when transaction is executed\n        \"\"\"\n        processed_stats = self.build_stats(transaction_data=transaction_data, tx_type=tx_type)\n\n        # Update the global off-chain stats\n        await self.backoffice.stats_manager.update_cl_off_chain_stats(ticker=transaction_data[\"ticker\"],\n                                                                      ticker_stats=processed_stats[\"globalBot\"])\n\n        # Updates sender and recipient public transaction stats\n        await self.backoffice.stats_manager.update_usr_tx_stats(user_id=ctx.message.author.id,\n                                                                tx_stats_data=processed_stats['senderStats'])\n        await self.backoffice.stats_manager.update_usr_tx_stats(user_id=transaction_data[\"recipientId\"],\n                                                                tx_stats_data=processed_stats[\"recipientStats\"])\n\n        await self.backoffice.stats_manager.update_guild_stats(guild_id=ctx.message.guild.id,\n                                                               guild_stats_data=processed_stats[\"guildStats\"])\n\n    async def stream_transaction(self, ctx, recipient, tx_details: dict, message: str, tx_type: str):\n        \"\"\"\n        Send reports out to all destinations\n        \"\"\"\n        # Process message\n        msg = process_message(message=message)\n        # Send to channel where tx has been executed\n        native_token = None\n        if tx_details['ticker'] == 'xlm':\n            native_token = \"stellar\"\n\n        if native_token:\n            in_dollar = 
monetaryConversions.convert_to_usd(amount=tx_details[\"amount\"], coin_name='stellar')\n tx_report_msg = f\"{ctx.message.author} just sent {recipient.mention} {tx_details['amount']:.7f}\" \\\n f\" {tx_details['emoji']} (${in_dollar['total']:.4f})\"\n explorer_msg = f'💵 {tx_details[\"amount\"]:.7f} {CONST_STELLAR_EMOJI} (${in_dollar[\"total\"]:.4f}) on ' \\\n f'{ctx.message.guild} channel {ctx.message.channel}'\n total_dollar_value = in_dollar['total']\n conversion_rate = in_dollar[\"usd\"]\n else:\n explorer_msg = f'💵 {tx_details[\"amount\"]} {tx_details[\"assetCode\"].upper()} on ' \\\n f'{ctx.message.guild} channel {ctx.message.channel}'\n tx_report_msg = f\"{ctx.message.author} just sent {recipient.mention} {tx_details['amount']:.7f} \" \\\n f\"{tx_details['assetCode'].upper()}\"\n total_dollar_value = 0\n conversion_rate = 0\n\n if tx_type == 'private':\n explorer_msg = \":detective: \"\n\n await custom_messages.transaction_report_to_channel(ctx=ctx, message=tx_report_msg, tx_type=tx_type)\n\n tx_details[\"conversion\"] = total_dollar_value\n tx_details[\"conversionRate\"] = conversion_rate\n\n # report to sender\n\n await custom_messages.transaction_report_to_user(ctx=ctx, user=recipient, transaction_data=tx_details,\n destination=ctx.message.author,\n direction=0, tx_type=tx_type,\n message=msg)\n\n # report to recipient\n await custom_messages.transaction_report_to_user(ctx=ctx, user=ctx.message.author, transaction_data=tx_details,\n destination=recipient,\n direction=1, tx_type=tx_type,\n message=msg)\n\n # Send out explorer\n\n load_channels = [self.bot.get_channel(int(chn)) for chn in\n self.backoffice.guild_profiles.get_all_explorer_applied_channels()]\n\n await custom_messages.explorer_messages(applied_channels=load_channels, message=explorer_msg)\n\n async def send_impl(self, ctx, amount: float, ticker: str, recipient: User, *, tx_type: str, message: str = None):\n coin = ticker.lower()\n if amount > 0:\n if not ctx.message.author == recipient and not recipient.bot:\n supported = [sup[\"assetCode\"] for sup in self.bot.backoffice.token_manager.get_registered_tokens() if\n sup[\"assetCode\"] == coin]\n if supported or coin == 'xlm':\n coin_data = self.backoffice.token_manager.get_token_details_by_code(coin)\n atomic_value = (int(amount * (10 ** 7)))\n\n # Get user wallet ticker balance\n wallet_value = self.backoffice.wallet_manager.get_ticker_balance(asset_code=coin,\n user_id=ctx.message.author.id)\n\n if wallet_value:\n if wallet_value >= atomic_value:\n # Check if recipient has wallet or not\n if not self.backoffice.account_mng.check_user_existence(user_id=recipient.id):\n self.backoffice.account_mng.register_user(discord_id=recipient.id,\n discord_username=f'{recipient}')\n\n # Update user count in guild system\n await self.backoffice.stats_manager.update_registered_users(\n guild_id=ctx.message.guild.id)\n\n # Increase bridge\n await self.backoffice.stats_manager.create_bridge(user_id=ctx.message.author.id)\n\n # Send up link\n load_channels = [self.bot.get_channel(int(chn)) for chn in\n self.backoffice.guild_profiles.get_all_explorer_applied_channels()]\n current_total = self.backoffice.account_mng.count_registrations()\n\n explorer_msg = f':new: user registered into ***{self.bot.user} System*** ' \\\n f'(Σ {current_total})'\n for chn in load_channels:\n if chn is not None:\n await chn.send(content=explorer_msg)\n\n await custom_messages.bridge_notification(ctx, recipient=recipient)\n\n # Deduct balance from sender\n if 
self.backoffice.wallet_manager.update_coin_balance(coin=coin,\n                                                                              user_id=ctx.message.author.id,\n                                                                              amount=int(atomic_value),\n                                                                              direction=2):\n                            # Append to recipient\n                            if self.backoffice.wallet_manager.update_coin_balance(coin=coin, user_id=recipient.id,\n                                                                                  amount=int(atomic_value),\n                                                                                  direction=1):\n\n                                normal_value = (atomic_value / (10 ** 7))\n\n                                coin_data[\"amount\"] = normal_value\n                                coin_data[\"ticker\"] = coin\n\n                                # Produce dict for streamer\n                                await self.stream_transaction(ctx=ctx, recipient=recipient, tx_details=coin_data,\n                                                              message=message, tx_type=tx_type)\n\n                                coin_data[\"recipientId\"] = recipient.id\n\n                                await self.update_stats(ctx=ctx, transaction_data=coin_data, tx_type=tx_type)\n\n                            else:\n                                self.backoffice.wallet_manager.update_coin_balance(coin=coin,\n                                                                                   user_id=ctx.message.author.id,\n                                                                                   amount=int(atomic_value),\n                                                                                   direction=1)\n                                message = f'{amount} {coin.upper()} could not be sent to {recipient}, ' \\\n                                          f'please try again later'\n                                await custom_messages.system_message(ctx=ctx, color_code=1, message=message,\n                                                                     destination=1,\n                                                                     sys_msg_title=CONST_TX_ERROR_TITLE)\n\n                        else:\n                            message = 'There has been an error while making the P2P transaction, please try again later'\n                            await custom_messages.system_message(ctx=ctx, color_code=1, message=message,\n                                                                 destination=1,\n                                                                 sys_msg_title=CONST_TX_ERROR_TITLE)\n                    else:\n                        message = f'You have insufficient balance! Your current wallet balance is' \\\n                                  f' {wallet_value / (10 ** 7)} XLM'\n                        await custom_messages.system_message(ctx=ctx, color_code=1, message=message, destination=1,\n                                                             sys_msg_title=CONST_TX_ERROR_TITLE)\n                else:\n                    message = f'Your wallet balance of token ***{coin.upper()}*** is 0.0000000. Before you can ' \\\n                              f'make a payment you need to deposit some first.'\n                    await custom_messages.system_message(ctx=ctx, color_code=1, message=message,\n                                                         destination=1,\n                                                         sys_msg_title=CONST_TX_ERROR_TITLE)\n            else:\n\n                message = f'Coin {coin} has not been integrated yet into {self.bot.user}.'\n                await custom_messages.system_message(ctx=ctx, color_code=1, message=message,\n                                                     destination=1,\n                                                     sys_msg_title=CONST_TX_ERROR_TITLE)\n        else:\n            message = f'You are not allowed to send {amount} xlm to either yourself or the bot.'\n            await custom_messages.system_message(ctx=ctx, color_code=1, message=message,\n                                                 destination=1,\n                                                 sys_msg_title=CONST_TX_ERROR_TITLE)\n    else:\n        message = 'Amount needs to be greater than 0.0000000 XLM'\n        await custom_messages.system_message(ctx=ctx, color_code=1, message=message, destination=1,\n                                             sys_msg_title=CONST_TX_ERROR_TITLE)\n\n    @commands.group()\n    @commands.check(is_public)\n    @commands.check(has_wallet)\n    @commands.guild_only()\n    @commands.cooldown(1, 10, commands.BucketType.user)\n    async def send(self, ctx, recipient: User, amount: float, asset_code: str, *, message: str = None):\n        if not re.search(\"[~!#$%^&*()_+{}:;\\']\", asset_code.lower()):\n            if amount > 0:\n                await self.send_impl(ctx, amount, asset_code.lower(), recipient, tx_type=\"public\", message=message)\n            else:\n                message = f'Amount needs to be greater than 0.0000000 {asset_code.upper()}'\n                await custom_messages.system_message(ctx=ctx, color_code=1, message=message, destination=1,\n                                                     sys_msg_title=CONST_TX_ERROR_TITLE)\n        else:\n            message = 'Special characters are not allowed in token code'\n            await custom_messages.system_message(ctx=ctx, color_code=1, message=message, destination=1,\n                                                 sys_msg_title=CONST_TX_ERROR_TITLE)\n\n    @commands.group()\n    @commands.check(is_public)\n    @commands.check(has_wallet)\n    @commands.guild_only()\n    @commands.cooldown(1, 10, commands.BucketType.user)\n    
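# `private` mirrors `send` but passes tx_type=\"private\", which makes\n    # stream_transaction above suppress the detailed explorer message.\n    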
async def private(self, ctx, recipient: User, amount: float, asset_code: str, *, message: str = None):\n        if not re.search(\"[~!#$%^&*()_+{}:;\\']\", asset_code.lower()):\n            if amount > 0:\n                await self.send_impl(ctx, amount, asset_code.lower(), recipient, tx_type=\"private\", message=message)\n            else:\n                message = f'Amount needs to be greater than 0.0000000 {asset_code.upper()}'\n                await custom_messages.system_message(ctx=ctx, color_code=1, message=message, destination=1,\n                                                     sys_msg_title=CONST_TX_ERROR_TITLE)\n        else:\n            message = 'Special characters are not allowed in token code'\n            await custom_messages.system_message(ctx=ctx, color_code=1, message=message, destination=1,\n                                                 sys_msg_title=CONST_TX_ERROR_TITLE)\n\n    @send.error\n    async def send_error(self, ctx, error):\n        if isinstance(error, commands.CheckFailure):\n            title = '__System Transaction Error__'\n            message = f'In order to execute a P2P transaction you need to be registered in the system, and the ' \\\n                      f'transaction request needs to be made in one of the public text channels ' \\\n                      f'on {ctx.message.guild}'\n            await custom_messages.system_message(ctx=ctx, color_code=1, message=message, destination=1,\n                                                 sys_msg_title=title)\n        elif isinstance(error, commands.BadArgument):\n            title = '__Bad Argument Provided__'\n            message = 'You have provided a wrong argument for either the amount or the recipient'\n            await custom_messages.system_message(ctx=ctx, color_code=1, message=message, destination=1,\n                                                 sys_msg_title=title)\n        elif isinstance(error, AssertionError):\n            title = '__Amount Check Failed__'\n            message = 'You have provided a wrong amount for the tx value.'\n            await custom_messages.system_message(ctx=ctx, color_code=1, message=message, destination=1,\n                                                 sys_msg_title=title)\n        elif isinstance(error, commands.CommandOnCooldown):\n            title = '__Command on cool-down__!'\n            message = f'{error}. Please try again after {error.retry_after}s'\n            
Please try again after {error.retry_after}s'\n            await custom_messages.system_message(ctx=ctx, color_code=1, message=message, destination=1,\n                                                 sys_msg_title=title)\n\n        elif isinstance(error, commands.MissingRequiredArgument):\n            title = f'__Missing Required Argument Error __'\n            message = f'{str(error)}'\n            await custom_messages.system_message(ctx=ctx, color_code=1, message=message, destination=1,\n                                                 sys_msg_title=title)\n\n\ndef setup(bot):\n    bot.add_cog(TransactionCommands(bot))\n","repo_name":"Crypto-Link-Payments/crypto-link","sub_path":"cogs/transactions.py","file_name":"transactions.py","file_ext":"py","file_size_in_byte":19256,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"22042301831","text":"import requests\nimport numpy as np\nfrom sklearn import linear_model\n\n## get data and trans to np\nurl = 'http://127.0.0.1:5000'\njson_data = requests.get(url).json()\ndata = np.array( [[ float(x), json_data[x]] for x in json_data ])\nprint(data)\n\n## prepare data\nfor i in range(1, len(data)):\n    data[i, 0] = np.sum(data[i, 0] + data[i - 1, 0])\nprint(data)\n\n## fit data\nclf = linear_model.LinearRegression()\nclf.fit(data[:, 0].reshape(-1, 1), data[:, 1])\n\n## get coef\nprint(clf.coef_)\n","repo_name":"kylechenoO/dblr","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23523741326","text":"import os\nimport glob\nimport math\nimport numpy as np\n\nimport torch\nimport torchvision.transforms.functional as VF\n\nfrom torch.utils.data import Dataset, DataLoader\n\nfrom PIL import Image\nfrom sklearn.model_selection import train_test_split\n\nmax_scale = 1\nimg_size = (3, 188, 250)\nlabel_size = 2\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nclass CalibData(Dataset):\n    def __init__(self, img_paths, labels, transform=None):\n        self.img_paths = img_paths\n        self.labels = labels\n        self.transform = transform\n\n    def __len__(self):\n        return len(self.img_paths)\n\n    def __getitem__(self, i):\n        img = VF.resize(VF.to_tensor(Image.open(self.img_paths[i])), (img_size[1], img_size[2]))\n        label = torch.from_numpy(self.labels[i] * max_scale).float()\n        \n        if self.transform is not None:\n            img = self.transform(img)\n        return img, label\n    \nclass DummyData(Dataset):\n    def __init__(self, img_size, labels):\n        self.mats = torch.zeros((labels.shape[0], *img_size))\n        self.labels = labels\n\n    def __len__(self):\n        return self.mats.shape[0]\n\n    def __getitem__(self, i):\n        mat = self.mats[i]\n        label = torch.from_numpy(self.labels[i] * max_scale).float()\n        return mat, label\n    \ndef view_angle_images(data, start, end):\n    show([data[i][0] for i in range(start, end)])\n    print([data[i][1] for i in range(start, end)])\n    \ndef load_img_path_labels(input_dir):\n    labels = []\n    img_paths = []\n    for file in sorted([f for f in os.listdir(input_dir) if f.endswith('txt')]):\n        labels.append(np.loadtxt(f'{input_dir}/{file}'))\n        for l in range(1, len(labels[-1])+1):\n            img_paths.append(f\"{input_dir}/{file.replace('.txt', '')}_{l}.jpg\")\n    return np.array(img_paths), np.vstack(labels)\n\ndef get_mse(gt, test):\n    test = np.nan_to_num(test)\n    return np.mean(np.nanmean((gt - test)**2, axis=0))\n\ndef to_radians(deg):\n    return deg * math.pi / 180\n\ndef mse_zero_percent(gt, mp, convert=0):\n    if convert:\n        gt = to_radians(gt)\n        mp = to_radians(mp)\n    \n    err_mse = get_mse(gt, mp)\n    zero_mse = get_mse(gt, np.zeros_like(gt))\n    \n    return 100 * (err_mse / (zero_mse if zero_mse > 0 else 
1.25e-3))\n\ndef fill_zeros_previous(arr):\n for i, r in enumerate(arr):\n if r.sum() == 0 and i > 0:\n arr[i] = arr[i-1]\n return arr\n \ndef remove_zero_labels(x, y):\n y = y[np.all(y != 0, axis=1)]\n x = x[np.where(np.any(y != 0, axis=1))[0]]\n return x, y\n \ndef split_data(img_paths, labels, split=0.90, transform=None, non_zero_labels=1, remove_nans=1):\n labels = np.nan_to_num(labels)\n \n if non_zero_labels:\n if remove_nans:\n img_paths, labels = remove_zero_labels(img_paths, labels)\n else:\n labels = fill_zeros_previous(labels)\n \n x_train, x_test, y_train, y_test = train_test_split(img_paths, labels, test_size=(1.0 - split), random_state=42)\n train_size = int(split * x_train.shape[0])\n x_valid, y_valid, x_train, y_train = x_train[train_size:], y_train[train_size:], x_train[:train_size], y_train[:train_size]\n\n train_data = CalibData(x_train, y_train, transform=transform)\n valid_data = CalibData(x_valid, y_valid)\n test_data = CalibData(x_test, y_test)\n \n return train_data, valid_data, test_data\n\ndef load_pretrained_model(model, weights_path):\n model.load_state_dict(torch.load(weights_path))\n return model\n\n\n\n\n","repo_name":"asceznyk/calipy","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29025079150","text":"import time\nimport datetime\nfrom report import report_sxw\nfrom osv import fields, osv\nfrom tools.translate import _\n\nclass labtest_report(report_sxw.rml_parse):\n _name = 'report.patient.labtest'\n def __init__(self, cr, uid, name, context):\n super(labtest_report, self).__init__(cr, uid, name, context=context)\n self.localcontext.update({\n 'time': time,\n 'get_test': self._get_test,\n 'get_doctor' : self.get_doctor,\n })\n \n def _get_test(self, patient, ids={}):\n doctor_id = self.get_doctor_id() \n if doctor_id: \n test_ids = self.pool.get('medical.patient.lab.test').search(self.cr, self.uid, [('doctor_id','=',doctor_id),('patient_id','=',patient.id),('state','=','draft')])\n if test_ids:\n return self.pool.get('medical.patient.lab.test').browse(self.cr, self.uid, test_ids)\n return []\n\n def get_doctor_id(self): \n partner_id = self.pool.get('res.partner').search(self.cr,self.uid,[('user_id','=',self.uid)])\n if partner_id:\n physician_id = self.pool.get('medical.physician').search(self.cr, self.uid, [('name','in',partner_id)]) \n if physician_id:\n return physician_id[0]\n return False\n\n def get_doctor(self):\n partner_id = self.pool.get('res.partner').search(self.cr,self.uid,[('user_id','=',self.uid)])\n if partner_id:\n return self.pool.get('res.partner').read(self.cr, self.uid, partner_id, ['name'])[0]['name']\n else:\n return '' \n\nreport_sxw.report_sxw('report.patient.labtest', 'medical.patient', 'addons/hms_plus/medical_lab/report/labtest.rml', parser=labtest_report, header=False)","repo_name":"elmerdpadilla/odoo","sub_path":"modtest70/hms_plus/medical_lab/report/labtest.py","file_name":"labtest.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"17883424555","text":"import cv2\nimport numpy as np\nimport math\nimport random\n\nimport MainScript\nimport EngineFiles.ReadChars as RC\nimport EngineFiles.Preprocessing as PP\nimport EngineFiles.PredictChar as PC\nimport EngineFiles.PredictPlate as PPL\n\nPLATE_WIDTH_PADDING_FACTOR = 1.3\nPLATE_HEIGHT_PADDING_FACTOR = 1.5\n\n\ndef 
lookupChars(img1):\n ListChars, countChars, imgC = [], 0, img1.copy()\n contours, hierC = cv2.findContours(imgC, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n h, w = img1.shape\n img1Cont = np.zeros((h,w,3), np.uint8)\n\n for i in range(len(contours)):\n if MainScript.showSteps == True:\n cv2.drawContours(img1Cont, contours, i, MainScript.BGR_white,)\n\n PredictChar = PC.PredictChar(contours[i])\n\n if RC.checkChar(PredictChar):\n countChars += 1\n ListChars.append(PredictChar)\n\n if MainScript.showSteps == True:\n print('\\nstep 2 - len(contours) =',len(contours))\n print('\\nstep 2 - countChars =', countChars)\n cv2.imshow('2a',img1Cont)\n\n return ListChars\n\n\ndef gainPlate(imgO, listChars):\n predictPlate = PPL.PredictPlate()\n listChars.sort(key = lambda mChar : mChar.centerX)\n flatPlateX = (listChars[0].centerX + listChars[len(listChars)-1].centerX) / 2\n flatPlateY = (listChars[0].centerY + listChars[len(listChars)-1].centerY) / 2\n coorPlateCenter = flatPlateX, flatPlateY\n PlateW = int((listChars[len(listChars) - 1].bRectX + listChars[len(listChars) - 1].bRectW - listChars[0].bRectX) * PLATE_WIDTH_PADDING_FACTOR)\n totalCharH = 0\n\n for i in listChars:\n totalCharH += i.bRectH\n\n flatAvgCharH = totalCharH / len(listChars)\n plateH = int(flatAvgCharH * PLATE_HEIGHT_PADDING_FACTOR)\n flatOpp = listChars[len(listChars) - 1].centerY - listChars[0].centerY\n flatHyp = RC.DistanceChars(listChars[0], listChars[len(listChars) - 1])\n flatCorrAngelRad = math.asin(flatOpp / flatHyp)\n flatCorrAngelDeg = flatCorrAngelRad * (180/math.pi)\n predictPlate.locPlate = (tuple(coorPlateCenter), (PlateW, plateH), flatCorrAngelDeg)\n rotMatrix = cv2.getRotationMatrix2D(tuple(coorPlateCenter), flatCorrAngelDeg, 1.0)\n h, w, numC = imgO.shape\n iRot = cv2.warpAffine(imgO, rotMatrix, (w, h))\n iCrop = cv2.getRectSubPix(iRot, (PlateW, plateH), tuple(coorPlateCenter))\n predictPlate.iPlate = iCrop\n\n return predictPlate\n\n\ndef CropPlates(img):\n ListPlates = []\n h, w, numC = img.shape\n\n GrayScale = np.zeros((h,w,1), np.uint8)\n thresh = np.zeros((h,w,1), np.uint8)\n contours = np.zeros((h,w,3), np.uint8)\n cv2.destroyAllWindows()\n\n if MainScript.showSteps == True:\n cv2.imshow('0', img)\n\n GrayScale, thresh = PP.preprocessing(img)\n\n if MainScript.showSteps == True:\n cv2.imshow('1a', GrayScale)\n cv2.imshow('1b', thresh)\n\n ListChars = lookupChars(thresh)\n\n if MainScript.showSteps == True:\n print('\\n Step 2 - length of ListChars =', len(ListChars))\n contours = np.zeros((h,w,3), np.uint8)\n Conts = []\n\n for n in ListChars:\n Conts.append(n.contour)\n\n cv2.drawContours(contours, Conts, -1, MainScript.BGR_white)\n cv2.imshow('2b', contours)\n\n FitChars = RC.DetectFitChars(ListChars)\n\n if MainScript.showSteps == True:\n print('\\nStep 3 - FitChars =', len(FitChars))\n contours = np.zeros((h,w,3), np.uint8)\n\n for i in FitChars:\n RandBlue = random.randint(0,255)\n RandGreen = random.randint(0,255)\n RandRed = random.randint(0,255)\n Conts = []\n\n for n in i:\n Conts.append(n.contour)\n\n cv2.drawContours(contours, Conts, -1, (RandBlue, RandGreen, RandRed))\n\n cv2.imshow('3', contours)\n\n for i in FitChars:\n predictPlate = gainPlate(img, i)\n\n if predictPlate.iPlate is not None:\n ListPlates.append(predictPlate)\n\n print(\"\\n\", len(ListPlates), \"possibilities found!\")\n\n if MainScript.showSteps == True:\n cv2.imshow(\"4a\", contours)\n\n for i in range(len(ListPlates)):\n p2fRpt = cv2.boxPoints(ListPlates[i].locPlate)\n cv2.line(contours, tuple(p2fRpt[0]), tuple(p2fRpt[1]), 
MainScript.BGR_red, 2)\n cv2.line(contours, tuple(p2fRpt[1]), tuple(p2fRpt[2]), MainScript.BGR_red, 2)\n cv2.line(contours, tuple(p2fRpt[2]), tuple(p2fRpt[3]), MainScript.BGR_red, 2)\n cv2.line(contours, tuple(p2fRpt[3]), tuple(p2fRpt[0]), MainScript.BGR_red, 2)\n cv2.imshow(\"4a\", contours)\n print(\"One of some predicted plate :\",i,\", click to continue!\")\n cv2.imshow('4b', ListPlates[i].iPlate)\n cv2.waitKey(0)\n\n print(\"\\nPlate detection complete!\")\n cv2.waitKey(0)\n\n return ListPlates","repo_name":"devildances/ComputerVision-Plate_Recognition","sub_path":"EngineFiles/ReadPlates.py","file_name":"ReadPlates.py","file_ext":"py","file_size_in_byte":4762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14761058338","text":"from .services import get_transfers_by_id\nfrom django.http import HttpResponse\nimport json\n\n\n# Create your views here.\n\n\ndef get_transfers(requests):\n chain = requests.GET.get(\"chain\")\n address = requests.GET.get(\"address\")\n token_id = requests.GET.get(\"token_id\")\n\n nft_transfers = get_transfers_by_id(chain=chain, address=address, token_id=token_id)\n json_transfers = json.dumps(nft_transfers)\n return HttpResponse(json_transfers)\n","repo_name":"MoralisWeb3/youtube-tutorials","sub_path":"get-nft-transfers-by-id/backend/nft/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":606,"dataset":"github-code","pt":"31"} +{"seq_id":"33056173192","text":"from os.path import getsize as path_getsize\nfrom zlib import compress as zlib_compress\nfrom simplejson import loads as json_loads\n\n__version__ = '1.0.0'\n\ndef analyse_json(filename):\n \"\"\"Utility to return the ratio of key size, punctuation size, and leaf value size.\"\"\"\n\n unique_keys = { }\n\n def __get_size(j):\n \"\"\"Recurse to generate size.\"\"\"\n (keys, punctuation, key_count) = (0, 0, 0)\n if isinstance(j, list):\n punctuation += 1 # [\n punctuation += (len(j) - 1) # ,\n for v in j:\n sub_k, sub_p, sub_count = __get_size(v)\n keys += sub_k\n punctuation += sub_p\n key_count += sub_count\n punctuation += 1 # ]\n elif isinstance(j, dict):\n punctuation += 1 # {\n if len(j.keys()) > 1:\n punctuation += (len(j.keys()) - 1) # ,\n for k, v in j.iteritems():\n if k not in unique_keys:\n unique_keys[k] = True\n key_count += 1\n punctuation += 1 # \"\n keys += len(k)\n punctuation += 1 # \"\n punctuation += 1 # :\n sub_k, sub_p, sub_count = __get_size(v)\n keys += sub_k\n punctuation += sub_p\n key_count += sub_count\n punctuation += 1 # }\n elif isinstance(j, (str, unicode)):\n punctuation += 1 # \"\n punctuation += 1 # \"\n return (keys, punctuation, key_count)\n\n total_size = path_getsize(filename)\n with open(filename, 'r') as f:\n data = f.read()\n j = json_loads(data)\n\n (keys, punctuation, key_count) = __get_size(j)\n values = total_size - (keys + punctuation)\n unique_count = len(unique_keys.keys())\n compressed_size = len(zlib_compress(data, 6))\n\n return (keys, punctuation, values, key_count, unique_count, total_size, compressed_size)\n","repo_name":"turbulenz/turbulenz_tools","sub_path":"turbulenz_tools/utils/json_stats.py","file_name":"json_stats.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"31"} +{"seq_id":"2188622329","text":"from features.metadata_base import MetadataBase\nfrom features.website_manager import WebsiteData\nfrom 
lib.constants import STRICT_TRANSPORT_SECURITY, VALUES\n\n\nclass Security(MetadataBase):\n    decision_threshold = 1\n\n    expected_headers: dict = {\n        \"cache-control\": {0: [\"no-cache\", \"no-store\"]},\n        \"content-security-policy\": {},\n        \"referrer-policy\": {},\n        STRICT_TRANSPORT_SECURITY: {0: [\"max-age=\", \"includeSubDomains\"]},\n        \"x-content-type-options\": {0: [\"nosniff\"]},\n        \"x-frame-options\": {0: [\"deny\", \"same_origin\"]},\n        \"x-xss-protection\": {\n            0: [\"1\"],\n            1: [\"mode=block\"],\n        },\n    }\n\n    @staticmethod\n    def _unify_text(text: str) -> str:\n        return text.replace(\"_\", \"\").replace(\"-\", \"\").lower()\n\n    def _start(self, website_data: WebsiteData) -> dict:\n        values = []\n\n        for tag, expected_value in self.expected_headers.items():\n            if tag in website_data.headers:\n                if len(expected_value) == 0:\n                    values.append(tag)\n                else:\n\n                    header_value = self._extract_header_values(\n                        website_data.headers[tag]\n                    )\n\n                    expected_value = self._process_expected_values(\n                        expected_value\n                    )\n\n                    found_keys = self._number_of_expected_keys_in_header(\n                        expected_value, header_value\n                    )\n\n                    if (\n                        tag == STRICT_TRANSPORT_SECURITY\n                        and self._is_sts_mag_age_greater_than_zero(\n                            header_value\n                        )\n                    ):\n                        found_keys += 1\n\n                    if found_keys == len(expected_value.keys()):\n                        values.append(tag)\n\n        return {VALUES: values}\n\n    def _extract_header_values(self, header: list) -> list:\n        header_value = [\n            self._unify_text(value).replace(\",\", \";\").split(\";\")\n            for value in header\n        ]\n        return [el for val in header_value for el in val]\n\n    def _process_expected_values(self, expected_value: dict) -> dict:\n        for idx, element in expected_value.items():\n            expected_value.update(\n                {int(idx): [self._unify_text(value) for value in element]}\n            )\n        return expected_value\n\n    @staticmethod\n    def _number_of_expected_keys_in_header(\n        expected_value: dict, header_value: list\n    ) -> int:\n        found_values = sum(\n            [\n                1\n                for value in expected_value.values()\n                for val in value\n                if val in header_value\n            ]\n        )\n        return found_values\n\n    @staticmethod\n    def _is_sts_mag_age_greater_than_zero(header_value: list) -> bool:\n        greater_than_zero = False\n        for el in header_value:\n            if el.startswith(\"maxage=\") and int(el.split(\"=\")[-1]) > 0:\n                greater_than_zero = True\n        return greater_than_zero\n\n    def _decide(self, website_data: WebsiteData) -> tuple[bool, float]:\n        probability = len(website_data.values) / len(\n            self.expected_headers.keys()\n        )\n        decision = probability >= self.decision_threshold\n        return decision, probability\n","repo_name":"codecentric/metadata_picker","sub_path":"src/features/security.py","file_name":"security.py","file_ext":"py","file_size_in_byte":3379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26282734360","text":"import heapq\nfrom typing import List\n\nclass Solution:\n    def getSkyline(self, buildings: List[List[int]]) -> List[List[int]]:\n        v_lines = {l for b in buildings for l in (b[0],b[1])}\n        heap, i, res = [], 0, []\n        for vl in sorted(v_lines):\n            while i < len(buildings) and buildings[i][0] <= vl:\n                heapq.heappush(heap, (-buildings[i][2], buildings[i][1]))\n                i+=1\n            while heap and heap[0][1] <= vl:\n                heapq.heappop(heap)\n            h = len(heap) and -heap[0][0]\n            if not res or res[-1][1]!= h:\n                res.append((vl, h))\n        return res\n","repo_name":"jw3329/leetcode-problem-solving","sub_path":"Top Interview Questions/218. 
The Skyline Problem/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74239184087","text":"# Implementation of a queue \n\nclass Node:\n    def __init__(self,value):\n        self.value = value\n        self.next = None\n\nclass Queue():\n\n    def __init__(self) -> None:\n        self.top = None\n        self.bottom = None\n        self.length = 0\n\n    def peek(self):\n        if self.top != None:\n            return self.top.value\n\n\n\n    def enqueue(self,value):\n        newNode = Node(value)\n        if self.isEmpty():\n            self.top = newNode\n            self.top.next = newNode\n            self.bottom = newNode\n            self.bottom.next = None\n            self.length += 1\n\n        else:\n            self.bottom.next = newNode\n            self.bottom = newNode\n            self.bottom.next = None\n            self.length += 1\n\n\n    def dequeue(self):\n        if self.top == self.bottom:\n            self.bottom = None\n            self.length -=1\n            return self.top.value\n        \n        if self.top:\n            temp = self.top\n            self.top = temp.next\n            self.length -=1\n            return temp.value\n\n\n    def isEmpty(self):\n        if self.length == 0:\n            return True\n        else:\n            return False\n\n    def print_stack(self):\n        cur = self.top\n        ar = []\n        while cur != None:\n            ar.append(cur.value)\n            cur = cur.next\n        return ar \n\n\nsr = Queue()\nsr.enqueue(99)\nsr.enqueue(1)\nsr.enqueue(2)\nprint(\"queue\",sr.print_stack())\nsr.enqueue(3)\nsr.enqueue(4)\nprint(\"queue\",sr.print_stack())\nprint(\"queue\",sr.print_stack())\nprint(\"peek\",sr.peek())\nprint(\"dequeue\",sr.dequeue())\nprint(\"queue\",sr.print_stack())\n ","repo_name":"GunalanD95/DSA-GUNALAN","sub_path":"data-structures/user-defined/stack and queue/queue_imp.py","file_name":"queue_imp.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5230173141","text":"import magic\n\nfrom infrastructure.config.config import LANGUAGE_EXTENSIONS\n\n\nasync def identify_language(file_path: str, languages: set):\n    # Use python-magic to determine the file type and\n    # add to languages set\n    mime: magic.Magic = magic.Magic()\n    file_type: str = mime.from_file(file_path)\n    for language in LANGUAGE_EXTENSIONS:\n        if language in file_type:\n            languages.add(language)\n","repo_name":"Lehsqa/eon_telegram_bot","sub_path":"project/infrastructure/files/identify_language.py","file_name":"identify_language.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11282266863","text":"import torch\nimport numpy as np\nis_gpu = False\nif torch.cuda.is_available():\n    device = torch.device('cuda:0')\n    is_gpu = True\nelse:\n    device = torch.device('cpu')\nprint(\"is gpu \"+str(is_gpu))\n\nclass MOA():\n    def __init__(self,data_obj,moa_factor,subset,beta):\n        self.data_obj = data_obj\n        tf_probing = (torch.eye(len(self.data_obj.tfs))).to(device)\n        control_row = torch.zeros((1,len(self.data_obj.tfs))).to(device)\n        self.probe = torch.cat((tf_probing,control_row),axis=0).to(device)\n        self.moa_matrix = self.get_tf_gene_matrix()\n        self.moa_factor = moa_factor\n        self.subset = subset\n        self.beta = beta\n\n    def get_moa_loss(self,decoder):\n        #create probe tensor\n        moa_matrix = self.moa_matrix\n        decoder_output = None\n        mask = None\n        probe = self.probe\n        if self.subset != 0:\n            selectedIndex = np.arange(len(self.data_obj.tfs))\n            selectedIndex = np.random.permutation(selectedIndex)[0:self.subset]\n            TF_index = selectedIndex.copy()\n            selectedIndex = np.insert(selectedIndex,[-1],len(self.data_obj.tfs))\n\n            moa_matrix = 
self.moa_matrix[TF_index,:]\n decoder_output = decoder(probe[selectedIndex,:])\n else:\n decoder_output = decoder(probe)\n mask = torch.logical_not(moa_matrix == 0)\n\n control_row = torch.masked_select(decoder_output[-1].repeat(moa_matrix.shape[0],1),mask)\n probe_rows = torch.masked_select(decoder_output[:-1],mask)\n\n diff = (probe_rows-control_row)\n #print(\"diff\",diff)\n\n moa_vals = torch.masked_select(moa_matrix,mask)\n\n violated = torch.logical_not(torch.eq(torch.sign(diff),moa_vals))\n violated_count = torch.count_nonzero(violated.int().detach()).detach()\n\n loss = torch.tensor(0.0, requires_grad = True)\n if torch.any(violated):\n violated_values = (torch.abs(diff))*violated.int()\n lossL1 = self.beta * torch.sum(torch.abs(violated_values))\n lossL2 = (1-self.beta) * torch.sum(torch.square(violated_values))\n loss = self.moa_factor * (lossL1 + lossL2)\n\n return loss,violated_count\n\n\n def get_tf_gene_matrix(self):\n gene_index_dict = {}\n for i in range(len(self.data_obj.overlap_list)):\n gene_index_dict[self.data_obj.overlap_list[i]] = i\n\n x_coords = []\n y_coords = []\n moa_val = []\n for i in range(len(self.data_obj.tfs)):\n tf = self.data_obj.tfs[i]\n tf_info = self.data_obj.tf_gene_dict[tf]\n for gene in tf_info.keys():\n gene_index = gene_index_dict[gene]\n moa = tf_info[gene]\n if moa != 0:\n x_coords.append(i)\n y_coords.append(gene_index)\n moa_val.append(moa)\n ind = [x_coords,y_coords]\n moa_matrix = torch.sparse_coo_tensor(ind,moa_val,([len(self.data_obj.tfs),len(self.data_obj.overlap_list)]))\n\n moa_matrix = moa_matrix.to_dense().to(device)\n\n return moa_matrix\n","repo_name":"schaferd/Modular_DSCA_TF_Prediction","sub_path":"moa.py","file_name":"moa.py","file_ext":"py","file_size_in_byte":3064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72576167447","text":"\"\"\"Calculate Mean Average Precision on the ground truth and predictions in the COCO format.\"\"\"\n\nimport argparse\n\nfrom pycocotools.coco import COCO\nfrom pycocotools.cocoeval import COCOeval\n\n\ndef print_results(coco_eval):\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n arg = parser.add_argument\n arg(\"-p\", \"--pred_path\", type=str, help=\"Path to the ground truth json file.\", required=True)\n arg(\"-g\", \"--gt_path\", type=str, help=\"Path to the json file with predictions.\", required=True)\n\n args = parser.parse_args()\n\n coco = COCO(args.gt_path)\n\n pred_coco = coco.loadRes(args.pred_path)\n\n categories = coco.cats\n\n print(\"-------------------------------------------------------------------------------\")\n print(\"CATEGORIES:\")\n print(categories)\n\n print(\"-------------------------------------------------------------------------------\")\n\n coco_eval = COCOeval(cocoGt=coco, cocoDt=pred_coco, iouType=\"bbox\")\n\n print(\"ALL CLASSES :\")\n\n print_results(coco_eval)\n\n for value in categories.values():\n category_id = value[\"id\"]\n class_name = value[\"name\"]\n print(\"-------------------------------------------------------------------------------\")\n print(\"CLASS_NAME = \", class_name)\n\n coco_eval.params.catIds = category_id\n 
print_results(coco_eval)\n","repo_name":"ternaus/iglovikov_helper_functions","sub_path":"iglovikov_helper_functions/metrics/coco_eval.py","file_name":"coco_eval.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"31"} +{"seq_id":"667626396","text":"\"\"\"\n完全二叉树:\n在子节点添加新节点时保证前面的节点都不为空\nindex为i时\n左孩子为 i*2 + 1\n右孩子为 i*2 + 2\n父节点为 (i-1)/2\n堆是一种特殊的完全二叉树,分为大根堆和小根堆\n大根堆:\n每一个子二叉树的根节点都是该二叉树中所有节点的最大值\n可以用来求某任意长度数组中的前几个最大值\n小根堆:\n每一个子二叉树的根节点都是该二叉树中所有节点的最小值\n堆排序:\n将一个无序数组堆化(大根堆或小根堆)\n假设按照大根堆规则排序,先将数组堆化,然后取出二叉树根节点(最大值),然后堆化,再取出根节点(第二大)\n以此类推,全部取出后则实现了数组的从大到小排序\n\"\"\"\n\nfrom P3 import *\nfrom generator import *\n\nclass Heap:\n def __init__(self, arr):\n self.arr = arr\n # pass\n\n def heap_sort(self):\n \"\"\"\n 时间复杂度 O(NlogN)\n 空间复杂度 O(1)\n :param self.arr:\n :return:\n \"\"\"\n if (not self.arr) or (len(self.arr) < 2):\n return\n\n # 将数字插入堆中\n # for i in range(len(self.arr)):\n # self.heap_insert(i)\n # self.heap_sort()\n # 较快一些的方法:从下往上堆化, 时间复杂度为O(N)\n for i in range(len(self.arr)-1, -1, -1):\n self.heapify(i, len(self.arr))\n\n heapSize = len(self.arr)\n swap(self.arr, 0, heapSize - 1)\n heapSize -= 1\n while heapSize:\n self.heapify(0, heapSize)\n swap(self.arr, 0, heapSize - 1)\n heapSize -= 1\n\n def heap_insert(self, idx):\n # 如果下标为idx的节点值比其父节点的值大,则将其交换,继续向上比较\n # 不比其父节点的值大时,退出循环\n while self.arr[idx] > self.arr[int((idx - 1) / 2)]:\n swap(self.arr, idx, int((idx - 1) / 2))\n idx = int((idx - 1) / 2)\n\n def heapify(self, idx, heap_size):\n \"\"\"\n 堆化,从某个idx向下出发,将整个二叉树变成堆的过程\n :param self.arr:\n :param idx:\n :param heap_size:\n :return:\n \"\"\"\n left = idx*2 + 1 # 左孩子下标\n while left < heap_size: # 当left < heap_size时说明左孩子下标没有越界,即根节点idx存在一个左孩子\n\n # 将下标为idx的根节点的左孩子与右孩子进行比较,得到拥有较大值的孩子下标\n largest = left + 1 if left+1 < heap_size and self.arr[left + 1] > self.arr[left] else left\n\n # 如果较大的孩子值比根节点大,则largest不变,以便后续将其与根节点进行交换,\n # 反之,largest = idx,代表根节点已经是该子二叉树中最大的值了,不需要再进行堆化。后续退出循环\n largest = largest if self.arr[largest] > self.arr[idx] else idx\n\n if largest == idx:\n break\n\n # 如果没有退出循环,则将较大的孩子节点与根节点进行交换\n swap(self.arr, largest, idx)\n\n # 然后根节点的下标变为上一步中较大的孩子节点的下标,继续堆化\n idx = largest\n\n # left变为新根节点下标的左孩子下标\n left = idx * 2 + 1\n\n\ndef heap_main():\n arr = [7,1,4,2,5,3,9,8,6]\n print(arr)\n heap = Heap(arr)\n heap.heap_sort()\n print(arr)\n\n\n\"\"\"\n基于比较的排序:快排,冒泡排序,堆排序灯\n不基于比较的排序:桶排序,基数排序\n不基于比较的排序适用于数据量有限的情况\n\"\"\"\n\n\ndef bucket_sort(arr, l, r, bit_num):\n bucket = [0] * (r - l + 1)\n size = 10 # 固定变量,分别代表位数为0-9\n for digit in range (1, bit_num + 1): # 有多少位,就出入桶多少次\n # count[0] 当前位(d)是0的数字有多少个\n # count[1] 当前位(d)是1的数字有多少个\n # count[2] 当前位(d)是2的数字有多少个\n # count[3] 当前位(d)是3的数字有多少个\n # 以此类推\n count = [0] * size # 分别存放位数0-9的桶\n for i in range(l, r + 1):\n bit = get_digit(arr[i], digit)\n count[bit] += 1\n for i in range(1, size):\n count[i] += count[i-1]\n for i in range(r, l-1, -1):\n bit = get_digit(arr[i], digit)\n bucket[count[bit] - 1] = arr[i]\n count[bit] -= 1\n j = 0\n for i in range(l, r + 1):\n arr[i] = bucket[j]\n j += 1\n\n return arr\n\n\ndef get_digit(num, d):\n result = 0\n for i in range(1, d + 1):\n result = num % 10\n num //= 10\n\n return result\n\n\ndef count_bit(arr):\n max = 0\n for item in arr:\n\n item = abs(item)\n bit_num = 0\n while item:\n bit_num += 1\n item //= 10\n\n if bit_num > max:\n max = bit_num\n return max\n\n\ndef absolute(arr):\n for i in range(len(arr)):\n arr[i] = abs(arr[i])\n\n\na = 321\nprint(get_digit(321, 3))\ng = Generator(10, 10000)\narr = 
g.random_generator()\nprint(arr)\nabsolute(arr)\nprint(arr)\nbit_num = count_bit(arr)\narr = bucket_sort(arr, 0, len(arr) - 1, bit_num)\nprint(arr)","repo_name":"fenzhengrou/zuoshen","sub_path":"P4.py","file_name":"P4.py","file_ext":"py","file_size_in_byte":5308,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33510602956","text":"# Implementation of the multilingual adapter as in Bapna et. al 2019\n\nimport torch\nfrom torch.nn import Parameter\nimport torch.nn.functional as F\nimport math\n# Commented out since the new components are not available\n# from ..optimized.feed_forward import PositionWiseFeedForward\n# from ..layer_norm import LayerNorm\nimport torch.nn as nn\nfrom onmt.modules.linear import FeedForward\n\ndef xavier_normal(weight, gain=1.0):\n\n fan_in, fan_out = weight.size(-2), weight.size(-1)\n std = gain * math.sqrt(2.0 / float(fan_in + fan_out))\n\n with torch.no_grad():\n weight.normal_(0, std)\n\n\nclass MultilingualAdapter(torch.nn.Module):\n\n def __init__(self, model_size, bottleneck_size, n_languages=1, dropout=0.0,\n elementwise_affine=True, variational=False, death_rate=0.0):\n \n super(MultilingualAdapter, self).__init__()\n\n self.all_modules = torch.nn.ModuleList()\n\n for i in range(n_languages):\n layer_norm = nn.LayerNorm((model_size,), elementwise_affine=elementwise_affine)\n feed_forward = FeedForward(model_size, bottleneck_size, p=dropout, variational=variational)\n adapter = torch.nn.Sequential(layer_norm, feed_forward)\n self.all_modules.append(adapter)\n\n self.death_rate = death_rate\n\n def forward(self, input, lang=None):\n \"\"\"\n :param input: TxBxN Tensor\n :param lang: [1] Tensor\n :return:\n \"\"\"\n adapter_lives = True\n\n if self.death_rate > 0:\n if self.training:\n adapter_lives = (torch.rand(1)[0].item() >= self.death_rate)\n if not adapter_lives:\n return input # if rand < death rate, direclty give out input (adapter is dead)\n\n unique_lang_id = torch.unique(lang)\n assert len(unique_lang_id) == 1\n # Danni: This line was lang.numel() == 1. 
Had to change this after giving language tag to tokens:\n\n index = unique_lang_id.item() # was lang.item()\n adapter = self.all_modules[index]\n\n # normalize -> transform -> residual\n return input + adapter(input)\n\n\n","repo_name":"lenacabrera/gb_mnmt","sub_path":"onmt/modules/multilingual_factorized/multilingual_adapters.py","file_name":"multilingual_adapters.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12975457858","text":"# -*- coding: utf-8 -*-\nimport json\nimport requests\ndef send(token):\n # planId 是设备id可以随便改 address 自己填一下就可以了 longitude 经度 latitude纬度 在地图上搜一下\n data = {\"country\": \"中国\", \"address\": \"河北省 · 石家庄市 · 石家庄水电机动车驾驶员学校\", \"province\": \"河北省\", \"city\": \"石家庄市\",\n \"latitude\": \"38.038716\", \"description\": \"\", \"planId\": \"28ebd7aa4e5342c9229f17d1b0f5c066\", \"type\": \"START\",\n \"device\": \"Android\", \"longitude\": \"114.411354\"}\n data = json.dumps(data)\n url = 'https://api.moguding.net:9000/attendence/clock/v1/save'\n headers = {\n 'Authorization': token,\n 'Content-Type': 'application/json; charset=UTF-8'\n }\n res = requests.post(url=url, data=data, headers=headers).text\n print(res)\ndef login():\n # 请自行更换 账号和密码\n data = {\n \"phone\": \"157321\",\n \"password\": \"a123456\",\n \"loginType\": \"android\"\n }\n res = requests.post('https://api.moguding.net:9000/session/user/v1/login',\n data=json.dumps(data),\n headers={\n 'Content-Type': 'application/json; charset=UTF-8'\n }, ).text\n res = json.loads(res)\n if (res['code'] != 200):\n print(\"请检查账号\")\n exit()\n token = res['data']['token']\n send(token)\nif __name__ == '__main__':\n login()\n","repo_name":"yecca/moguding","sub_path":"login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28658249321","text":"# coding: utf-8 \n\"\"\"\nimport sys\nfrom PyQt5.QtCore import QUrl\nfrom PyQt5.QtWidgets import QApplication\nfrom PyQt5.QtWebEngineWidgets import QWebEnginePage, QWebEngineView\n\nclass Render(QWebEngineView):\n def __init__(self, url):\n self.app = QApplication(sys.argv)\n QWebEngineView.__init__(self)\n self.loadFinished.connect(self._loadFinished)\n self.load(QUrl(url))\n self.app.exec_()\n\n def _loadFinished(self, result):\n # This is an async call, you need to wait for this\n # to be called before closing the app\n self.page().toHtml(self.callable)\n\n def callable(self, data):\n self.html = data\n # Data has been stored, it's safe to quit the app\n self.app.quit()\n\n\n\nimport lxml.html\n\n#定义一个网页地址\nurl = 'https://www.baidu.com'\n\nr = Render(url)\nresult = r.html\ntree = lxml.html.fromstring(result)\n\n\"\"\"\n\n\n\n\n\"\"\"\nimport sys\nfrom PyQt5.QtWidgets import QApplication\nfrom PyQt5.QtWebEngineWidgets import QWebEngineView\ndef render(source_html):\n\n class Render(QWebEngineView):\n def __init__(self, html):\n self.html = None\n self.app = QApplication(sys.argv)\n QWebEngineView.__init__(self)\n self.loadFinished.connect(self._loadFinished)\n self.setHtml(html)\n self.app.exec_()\n\n def _loadFinished(self, result):\n # what's going on here? 
how can I get the HTML from toHtml?\n self.page().toHtml(self.callable)\n self.app.quit()\n\n def callable(self, data):\n self.html = data\n\n return Render(source_html).html\nprint(render(\"http://www.widlabs.com\"))\n\"\"\"\n\n\n\n\n\"\"\"\nimport sys\nfrom PyQt5.QtCore import QUrl\nfrom PyQt5.QtWidgets import QApplication\nfrom PyQt5.QtWebEngineWidgets import QWebEnginePage, QWebEngineView\napp = QApplication(sys.argv)\nbrowser = QWebEngineView()\nbrowser.load(QUrl(\"http://www.widlabs.com/\"))\n\nbrowser.page().toHtml(this._callable)\n\nbrowser.show()\napp.exec_()\n\"\"\"\n\n\n\n\n\"\"\" \nimport sys \nfrom PyQt5.QtCore import * \nfrom PyQt5.QtWidgets import * \nfrom PyQt5.QtGui import * \nfrom PyQt5.QtWebEngineWidgets import * \n \nclass MainWindow(QMainWindow): \n def __init__(self, *args, **kwargs): \n super().__init__(*args, **kwargs) \n self.setWindowTitle(\"client\") \n self.setWindowIcon(QIcon('icons/icon.png')) \n self.resize(900, 600) \n self.show() \n \n self.browser = QWebEngineView() \n url = 'https://www.baidu.com' \n self.browser.load(QUrl(url)) \n self.setCentralWidget(self.browser) \n \nif __name__=='__main__': \n app = QApplication(sys.argv) \n window = MainWindow() \n window.show() \n sys.exit(app.exec_()) \n\"\"\"\n\n\n\n\n\n\"\"\"\nfrom PyQt5.QtCore import QUrl\nfrom PyQt5.QtWidgets import QApplication\n#from PyQt5.QtWebKitWidgets import QWebView\nfrom PyQt5.QtCore import QEventLoop\n#import lxml.html\nfrom bs4 import BeautifulSoup\nfrom PyQt5.QtWebEngineWidgets import QWebEnginePage, QWebEngineView\nurl = 'https://zhuanlan.zhihu.com/p/27363298'\n\napp = QApplication([])\nwebview = QWebEngineView()\nloop = QEventLoop()\n\nwebview.loadFinished.connect(loop.quit)\nwebview.load(QUrl(url))\nloop.exec_()\nhtml = webview.page().mainFrame().toHtml()\n#tree = lxml.html.fromstring(html)\n#fixed_html = lxml.html.tostring(tree, pretty_print=True)\nsoup = BeautifulSoup(html, 'html.parser')\nfixed_html = soup.prettify()\ntitle = soup.find(class_=\"PostIndex-title av-paddingSide av-titleFont\")\n#print(fixed_html)\n\"\"\"\n\n\n\n\n\"\"\"\nimport sys,re\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtWebKit import *\n \n \np = re.compile(r'500):\n return x\n return np.log(1+np.exp(x))\n\nnon_linearity = np.vectorize(base_non_linearity)\n\ndef deriv(x):\n return 1/(1+np.exp(-x))\n\ndef gen_one_hot(x):\n out = np.zeros(10)\n out[x] = 1\n return out\n\ndef calc_accuracy(num):\n tot_correct = 0\n for i in tqdm(range(num)):\n hidden_layer = non_linearity(input_to_hidden_weights.dot(test_images[i])).reshape((hidden_layer_size,1))\n output_layer = softmax(hidden_to_output_weights.dot(hidden_layer))\n pred = np.argmax(output_layer)\n if(pred == test_labels[i]):\n tot_correct += 1\n return tot_correct/num\n \n \n \nmndata = MNIST('/home/will/Dropbox/isicni_project/MNIST_data')\nmndata.gz = True\nimages, labels = mndata.load_training()\n\ntest_images, test_labels = mndata.load_testing()\n\n\nimages = np.array(images)\nlabels = np.array(labels)\ntest_images = np.array(test_images)\ntest_labels = np.array(test_labels)\n\nimages = images/255\ntest_images = test_images/255\n\none_hot_labels = []\n\nfor i in range(len(labels)):\n one_hot_labels.append(gen_one_hot(labels[i]))\none_hot_labels = np.array(one_hot_labels)\n\n\nlearning_rate = 0.01\nauto_learning_rate = 0.01\n\ninput_size = 784\nhidden_layer_size = 50\noutput_size = 10\n\nbias_input_to_hidden = np.random.uniform(-1,1,(hidden_layer_size,1))\ninput_to_hidden_weights = 
np.random.uniform(-1,1,(hidden_layer_size,input_size))\nhidden_to_output_weights = np.random.uniform(-1,1,(output_size,hidden_layer_size))\nbias_hidden_to_output = np.random.uniform(-1,1,(output_size,1))\n\nautoencoder_output_to_hidden = np.random.uniform(-1,1,(hidden_layer_size,output_size))\nbias_autoencoder = np.random.uniform(-1,1,(hidden_layer_size,1))\n\nnum_batches = 1\n\ninv_dist_store = []\n\n#acc = calc_accuracy(len(test_images))\n#print(acc)\n\nfor _ in range(num_batches):\n \n for i in tqdm(range(len(images))):\n idx = np.random.randint(0,len(images))\n target = one_hot_labels[idx].reshape((output_size,1))\n hidden_layer = non_linearity(input_to_hidden_weights.dot(images[idx]).reshape((hidden_layer_size,1))+ bias_input_to_hidden).reshape((hidden_layer_size,1))\n output_layer = softmax(hidden_to_output_weights.dot(hidden_layer) + bias_hidden_to_output)\n db_hidden_out = (output_layer - target).reshape((output_size,1))\n dw_hidden_output = np.dot((output_layer - target),hidden_layer.T)\n \n hidden_to_output_weights += -learning_rate*dw_hidden_output -learning_rate*0.1*hidden_to_output_weights \n bias_hidden_to_output += -learning_rate*db_hidden_out -learning_rate*0.1*bias_hidden_to_output \n \n \n noisy_hidden = (hidden_layer + np.random.normal(0,0.0001,(hidden_layer_size,1))).reshape((hidden_layer_size,1))\n noisy_output = softmax(hidden_to_output_weights.dot(noisy_hidden) + bias_hidden_to_output )\n \n auto_hidden = non_linearity(autoencoder_output_to_hidden.dot(noisy_output)+ bias_autoencoder ).reshape((hidden_layer_size,1))\n \n auto_deriv = deriv(autoencoder_output_to_hidden.dot(noisy_output).reshape((hidden_layer_size,1)) +bias_autoencoder ).reshape((hidden_layer_size,1))\n if(i%5000):\n inv_dist_store.append(np.mean(np.abs(non_linearity(autoencoder_output_to_hidden.dot(output_layer)).reshape((hidden_layer_size,1)) - hidden_layer)))\n \n dw_auto = np.dot((auto_hidden - noisy_hidden)*auto_deriv,noisy_output.T) \n dw_auto_bias = (auto_hidden - noisy_hidden)*auto_deriv\n \n autoencoder_output_to_hidden += -auto_learning_rate*dw_auto - auto_learning_rate*0.1*autoencoder_output_to_hidden\n bias_autoencoder += -auto_learning_rate*dw_auto_bias - auto_learning_rate*0.1*bias_autoencoder \n \n target_hidden = hidden_layer + non_linearity(autoencoder_output_to_hidden.dot(target)+bias_autoencoder ) - non_linearity(autoencoder_output_to_hidden.dot(output_layer)+bias_autoencoder )\n \n der = deriv(input_to_hidden_weights.dot(images[idx]).reshape((hidden_layer_size,1)) +bias_input_to_hidden).reshape((hidden_layer_size,1))\n \n dw_input_hidden = np.dot((hidden_layer - target_hidden)*der,(images[idx].reshape(784,1)).T)\n db_input_hidden = (hidden_layer - target_hidden)*der\n \n input_to_hidden_weights += -learning_rate*dw_input_hidden - learning_rate*0.05*input_to_hidden_weights\n bias_input_to_hidden += -learning_rate*db_input_hidden - learning_rate*0.1*bias_input_to_hidden\n acc = calc_accuracy(len(test_images))\n print(acc)\n auto_learning_rate = auto_learning_rate/1.05\n learning_rate = learning_rate/1.05\n\ninv_dist_store = np.array(inv_dist_store)\n","repo_name":"EntropicEffect/dendritic_backprop","sub_path":"difference_target_prop.py","file_name":"difference_target_prop.py","file_ext":"py","file_size_in_byte":5093,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"33669216117","text":"chosen_pokemon = input(\"¿Contra qué Pokemon quieres combatir? 
(Squirtle / Charmander / Bulbasur): \").upper();\n\npikachu_hp = 100;\nenemy_hp = 0;\nenemy_attack = 0;\n\nif(chosen_pokemon == \"SQUIRTLE\"):\n enemy_hp = 90;\n enemy_name = \"Squirtle\";\n enemy_attack = 8;\nelif(chosen_pokemon == \"CHARMANDER\"):\n enemy_hp = 80;\n enemy_name = \"Charmander\";\n enemy_attack = 7;\nelif(chosen_pokemon == \"BULBASUR\"):\n enemy_hp = 100;\n enemy_name = \"Bulbasur\";\n enemy_attack = 10;\n\nwhile(pikachu_hp > 0 and enemy_hp > 0):\n chosen_attack = input(\"¿Qué ataque vamos a usar? (Chispazo / Bola voltio): \").upper();\n if(chosen_attack == \"CHISPAZO\"):\n print(\"Pikachu usó Chispazo, le hace 10 puntos de daño al enemigo.\");\n enemy_hp -= 10;\n elif(chosen_attack == \"BOLA VOLTIO\"):\n print(\"Pikachu usó Bola voltio, le hace 12 puntos de daño al enemigo.\");\n enemy_hp -= 12;\n\n print(\"La vida del {} ahora es de {}\".format(enemy_name, enemy_hp));\n\n print(\"{} te hace un ataque de {} puntos de daño\").format(enemy_name, enemy_attack);\n pikachu_hp -= enemy_attack;\n\n print(\"La vida del Pikachu ahora es de {}\".format(pikachu_hp));\n\n if(enemy_hp <= 0):\n print(\"¡Has ganado el combate!\");\n elif(pikachu_hp <= 0):\n print(\"¡Has perdido el combate!\");\n\nprint(\"El combate ha terminado.\");","repo_name":"TakuyaSama/PythonCourse","sub_path":"pokemon_combat.py","file_name":"pokemon_combat.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33782163919","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.express as px\nimport pandas as pd\nimport random\nfrom dash.dependencies import Input, Output\n\necom_sales = pd.read_csv('/usr/local/share/datasets/ecom_sales.csv')\nmajor_categories = list(ecom_sales['Major Category'].unique())\nminor_categories = list(ecom_sales['Minor Category'].unique())\nlogo_link = 'https://assets.datacamp.com/production/repositories/5893/datasets/fdbe0accd2581a0c505dab4b29ebb66cf72a1803/e-comlogo.png'\necom_country = ecom_sales.groupby('Country')['OrderValue'].agg(['sum', 'count']).reset_index().rename(\n columns={'count': 'Sales Volume', 'sum': 'Total Sales ($)'})\n\napp = dash.Dash(__name__)\n\napp.layout = html.Div([\n html.Img(src=logo_link,\n style={'margin': '30px 0px 0px 0px'}),\n html.H1('Sales breakdowns'),\n html.Div(\n children=[\n html.Div(\n children=[\n html.H2('Controls'),\n html.Br(),\n html.H3('Major Category Select'),\n dcc.Dropdown(\n id='major_cat_dd',\n options=[{'label': category, 'value': category} for category in major_categories],\n style={'width': '200px', 'margin': '0 auto'}),\n html.Br(),\n html.H3('Minor Category Select'),\n dcc.Dropdown(\n id='minor_cat_dd',\n style={'width': '200px', 'margin': '0 auto'})\n ],\n style={'width': '350px', 'height': '350px', 'display': 'inline-block',\n 'vertical-align': 'top', 'border': '1px solid black', 'padding': '20px'}),\n html.Div(\n children=[\n dcc.Graph(id='sales_line'),\n html.H3(id='chosen_major_cat_title')\n ],\n style={'width': '700px', 'height': '650px', 'display': 'inline-block'})\n ]), ],\n style={'text-align': 'center', 'display': 'inline-block', 'width': '100%'})\n\n\n# One callback to set minor values & HTML output\n@app.callback(\n Output('minor_cat_dd', 'options'),\n Output('chosen_major_cat_title', 'children'),\n Input('major_cat_dd', 'value'))\ndef update_minor_dd(major_cat_dd):\n major_minor = ecom_sales[['Major Category', 'Minor Category']].drop_duplicates()\n 
relevant_minor_options = major_minor[major_minor['Major Category'] == major_cat_dd][\n 'Minor Category'].values.tolist()\n minor_options = [{'label': x, 'value': x} for x in relevant_minor_options]\n\n if not major_cat_dd:\n major_cat_dd = 'None Selected'\n # Creating string for title\n major_cat_title = f'This is in the Major Category of : {major_cat_dd}'\n\n # Return the options and title\n return minor_options, major_cat_title\n\n\n# Create a callback to set a default minor category value\n@app.callback(\n Output('minor_cat_dd', 'value'),\n Input('minor_cat_dd', 'options'))\ndef select_minor_cat(options):\n chosen_val = 'None'\n if options:\n vals = [x['value'] for x in options]\n chosen_val = random.choice(vals)\n return chosen_val\n\n\n@app.callback(\n Output('sales_line', 'figure'),\n Input('minor_cat_dd', 'value'))\ndef update_line(minor_cat):\n minor_cat_title = 'All'\n ecom_line = ecom_sales.copy()\n\n if minor_cat:\n minor_cat_title = minor_cat\n ecom_line = ecom_line[ecom_line['Minor Category'] == minor_cat]\n\n ecom_line = ecom_line.groupby('Year-Month')['OrderValue'].agg('sum').reset_index(name='Total Sales ($)')\n line_graph = px.line(ecom_line, x='Year-Month', y='Total Sales ($)',\n title=f'Total Sales by Month for Minor Category: {minor_cat_title}')\n\n return line_graph\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)","repo_name":"ivan-mihailov/Datacamp_Plotly_Dash","sub_path":"Chapter_4/ext_chain_callbacks.py","file_name":"ext_chain_callbacks.py","file_ext":"py","file_size_in_byte":3873,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"42671392752","text":"from pyrogram.types import Message\r\nfrom pyrogram.emoji import BACKHAND_INDEX_POINTING_DOWN, MAGNIFYING_GLASS_TILTED_LEFT\r\nfrom pydrive2.drive import GoogleDrive\r\nfrom ...configs.texts import RESULT_IN_COUNT\r\n\r\nasync def drive_count(msg: Message, drive: GoogleDrive):\r\n \r\n try:\r\n message = await msg.reply(f\"Buscando archivo {MAGNIFYING_GLASS_TILTED_LEFT}...\")\r\n file = drive.CreateFile({'id': msg.text.split(' ')[1].split('/')[-2]})\r\n file.FetchMetadata()\r\n \r\n result = RESULT_IN_COUNT.format(\r\n \r\n file['title'], \r\n int(file['fileSize']) / 1000000, \r\n file['fileExtension'], \r\n file['mimeType'])\r\n\r\n await message.edit(result)\r\n except Exception as e:\r\n await msg.reply(f\"Error en el comando /count: {BACKHAND_INDEX_POINTING_DOWN}\\n\\n{e}\")","repo_name":"Tnoob-dev/GDrive-Project","sub_path":"src/plugins/drive_modules/count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"44177596294","text":"from .models import User\nfrom django.shortcuts import render, redirect\nfrom .forms import CustomUserCreationForm, CustomUserChangeForm\nfrom django.contrib.auth.forms import AuthenticationForm, PasswordChangeForm\nfrom django.contrib import messages\nfrom django.contrib.auth import (\n login as auth_login,\n logout as auth_logout,\n)\nfrom django.contrib.auth.decorators import login_required\nfrom articles.models import Team\nfrom django.http import JsonResponse, QueryDict\nfrom django.views.decorators.http import require_http_methods, require_POST, require_safe\n\ndef signup(request):\n teams = Team.objects.all()\n if request.method == \"POST\":\n form = CustomUserCreationForm(request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n user.team = 
Team.objects.get(pk=int(request.POST.get(\"team\")))\n user.save()\n auth_login(request, user)\n return redirect(\"articles:index\")\n else:\n form = CustomUserCreationForm()\n context = {\n \"form\": form,\n \"teams\":teams,\n }\n return render(request, \"accounts/signup.html\", context)\n\ndef login(request):\n if request.method == \"POST\":\n form = AuthenticationForm(request, data=request.POST)\n if form.is_valid():\n auth_login(request, form.get_user())\n messages.success(request, \"로그인 되었습니다.\")\n return redirect(\"articles:index\")\n else:\n form = AuthenticationForm()\n context = {\n \"form\": form,\n }\n return render(request, \"accounts/login.html\", context)\n\n@login_required\ndef logout(request):\n auth_logout(request)\n messages.success(request, \"로그아웃 되었습니다.\")\n return redirect(\"articles:index\")\n\ndef profile(request, pk):\n user = User.objects.get(pk=pk)\n if user.team :\n team = Team.objects.get(pk=user.team_id)\n context = {\n 'team': team,\n 'request_user': user,\n 'pk': pk,\n 'username': user.username,\n 'email': user.email,\n 'name': user.last_name,\n 'nickname': user.nickname,\n }\n else :\n context = {\n 'request_user': user,\n 'pk': pk,\n 'username': user.username,\n 'email': user.email,\n 'name': user.last_name,\n 'nickname': user.nickname,\n }\n return render(request, 'accounts/profile.html', context)\n\n@require_POST\ndef follow(request, pk):\n if request.user.is_authenticated:\n user = User.objects.get(pk=pk)\n if user != request.user:\n if user.followers.filter(pk=request.user.pk).exists():\n user.followers.remove(request.user)\n is_followed = False\n else:\n user.followers.add(request.user)\n is_followed = True\n follow_user = user.followers.filter(pk=request.user.pk)\n following_user = user.followings.filter(pk=request.user.pk)\n follow_user_list = []\n following_user_list = []\n for follow in follow_user:\n follow_user_list.append({'pk': follow.pk, 'nickname': follow.nickname, 'img': follow.team.logo.url})\n for following in following_user:\n following_user_list.append({'pk': following.pk, 'username': following.username,})\n context = {\n 'is_followed': is_followed,\n 'follow_user': follow_user_list,\n 'following_user': following_user_list,\n 'followers_count': user.followers.count(),\n 'followings_count': user.followings.count(),\n }\n return JsonResponse(context)\n return redirect('accounts:profile', user.pk)\n return redirect('accounts:login')\n\n@require_POST\ndef update(request):\n teams = Team.objects.all()\n user = User.objects.get(pk=request.user.pk)\n if request.method == 'POST':\n if request.user.is_authenticated:\n form = CustomUserChangeForm(data=request.POST, instance=request.user)\n if form.is_valid():\n user = form.save(commit=False)\n user.team = Team.objects.get(pk=int(request.POST.get(\"team\")))\n user.save()\n return redirect('accounts:profile', request.user.pk)\n else:\n form = CustomUserChangeForm(instance=user)\n context = {\n 'form': form,\n \"teams\":teams,\n }\n return render(request, 'accounts/update.html', context)\n\n@login_required\ndef password(request):\n if request.method == 'POST' :\n if request.user.is_authenticated:\n form = PasswordChangeForm(request.user, request.POST)\n if form.is_valid():\n user = form.save()\n auth_login(request, user)\n return redirect('accounts:profile', request.user.pk)\n else:\n form = PasswordChangeForm(request.user)\n context = {\n 'form': form,\n }\n return render(request, 'accounts/password.html', context)\n\n@login_required\ndef delete(request):\n request.user.delete()\n 
auth_logout(request)\n    return redirect('articles:index')","repo_name":"kmk4162/YammyChu","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5168,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"12713685719","text":"import math\nimport warnings\nfrom functools import partial\n\nimport pytorch_lightning as pl\nimport torch\nfrom torch import concat, nn, tensor\n\nfrom src.shared.evaluate import validate_batch_per_timestamp\nfrom src.shared.loss import (bce_loss, bpr_max_loss, calc_loss,\n                             sampled_softmax_loss)\n\n\ndef sparse_output(item_lookup, bias_lookup, output, items_to_predict):\n    embeddings = item_lookup(items_to_predict)\n    logits = torch.matmul(embeddings, output.t())\n    bias = bias_lookup(items_to_predict).squeeze(1)\n    return bias + logits.t()\n\n\ndef dense_output(linear_layer, output, items_to_predict):\n    return linear_layer(output)[:, items_to_predict.view(-1)]\n\n\ndef clean_state(curr_state, keep_state):\n    return curr_state * keep_state\n\nclass GRU4REC(pl.LightningModule):\n\n    def __init__(self,\n                 hidden_size,\n                 dropout_rate,\n                 num_items,\n                 batch_size,\n                 sampling_style=\"batchwise\",\n                 topk_sampling=False,\n                 topk_sampling_k=1000,\n                 learning_rate=0.001,\n                 num_layers=1,\n                 loss='bce',\n                 bpr_penalty=None,\n                 optimizer='adagrad',\n                 output_bias=False,\n                 share_embeddings=True,\n                 original_gru=False,\n                 final_activation=True):\n        super(GRU4REC, self).__init__()\n        self.num_items = num_items\n        self.learning_rate = learning_rate\n        self.hidden_size = hidden_size\n        self.num_layers = num_layers\n        self.dropout_hidden = dropout_rate\n        self.batch_size = batch_size\n        self.sampling_style = sampling_style\n        if sampling_style == \"eventwise\":\n            warnings.warn(\"Warning eventwise is not supported and is set to sessionwise ...\")\n            self.sampling_style = \"sessionwise\"\n        self.output_bias = output_bias\n        self.share_embeddings = share_embeddings\n        self.original_gru = original_gru\n\n        if original_gru:\n            warnings.warn(\"Warning gru original cannot share input and output embeddings, share embedding is set to False\")\n            self.share_embeddings = False\n\n        if output_bias and share_embeddings:\n            self.item_embedding = nn.Embedding(num_items + 1, hidden_size + 1, padding_idx=0)\n        elif self.original_gru:\n            self.item_embedding = nn.Embedding(num_items + 1, 3 * hidden_size, padding_idx=0)\n        else:\n            self.item_embedding = nn.Embedding(num_items + 1, hidden_size, padding_idx=0)\n\n        if share_embeddings:\n            self.output_embedding = self.item_embedding\n        elif (not share_embeddings) and output_bias:\n            self.output_embedding = nn.Embedding(num_items + 1, hidden_size + 1, padding_idx=0)\n        else:\n            self.output_embedding = nn.Embedding(num_items + 1, hidden_size, padding_idx=0)\n\n        torch.nn.init.xavier_uniform_(self.item_embedding.weight.data, gain=1 / math.sqrt(6))\n        torch.nn.init.xavier_uniform_(self.output_embedding.weight.data, gain=1 / math.sqrt(6))\n\n        self.gru = nn.GRU(int(3 * self.hidden_size) if self.original_gru else self.hidden_size,\n                          self.hidden_size,\n                          self.num_layers,\n                          dropout=self.dropout_hidden,\n                          batch_first=True)\n        if final_activation:\n            self.final_activation = nn.ELU(0.5)\n        else:\n            self.final_activation = nn.Identity()\n\n        if self.original_gru:\n            self.gru.weight_ih_l0 = nn.Parameter(data=torch.eye(3 * self.hidden_size), requires_grad=False)\n        self.register_buffer('current_state', torch.zeros([num_layers, batch_size, hidden_size], device=self.device))\n        self.register_buffer('loss_mask', torch.ones(1, 
self.batch_size, device=self.device))\n self.register_buffer('bias_ones', torch.ones([self.batch_size, 1, 1]))\n self.loss_fn = loss\n if self.loss_fn == 'bce':\n self.loss = bce_loss\n elif self.loss_fn == 'ssm':\n self.loss = sampled_softmax_loss\n elif self.loss_fn == 'bpr-max':\n if bpr_penalty is not None:\n self.loss = partial(bpr_max_loss, bpr_penalty)\n else:\n raise ValueError('bpr_penalty must be provided for bpr_max loss')\n else:\n raise ValueError('Loss function not supported')\n\n self.topk_sampling = topk_sampling\n self.topk_sampling_k = topk_sampling_k\n self.optimizer = optimizer\n self.save_hyperparameters()\n\n def forward(self, item_indices, in_state, keep_state):\n embedded = self.item_embedding(item_indices.unsqueeze(1))\n embedded = embedded[:, :, :-1] if self.output_bias and self.share_embeddings else embedded\n in_state = clean_state(in_state, keep_state)\n gru_output, out_state = self.gru(embedded, in_state)\n scores = concat([gru_output, self.bias_ones], dim=-1) if self.output_bias else gru_output\n return scores, out_state\n\n def training_step(self, batch, _):\n x_hat, c_state = self.forward(batch[\"clicks\"], self.current_state, batch[\"keep_state\"])\n\n self.current_state = c_state.detach()\n train_loss = calc_loss(self.loss, x_hat, batch[\"labels\"], batch[\"uniform_negatives\"], batch[\"in_batch_negatives\"],\n batch[\"mask\"], self.output_embedding, self.sampling_style, self.final_activation,\n self.topk_sampling, self.topk_sampling_k, self.device)\n\n self.log(\"train_loss\", train_loss)\n\n return train_loss\n\n def validation_step(self, batch, _batch_idx):\n x_hat, self.current_state = self.forward(batch[\"clicks\"], self.current_state, batch[\"keep_state\"])\n cut_offs = tensor([5, 10, 20], device=self.device)\n recall, mrr = validate_batch_per_timestamp(batch, x_hat, self.output_embedding, cut_offs)\n test_loss = calc_loss(self.loss, x_hat, batch[\"labels\"], batch[\"uniform_negatives\"], batch[\"in_batch_negatives\"],\n batch[\"mask\"], self.output_embedding, self.sampling_style, self.final_activation,\n self.topk_sampling, self.topk_sampling_k, self.device)\n for i, k in enumerate(cut_offs.tolist()):\n self.log(f'recall_cutoff_{k}', recall[i])\n self.log(f'mrr_cutoff_{k}', mrr[i])\n self.log('test_seq_len', x_hat.shape[1])\n self.log('test_loss', test_loss)\n\n def configure_optimizers(self):\n if self.optimizer == 'adam':\n optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)\n elif self.optimizer == 'adagrad':\n optimizer = torch.optim.Adagrad(self.parameters(), lr=self.learning_rate)\n else:\n raise ValueError('Optimizer not supported, please use adam or adagrad')\n return optimizer\n","repo_name":"otto-de/TRON","sub_path":"src/gru4rec/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6959,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"31"} +{"seq_id":"3151577686","text":"import copy\n\nlines = []\n\nf = open(\"C:\\\\Users\\craig\\OneDrive\\Documents\\AoC\\Day_13\\input.txt\", \"r\")\n\npos = 0\n\nfor line in f:\n lines.append(line[:-1])\nf.close()\n\ntime = int(lines[0])\nbusses = lines[1].split(\",\")\nbuss_copy = lines[1].split(\",\")\n\n# print(busses)\n# print(buss_copy)\n# print()\n\nfor bus in buss_copy:\n if bus == 'x':\n busses.remove('x')\n\n#print(busses)\n\nremainders = [] #Each entry (bus, time to wait)\n\nbest_bus = 0\nbest_time = time\nprint(time)\n\nfor bus in busses:\n print(bus)\n bus_const = int(bus)\n new_bus = int(bus)\n\n while new_bus <= 
time:\n new_bus += bus_const\n print(new_bus)\n\n #remainders.append((bus_const, new_bus % time)\n\n if new_bus % time < best_time:\n best_bus = bus_const\n best_time = new_bus % time\n\nprint(f\"Best Bus: {best_bus}\")\nprint(f\"Best Time: {best_time}\")\nprint()\nprint(f\"Total: {best_bus * best_time}\")\n\n","repo_name":"mcraig567/AoC","sub_path":"2020/Day_13/13-a.py","file_name":"13-a.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24723755705","text":"from datetime import date\nimport datetime\nimport time\nimport calendar\nimport os\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.common.exceptions import NoSuchElementException\n\nurl = \"https://campusrecshop.usf.edu/booking\"\n\nusername = '#your net Id'\nemail = '#your email'\npassword = '#your password'\n\n\n#Troubleshooter\ndef highlight(element, effect_time, color, border):\n #Highlights (blinks) a Selenium Webdriver element\n driver = element._parent\n def apply_style(s):\n driver.execute_script(\"arguments[0].setAttribute('style', arguments[1]);\",\n element, s)\n original_style = element.get_attribute('style')\n apply_style(\"border: {0}px solid {1};\".format(border, color))\n time.sleep(effect_time)\n apply_style(original_style)\n\ndef dateTester():\n #See what day it is, output as a word\n dateAll = date.today()\n dayPLZ = calendar.day_name[dateAll.weekday()]\n\n '''\n #See what day of week it is, output as a number 0-6, 0 = Monday \n dayPLZ = datetime.datetime.today().weekday()\n '''\n\n #print(dateAll)\n print(dayPLZ)\n\n if dayPLZ == 'Saturday' or dayPLZ == 'Wednesday':\n gDay = True\n else: \n gDay = False\n\n print(gDay)\n return gDay\n\ndef check_exists_by_id(id):\n try:\n browser.find_element_by_id(id)\n except NoSuchElementException:\n return False\n return True\n\n\n'''\ndef reservationBtns():\n browser = webdriver.Chrome(ChromeDriverManager().install())\n\n allBtns = browser.find_elements_by_xpath(\"//div[@class='btn btn-primary']\")\n\n for btn in allBtns:\n print(btn)\n\n return allBtns\n'''\n\nif __name__ == \"__main__\":\n\n gDay = dateTester()\n\n if gDay == True:\n browser = webdriver.Chrome(ChromeDriverManager().install())\n btnDiv = 'flex-center margin-top-24'\n\n browser.get(url)\n browser.maximize_window()\n\n browser.find_element_by_id('loginLink').click()\n\n time.sleep(1)\n \n browser.find_element_by_class_name(\"loginOption\").click()\n \n time.sleep(5)\n \n if browser.find_element_by_id(\"i0116\"):\n \n browser.find_element_by_id(\"i0116\").send_keys(email)\n browser.find_element_by_id(\"idSIButton9\").click()\n\n time.sleep(5)\n browser.find_element_by_id(\"i0118\").send_keys(password)\n browser.find_element_by_id(\"idSIButton9\").click()\n\n time.sleep(5)\n browser.find_element_by_id(\"idSIButton9\").click()\n\n elif browser.find_element_by_id(\"username\"):\n #Log in sequence\n browser.find_element_by_id(\"username\").send_keys(username)\n browser.find_element_by_id(\"password\").send_keys(password)\n browser.find_element_by_id(\"btn-submit\").click()\n \n\n #Click Rec link\n browser.find_element_by_class_name(\"container-image-link-item\").click()\n\n time.sleep(1)\n \n browser.find_element_by_css_selector(\".btn.btn-default.single-date-select-button.single-date-select-one-click\").click()\n\n time.sleep(1)\n\n tester = browser.find_element_by_css_selector(\".booking-slot-item-right.booking-slot-action-item\")\n\n #highlight(tester, 5, \"yellow\", 15)\n\n 
#tester.click()\n '''\n #Highlights the Div containing the button\n btnDiv = browser.find_element_by_xpath(\"//div[@class='booking-slot-item'][@data-slot-number='2']\")\n highlight(btnDiv, 2, \"blue\", 15)\n '''\n\n '''\n #Finds all Book Now buttons on the page\n btn = browser.find_elements_by_xpath(\"//button[@class='btn btn-primary']\")\n for btns in btn:\n print(btns)\n highlight(btns, 5, \"yellow\", 15)\n '''\n \n browser.find_elements_by_xpath(\"//button[@class='btn btn-primary']\")[1].click()\n\n #browser.close()\n\n else:\n print(\"No Gym Class tomorrow\")\n","repo_name":"XzavierMcK/Python","sub_path":"Auto Reserve Gym/GymReservation.py","file_name":"GymReservation.py","file_ext":"py","file_size_in_byte":3936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33400248483","text":"def area(b,h):\n '''Calculates area of triangle'''\n if h >=0 and b >=0:\n a = 0.5*b*h\n return a\n\n else:\n print (\"enter positive lengths\")\n return None\n\n\n\ndef db(b,h):\n '''Calculates double of triangle area'''\n if h >=0 and b >=0:\n b=area(b,h)*2\n return b\n\n else:\n print (\"double only positive lengths\")\n return None\n \n\n\ndef sq(b,c):\n '''Calculates area of square'''\n a=b*c\n return a\n\ndef sa(a,b):\n '''Calculates surface area of a cube'''\n s=6*sq(a,b)\n return s\n","repo_name":"ujs/Eulers","sub_path":"trinagle_area.py","file_name":"trinagle_area.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25174855251","text":"#coding: utf-8\r\nimport onec_dtools #pip install onec_dtools\r\nimport sys\r\nimport json\r\nimport os\r\nif type(sys.argv[1]) == type('str'):\r\n db_path = sys.argv[1]\r\nelse:\r\n db_path = sys.argv[1].decode(sys.getfilesystemencoding())\r\ntry:\r\n with open(db_path, 'rb') as f:\r\n db = onec_dtools.DatabaseReader(f)\r\n version = db.version\r\n tables_q = len(db.tables)\r\n data = []\r\n db_size = os.path.getsize(db_path)\r\n data.append({'{version}': version,\r\n '{tables_q}': tables_q,\r\n '{db_size}': db_size,\r\n })\r\n outtext = (json.dumps(data, ensure_ascii=False)).encode('utf8')\r\n sys.stdout.buffer.write(outtext)\r\nexcept Exception as e:\r\n print('Error')\r\n print(str(type(db_path)))\r\n print(e)\r\n","repo_name":"orange391224/zabbix-1CD","sub_path":"src/test1C.py","file_name":"test1C.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35674100186","text":"from data import Dataloader\nfrom model import CausalTransformer\nimport jax.numpy as jnp\nimport jax\n\nGPTConfig = {\n 'n_vocab': 66,\n 'block_size': 32,\n 'n_layer' : 3,\n 'n_head' : 8,\n 'd_model' : 768,\n 'shards': 2,\n 'devices': 4,\n 'batch_size_per_parallel': 256,\n 'ckpt_dir': 'test'}\n\n# A downside of using the more memory-efficient method of embedding sharding is that it requires equal shard size across devices\n# or a 'check which device I'm on, lookup desired shard size'. 
For the moment - easier to just have a few empty spots for tokens.\n\nassert GPTConfig['n_vocab'] % GPTConfig['shards'] == 0\n\n\nds = Dataloader(GPTConfig)\nmodel = CausalTransformer(GPTConfig)\n\n\nx,y = ds.next_batch() # [B,T], [B,T]\n\nwith jax.experimental.maps.mesh(*model.mesh_def):\n state = model.init(jnp.array(model.key.take(GPTConfig['shards'])), x)\n\n\n\nfrom tqdm import tqdm\n\nlosses = []\nwith jax.experimental.maps.mesh(*model.mesh_def):\n steps = [t for t in range(0, 10000)]\n pbar = tqdm(steps)\n for t in pbar:\n x,y = ds.next_batch()\n loss, state = model.train(state, x,y)\n if t % 100 == 0:\n pbar.set_description(f\"Loss: {loss.mean()}\")\n losses.append(loss.mean())\n\n# Non auto-regressive sampling (works faster so you can see if it's broadly making sense after 15 minutes)\nwith jax.experimental.maps.mesh(*model.mesh_def):\n x,y = ds.next_batch()\n y_pred = model.forward(state['params'], x)\n y_pred_logit = jnp.argmax(y_pred, -1)\n \n for i in range(0,100):\n print(''.join([ds.itos[c] for c in list(y_pred_logit[i])]))\n print('--------------------------')","repo_name":"sholtodouglas/scalingExperiments","sub_path":"minTransformerSharded/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"31"} +{"seq_id":"70473302809","text":"def solution(s):\n answer = []\n s = s[2:-2].split(\"},{\")\n\n s.sort(key = lambda x : len(x))\n #s.sort(key = len) # also works\n for i in s:\n a = i.split(\",\")\n for j in a:\n if int(j) not in answer:\n answer.append(int(j))\n return answer","repo_name":"jwh7027/problem-solving","sub_path":"Programmers/Level2/튜플.py","file_name":"튜플.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72569806808","text":"class centeredtext(object):\n \"\"\"\n centeredtext extends the object class and centers text within an object\n \"\"\"\n def __init__(self, text, x,y,w,h, pygame, fontsize, color=(0,0,0)):\n \"\"\"\n Construct a new centeredtext object.\n\n :param self: A reference to the centeredtext object itself\n :param text: The text to center within the given object\n :param x: The leftmost coordinate of the given object of which to center a text within\n :param y: The topmost coordinate of the given object of which to center a text within\n :param w: The width of the given object of which to center a text within\n :param h: The height of the given object of which to center a text within\n :param pygame: The pygame instance running\n :param fontsize: The fontsize of the text to center\n :param color: The color of the text to center\n :return: returns nothing\n \"\"\"\n self.pygame = pygame\n self.x, self.y, self.w, self.h = x,y,w,h\n self.pygame.font.init()\n font = self.pygame.font.SysFont(\"sans\", fontsize)\n width, height = font.size(text)\n xoffset = (self.w-width) // 2\n yoffset = (self.h-height) // 2\n self.coords = self.x+xoffset, self.y+yoffset\n self.txt = font.render(text, True, color)\n\n def draw(self, screen, bordercolor):\n \"\"\"\n Display the centered text object within the given object.\n\n :param self: A reference to the centeredtext object itself\n :param screen: The screen where to display the text\n :param bordercolor: The border color of the testing rect\n :return: returns nothing\n \"\"\"\n screen.blit(self.txt, self.coords)\n # for testing purposes, draw the rectangle too\n rect = self.pygame.Rect(self.x, self.y, self.w, 
self.h)\n self.pygame.draw.rect(screen, bordercolor, rect, 1)","repo_name":"CristofferJakobsson/sepm-team-a","sub_path":"src/user_interface/centeredtext.py","file_name":"centeredtext.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20844229635","text":"# import pandas and matplotlib\nimport pandas as pd\nimport matplotlib.pyplot as plt\n# read the csv file to a DataFrame\nnetflix_df = pd.read_csv('netflix/netflix_data.csv')\n# print the first 5 rows and the columns' info to understand the dataframe\nprint(netflix_df[0:5])\nprint(netflix_df.info())\n# filter the dataframe for movies\nnetflix_df_moviesonly = netflix_df[netflix_df['type'] == 'Movie']\n# select columns of interest\nnetflix_movies_col_subset = netflix_df_moviesonly.iloc[:, [1, 2, 5, 10, 7, 8]]\n# for the plot, save x and y axes columns\nyears = netflix_movies_col_subset['release_year']\ndurations = netflix_movies_col_subset['duration']\n# plot the chart\nplt.scatter(years, durations)\n# label the axes\nplt.xlabel('Release Year', fontsize=14)\nplt.ylabel('Duration (min)', fontsize=14)\n# title the plot\nplt.title('Movie Duration by Year of Release', fontsize=20)\nplt.savefig('movie_average.png')\n# filter movie genres of less than 60 min play\nshort_movies = netflix_movies_col_subset[netflix_movies_col_subset['duration'] < 60]\n# print first few rows of short_movies\nprint(f\"\\n{short_movies.head(30)}\")\n# initialize an empty list of colors\ncolors = []\n# iterate through the dataset\nfor lab, row in netflix_movies_col_subset.iterrows():\n if row['genre'] == 'Children':\n colors.append('red')\n elif row['genre'] == 'Documentaries':\n colors.append('blue')\n elif row['genre'] == 'Stand-Up':\n colors.append('green')\n else:\n colors.append('black')\n# plot the chart again\nplt.scatter(years, durations, c=colors)\n# label the axes\nplt.xlabel('Release year', fontsize=14)\nplt.ylabel('Duration (min)', fontsize=14)\nplt.title('Movie duration by year of release')\nplt.savefig('movie_average(1).png')\n# Is it certain that movies are getting shorter?\nare_movies_getting_shorter = 'maybe'\n","repo_name":"Vickythedeveloper/datacamp_project1","sub_path":"netflix/netflix_movies_avgdur.py","file_name":"netflix_movies_avgdur.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26894287202","text":"# www.plus2net.com\n# download updated script at https://www.plus2net.com/python/tkinter-sqlite-insert.php\nimport sqlite3\n\nmy_conn = sqlite3.connect('my_db.db')\n# print(\"Opened database successfully\");\n\nimport tkinter as tk\nfrom tkinter import *\n\nmy_w = tk.Tk()\nmy_w.geometry(\"400x250\")\nmy_w.title(\"www.plus2net.com\")\n# add one Label\nl0 = tk.Label(my_w, text='Add Student',\n font=('Helvetica', 16), width=30, anchor=\"c\")\nl0.grid(row=1, column=1, columnspan=4)\n\nl1 = tk.Label(my_w, text='Name: ', width=10, anchor=\"c\")\nl1.grid(row=3, column=1)\n\n# add one text box\nt1 = tk.Text(my_w, height=1, width=10, bg='white')\nt1.grid(row=3, column=2)\n\nl2 = tk.Label(my_w, text='Class: ', width=10)\nl2.grid(row=4, column=1)\n\n# add list box for selection of class\noptions = StringVar(my_w)\noptions.set(\"\") # default value\n\nopt1 = OptionMenu(my_w, options, \"Three\", \"Four\", \"Five\")\nopt1.grid(row=4, column=2)\n\nl3 = tk.Label(my_w, text='Mark: ', width=10)\nl3.grid(row=5, column=1)\n\n# add one text box\nt3 = 
tk.Text(my_w, height=1, width=4, bg='white')\nt3.grid(row=5, column=2)\n\nradio_v = tk.StringVar()\nradio_v.set('Female')\nr1 = tk.Radiobutton(my_w, text='Male', variable=radio_v, value='Male')\nr1.grid(row=6, column=2)\n\nr2 = tk.Radiobutton(my_w, text='Female', variable=radio_v, value='Female')\nr2.grid(row=6, column=3)\n\nb1 = tk.Button(my_w, text='Add Record', width=10,\n command=lambda: add_data())\nb1.grid(row=7, column=2)\nmy_str = tk.StringVar()\nl5 = tk.Label(my_w, textvariable=my_str, width=10)\nl5.grid(row=3, column=3)\nmy_str.set(\"Output\")\n\n\ndef add_data():\n flag_validation = True # set the flag\n my_name = t1.get(\"1.0\", END) # read name\n my_class = options.get() # read class\n my_mark = t3.get(\"1.0\", END) # read mark\n my_gender = radio_v.get() # read gender\n\n # length of my_name , my_class and my_gender more than 2\n if (len(my_name) < 2 or len(my_class) < 2 or len(my_gender) < 2):\n flag_validation = False\n try:\n val = int(my_mark) # checking mark as integer\n except:\n flag_validation = False\n\n if (flag_validation):\n my_str.set(\"Adding data...\")\n try:\n\n # print(\"Connected to database successfully\")\n\n my_data = (None, my_name, my_class, my_mark, my_gender)\n my_query = \"INSERT INTO student values(?,?,?,?,?)\"\n my_conn.execute(my_query, my_data)\n my_conn.commit()\n x = my_conn.execute('''select last_insert_rowid()''')\n id = x.fetchone()\n l5.grid()\n l5.config(fg='green') # foreground color\n l5.config(bg='white') # background color\n my_str.set(\"ID:\" + str(id[0]))\n l5.after(3000, lambda: l5.grid_remove())\n t1.delete('1.0', END) # reset the text entry box\n t3.delete('1.0', END) # reset the text entry box\n\n except sqlite3.Error as my_error:\n l5.grid()\n # return error\n l5.config(fg='red') # foreground color\n l5.config(bg='yellow') # background color\n print(my_error)\n my_str.set(my_error)\n\n else:\n l5.grid()\n l5.config(fg='red') # foreground color\n l5.config(bg='yellow') # background color\n my_str.set(\"check inputs.\")\n l5.after(3000, lambda: l5.grid_remove())\n\n\nmy_w.mainloop()\nmy_conn.close()","repo_name":"Wachu75/pyfestival","sub_path":"tydzien-5/tkinter_sql_01.py","file_name":"tkinter_sql_01.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18856045257","text":"from drawman import *\nfrom math import *\n\ndef two_point_lists(A, B):\n for x1, y1 in A:\n for x2, y2 in B:\n penup()\n to_point(x1, y1)\n pendown()\n to_point(x2, y2)\n\ndef generate_circle_point_list(R, N):\n \"\"\" generates and returns a list of N points lying on a circle\n of radius R \"\"\"\n A = []\n for i in range(N):\n alpha = radians(i*360/N)\n x = R*cos(alpha)\n y = R*sin(alpha)\n A.append((x, y))\n return A\n\ndef main():\n init_drawman()\n \n color('blue')\n width(1)\n C = generate_circle_point_list(8, 30)\n two_point_lists(C, C)\n \n #color('red')\n #A = [(7, 5), (3, 5), (5, 5)]\n #B = [(6, 1), (4, 1)]\n #two_point_lists(A, B)\n\nmain()\n","repo_name":"tkhirianov/fox_python_2016","sub_path":"lesson_12/brute_force.py","file_name":"brute_force.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"71398852567","text":"# coding=utf8\n\nimport time\nimport json\nfrom datetime import datetime\nfrom flask import current_app, flash, jsonify, request\nfrom jinja2 import Markup\nfrom flask_admin import expose\nfrom flask_admin.actions import action\n# 
from flask_admin.form import rules\nfrom yunduo.conf import xconf\n# from yunduo.utils import parse_rate\nfrom xadmin.view.base import BaseView\n# from xadmin.base.rules import Row, Column\nfrom xadmin.utils.format import date_format, map_format\nfrom xadmin.helpers import set_current_project\nfrom xadmin.constant import STATUS_ENABLE, STATUS_DISABLE\n# from xadmin.rabbitq import get_queues\n\n# from connections import redis_conf, redis_df\n# from xspider.app import app as celery_app\n# from xspider.tasks import crawl\n\n\nclass BlockView(BaseView):\n inject_current_project = True\n can_view_details = False\n # details_modal = True\n # details_modal_template = 'admin/model/modals/project_details.html'\n\n column_list = ['name', 'project', 'owner', 'created']\n column_filters = ['name']\n\n column_labels = {\n 'name': u'名称',\n 'alias': u'别名',\n 'type': u'类型',\n 'status': u'状态',\n 'owner': u'创建者',\n\n 'created': u'新增时间',\n 'updated': u'更新时间',\n 'published': u'发布时间'\n\n }\n\n column_searchable_list = ('name', )\n column_formatters = {\n # 'name': _project_pages_index,\n # 'status': map_format({START: u'启用', PAUSE: u'暂停', STOP: u'停用'}),\n 'created': date_format,\n 'updated': date_format,\n 'published': date_format,\n }\n\n form_subdocuments = {\n 'rules': {\n 'form_subdocuments': {\n None: {\n 'form_choices': {\n 'field': [('dlcount', '下载数'), ('rule', u'规则抽取'), ('code', u'Py代码抽取')],\n 'window': [(60, '1分钟'), (300, '5分钟'), (900, '15分钟'), (1800, '30分钟'), (3600, '60分钟')],\n\n }\n }\n }\n }\n }\n\n # form_rules = ['name', 'alias']\n\n form_widget_args = {\n\n }\n\n def enable_blocked(self, obj):\n old_blocked = xconf.get_blocked(obj.project.alias)\n\n xconf.set_blocked(obj.project.alias, obj.alias, obj.to_conf())\n obj.status = STATUS_ENABLE\n obj.published = datetime.now()\n obj.save()\n self.logger.info(u'启用屏蔽检测策略 %s', obj.alias, extra={'project': obj.project.alias, 'blocked': obj.alias})\n return obj\n\n def disable_blocked(self, obj):\n xconf.del_blocked(obj.project.alias, obj.alias)\n obj.status = STATUS_DISABLE\n obj.save()\n self.logger.info(u'停用屏蔽检测策略 %s', obj.alias, extra={'project': obj.project.alias, 'blocked': obj.alias})\n return obj\n","repo_name":"icaicai/yunduo","sub_path":"app/xadmin/view/block.py","file_name":"block.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"7469813251","text":"import itertools\nimport operator\nimport copy\n \n\nfoster_pets = ['Squeak', 'Siracha', 'Tilly', 'Andy']\n#2020 pets fostered from SPCA\n\nfoster_mouse = copy.deepcopy(foster_pets) \n#Deep copy to ensure changes to foster_pets does not result in changes to the deep copies\n\nfoster_pets[0]=('Mary')\n#Changed the name 'Squeak' to 'Mary' in the foster_pets list\n\nprint(foster_pets)\nprint(foster_mouse) \n\nx=foster_mouse \n#Test to ensure deep copy properties\nprint(x)","repo_name":"aporcelli-LIS/Python","sub_path":"Mod_8_DeepCopy.py","file_name":"Mod_8_DeepCopy.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16029678397","text":"from paynechain.block import BlockChain\nfrom pathlib import Path\nimport pickle\n\nbc_file = Path('blockchain')\nif bc_file.is_file():\n with open('blockchain', 'rb') as bc_handle:\n bc = pickle.load(bc_handle)\nelse:\n bc = BlockChain()\n bc.make_genesis_block()\n\ndata = input(\"Add some data to le blockchain: \")\n\nbc.make_next_block(data)\n\nwith 
open('blockchain', 'wb') as bc_write_handle:\n pickle.dump(bc, bc_write_handle)","repo_name":"rollinginsanity/paynechayne","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"797693188","text":"\n# coding: utf-8\n\n# # Data visualisation Project - Visualisation 1\n\n# Now, that we have cleaned data we can use it for creating visualisations and finding interesting things from it.\n\n# Importing necessary libraries and the data set\n\n# In[4]:\n\n\nimport pandas as pd\nimport numpy as np\n\n\n# In[5]:\n\n\nfrom bokeh.plotting import figure , show\nfrom bokeh.layouts import layout, widgetbox, row\nfrom bokeh.models import ColumnDataSource, Div, HoverTool, Legend\nfrom bokeh.models.widgets import Slider, Select\nfrom bokeh.io import curdoc, output_notebook\n\n\n# In[6]:\n\n\nfrom bokeh.transform import factor_cmap\nfrom bokeh.palettes import Colorblind\n\n\n# In[5]:\n\n\n#output_notebook()\n\n\n# In[6]:\n\n\ndataset_names = pd.read_csv('I:/DCU/SEM1/Lectures/Data Management & Visualisation - CA682 Suzanne Little/project/imdb_project/cleaned data/names.csv',index_col= 'nconst')\n\n\n# In[7]:\n\n\ndataset_titles_ratings = pd.read_csv('I:/DCU/SEM1/Lectures/Data Management & Visualisation - CA682 Suzanne Little/project/imdb_project/cleaned data/titles_basic_rating.csv',index_col= 'tconst')\n\n\n# In[8]:\n\n\ndataset_titles_prin = pd.read_csv('I:/DCU/SEM1/Lectures/Data Management & Visualisation - CA682 Suzanne Little/project/imdb_project/cleaned data/titles_principle.csv',index_col= 'tconst')\n\n\n# In[295]:\n\n\n#dataset_titles_prin.head()\n\n\n# In[296]:\n\n\n#dataset_titles_ratings.head()\n\n\n# In[297]:\n\n\n#dataset_names.head()\n\n\n# In[298]:\n\n\n#dataset_titles_ratings.describe(include = 'all')\n\n\n# We want to concentrate on Comedy, Romance, Drama, Horror and Action genres for our visualisations\n\n# In[1]:\n\n\ngenres = ['Comedy','Romance', 'Drama','Horror','Action']\n\n\n# In[10]:\n\n\n#genres\n\n\n# Creating a source data for our visualisation.\n\n# In[7]:\n\n\nsource = ColumnDataSource(data=dict(x=[], y=[], genre=[], title=[], year=[]))\n\n\n# Defining the parameters for the visualisation.\n\n# In[8]:\n\n\np = figure(plot_height=650, plot_width=700, title=\"\" , toolbar_location=None, x_range=(10,100000),y_range=(0,11),x_axis_type=\"log\")\n\n\n# In[9]:\n\n\nc = p.circle(x=\"x\", y=\"y\", source=source, size=7, color=factor_cmap('genre', palette=Colorblind[5], factors=genres), legend='genre')\n\n\n# In[51]:\n\n\np.add_tools(HoverTool( tooltips=[\n (\"Title\", \"@title\"),\n (\"Genre\", \"@genre\"),\n ]\n ))\np.legend.location = \"top_right\"\np.legend.orientation = \"horizontal\"\np.xaxis.axis_label_text_font_size =\"16pt\"\np.yaxis.axis_label_text_font_size= \"16pt\"\np.title.text_font_size = '20pt'\np.title.align = \"center\"\np.title.offset =10\n\n\n# In[52]:\n\n\np.xaxis.axis_label = 'Number of Votes'\np.yaxis.axis_label = 'Average Rating'\n\n\n# Defining the call back function, to be called on change of slider and drop down value\n\n# In[53]:\n\n\ndef callback(attr, old, new):\n print('inside callback')\n update(num=min_year.value,genre_sel=genre_select.value)\n\n\n# In[54]:\n\n\ngenres_sel_list = genres.append('All')\nmin_year = Slider(title=\"Year\", start=1950, end=2018, value=2015, step=1)\nmin_year.on_change('value',callback)\ngenre_select=Select(title=\"Genre\", value=\"All\", 
options=genres)\ngenre_select.on_change('value',callback)\n\n\n# In[55]:\n\n\ndef select_movies(num,genre_sel):\n selected = dataset_titles_ratings[\n (dataset_titles_ratings.startYear == num)\n & (dataset_titles_ratings.titleType == 'movie') & \n (dataset_titles_ratings.genre1 != '\\\\N')\n &((dataset_titles_ratings.genre1 == 'Comedy') | (dataset_titles_ratings.genre1 == 'Romance') |\n (dataset_titles_ratings.genre1 == 'Action') | (dataset_titles_ratings.genre1 == 'Horror') |\n (dataset_titles_ratings.genre1 == 'Drama'))]\n if (genre_sel != \"All\"):\n selected = selected[selected.genre1.str.contains(genre_sel)==True]\n return selected\n\n\n# In[56]:\n\n\ndef update(num,genre_sel):\n df = select_movies(num,genre_sel)\n p.title.text = str(len(df)) + \" movies selected for year \" + str(min_year.value) \n source.data = dict(\n x=df['numVotes'],\n y=df['averageRating'],\n genre=df[\"genre1\"],\n title=df.originalTitle,\n year=df[\"startYear\"])\n return (source.data)\n \n\n\n# In[57]:\n\n\nsizing_mode = 'fixed'\ninputs = widgetbox(min_year,genre_select, sizing_mode=sizing_mode)\nlayout = row(p,inputs)\n\n\n# In[58]:\n\n\nnew_value = update(min_year.value,genre_select.value)\n\n\n# In[59]:\n\n\ncurdoc().add_root(layout)\ncurdoc().title = \"Movies\"\n\n\n# In[60]:\n\n\n#show(layout)\n\n","repo_name":"guptaa3/IMDB_data_visulisation","sub_path":"Python files/Data_visualisation_project_visual_1.py","file_name":"Data_visualisation_project_visual_1.py","file_ext":"py","file_size_in_byte":4326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4687325756","text":"# modified by Sherif Abdelkarim on Jan 2020\n\nimport numpy as np\nfrom numpy import linalg as la\nimport math\nimport logging\nimport json\n\nimport torch\nfrom torch import nn\nfrom torch.nn import init\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport nn as mynn\n\nfrom core.config import cfg\nimport utils.net as net_utils\nfrom modeling.sparse_targets_rel import FrequencyBias\nfrom utils import focal_loss\nfrom .transformer import LayerNorm, Conv1D_, gelu\nimport copy\nfrom .image_encoder import MemoryAugmentedEncoder\nfrom .attention import ScaledDotProductAttentionMemory\n\n\nlogger = logging.getLogger(__name__)\n\n\n\nclass Attention(nn.Module):\n def __init__(self, n_state=768, n_head=12, n_emb=768):\n super(Attention, self).__init__()\n self.n_head = n_head\n self.n_emb = n_emb\n self.c_attn = Conv1D_(n_state * 3, n_state)\n self.c_proj = Conv1D_(n_state, n_state)\n self.split_size = n_state\n \n self.m = 100\n\n self.memory_features = nn.Parameter(torch.FloatTensor(1, self.m, n_state))\n self.mem_attn = Conv1D_(n_state * 2, n_state)\n self.alpha = nn.Linear( n_state + n_state , n_state)\n\n\n self.attn_pdrop = nn.Dropout(0.1)\n\n def _attn(self, q, k, v):\n w = torch.matmul(q, k)\n\n w = nn.Softmax(dim=-1)(w)\n self.w = self.attn_pdrop(w)\n\n return w, torch.matmul(w, v)\n\n def merge_heads(self, x):\n x = x.permute(0, 2, 1, 3).contiguous()\n new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)\n return x.view(*new_x_shape)\n\n def split_heads(self, x, k=False):\n new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)\n x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states\n if k:\n return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)\n else:\n return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)\n\n def forward(self, x):\n\n x1 = self.c_attn(x)\n query, key, value = 
x1.split(self.split_size, dim=2)\n\n b_s , nq = query.shape[:2]\n\n\n query = self.split_heads(query)\n key = self.split_heads(key, k=True)\n value = self.split_heads(value)\n\n\n _,a = self._attn(query, key, value)\n a = self.merge_heads(a)\n\n memory = self.memory_features.expand(b_s, self.m, self.split_size)\n\n memory = self.mem_attn(memory)\n memory_key , memory_value = memory.split(self.split_size,dim=2)\n\n\n m_update_key = self.split_heads(memory_key, k=True)\n m_update_value = self.split_heads(memory_value)\n\n\n _, a1 = self._attn(query, m_update_key, m_update_value)\n a1 = self.merge_heads(a1)\n\n alpha = torch.sigmoid(self.alpha(torch.cat([a, a1],-1)))\n\n\n a = alpha * a + (1-alpha)*a1\n \n\n a = self.c_proj(a)\n return a\n\nclass Enc_Dec_Attention(nn.Module):\n def __init__(self, n_state=768, n_head =12, n_emb = 768):\n super(Enc_Dec_Attention,self).__init__()\n self.n_head = n_head\n self.n_emb = n_emb\n self.c_attn = Conv1D_(n_state * 3, n_state)\n self.c_proj = Conv1D_(n_state , n_state)\n\n self.fc_q = nn.Linear(n_state, n_emb)\n self.fc_k = nn.Linear(n_state, n_emb)\n self.fc_v = nn.Linear(n_state, n_emb)\n\n self.attn_dropout = nn.Dropout(0.2)\n self.init_weights()\n \n def init_weights(self):\n\n nn.init.xavier_uniform_(self.fc_q.weight)\n nn.init.xavier_uniform_(self.fc_k.weight)\n nn.init.xavier_uniform_(self.fc_v.weight)\n\n nn.init.constant_(self.fc_q.bias, 0)\n nn.init.constant_(self.fc_k.bias, 0)\n nn.init.constant_(self.fc_v.bias, 0)\n\n\n \n def _attn(self, q, k, v , enc_dec_attention):\n\n nk = k.shape[-1]\n w = torch.matmul(q,k)\n\n w = w / math.sqrt(v.size(-1))\n\n nd, ns = w.size(-2), w.size(-1)\n\n # b = self.bias[-2], w.size(-1)\n\n if enc_dec_attention is not None:\n w = w.masked_fill(enc_dec_attention, -10000.0)\n\n w = nn.Softmax(dim=-1)(w)\n w = self.attn_dropout(w)\n return torch.matmul(w, v)\n\n\n\n def merge_heads(self, x):\n x = x.permute(0, 2, 1, 3).contiguous()\n new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)\n return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states\n\n\n def split_heads(self, x, k=False):\n new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)\n x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states\n if k:\n return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)\n else:\n return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features) \n\n\n def forward(self, x, encoder_output=None, mask_encoder=None):\n\n query = self.fc_q(x)\n encoder_key = self.fc_k(encoder_output)\n encoder_value = self.fc_v(encoder_output)\n query = self.split_heads(query)\n encoder_key = self.split_heads(encoder_key, k=True)\n encoder_value = self.split_heads(encoder_value)\n\n\n a = self._attn(query, encoder_key,encoder_value,mask_encoder)\n a = self.merge_heads(a)\n a = self.c_proj(a)\n\n\n return a\n\n\n\n\nclass MLP(nn.Module):\n def __init__(self, n_state, n_emb): # in MLP: n_state=3072 (4 * n_embd)\n super(MLP, self).__init__()\n nx = n_emb\n self.c_fc = Conv1D_(n_state, nx)\n self.c_proj = Conv1D_(nx, n_state)\n self.act = gelu\n\n def forward(self, x):\n h = self.act(self.c_fc(x))\n h2 = self.c_proj(h)\n return h2\n\n\nclass Block(nn.Module):\n def __init__(self, n_state, n_head, n_emb):\n super(Block, self).__init__()\n self.n_state = n_state\n self.n_head = n_head\n self.n_emb = n_emb\n\n self.ln_1 = LayerNorm(n_emb, eps=1e-5)\n self.attn = Attention(n_state, n_head, n_emb)\n self.ln_2 = LayerNorm(n_emb, eps=1e-5)\n self.mlp = MLP(4 * n_state, n_emb)\n self.resid_pdrop 
= nn.Dropout(0.1)\n\n\n self.enc_dec_attn = Enc_Dec_Attention(n_state, n_head, n_emb)\n self.fc_alpha1 = nn.Linear(n_state + n_state, n_state)\n self.fc_alpha2 = nn.Linear(n_state+ n_state, n_state)\n\n\n\n def forward(self, x, encoder_features, mask_encoder):\n\n\n #[25, 17, 768]) x shape\n #torch.Size([25, 3, 50, 768]) encoder output shape\n #torch.Size([25, 1, 1, 50]) mask encoder shape\n #torch.Size([25, 17, 768]) a shape\n\n \n self_attention = self.attn(self.ln_1(x))\n a = x + self_attention\n\n a = self.resid_pdrop(a)\n\n\n enc_att1 = self.enc_dec_attn(x = self.ln_1(a), encoder_output = self.ln_1(encoder_features[:,0]), mask_encoder = mask_encoder)\n enc_att2 = self.enc_dec_attn(x = self.ln_1(a), encoder_output = self.ln_1(encoder_features[:,1]), mask_encoder = mask_encoder)\n\n alpha1 = torch.sigmoid(self.fc_alpha1(torch.cat([a, enc_att1],-1)))\n alpha2 = torch.sigmoid(self.fc_alpha2(torch.cat([a, enc_att2],-1)))\n\n enc_att1 = alpha1 * a + (1-alpha1) * enc_att1\n enc_att2 = alpha2 * a + (1-alpha2) * enc_att2\n\n \n\n a = (enc_att1 + enc_att2 )/ np.sqrt(2)\n\n m = self.mlp(self.ln_2(a))\n\n output = a + m\n output = self.resid_pdrop(output)\n\n return output\n\n\nclass MultiHeadModel(nn.Module):\n def __init__(self, n_layer, n_state, n_head, n_embd):\n super(MultiHeadModel, self).__init__()\n self.n_layer = n_layer\n self.n_state = n_state\n self.n_head = n_head\n self.n_embd = n_embd\n\n self.language_fc = nn.Linear(300, n_embd)\n self.visual_fc = nn.Linear(1024, n_embd)\n\n self.wpe = nn.Embedding(5, n_embd)\n self.wte = nn.Embedding(5, n_embd)\n block = Block(n_state, n_head, n_embd)\n self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(n_layer)])\n\n self.dropout = nn.Dropout(0.1)\n\n self.linear_projection = nn.Linear(n_embd, 1024)\n self.layer_norm = nn.LayerNorm(1024, 1e-5)\n\n def data_transformation(self, sub_label, obj_label, sub_visual, obj_visual, label_visual):\n # print(\"before\")\n # print(sub_label.shape, obj_label.shape, sub_visual.shape, obj_visual.shape, label_visual.shape)\n sub_label = self.language_fc(sub_label)\n sub_label = sub_label.reshape(-1, 1, self.n_embd)\n obj_label = self.language_fc(obj_label)\n obj_label = obj_label.reshape(-1, 1, self.n_embd)\n\n sub_visual = self.visual_fc(sub_visual)\n sub_visual = sub_visual.reshape(-1, 1, self.n_embd)\n obj_visual = self.visual_fc(obj_visual)\n obj_visual = obj_visual.reshape(-1, 1, self.n_embd)\n label_visual = self.visual_fc(label_visual)\n label_visual = label_visual.reshape(-1, 1, self.n_embd)\n try:\n input_ids = torch.cat([sub_label, obj_label, sub_visual, obj_visual, label_visual], -2)\n except:\n print(sub_label.shape)\n print(obj_label.shape)\n print(sub_visual.shape)\n print(obj_visual.shape)\n print(label_visual.shape)\n\n position_ids = torch.arange(5, dtype=torch.long, device=sub_label.device)\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids[:, :, 0])\n\n position_ids = self.wpe(position_ids)\n\n type_ids = torch.tensor([0, 0, 1, 1, 1], dtype=torch.long, device=sub_label.device)\n type_ids = type_ids.unsqueeze(0).expand_as(input_ids[:, :, 0])\n type_ids = self.wte(type_ids)\n\n input_ids = input_ids + position_ids + type_ids\n return input_ids\n\n\n\n\n\n def data_transformation_only_visual(self, sub_visual, obj_visual, label_visual):\n # print(\"before\")\n # print(sub_label.shape, obj_label.shape, sub_visual.shape, obj_visual.shape, label_visual.shape)\n\n\n sub_visual = self.visual_fc(sub_visual)\n sub_visual = sub_visual.reshape(-1, 1, self.n_embd)\n obj_visual = 
self.visual_fc(obj_visual)\n obj_visual = obj_visual.reshape(-1, 1, self.n_embd)\n label_visual = self.visual_fc(label_visual)\n label_visual = label_visual.reshape(-1, 1, self.n_embd)\n try:\n input_ids = torch.cat([ sub_visual, obj_visual, label_visual], -2)\n except:\n\n print(sub_visual.shape)\n print(obj_visual.shape)\n print(label_visual.shape)\n\n position_ids = torch.arange(3, dtype=torch.long, device=sub_visual.device)\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids[:, :, 0])\n\n position_ids = self.wpe(position_ids)\n\n type_ids = torch.tensor([ 1, 1, 1], dtype=torch.long, device=sub_visual.device)\n type_ids = type_ids.unsqueeze(0).expand_as(input_ids[:, :, 0])\n type_ids = self.wte(type_ids)\n\n input_ids = input_ids + position_ids + type_ids\n return input_ids\n\n\n\n def forward(self, sub_label, obj_label, sub_visual, obj_visual, label_visual, encoder_features, encoder_mask):\n if sub_label is None:\n hidden_states = self.data_transformation_only_visual(sub_visual, obj_visual, label_visual)\n \n \n\n for block in self.h:\n hidden_states = block(hidden_states, encoder_features, encoder_mask)\n\n hidden_states = self.linear_projection(hidden_states)\n\n hidden_states = self.layer_norm(hidden_states)\n\n\n\n return hidden_states[:, 0, :], hidden_states[:, 1, :], hidden_states[:, 2, :]\n\n\n\n else:\n hidden_states = self.data_transformation(sub_label, obj_label, sub_visual, obj_visual, label_visual)\n\n \n for block in self.h: \n hidden_states = block(hidden_states)\n\n hidden_states = self.linear_projection(hidden_states)\n\n hidden_states = self.layer_norm(hidden_states)\n\n\n return hidden_states[:, 2, :], hidden_states[:, 3, :], hidden_states[:, 4, :]\n\n\n\n\n\n\n\n\n\n\nclass reldn_head(nn.Module):\n def __init__(self, dim_in, all_obj_vecs=None, all_prd_vecs=None):\n super().__init__()\n\n num_prd_classes = cfg.MODEL.NUM_PRD_CLASSES + 1\n\n if cfg.MODEL.RUN_BASELINE:\n # only run it on testing mode\n self.freq_bias = FrequencyBias(cfg.TEST.DATASETS[0])\n return\n\n ### what are these all obj vecs\n self.obj_vecs = all_obj_vecs\n self.prd_vecs = all_prd_vecs\n\n # add subnet\n self.prd_feats = nn.Sequential(\n nn.Linear(dim_in, 1024),\n nn.LeakyReLU(0.1))\n self.prd_vis_embeddings = nn.Sequential(\n nn.Linear(1024 * 3, 1024),\n nn.LeakyReLU(0.1),\n nn.Linear(1024, 1024))\n # if not cfg.MODEL.USE_SEM_CONCAT:\n # self.prd_sem_embeddings = nn.Sequential(\n # nn.Linear(cfg.MODEL.INPUT_LANG_EMBEDDING_DIM, 1024),\n # nn.LeakyReLU(0.1),\n # nn.Linear(1024, 1024))\n # else:\n # self.prd_sem_hidden = nn.Sequential(\n # nn.Linear(cfg.MODEL.INPUT_LANG_EMBEDDING_DIM, 1024),\n # nn.LeakyReLU(0.1),\n # nn.Linear(1024, 1024))\n # self.prd_sem_embeddings = nn.Linear(3 * 1024, 1024)\n\n self.prd_sem_embeddings = nn.Sequential(\n nn.Linear(cfg.MODEL.INPUT_LANG_EMBEDDING_DIM, 1024),\n nn.LeakyReLU(0.1),\n nn.Linear(1024, 1024))\n\n self.so_vis_embeddings = nn.Linear(dim_in // 3, 1024)\n self.so_sem_embeddings = nn.Sequential(\n nn.Linear(cfg.MODEL.INPUT_LANG_EMBEDDING_DIM, 1024),\n nn.LeakyReLU(0.1),\n nn.Linear(1024, 1024))\n\n if cfg.MODEL.USE_FREQ_BIAS:\n # Assume we are training/testing on only one dataset\n if len(cfg.TRAIN.DATASETS):\n self.freq_bias = FrequencyBias(cfg.TRAIN.DATASETS[0])\n else:\n self.freq_bias = FrequencyBias(cfg.TEST.DATASETS[0])\n\n self.multi_head_attention = MultiHeadModel(int(cfg.MODEL.ENCODER_LAYER), 768, 12, 768)\n\n self.image_encoder = MemoryAugmentedEncoder(2, 0, attention_module=ScaledDotProductAttentionMemory,\n 
attention_module_kwargs={'m': 0})\n\n self._init_weights()\n\n def _init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n mynn.init.XavierFill(m.weight)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n for p in self.multi_head_attention.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n \n for p in self.image_encoder.parameters():\n if p.dim() >1:\n nn.init.xavier_uniform_(p)\n\n def forward(self, spo_feat, sbj_labels=None, obj_labels=None, sbj_feat=None, obj_feat=None,all_unique_features = None):\n\n\n \n\n device_id = spo_feat.get_device()\n if sbj_labels is not None:\n sbj_labels = Variable(torch.from_numpy(sbj_labels.astype('int64'))).cuda(device_id)\n if obj_labels is not None:\n obj_labels = Variable(torch.from_numpy(obj_labels.astype('int64'))).cuda(device_id) \n\n\n\n if cfg.MODEL.RUN_BASELINE:\n assert sbj_labels is not None and obj_labels is not None\n prd_cls_scores = self.freq_bias.rel_index_with_labels(torch.stack((sbj_labels, obj_labels), 1))\n prd_cls_scores = F.softmax(prd_cls_scores, dim=1)\n return prd_cls_scores, None, None, None, None, None\n\n if spo_feat.dim() == 4:\n spo_feat = spo_feat.squeeze(3).squeeze(2)\n\n sbj_vis_embeddings = self.so_vis_embeddings(sbj_feat)\n obj_vis_embeddings = self.so_vis_embeddings(obj_feat)\n\n prd_hidden = self.prd_feats(spo_feat)\n\n\n\n\n\n # feed the data into the image encoder \n enc_output, mask_enc = self.image_encoder(all_unique_features)\n # print(enc_output.shape, \"encoder output shape \")\n # print(mask_enc.shape, \"mask encoder shape\")\n\n\n\n\n\n\n '''\n \n until here, we can obtain the subject visual embeddings, object visual embeddings, and predicate hidden states\n \n '''\n '''\n the self attention for the sub obj and relation embeddings\n \n '''\n\n\n\n\n ## get sbj vectors and obj vectors\n sbj_vecs = self.obj_vecs[sbj_labels] # (#bs, cfg.MODEL.INPUT_LANG_EMBEDDING_DIM)\n sbj_vecs = Variable(torch.from_numpy(sbj_vecs.astype('float32'))).cuda(device_id)\n\n obj_vecs = self.obj_vecs[obj_labels] # (#bs, cfg.MODEL.INPUT_LANG_EMBEDDING_DIM)\n obj_vecs = Variable(torch.from_numpy(obj_vecs.astype('float32'))).cuda(device_id)\n\n\n sbj_vis_embeddings, obj_vis_embeddings, prd_hidden = self.multi_head_attention(None, None,\n sbj_vis_embeddings,\n obj_vis_embeddings, prd_hidden, enc_output, mask_enc)\n\n\n\n\n\n '''\n all the object and subject word embedding to formalize the object vectors\n '''\n ds_obj_vecs = self.obj_vecs\n ds_obj_vecs = Variable(torch.from_numpy(ds_obj_vecs.astype('float32'))).cuda(device_id)\n so_sem_embeddings = self.so_sem_embeddings(ds_obj_vecs)\n\n so_sem_embeddings = F.normalize(so_sem_embeddings, p=2, dim=1) # (#prd, 1024)\n so_sem_embeddings.t_()\n\n '''\n subject visual embeddings\n '''\n\n\n # this is the visual subject embeddings\n sbj_vis_embeddings = F.normalize(sbj_vis_embeddings, p=2, dim=1) # (#bs, 1024)\n sbj_sim_matrix = torch.mm(sbj_vis_embeddings, so_sem_embeddings) # (#bs, #prd)\n\n sbj_cls_scores = cfg.MODEL.NORM_SCALE * sbj_sim_matrix\n\n # this is the visual object embeddings\n obj_vis_embeddings = F.normalize(obj_vis_embeddings, p=2, dim=1) # (#bs, 1024)\n obj_sim_matrix = torch.mm(obj_vis_embeddings, so_sem_embeddings) # (#bs, #prd)\n obj_cls_scores = cfg.MODEL.NORM_SCALE * obj_sim_matrix\n\n '''\n start to predict the predicate features\n\n '''\n\n '''\n add self afftention here for subject vis, object 
vis, prd hidden, subject label, object label\n\n '''\n \n\n \n prd_features = torch.cat((sbj_vis_embeddings.detach(), prd_hidden, obj_vis_embeddings.detach()), dim=1)\n\n prd_vis_embeddings = self.prd_vis_embeddings(prd_features)\n\n ds_prd_vecs = self.prd_vecs\n ds_prd_vecs = Variable(torch.from_numpy(ds_prd_vecs.astype('float32'))).cuda(device_id)\n prd_sem_embeddings = self.prd_sem_embeddings(ds_prd_vecs)\n prd_sem_embeddings = F.normalize(prd_sem_embeddings, p=2, dim=1) # (#prd, 1024)\n prd_vis_embeddings = F.normalize(prd_vis_embeddings, p=2, dim=1) # (#bs, 1024)\n prd_sim_matrix = torch.mm(prd_vis_embeddings, prd_sem_embeddings.t_()) # (#bs, #prd)\n prd_cls_scores = cfg.MODEL.NORM_SCALE * prd_sim_matrix\n\n if cfg.MODEL.USE_FREQ_BIAS:\n assert sbj_labels is not None and obj_labels is not None\n prd_cls_scores = prd_cls_scores + self.freq_bias.rel_index_with_labels(\n torch.stack((sbj_labels, obj_labels), 1))\n\n if not self.training:\n sbj_cls_scores = F.softmax(sbj_cls_scores, dim=1)\n obj_cls_scores = F.softmax(obj_cls_scores, dim=1)\n prd_cls_scores = F.softmax(prd_cls_scores, dim=1)\n\n return prd_cls_scores, sbj_cls_scores, obj_cls_scores\n\n\ndef add_cls_loss(cls_scores, labels, weight=None):\n if cfg.MODEL.LOSS == 'cross_entropy':\n return F.cross_entropy(cls_scores, labels)\n elif cfg.MODEL.LOSS == 'weighted_cross_entropy':\n return F.cross_entropy(cls_scores, labels, weight=weight)\n elif cfg.MODEL.LOSS == 'focal':\n cls_scores_exp = cls_scores.unsqueeze(2)\n cls_scores_exp = cls_scores_exp.unsqueeze(3)\n labels_exp = labels.unsqueeze(1)\n labels_exp = labels_exp.unsqueeze(2)\n return focal_loss.focal_loss(cls_scores_exp, labels_exp, alpha=cfg.MODEL.ALPHA, gamma=cfg.MODEL.GAMMA,\n reduction='mean')\n elif cfg.MODEL.LOSS == 'weighted_focal':\n cls_scores_exp = cls_scores.unsqueeze(2)\n cls_scores_exp = cls_scores_exp.unsqueeze(3)\n labels_exp = labels.unsqueeze(1)\n labels_exp = labels_exp.unsqueeze(2)\n weight = weight.unsqueeze(0)\n weight = weight.unsqueeze(2)\n weight = weight.unsqueeze(3)\n return focal_loss.focal_loss(cls_scores_exp, labels_exp, alpha=cfg.MODEL.ALPHA, gamma=cfg.MODEL.GAMMA,\n reduction='mean', weight_ce=weight)\n else:\n raise NotImplementedError\n\n\ndef add_hubness_loss(cls_scores):\n # xp_yall_prob (batch_size, num_classes)\n # xp_yall_prob.T (num_classes, batch_size\n # xp_yall_prob.expand(0, 1, -1, 1)\n # xp_yall_probT_average_reshape = xp_yall_probT_reshaped.mean(axis=2)\n # hubness_dist = xp_yall_probT_average_reshape - hubness_blob\n # hubness_dist_sqr = hubness_dist.pow(2)\n # hubness_dist_sqr_scaled = hubness_dist_sqr * cfg.TRAIN.HUBNESS_SCALE\n cls_scores = F.softmax(cls_scores, dim=1)\n hubness_blob = 1. 
/ cls_scores.size(1)\n cls_scores_T = cls_scores.transpose(0, 1)\n cls_scores_T = cls_scores_T.unsqueeze(1).unsqueeze(3).expand(-1, 1, -1, 1)\n cls_scores_T = cls_scores_T.mean(dim=2, keepdim=True)\n hubness_dist = cls_scores_T - hubness_blob\n hubness_dist = hubness_dist.pow(2) * cfg.TRAIN.HUBNESS_SCALE\n hubness_loss = hubness_dist.mean()\n return hubness_loss\n\n\ndef reldn_losses(prd_cls_scores, prd_labels_int32, fg_only=False, weight=None):\n device_id = prd_cls_scores.get_device()\n prd_labels = Variable(torch.from_numpy(prd_labels_int32.astype('int64'))).cuda(device_id)\n if cfg.MODEL.LOSS == 'weighted_cross_entropy' or cfg.MODEL.LOSS == 'weighted_focal':\n weight = Variable(torch.from_numpy(weight)).cuda(device_id)\n loss_cls_prd = add_cls_loss(prd_cls_scores, prd_labels, weight=weight)\n # class accuracy\n prd_cls_preds = prd_cls_scores.max(dim=1)[1].type_as(prd_labels)\n accuracy_cls_prd = prd_cls_preds.eq(prd_labels).float().mean(dim=0)\n\n return loss_cls_prd, accuracy_cls_prd\n\n\ndef reldn_so_losses(sbj_cls_scores, obj_cls_scores, sbj_labels_int32, obj_labels_int32):\n device_id = sbj_cls_scores.get_device()\n\n sbj_labels = Variable(torch.from_numpy(sbj_labels_int32.astype('int64'))).cuda(device_id)\n loss_cls_sbj = add_cls_loss(sbj_cls_scores, sbj_labels)\n sbj_cls_preds = sbj_cls_scores.max(dim=1)[1].type_as(sbj_labels)\n accuracy_cls_sbj = sbj_cls_preds.eq(sbj_labels).float().mean(dim=0)\n\n obj_labels = Variable(torch.from_numpy(obj_labels_int32.astype('int64'))).cuda(device_id)\n loss_cls_obj = add_cls_loss(obj_cls_scores, obj_labels)\n obj_cls_preds = obj_cls_scores.max(dim=1)[1].type_as(obj_labels)\n accuracy_cls_obj = obj_cls_preds.eq(obj_labels).float().mean(dim=0)\n\n return loss_cls_sbj, loss_cls_obj, accuracy_cls_sbj, accuracy_cls_obj\n\n","repo_name":"Vision-CAIR/RelTransformer","sub_path":"lib/modeling/reldn_heads_reltransformer.py","file_name":"reldn_heads_reltransformer.py","file_ext":"py","file_size_in_byte":23380,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"31"} +{"seq_id":"423855906","text":"def read_file(file):\n if '.xml' in file:\n import xml.etree.ElementTree as ET\n tree = ET.parse(file)\n descriptions = []\n root = tree.getroot()\n xml_items = root.findall('channel/item')\n\n for item in xml_items:\n description = item.find('description')\n descriptions += description.text.split(\" \")\n return descriptions\n\n elif '.json' in file:\n import json\n import chardet\n with open(file, 'rb') as f:\n data = f.read()\n result = chardet.detect(data)\n data = data.decode(result['encoding'])\n data = json.loads(data)\n full_text = ''\n for items in data['rss']['channel']['items']:\n full_text += ' ' + items['description']\n descriptions = full_text.split(' ')\n return descriptions\n\n\ndef longer_than_x(descriptions, x):\n longer_than_list = list()\n for word in descriptions:\n if len(word) > x:\n longer_than_list.append(word)\n return longer_than_list\n\n\ndef sort_dict(longer_than_list):\n sorted_dict = {word: longer_than_list.count(word) for word in longer_than_list}\n return sorted_dict\n\n\ndef top_y_words(sorted_dict, y):\n list_of_lists = list()\n for word in sorted_dict.keys():\n list_of_lists.append([word, sorted_dict[word]])\n result = sorted(list_of_lists, key=lambda pair: pair[1], reverse=True)\n\n counter = 1\n for element in result:\n print('{}. 
{} - {}'.format(counter, element[0], element[1]))\n if counter == y:\n break\n counter += 1\n\n\ndef core():\n input_1 = input('Введите имя папки: ')\n input_2 = input('Введите имя файла (XML или JSON): ')\n input_3 = int(input('Минимальное число символов в словах для поиска: '))\n input_4 = int(input('Введите длину списка часто повторяющихся слов: '))\n file = str(input_1 + '/' + input_2)\n data = read_file(file)\n data_list = longer_than_x(data, input_3)\n data_dict = sort_dict(data_list)\n top_y_words(data_dict, input_4)\n\n\nif __name__ == '__main__':\n core()\n","repo_name":"igortsallagov/py-hw-7","sub_path":"netology-7.py","file_name":"netology-7.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30038569020","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom django.contrib.auth import views\n\nfrom apps.story.views import frontpage, search, submit, newest, vote, story\nfrom apps.core.views import signup\n\nurlpatterns = [\n path('', frontpage, name='frontpage'),\n path('s//vote/', vote, name='vote'),\n path('s//', story, name='story'),\n path('u/', include('apps.userprofile.urls')),\n path('newest/', newest, name='newest'),\n path('search/', search, name='search'),\n path('submit/', submit, name='submit'),\n path('signup/', signup, name='signup'),\n path('login/', views.LoginView.as_view(template_name='core/login.html'), name='login'),\n path('logout/', views.LogoutView.as_view(), name='logout'),\n path('admin/', admin.site.urls),\n]\n","repo_name":"SteinOveHelset/codingnews","sub_path":"codingnews/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"31"} +{"seq_id":"10288995036","text":"from flask import Blueprint, request, render_template\n\nfrom src import db\nfrom src.errands.forms import ErrandForm\nfrom src.errands.models import Errand\nfrom src.places.models import Place, Location\nfrom src.wrappers import add_new_place, add_opening_hours\n\n'''\nA Blueprint is a way to organize a group of related views and other code. 
\nRather than registering views and other code directly with an application, they are registered with a blueprint.\nThen the blueprint is registered with the application when it is available in the factory function.\n'''\n\nbp = Blueprint('errand', __name__)\n\n\n@bp.route('/errands/create', methods=('GET', 'POST'))\ndef errands_create():\n form = ErrandForm(request.form)\n if request.method == 'POST':\n print(f'DEBUG form={form.data}')\n\n place_name = form.place_input.data\n place = Place.query.filter_by(name=place_name).first()\n if not place:\n place, location = add_new_place(place_name)\n open_hours = add_opening_hours(location)\n\n\n errand = Errand(form.name.data, form.duration_mins.data, place.id, form.notes.data)\n db.session.add(errand)\n db.session.commit()\n\n return render_template('errand.html', form=form)\n","repo_name":"eugene01a/trip-optimization","sub_path":"src/errands/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35621203939","text":"import discord\r\nfrom discord.ext import commands\r\n\r\nclass General(commands.Cog, name=\"General Commands\"):\r\n\tdef __init__(self, bot):\r\n\t\tself.bot = bot\r\n\r\n\t@commands.command(name='hello')\r\n\tasync def hello(self, ctx):\r\n\t\t\"Hello World!\"\r\n\t\tawait ctx.send('Hello World!')\r\n\r\n\t@commands.command(name='spitback')\r\n\tasync def spitback(self, ctx, arg: str):\r\n\t\t\"Responds with your text.\"\r\n\t\tawait ctx.send(arg)\r\n\r\n\t@spitback.error\r\n\tasync def spitback_error(self, ctx, error):\r\n\t\terror = getattr(error, 'original', error)\r\n\t\tif isinstance(error, commands.BadArgument):\r\n\t\t\tembed=discord.Embed(title=\"Error!\", description=\"Unknown argument type or bad argument.\", color=0xfd0000)\r\n\t\t\tembed.add_field(name=\"Proper usage:\", value=\"```{}spitback ```\".format(ctx.prefix), inline=True)\r\n\t\t\tembed.set_footer(text=\"HiggsBot - A code executing Discord bot!\")\r\n\t\tif isinstance(error, commands.MissingRequiredArgument):\r\n\t\t\tembed=discord.Embed(title=\"Error!\", description=\"Missing argument.\", color=0xfd0000)\r\n\t\t\tembed.add_field(name=\"Proper usage:\", value=\"```{}spitback ```\".format(ctx.prefix), inline=True)\r\n\t\t\tembed.set_footer(text=\"HiggsBot - A code executing Discord bot!\")\r\n\t\tawait ctx.send(embed=embed)\r\n\r\n\t@commands.command(name='readme')\r\n\tasync def readme(self,ctx):\r\n\t\t\"Print a simple readme\"\r\n\t\tf = open(\"data/general/help.txt\", \"r\")\r\n\t\tawait ctx.send(f.read())\r\n\t\tf.close()\r\n\r\ndef setup(bot):\r\n\tbot.add_cog(General(bot))","repo_name":"higgsbot/main","sub_path":"cogs/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73342219287","text":"from indico.core.db import db\n\n\nclass AnonymousSurveySubmission(db.Model):\n __tablename__ = 'anonymous_submissions'\n __table_args__ = {'schema': 'event_surveys'}\n\n survey_id = db.Column(\n db.ForeignKey('event_surveys.surveys.id', ondelete='CASCADE'),\n primary_key=True,\n index=True\n )\n user_id = db.Column(\n db.ForeignKey('users.users.id', ondelete='CASCADE'),\n primary_key=True,\n index=True,\n )\n\n survey = db.relationship(\n 'Survey',\n lazy=True,\n backref=db.backref(\n 'anonymous_submissions',\n lazy='dynamic',\n cascade='all, delete-orphan',\n passive_deletes=True\n )\n )\n user = 
db.relationship(\n 'User',\n lazy=True,\n backref=db.backref(\n 'anonymous_survey_submissions',\n lazy='dynamic',\n cascade='all, delete-orphan',\n passive_deletes=True\n )\n )\n\n def __repr__(self):\n return f''\n\n @classmethod\n def merge_users(cls, target, source):\n target_ids = [sub.survey_id for sub in target.anonymous_survey_submissions]\n\n for sub in source.anonymous_survey_submissions.all():\n if sub.survey_id not in target_ids:\n sub.user = target\n else:\n db.session.delete(sub)\n","repo_name":"indico/indico","sub_path":"indico/modules/events/surveys/models/anonymous_submissions.py","file_name":"anonymous_submissions.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":1560,"dataset":"github-code","pt":"31"} +{"seq_id":"70334083929","text":"import configparser\nimport db_ops\nimport telebot\nfrom telebot import types\n\nconfig = configparser.ConfigParser()\nconfig.read('balde.conf')\n\nTOKEN = config['BALDE']['TOKEN']\nbot = telebot.TeleBot(TOKEN)\nchannelid = config['BALDE']['CHANNELID']\n\nbutton2 = types.InlineKeyboardMarkup()\nbutton_ask = types.InlineKeyboardButton('Eu quero!', callback_data=\"/quero\")\nbutton2.row(button_ask)\n\nposts = db_ops.selectbigger('Balde', 'days', 0)\nfor post in posts:\n days_left = post[4]-1\n if days_left == 0:\n desc = '{}\\nPrazo expirado.'.format(post[3])\n bot.edit_message_caption(desc, channelid, post[1], parse_mode='HTML')\n bot.unpin_chat_message(channelid, post[1])\n elif int(post[5]) > 0:\n desc = '{}\\n{} tem {} dias para buscar.'.format(post[3], post[5], post[6], days_left)\n bot.edit_message_caption(desc, channelid, post[1], parse_mode='HTML')\n else:\n desc = '{}\\nDias restantes no balde: {}'.format(post[3], days_left)\n bot.edit_message_caption(desc, channelid, post[1], parse_mode='HTML', reply_markup=button2)\n db_ops.update('Balde', 'days', days_left, 'post', post[1])\n","repo_name":"GabrielRF/CalangoHC-Balde","sub_path":"count_day.py","file_name":"count_day.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72606297367","text":"import os, inspect\nimport argparse\nimport json\nimport time\nimport quaternion\nimport scipy.io as sio\nimport numpy as np\nfrom scipy.spatial.transform import Rotation as R\nfrom PIL import Image\nimport pybullet as p\nimport pybullet_data\n\n# for robot control\nfrom pybullet_robot_envs.envs.panda_envs.panda_env import pandaEnv\n\nfrom utils.bullet_utils import get_matrix_from_pose, get_pose_from_matrix, get_matrix_from_pos_rot, get_pos_rot_from_matrix, xyzw2wxyz, wxyz2xyzw, draw_coordinate, draw_bbox\n\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(os.path.dirname(currentdir))\nos.sys.path.insert(0, parentdir)\n\ndef recover_trajectory(traj_src : np.ndarray, hook_poses : np.ndarray, \n centers : np.ndarray, scales, dataset_mode : int=0):\n # traj : dim = batch x num_steps x 6\n # dataset_mode : 0 for abosute, 1 for residual \n\n traj = None\n traj = np.copy(traj_src)\n\n waypoints = []\n\n if dataset_mode == 0: # \"absolute\"\n\n traj[:, :3] = traj[:, :3] * scales + centers\n \n hook_trans = get_matrix_from_pose(hook_poses)\n for wpt_id in range(0, traj.shape[0]): # waypoints\n\n wpt = np.zeros(6)\n # contact pose rotation\n wpt[:3] = traj[wpt_id]\n\n # transform to world coordinate first\n current_trans = np.identity(4)\n current_trans[:3, 3] = traj[wpt_id]\n 
        current_trans = hook_trans @ current_trans

        if wpt_id < traj.shape[0] - 1:
            # look ahead toward upcoming waypoints in world coordinates

            peep_num_max = int(np.ceil(traj.shape[0] / 10.0))
            peep_num = peep_num_max if wpt_id < traj.shape[0] - peep_num_max else traj.shape[0] - wpt_id - 1
            to_pos = np.ones((4, peep_num))
            to_pos[:3] = traj[wpt_id:wpt_id+peep_num].T
            to_pos = (hook_trans @ to_pos)[:3]

            from_pos = np.ones((4, peep_num))
            from_pos[:3] = traj[wpt_id+1:wpt_id+peep_num+1].T
            from_pos = (hook_trans @ from_pos)[:3]

            weight = np.array([1/x for x in range(3, peep_num+3)])[:peep_num]
            weight /= np.sum(weight)
            diff = (to_pos - from_pos) * weight

            x_direction = np.sum(diff, axis=1)
            x_direction /= np.linalg.norm(x_direction, ord=2)
            y_direction = np.cross(x_direction, [0, 0, -1])
            y_direction /= np.linalg.norm(y_direction, ord=2)
            z_direction = np.cross(x_direction, y_direction)
            rotation_mat = np.vstack((x_direction, y_direction, z_direction)).T
            current_trans[:3, :3] = rotation_mat

        else :

            current_trans[:3, :3] = R.from_rotvec(waypoints[-1][3:]).as_matrix() # use the last waypoint's rotation as current rotation

        waypoints.append(get_pose_from_matrix(current_trans, pose_size=6))

    return waypoints


def robot_apply_action(robot : pandaEnv, obj_id : int, action : tuple or list, gripper_action : str = 'nop', 
                        sim_timestep : float = 1.0 / 240.0, diff_thresh : float = 0.005, max_vel : float = 0.2, max_iter = 5000):

    assert gripper_action in ['nop', 'pre_grasp', 'grasp']

    if gripper_action == 'nop':
        assert len(action) == 7, 'action length should be 7'

        robot.apply_action(action, max_vel=max_vel)
        diff = 10.0
        iter = 0
        while diff > diff_thresh and iter < max_iter: 
            iter += 1

            p.stepSimulation()
            time.sleep(sim_timestep)

            tmp_pos = p.getLinkState(robot.robot_id, robot.end_eff_idx, physicsClientId=robot._physics_client_id)[4] # position
            tmp_rot = p.getLinkState(robot.robot_id, robot.end_eff_idx, physicsClientId=robot._physics_client_id)[5] # rotation
            diff = np.sum((np.array(tmp_pos + tmp_rot) - np.array(action)) ** 2) ** 0.5

    elif gripper_action == 'pre_grasp' :

        robot.pre_grasp()
        for _ in range(int(1.0 / sim_timestep) * 1): # 1 sec
            p.stepSimulation()
            time.sleep(sim_timestep)
    else:

        robot.grasp(obj_id)
        for _ in range(int(1.0 / sim_timestep)): # 1 sec
            p.stepSimulation()
            time.sleep(sim_timestep)

def get_dense_waypoints(start_config : list or tuple or np.ndarray, end_config : list or tuple or np.ndarray, resolution : float=0.005):

    assert len(start_config) == 7 and len(end_config) == 7

    d12 = np.asarray(end_config[:3]) - np.asarray(start_config[:3])
    steps = int(np.ceil(np.linalg.norm(np.divide(d12, resolution), ord=2)))
    obj_init_quat = quaternion.as_quat_array(xyzw2wxyz(start_config[3:]))
    obj_tgt_quat = quaternion.as_quat_array(xyzw2wxyz(end_config[3:]))

    ret = []
    # plan trajectory in the same way in collision detection module
    for step in range(steps):
        ratio = (step + 1) / steps
        pos = ratio * d12 + np.asarray(start_config[:3])
        quat = quaternion.slerp_evaluate(obj_init_quat, obj_tgt_quat, ratio)
        quat = wxyz2xyzw(quaternion.as_float_array(quat))
        position7d = tuple(pos) + tuple(quat)
        ret.append(position7d)

    return ret

def refine_rotation(src_transform, tgt_transform):
    src_rot = src_transform[:3, :3]
    tgt_rot = tgt_transform[:3, :3]

    s2d_before = R.from_matrix(tgt_rot @ np.linalg.inv(src_rot)).as_rotvec()

    rot_180 = np.identity(4)
    rot_180[:3, :3] = R.from_rotvec([0, 0, 
np.pi]).as_matrix()\n tgt_dual_transform = tgt_transform @ rot_180\n s2d_after = R.from_matrix(tgt_dual_transform[:3, :3] @ np.linalg.inv(src_rot)).as_rotvec()\n\n return tgt_transform if np.sum((s2d_before) ** 2) < np.sum((s2d_after) ** 2) else tgt_dual_transform\n\ndef main(args):\n\n time_stamp = time.localtime()\n time_mon_day = '{:02d}{:02d}'.format(time_stamp.tm_mon, time_stamp.tm_mday)\n\n data_dir = f'{args.data_root}/{args.data_dir}'\n kpt_trajectory_dir = f'{args.input_root}/{args.input_dir}' if args.input_dir != '' else f'{args.input_root}/{time_mon_day}'\n\n assert os.path.exists(data_dir), f'{data_dir} not exists'\n assert os.path.exists(kpt_trajectory_dir), f'{kpt_trajectory_dir} not exists'\n assert os.path.exists(args.output_root), f'{args.output_root} not exists'\n \n # load model\n obj_fname = f'{kpt_trajectory_dir}/{args.obj}'\n hook_fname = f'{kpt_trajectory_dir}/{args.hook}'\n obj_name = os.path.split(obj_fname)[1].split('.')[0]\n hook_name = os.path.split(hook_fname)[1].split('.')[0]\n obj_hook_pair_fname = f'{data_dir}/Hook_my_bar_easy-everyday_objects_50/Hook_my_bar_easy-{obj_name}.json'\n\n assert os.path.exists(obj_hook_pair_fname), f'{obj_hook_pair_fname} not exists'\n assert os.path.exists(obj_fname), f'{obj_fname} not exists'\n assert os.path.exists(hook_fname), f'{hook_fname} not exists'\n\n with open(obj_hook_pair_fname, 'r') as f:\n obj_hook_pair_dict = json.load(f)\n with open(obj_fname, 'r') as f:\n obj_dict = json.load(f)\n with open(hook_fname, 'r') as f:\n hook_dict = json.load(f)\n \n # demonstration_dir = f'{args.output_root}/{args.output_dir}' if args.output_dir != '' else f'{args.output_root}/{time_mon_day}'\n # if not os.path.exists(demonstration_dir):\n # os.mkdir(demonstration_dir)\n\n # assert some attributes exist in the given json files\n assert 'initial_pose' in obj_hook_pair_dict.keys(), \\\n f'\"initial_pose\" not in obj_hook_pair_dict!, please run hanging_init_pose.py'\n assert 'contact_pose' in obj_dict.keys() and 'file' in obj_dict.keys(), \\\n f'\"contact_pose\" or \"file\" not in obj_dict!, please run keypoint_pose.py'\n assert 'hook_pose' in hook_dict.keys() and 'file' in hook_dict.keys() and 'trajectory' in hook_dict.keys(), \\\n f'\"hook_pose\" or \"file\" or \"trajectory\" not in hook_dict!, please run keypoint_trajectory.py'\n \n # ------------------------ #\n # --- Setup simulation --- #\n # ------------------------ #\n\n # Create pybullet GUI\n physics_client_id = p.connect(p.DIRECT)\n # physics_client_id = p.connect(p.GUI)\n # p.configureDebugVisualizer(p.COV_ENABLE_GUI,0)\n p.resetDebugVisualizerCamera(\n cameraDistance=0.2,\n cameraYaw=90,\n cameraPitch=-30,\n cameraTargetPosition=[0.5, 0.0, 1.3]\n )\n p.resetSimulation()\n p.setPhysicsEngineParameter(numSolverIterations=150)\n sim_timestep = 1.0 / 240\n p.setTimeStep(sim_timestep)\n p.setGravity(0, 0, -9.8)\n\n # ------------------- #\n # --- Setup robot --- #\n # ------------------- #\n\n # Load plane contained in pybullet_data\n p.loadURDF(os.path.join(pybullet_data.getDataPath(), \"plane.urdf\"))\n robot = pandaEnv(physics_client_id, use_IK=1)\n\n # -------------------------- #\n # --- Load other objects --- #\n # -------------------------- #\n\n p.loadURDF(os.path.join(pybullet_data.getDataPath(), \"table/table.urdf\"), [1, 0.0, 0.0])\n\n obj_contact_pose_6d = obj_dict['contact_pose']\n obj_contact_relative_transform = get_matrix_from_pos_rot(obj_contact_pose_6d[:3], obj_contact_pose_6d[3:])\n obj_id = p.loadURDF(obj_dict['file'])\n # 
p.resetBasePositionAndOrientation(obj_id, obj_pos, obj_rot)\n\n hook_pose_6d = hook_dict['hook_pose']\n hook_pos = hook_pose_6d[:3]\n hook_quat = hook_pose_6d[3:]\n hook_id = p.loadURDF(hook_dict['file'], hook_pos, hook_quat)\n hook_transform = get_matrix_from_pos_rot(hook_pos, hook_quat)\n\n wpt_num = args.wpt_num\n wpt_dim = args.wpt_dim\n preload_path = f'kptraj_{wpt_num}/val/{hook_name}/traj-{args.traj_id}.json'\n assert os.path.exists(preload_path), f'{preload_path} not exists'\n preload_traj_dict = json.load(open(preload_path, 'r')) \n trajectories_hook = [preload_traj_dict['trajectory'][::-1]]\n # trajectories_hook = hook_dict['trajectory'][2:3]\n\n # grasping\n index = 0 # medium\n initial_info = obj_hook_pair_dict['initial_pose'][index] # medium\n obj_pos = initial_info['obj_pose'][:3]\n obj_rot = initial_info['obj_pose'][3:]\n # obj_pos = list(np.array(obj_pos) + np.array([0, 0, 0.02]))\n\n initial_info = obj_hook_pair_dict['initial_pose'][index] # medium\n robot_pos = initial_info['robot_pose'][:3]\n robot_rot = initial_info['robot_pose'][3:]\n # robot_pos = list(np.array(robot_pos) + np.array([0, 0, 0.02]))\n robot_pose = robot_pos + robot_rot\n robot_transform = get_matrix_from_pos_rot(robot_pos, robot_rot)\n\n for traj_i in range(len(trajectories_hook)):\n \n robot.reset()\n\n robot.apply_action(robot_pose, max_vel=-1)\n for _ in range(int(1.0 / sim_timestep * 0.5)): # 1 sec\n p.stepSimulation()\n time.sleep(sim_timestep)\n robot.grasp(obj_id=obj_id)\n for _ in range(int(1.0 / sim_timestep * 0.25)): \n p.resetBasePositionAndOrientation(obj_id, obj_pos, obj_rot)\n p.stepSimulation()\n time.sleep(sim_timestep)\n time.sleep(1)\n\n obj_transform = get_matrix_from_pos_rot(obj_pos, obj_rot)\n kpt_transform_world = obj_transform @ obj_contact_relative_transform\n \n trajectory_hook = trajectories_hook[traj_i]\n\n if wpt_dim == 3:\n trajectory_hook_3d = np.asarray(trajectory_hook)[:, :3]\n trajectory_hook_world = recover_trajectory(trajectory_hook_3d, hook_pose_6d, np.array([0, 0, 0]), 1)\n\n trajectory_hook = []\n for wpt_world in trajectory_hook_world:\n wpt_trans = np.linalg.inv(get_matrix_from_pose(hook_pose_6d)) @ get_matrix_from_pose(wpt_world)\n trajectory_hook.append(list(get_pose_from_matrix(wpt_trans)))\n\n # first_waypoint = trajectory_hook[-100:][0]\n first_waypoint = trajectory_hook[0]\n relative_kpt_transform = get_matrix_from_pos_rot(first_waypoint[:3], first_waypoint[3:])\n first_kpt_transform_world = hook_transform @ relative_kpt_transform\n\n kpt_transform_world = refine_rotation(first_kpt_transform_world, kpt_transform_world)\n\n kpt_to_gripper = np.linalg.inv(kpt_transform_world) @ robot_transform\n first_gripper_transform = first_kpt_transform_world @ kpt_to_gripper\n\n first_gripper_pos, first_gripper_rot = get_pos_rot_from_matrix(first_gripper_transform)\n first_gripper_pose = list(first_gripper_pos) + list(first_gripper_rot)\n\n # draw_coordinate(first_kpt_transform_world, size=0.01)\n\n trajectory_start = get_dense_waypoints(robot_pose, first_gripper_pose, resolution=0.002)\n for waypoint in trajectory_start:\n robot.apply_action(waypoint)\n p.stepSimulation()\n robot.grasp()\n for _ in range(10): # 1 sec\n p.stepSimulation()\n time.sleep(sim_timestep)\n \n old_gripper_pose = first_gripper_pose\n # trajectory_hook = trajectory_hook[-100:-20] if 'hard' in hook_name or 'devil' in hook_name else trajectory_hook[-100:-5]\n \n ignore_wpt_num = int(np.ceil(len(trajectory_hook[0]) * 0.1)) if wpt_dim == 3 else 0\n for i, waypoint in enumerate(trajectory_hook):\n\n if 
i + ignore_wpt_num >= len(trajectory_hook):\n break\n\n waypoint_abs = get_pose_from_matrix(hook_transform @ get_matrix_from_pose(waypoint))\n\n gripper_transform = get_matrix_from_pose(waypoint_abs) @ kpt_to_gripper\n gripper_pose = get_pose_from_matrix(gripper_transform)\n\n fine_gripper_poses = get_dense_waypoints(old_gripper_pose, gripper_pose, resolution=0.002)\n for fine_gripper_pose in fine_gripper_poses:\n robot.apply_action(fine_gripper_pose)\n p.stepSimulation()\n \n robot.grasp()\n for _ in range(5): # 1 sec\n p.stepSimulation()\n time.sleep(sim_timestep)\n old_gripper_pose = gripper_pose\n\n # execution step 2 : release gripper\n robot_apply_action(robot, obj_id, gripper_pose, gripper_action='pre_grasp', \n sim_timestep=0.05, diff_thresh=0.01, max_vel=-1, max_iter=100)\n\n # execution step 3 : go to the ending pose\n gripper_rot = p.getLinkState(robot.robot_id, robot.end_eff_idx, physicsClientId=robot._physics_client_id)[5]\n gripper_rot_matrix = R.from_quat(gripper_rot).as_matrix()\n ending_gripper_pos = np.asarray(gripper_pose[:3]) + (gripper_rot_matrix @ np.array([[0], [0], [-0.05]])).reshape(3)\n action = tuple(ending_gripper_pos) + tuple(gripper_rot)\n robot_apply_action(robot, obj_id, action, gripper_action='nop', \n sim_timestep=0.05, diff_thresh=0.005, max_vel=-1, max_iter=100)\n\n # p.removeAllUserDebugItems()\n\n # for _ in range(int(0.2/sim_timestep)): \n # p.stepSimulation()\n # time.sleep(sim_timestep)\n\n contact = False\n contact_points = p.getContactPoints(obj_id, hook_id)\n contact = True if contact_points != () else False\n\n fname_out = f'hanging_by_trajectory_result_{args.traj_id}_{wpt_num}w_{wpt_dim}d.txt'\n f_out = open(fname_out, 'a')\n f_out.write(f'{hook_name},{obj_name},{traj_i},{1 if contact else 0}\\n')\n f_out.close()\n\n# start_msg = \\\n# '''\n# ======================================================================================\n# this script will execute the hanging process using the collected keypoint trajectories\n# in \n# - [input_root]/[input_dir]/[hook_name].json \n# - [input_root]/[input_dir]/[object_name].json\n\n# dependency :\n# - object folder that contains /[object_name]/base.urdf\n# - hook folder that contains /[hook_name]/base.urdf\n# - the keypoint pose of objects in [input_root]/[input_dir]/[obj_name].json\n# - the keypoint trajectories of hooks in [input_root]/[input_dir]/[hook_name].json\n# - the folder that cantain initial pose of objects in \n# [data_root]/[data_dir]/[hook_name-object_set]/[hook_name-object_name].json\n# note :\n# - you can run this script using ./run.sh hangtraj\n# ======================================================================================\n# '''\n\n# print(start_msg)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--data-root', '-dr', type=str, default='data')\n parser.add_argument('--data-dir', '-dd', type=str, default='everyday_objects_50')\n parser.add_argument('--input-root', '-ir', type=str, default='keypoint_trajectory')\n parser.add_argument('--input-dir', '-id', type=str, default='everyday_objects_50')\n parser.add_argument('--obj', '-obj', type=str, default='hanging_exp_daily_5.json')\n parser.add_argument('--hook', '-hook', type=str, default='Hook_my_bar_easy.json')\n parser.add_argument('--traj_id', '-ti', type=int, default=0)\n parser.add_argument('--wpt_num', '-wn', type=int, default=10)\n parser.add_argument('--wpt_dim', '-wd', type=int, default=3)\n parser.add_argument('--output-root', '-or', type=str, default='demonstration_data')\n 
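    # hypothetical invocation (values shown are simply the defaults declared here):
    #   python hanging_by_trajectory.py --hook Hook_my_bar_easy.json --obj hanging_exp_daily_5.json --wpt_num 10 --wpt_dim 3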
    parser.add_argument('--output-dir', '-od', type=str, default='')
    parser.add_argument('--save-demo', '-sd', action=\"store_true\")
    parser.add_argument('--save-gif', '-sg', action=\"store_true\")
    args = parser.parse_args()
    main(args)","repo_name":"Chialiang86/Hanging-Motion-Planning","sub_path":"hanging_by_trajectory.py","file_name":"hanging_by_trajectory.py","file_ext":"py","file_size_in_byte":17312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"10594990609","text":"# 5-3.py


# define the function
def up_stairs(i):
    """
    :param i: the current number of steps
    :return: the number of ways for that count
    """
    if i == 3:
        return 4
    if i == 2:
        return 2
    if i == 1:
        return 1

    # read the global variable
    global count
    # the recurrence is easy to write: f(n) = f(n - 1) + f(n - 2) + f(n - 3)
    count = up_stairs(i - 1) + \
            up_stairs(i - 2) + \
            up_stairs(i - 3)
    return count

# main program
if __name__ == '__main__':
    # read the number of steps
    n = int(input("How many steps:"))
    count = 0
    # fix: capture the return value so small n (<= 3, which never touches the
    # global) also reports the right answer
    count = up_stairs(n)
    print("There are", count, "solutions.")

","repo_name":"luocaodan/PythonHomework","sub_path":"shanglouwenti.py","file_name":"shanglouwenti.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
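The recursion above re-solves overlapping subproblems, so it is exponential in n. A bottom-up sketch of the same recurrence (a new function, independent of the globals above) runs in O(n):

def up_stairs_dp(n: int) -> int:
    # ways[i] = number of ways to climb i steps taking 1, 2 or 3 at a time
    ways = [1, 1, 2]  # base cases for 0, 1 and 2 steps
    for i in range(3, n + 1):
        ways.append(ways[i - 1] + ways[i - 2] + ways[i - 3])
    return ways[n]

assert up_stairs_dp(3) == 4  # matches the hard-coded base case above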
{"seq_id":"15960335577","text":"\"\"\"This plugin allows rules to contain regular expression tags.\"\"\"

import re

import pad.errors
import pad.plugins.base

# This splits value in the corresponding tags
SPLIT_TAGS = re.compile(r"(<[^<>]+>)")


class ReplaceTags(pad.plugins.base.BasePlugin):
    """Replace tags in various rules."""

    eval_rules = ()

    options = {
        "replace_start": ("str", "<"),
        "replace_end": ("str", ">"),
        # These configs define the tags
        "replace_pre": ("append", []),
        "replace_inter": ("append", []),
        "replace_post": ("append", []),
        "replace_tag": ("append", []),
        # This config defines the rules that will
        # have their values inspected for tags
        "replace_rules": ("append_split", []),
    }

    def prepare_tags(self, which="tag"):
        """Prepare the configured tags for easy replacement.
        Valid options for which are: pre, inter, post, tag.

        This converts the list of defined TAG to a dictionary
        and stores it back in the context. The dictionary maps
        the full tag name, including the start and end
        characters, to their values.
        """
        # Extra text besides the tag name, for example:
        # <pre TAG_NAME>   (example reconstructed; the original was stripped)
        extra = "%s " % which
        if which == "tag":
            extra = ""
        template = "%s%s%%s%s" % (
            self["replace_start"],
            extra,
            self["replace_end"]
        )
        result = {}
        for config in self["replace_%s" % which]:
            try:
                tag_name, value = config.split(None, 1)
            except ValueError:
                self.ctxt.err("Invalid replace tag: %r", config)
                continue
            full_name = template % tag_name
            if full_name in result:
                self.ctxt.err("Redefining replace tag: %r", full_name)
            result[template % tag_name] = value
        # Replace the list with a dictionary in the global
        # context.
        self["replace_%s" % which] = result

    def get_metatags(self, rule_value, which):
        """Check the rule value for meta tags and return
        the value and the adjusted rule.

        >>> self.get_metatags("/(?!tion)/", "post")
        ('{3}', '/(?!tion)/')
        """
        result = []
        for tag, tag_value in self["replace_%s" % which].items():
            if tag in rule_value:
                result.append(tag_value)
                rule_value = rule_value.replace(tag, "")
        return "".join(result), rule_value

    def replace_tags(self, rule_value):
        """Replace a single rule result."""
        pre_replace, rule_value = self.get_metatags(rule_value, "pre")
        inter_replace, rule_value = self.get_metatags(rule_value, "inter")
        post_replace, rule_value = self.get_metatags(rule_value, "post")

        results = []
        replace_tags = self["replace_tag"]
        splits = SPLIT_TAGS.split(rule_value)
        for i, value in enumerate(splits):
            try:
                replace_value = replace_tags[value]
            except KeyError:
                # This is not a tag, just add it to the result
                results.append(value)
                continue

            results.append(pre_replace)
            results.append(replace_value)
            results.append(post_replace)

            # Check the next value in the list to see if
            # it's also a tag. If so then add the INTER.
            try:
                if splits[i + 1] == '' and splits[i + 2] in replace_tags:
                    # The split will actually return an empty string
                    # in these cases.
                    results.append(inter_replace)
            except IndexError:
                pass
        return "".join(results)

    def finish_parsing_start(self, results):
        """All configuration files have been read. 
Check the existing\n rules and replace any available tags.\n \"\"\"\n super(ReplaceTags, self).finish_parsing_start(results)\n for which in (\"pre\", \"inter\", \"post\", \"tag\"):\n self.prepare_tags(which)\n\n for rule_name in self[\"replace_rules\"]:\n try:\n rule_results = results[rule_name]\n except KeyError:\n self.ctxt.err(\"No such rule defined: %s\", rule_name)\n continue\n rule_value = rule_results[\"value\"]\n new_rule_value = self.replace_tags(rule_value)\n self.ctxt.log.debug(\"Replaced %r with %r in %s\", rule_value,\n new_rule_value, rule_name)\n rule_results[\"value\"] = new_rule_value\n","repo_name":"sorinsrn7/SpamPAD","sub_path":"pad/plugins/replace_tags.py","file_name":"replace_tags.py","file_ext":"py","file_size_in_byte":4649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16140904602","text":"\nimport json\nimport logging\nfrom pymodbus.client import ModbusTcpClient\nfrom pymodbus.payload import BinaryPayloadDecoder\nfrom pymodbus.constants import Endian\n#from pymodbus.client.sync import ModbusTcpClient\n\n\nclass Smartfox(object):\n def __init__(self,host, port,logger):\n\n _libName = str(__name__.rsplit('.', 1)[-1])\n self._log = logging.getLogger(logger + '.' + _libName + '.' + self.__class__.__name__)\n\n self._log.debug('Create Smartfox Modbus Object')\n\n self._host = host\n self._port = port\n self._client = None\n\n self._registerFile = \"./data/SmartfoxRegister.json\"\n self._register = None\n\n self._dataType = {\n 'uint16':1,\n 'int16':1,\n 'int32':2,\n 'uint32':2,\n 'STR16':8,\n 'uint64':4,\n 'int64':4\n }\n\n def readConfigFile(self,file=False):\n if file is False:\n file = self._registerFile\n self._log.info('read local file')\n\n with open(file) as _data:\n self._register = json.load(_data)\n\n def connect(self):\n self._client = ModbusTcpClient(self._host, self._port)\n return self._client.connect()\n\n def readRegister(self,slaveId,name):\n # print(type(self._register),self._register)\n _item = self._register.get(name,False)\n if _item is False:\n # print('%s Name not found',name)\n self._log.critical('Value not found %s',name)\n return False\n _address = _item['Start'] -1\n _size = _item['Size']\n _type = _item['Type']\n _scale = _item['Scale Factor']\n if not _scale:\n _scale = 1\n _units = _item['Units']\n\n try:\n _value = self.getData(slaveId,_address,_size)\n _value = self.evaluateData(_value,_type)\n except:\n self._log.error('Problem during evaluation')\n # print(type(_value),_value)\n # print(_value*_scale,_units)\n return (_value*_scale,_units)\n\n\n def getData(self,slaveId,address,size):\n # print(slaveId,address,size)\n received = self._client.read_holding_registers(address=address,\n count= size,\n unit=slaveId)\n # print(received)\n message = BinaryPayloadDecoder.fromRegisters(received.registers, byteorder=Endian.Big, wordorder=Endian.Big)\n return message\n\n def evaluateData(self,message,dataType):\n if dataType == 'int32':\n _data = message.decode_32bit_int()\n elif dataType == 'uint32':\n _data = message.decode_32bit_uint()\n elif dataType == 'uint64':\n _data = message.decode_64bit_uint()\n elif dataType == 'STR16':\n _data = message.decode_string(16).rstrip('\\x00')\n elif dataType == 'STR32':\n _data = message.decode_string(32).rstrip('\\x00')\n elif dataType == 'int16':\n _data = message.decode_16bit_int()\n elif dataType == 'uint16':\n _data = message.decode_16bit_uint()\n else: # if no data type is defined do raw interpretation of the delivered data\n # _data = 
message.decode_16bit_uint()
            # print('unknown Data Type')
            self._log.error('unknown datatype')
            _data = False

        return _data

    def queryData(self,data=False):
        _store = {}

        if not data:
            self._log.info('read local data')
            data= [
                'Energy from grid',
                'Energy into grid',
                'Energy Smartfox',
                'Day Energy from grid',
                'Day Energy into grid',
                'Day Energy Smartfox',
                'Power total',
                'Power L1',
                'Power L2',
                'Power L3',
                'Voltage L1',
                'Voltage L2',
                'Voltage L3',
                'Current L1',
                'Current L2',
                'Current L3',
                'Frequency',
                'Power Smartfox'
            ]

        for _item in data:
            _localStore = {}
            (data,unit) =self.readRegister(1,_item)
            # print(data,unit)
            _localStore['data_value'] = data
            _localStore['data_unit'] = unit
            _store[_item] = _localStore

        # print(json.dumps(_store))
        return json.dumps(_store)




 
if __name__ == "__main__":
    # fix: the constructor requires host, port and logger; the port (502 is
    # the Modbus default) and logger name here are illustrative values
    smart = Smartfox('192.168.2.80', 502, 'smartfox')
    smart.readConfigFile()

    if smart.connect():
        print('Connected')
        #smart.readRegister(1, 'Energy from grid')
        smart.queryData()
        # smart.readAllRegisters()
    else:
        print('Failed to Connect')","repo_name":"ms412/Smartfox2mqtt","sub_path":"library/smartfox.py","file_name":"smartfox.py","file_ext":"py","file_size_in_byte":4764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"44473386088","text":"class Bits(object):
    MAX_BITS = 32

    def print_binary(self, num):
        if num is None or num >= 1 or num <= 0:
            return "ERROR"
        result = ["0", "."]
        fraction = 0.5
        while num:
            if num >= fraction:
                result.append("1")
                num -= fraction
            else:
                result.append("0")
            if len(result) > self.MAX_BITS:
                return "ERROR"
            fraction /= 2
        return "".join(result)
","repo_name":"labex-labs/open-source-labs","sub_path":"python/interactive-coding-challenges/challenge-print-binary/solutions/print_binary.py","file_name":"print_binary.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
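A quick sanity check for the routine above (0.625 = 0.5 + 0.125, i.e. "0.101" in binary; values outside (0, 1) and non-terminating fractions report "ERROR"):

bits = Bits()
assert bits.print_binary(0.625) == "0.101"
assert bits.print_binary(1.5) == "ERROR"
assert bits.print_binary(0.1) == "ERROR"  # repeating binary fraction, exceeds 32 bits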
{"seq_id":"3065465405","text":"import torch


def shift(x, n_segment, fold_div=3):
    nt, c, h, w = x.size()
    n_batch = int(nt / n_segment)
    x = x.view(n_batch, n_segment, c, h, w)
    fold = int(c / fold_div)
    left_side = torch.cat((x[:, 1:, :fold], torch.zeros(n_batch, 1, fold, h, w).to(x.device)), dim=1)
    middle_side = torch.cat((torch.zeros(n_batch, 1, fold, h, w).to(x.device), x[:, :n_segment - 1, fold: 2 * fold]),
                            dim=1)
    out = torch.cat((left_side, middle_side, x[:, :, 2 * fold:]), dim=2)
    return out.view(nt, c, h, w)


if __name__ == '__main__':
    nt = 16
    c = 256
    h = 2
    w = 2
    n_segment = nt
    fold_div = c
    x = torch.arange(nt*c*h*w).float().reshape([nt, c, h, w])
    out = shift(x, n_segment, fold_div)
    print(x)
    print(out)
","repo_name":"itsliupeng/test_torch_cuda","sub_path":"py/check_temporal_shift.py","file_name":"check_temporal_shift.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"16450485852","text":"N = 3

arr = [1, 2, 3]  # the data we will draw from

sel = [0] * N  # selection list (did I pick this element?) [0,0,0]


def power_set(idx):
    if idx == N:
        print(sel, ":", end=' ')
        for i in range(N):
            if sel[i]:
                print(arr[i], end='')
        print()

    else:
        # pick the element at position idx (True) and recurse.
        sel[idx] = 1
        power_set(idx+1)

        # skip the element at position idx (False) and recurse.
        sel[idx] = 0
        power_set(idx+1)


power_set(0)

","repo_name":"charlie-jyj/APS","sub_path":"algorithm_lecture/Stack2/powerset_재귀.py","file_name":"powerset_재귀.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"27678887398","text":"import unittest
import message
from message import Message as msg

class TestMessageMethods(unittest.TestCase):

    def test_constructor_defaults(self):
        ms = msg(0, 0)
        self.assertEqual("",ms.Data)
        self.assertEqual(message.MT_DATA,ms.Header.hactioncode)

if __name__ == '__main__':
    unittest.main()","repo_name":"PsiWa/IntSys","sub_path":"SereginIntegratedSys/SereginPyClient/Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"4112668961","text":"import tensorflow as tf\r\nimport tf_util as tf_util\r\nimport sys\r\nsys.path.append('./layers')\r\nfrom pooling import PoolingLayer\r\nfrom convolution_layer import ConvLayer\r\nfrom convlayer_elements import ConvElements\r\n\r\n\r\nclass Network:\r\n    def __init__(self,conf):\r\n        self.conf = conf\r\n\r\n    def build_network(self,pointclouds_pl,is_training,is_eveluate,bn_decay = None):\r\n        with_bn = self.conf.get_bool('with_bn')\r\n        batch_size = pointclouds_pl.get_shape()[0].value\r\n        num_point = pointclouds_pl.get_shape()[1].value\r\n\r\n        if (self.conf['with_rotations']):\r\n            cov = self.tf_cov(pointclouds_pl)\r\n            _, axis = tf.self_adjoint_eig(cov)\r\n            axis = tf.where(tf.linalg.det(axis) < 0, tf.matmul(axis, tf.tile(\r\n                tf.constant([[[0, 1], [1, 0]]], dtype=tf.float32), multiples=[axis.get_shape()[0], 1, 1])), axis)\r\n\r\n            indicies = [[[b, 0, 0], [b, 2, 0], [b, 0, 2], [b, 2, 2]] for b in list(range(batch_size))]\r\n            updates = tf.reshape(axis, [batch_size, -1])\r\n            updates = tf.reshape(tf.matmul(\r\n                tf.tile(tf.constant([[[0, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 0]]], dtype=tf.float32),\r\n                        multiples=[batch_size, 1, 1]), tf.expand_dims(updates, axis=-1)), shape=[batch_size, -1])\r\n\r\n            alignment_transform = tf.scatter_nd(indices=indicies, updates=updates,\r\n                                                shape=[batch_size, 3, 3]) + tf.expand_dims(\r\n                tf.diag([0.0, 1.0, 0.0]), axis=0)\r\n            mean_points = tf.reduce_mean(pointclouds_pl, axis=1, keepdims=True)\r\n            pointclouds_pl = tf.matmul(pointclouds_pl - mean_points, alignment_transform) + mean_points\r\n\r\n        ps_function_pl = tf.concat([pointclouds_pl,tf.ones(shape=[batch_size,num_point,1],dtype=tf.float32)],axis=2)\r\n\r\n        pool_sizes_sigma = self.conf.get_list('pool_sizes_sigma')\r\n        spacing = self.conf.get_float('kernel_spacing')\r\n\r\n        network = ps_function_pl\r\n        input_channel = network.get_shape()[2].value\r\n\r\n        blocks = self.conf.get_list('blocks_out_channels')\r\n        for block_index,block in enumerate(blocks):\r\n            block_elm = ConvElements(pointclouds_pl, 1. 
* tf.reciprocal(tf.sqrt(tf.cast(pointclouds_pl.get_shape()[1].value,tf.float32))),spacing,self.conf.get_float('kernel_sigma_factor'))\r\n for out_index,out_channel in enumerate(block):\r\n network = ConvLayer(input_channel, block_elm, out_channel, '{0}_block_{1}'.format(block_index,out_index),is_training).get_layer(network,with_bn,bn_decay,self.conf.get_bool('interpolation'))\r\n input_channel = out_channel\r\n pointclouds_pl, network = PoolingLayer(block_elm, out_channel, out_channel,\r\n int(pool_sizes_sigma[block_index + 1][0])).get_layer(network,is_subsampling=self.conf.get_bool('subsampling'),use_fps= tf.logical_or(is_training,is_eveluate))\r\n\r\n network = tf.reshape(network, [batch_size, -1])\r\n network = tf_util.fully_connected(network, self.conf.get_int('fc1.size'), bn=True, is_training=is_training,\r\n scope='fc1', bn_decay=bn_decay)\r\n network = tf_util.dropout(network, keep_prob=self.conf.get_float('dropout.keep_prob'), is_training=is_training,\r\n scope='dp1')\r\n network = tf_util.fully_connected(network, self.conf.get_int('fc2.size'), bn=True, is_training=is_training,\r\n scope='fc2', bn_decay=bn_decay)\r\n network = tf_util.dropout(network, keep_prob=self.conf.get_float('dropout.keep_prob'), is_training=is_training,\r\n scope='dp2')\r\n network = tf_util.fully_connected(network, 40, activation_fn=None, scope='fc3')\r\n\r\n return network\r\n\r\n def get_loss(self, pred, label):\r\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)\r\n return tf.reduce_mean(loss)\r\n\r\n def tf_cov(self, x):\r\n x = tf.transpose(tf.gather(tf.transpose(x, [2, 1, 0]), [0, 2]), [2, 1, 0])\r\n mean_x = tf.reduce_sum(x, axis=1, keepdims=True)\r\n mx = tf.matmul(tf.transpose(mean_x, [0, 2, 1]), mean_x)\r\n vx = tf.einsum('bij,bik->bjk', x, x)\r\n num = tf.cast(tf.shape(x)[1], tf.float32)\r\n cov_xx = 1. / num * (vx - (1. / num) * mx)\r\n return cov_xx","repo_name":"matanatz/pcnn","sub_path":"pointcloud_conv_net.py","file_name":"pointcloud_conv_net.py","file_ext":"py","file_size_in_byte":4440,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"31"} +{"seq_id":"37329714492","text":"import re\nimport sys\nimport functools\nfrom copy import copy\nfrom itertools import combinations\n\ninput = open(sys.argv[1] if len(sys.argv) >= 2 else 'input').read()\n\nFLOW_RATES = dict()\nNETWORK = dict()\n\nfor line in input.split('\\n'):\n pipe, rate, connections = re.findall(r\"Valve ([A-Z]{2}) has flow rate=([0-9]+); tunnels? leads? to valves? 
(.*)\", line)[0]\n rate = int(rate)\n NETWORK[pipe] = connections.split(', ')\n FLOW_RATES[pipe] = rate\n\n\ndef distance(s: str, d: str, path = set()) -> int:\n if s == d:\n return 0\n\n paths = []\n for node in [x for x in NETWORK[s] if x not in path]:\n visited = copy(path)\n visited.add(node)\n foo = distance(node, d, visited) \n if foo != None:\n paths.append(1 + foo)\n\n if len(paths) == 0:\n return None \n\n return min(paths)\n \nDISTANCE_TABLE = dict() \nUSEFULL_NODES = [node for (node, rate) in FLOW_RATES.items() if rate > 0]\nUSEFULL_NODES.append('AA')\n\nfor a, b in list(combinations(USEFULL_NODES, 2)):\n DISTANCE_TABLE[(a, b)] = distance(a, b, set(a))\n DISTANCE_TABLE[(b, a)] = distance(b, a, set(b))\n\nUSEFULL_NODES.remove('AA')\n@functools.lru_cache(maxsize=None)\ndef solve(current_node, time_remaining, enabled_nodes = None, players_remaining = 0):\n if enabled_nodes is None: enabled_nodes = frozenset() \n\n if time_remaining == 0:\n if players_remaining > 0:\n return solve('AA', 26 - 1, enabled_nodes, players_remaining - 1)\n else:\n return 0\n\n best = 0\n if current_node not in enabled_nodes and FLOW_RATES[current_node] > 0:\n new_total = time_remaining * FLOW_RATES[current_node]\n best = new_total + solve(current_node, time_remaining-1, enabled_nodes | frozenset([current_node]), players_remaining) \n\n # Look for other options\n for next_node in [node for node in USEFULL_NODES]:\n if next_node in enabled_nodes: continue\n if next_node == current_node: continue\n\n cost = DISTANCE_TABLE[current_node, next_node]\n\n if time_remaining - cost < 0: continue\n best = max(best, solve(next_node, time_remaining-cost, enabled_nodes, players_remaining))\n \n return best\n\nprint(solve('AA', 30 - 1, None, 0))\nprint(solve('AA', 26 - 1, None, 1))","repo_name":"timfennis/advent-of-code-2022","sub_path":"python/day16/day16-chain2.py","file_name":"day16-chain2.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"36013518924","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom .models import *\nfrom .forms import *\nfrom pine.models import *\nfrom pine.forms import *\n# Create your views here.\n@login_required\ndef rooms(request):\n rooms = Room.objects.filter(slug=\"supplier\")\n\n return render(request, 'chat/rooms.html', {'rooms':rooms})\n@login_required\ndef room(request,slug):\n # rooms = Room.objects.all()\n room = Room.objects.get(slug=slug)\n messages = Message.objects.filter(room=room)[0:25]\n\n return render(request, 'chat/room.html', {'room':room, 'messages': messages, })\n\ndef room_delete(request, pk):\n try:\n room = Room.objects.get(id=pk)\n except Room.DoesNotExist:\n return redirect('bidding')\n \n if request.method == 'POST':\n room.delete()\n return redirect('bidding')\n \n return render(request, 'chat/room_delete.html', {'room': room})\n\ndef bidding(request):\n rooms = Room.objects.exclude(slug=\"employee\")\n bidding_processes = BiddingProcess.objects.all()\n if request.method == 'POST':\n roomform = RoomForm(request.POST)\n\n if roomform.is_valid():\n roomform.save()\n return redirect('bidding')\n \n else:\n roomform = RoomForm()\n\n if request.method == 'POST':\n biddingForm = BiddingForm(request.POST)\n\n if biddingForm.is_valid():\n biddingForm.save()\n return redirect('bidding')\n \n else:\n biddingForm = BiddingForm()\n\n\n return render(request, 'chat/bidding.html', {'rooms':rooms, 'roomform': 
roomform,\n 'bidding_processes': bidding_processes,\n 'biddingForm': biddingForm})\n\ndef bidder_win_list(request):\n bidders_win = BiddingProcess.objects.all()\n for bidding_process in bidders_win:\n bidding_process.total = bidding_process.calculate_total()\n\n context = {\n 'bidders_win': bidders_win\n }\n return render (request, 'chat/bidders_win_list.html', context)","repo_name":"JayReonal/epine","sub_path":"chat/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30525171018","text":"\"\"\"\nParallel processing\nDivide input(Map) and aggregate(Reduce) output\n\"\"\"\nfrom multiprocessing import Pool\nimport time\n# def fun(n):\n# return n*n\n\ndef fun(n):\n sum = 0\n for i in range(n):\n sum += i*i\n return sum\n\nif __name__ == \"__main__\":\n t1 = time.time()\n p = Pool(processes=2)\n # p.map(fun, arr)\n result = p.map(fun, range(10000))\n p.close()\n p.join()\n # result = []\n # for n in arr:\n # result.append(fun(n))\n print(\"Pool took : \", time.time()-t1)\n t2 = time.time()\n for i in range(10000):\n result.append(fun(i))\n print(\"serial processing took: \", time.time()-t2)\n\n #output\n # Pool took : 2.0030744075775146\n # serial processing took: 5.65215802192688\n\n # if take processes=3 in Pool\n # Pool took : 2.8116846084594727\n # serial processing took: 5.556454420089722\n \n\n #if i take processes = 2\n #Pool took : 3.380298376083374\n # serial processing took: 5.721689462661743\n","repo_name":"doncans/DS","sub_path":"multiprocessing_pool.py","file_name":"multiprocessing_pool.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74977930646","text":"from .word import anagram\nfrom .player import Player\nfrom tkinter import *\n\nwindow = Tk()\nwindow.title(\"CS324-PZ: Anagram Solver by Aleksa Cekic\")\nwindow.geometry(\"1280x720\")\nwindow.resizable(True, True)\n\nheader = Label(text=\"Anagram Solver\", font=(\"Helvetica\", 20))\nheader.pack(pady=20)\n\nstartFrame = Frame(window)\nmainGameFrame = Frame(window)\n\np = Player()\n\n\ndef go_to_game_frame():\n mainGameFrame.pack(fill='both', expand=1)\n startFrame.pack_forget()\n\n\ndef player_input(text, frame):\n if text.get() != \"\":\n p.set_name(text.get())\n clear_frame(frame)\n game_frame()\n\n\ndef start():\n startFrame.pack()\n label1 = Label(startFrame, text=\"Enter players name: \", font=(\"Helvetica\", 14))\n label1.pack(pady=20)\n\n input_field = Entry(startFrame, width=25)\n input_field.pack(pady=20)\n\n btn = Button(startFrame, text=\"Okay\", command=lambda: player_input(input_field, startFrame))\n btn.pack(pady=20)\n\n\ndef solve(answer, inpt, frame):\n if answer == inpt.lower():\n count = p.get_score() + 1\n p.set_score(count)\n clear_frame(frame)\n game_frame()\n else:\n if p.get_score() > 0:\n count = p.get_score() - 1\n p.set_score(count)\n clear_frame(frame)\n game_frame()\n\n\ndef show(lbl, t):\n lbl.config(text=t)\n\n\ndef game_frame():\n go_to_game_frame()\n\n player_name = Label(mainGameFrame, text=\"Name: {}, Score: {}\".format(p.get_name(), p.get_score()),\n font=(\"Helvetica\", 15))\n player_name.pack(pady=20)\n\n a = anagram(5)\n\n question_label = Label(mainGameFrame, text=\"\".join(a.get_question()), font=(\"Helvetica\", 12), fg=\"Red\")\n question_label.pack()\n\n hint = Label(mainGameFrame, text=\"\", font=(\"Helvetica\", 12), fg=\"Blue\")\n 
hint.pack(pady=20)

    inpt = Entry(mainGameFrame, width=25)
    inpt.pack(pady=20)

    btn_solve = Button(mainGameFrame, text="Solve", command=lambda: solve(a.get_answer(), inpt.get(), mainGameFrame))
    btn_solve.pack(pady=20)

    btn_show = Button(mainGameFrame, text="Show Hint", command=lambda: show(hint, a.get_hint()))
    btn_show.pack(pady=20)

    btn_show = Button(mainGameFrame, text="End Game", command=lambda: save_stats(p, window))
    btn_show.pack(pady=20)


def main():
    start()

    window.mainloop()


def clear_frame(frame):
    for w in frame.winfo_children():
        w.destroy()


def save_stats(plyr, win):
    # saves to file
    file = open("docs/player_score.txt", "a")
    file.write("\n" + plyr.__repr__())
    file.close()
    win.destroy()
","repo_name":"yxngalex/anagram-game","sub_path":"anagram/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"6215218856","text":"import json, sys
import time
from functools import reduce

# data.py stores the streamer data fetched from vtbs.moe
from data.data import DATA

# path of the source file
src_file_path = "data/ori_data.json"
# path of the target file
tgt_file_path = "data/data.py"

def delete_duplicate(data):
    func = lambda x, y: x + [y] if y not in x else x
    data = reduce(func, [[], ] + data)
    return data

# the latest vtb data
with open(src_file_path, "r", encoding="utf8") as f:
    ori_data = json.load(f)

print("len(ori_data)=" + str(len(ori_data)))
print("len(DATA)=" + str(len(DATA)))

# merge first, then deduplicate
# new_data = DATA + ori_data
# print("len(new_data)=" + str(len(new_data)))

# DATA = delete_duplicate(new_data)
# print("duplicate len(DATA)=" + str(len(DATA)))

# iterate, check whether each entry exists, then append at the tail
num = 0
for temp_json in ori_data:
    if temp_json in DATA:
        continue
    else:
        # append to the json
        DATA.append(temp_json)
        num += 1
        # print(temp_json)

print("add total num=" + str(num))

print("after len(DATA)=" + str(len(DATA)))

# write the data to local disk, meow
with open(tgt_file_path, 'w', encoding="utf-8") as file_object:
    file_object.write("DATA=" + json.dumps(DATA, ensure_ascii=False))
file_object.close()
print("write " + tgt_file_path + " over")
","repo_name":"Ikaros-521/get_bili_medal_list","sub_path":"update_data.py","file_name":"update_data.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
{"seq_id":"72658583768","text":"from pwn import *

'''
Have you ever played with this *special* seq holder in Python?
nc ctf.sharif.edu 22106
Alternative: nc 213.233.161.38 22106
'''

local = True

if local:
    r = process("./server.py")
else:
    r = remote("213.233.161.38", 22106)

def menu():
	r.recvuntil("Exit")

def create_db(id, tag, l):
	r.sendline("1")
	r.sendline(str(id))
	r.sendline(tag)
	r.sendline(str(l))

def edit_db(tag, seq=None):
	r.sendline("2")
	r.recvuntil("[2] seq")
	r.sendline("1")
	r.sendline(tag)

def print_db(tag=True):
	r.sendline("3")
	r.recvuntil("[2] seq")
	if tag:
		r.sendline("1")
		r.recvuntil("tag: ")
	else:
		r.sendline("2")
		r.recvuntil("seq: ")
		return r.recvuntil("1.")[:-3]


create_db(0, "", 20)
menu()

leak_s = print_db(tag=False)
libc_leak = u64(leak_s.ljust(8, "\x00"))
info("libc_leak: %s" % hex(libc_leak))

libc_offset = 0x7fa3043f7b88-0x00007fa304033000
libc_base = libc_leak - libc_offset
info("libc_base: %s" 
% hex(libc_base))\n\nmenu()\n\nwritable_addr = 0x921010\none_shot_shell = libc_base + 0x4526a\nedit_db(\"A\"*8 + p64(0)+ p64(0) + p64(writable_addr) + p64(20) + p64(one_shot_shell) + p64(0)) # overflow and overwrite db method\nmenu()\n\nr.sendline(\"5\")\n\nr.interactive()\n\n'''\n[+] Opening connection to 213.233.161.38 on port 22106: Done\n[*] libc_leak: 0x7f534050bb88\n[*] libc_base: 0x7f5340147000\n[*] Switching to interactive mode\nEnter selected menu> $ id\nuid=1001(suctf) gid=1001(suctf) groups=1001(suctf)\n$ cat /home/suctf/flag\nSharifCTF{0N3_M0R3_5T3P_70_**J1T**_H34V3N}\n'''\n\n","repo_name":"0xacb/ctf-solutions","sub_path":"sharif-ctf-2018/pwn125/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"31"} +{"seq_id":"10594990609","text":"import abc\nfrom nova import exception as nex\nimport six\n\nfrom nova_powervm.virt.powervm.i18n import _\n\n\nclass NVRAMUploadException(nex.NovaException):\n msg_fmt = _(\"The NVRAM could not be stored for instance %(instance)s. \"\n \"Reason: %(reason)s\")\n\n\nclass NVRAMDownloadException(nex.NovaException):\n msg_fmt = _(\"The NVRAM could not be fetched for instance %(instance)s. \"\n \"Reason: %(reason)s\")\n\n\nclass NVRAMDeleteException(nex.NovaException):\n msg_fmt = _(\"The NVRAM could not be deleted for instance %(instance)s. \"\n \"Reason: %(reason)s\")\n\n\nclass NVRAMConfigOptionNotSet(nex.NovaException):\n msg_fmt = _(\"The configuration option '%(option)s' must be set.\")\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass NvramStore(object):\n\n @abc.abstractmethod\n def store(self, instance, data, force=True):\n \"\"\"Store the NVRAM into the storage service.\n\n :param instance: The nova instance object OR instance UUID.\n :param data: the NVRAM data base64 encoded string\n :param force: boolean whether an update should always be saved,\n otherwise, check to see if it's changed.\n \"\"\"\n\n @abc.abstractmethod\n def fetch(self, instance):\n \"\"\"Fetch the NVRAM from the storage service.\n\n :param instance: The nova instance object OR instance UUID.\n :returns: the NVRAM data base64 encoded string\n \"\"\"\n\n @abc.abstractmethod\n def delete(self, instance):\n \"\"\"Delete the NVRAM from the storage service.\n\n :param instance: The nova instance object OR instance UUID.\n \"\"\"\n","repo_name":"openstack/nova-powervm","sub_path":"nova_powervm/virt/powervm/nvram/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"31"} +{"seq_id":"34986788960","text":"from urllib import parse, request\nimport json\nfrom ptuBusCrawling.Crawler.Util.SendSlcakMsg import SendSlackMeg\nfrom ptuBusServer.Models import TrainTimeTableModel, TrainStationModel\n\n\nclass TrainTimeTableParsing:\n def __init__(self):\n self.pData = TrainStationModel.objects.all()\n self.apiKey = \"mxl46U1g52x6aVOUX/p969Zbtq9EZmboho4Jp5WiUlQ\"\n self.url = \"https://api.odsay.com/v1/api/trainServiceTime?\"\n self.msg = SendSlackMeg()\n self.trainTypeCode = [\"KTX\", \"무궁화\", \"새마을\", \"ITX-새마을\", \"누리로\", \"통근\"]\n # 0 1 2 3 4 5\n self.dailyTypeCode = [\n \"토\",\n \"금토일\",\n \"토일\",\n \"화수목금토일\",\n \"월화수목토일\",\n \"금\",\n \"금토\",\n \"금일\",\n \"월\",\n \"매일\",\n \"월화수목금토\",\n \"월화수목금\",\n \"월화수목\",\n ]\n # 0 1 2 3 4 5 6 7 8 9 10\n\n def makeURL(self, query):\n return self.url + parse.urlencode(query, encoding=\"UTF-8\", doseq=True)\n\n def openURL(self, 
query):
        url = self.makeURL(query)
        request_url = request.Request(url)
        response = request.urlopen(request_url)
        return response.read().decode("utf-8")

    def checkError(self, data):
        import sys  # added: sys is used below but never imported at module top
        if ("error" in data) == True:
            code = data["error"][0]["code"]
            message = data["error"][0]["message"]
            error_status = "code : " + code + "\nmessage : " + message
            print(error_status)
            self.msg.sendMsg(error_status)
            sys.exit()
        else:
            return data

    def parsing(self):
        count = 1
        for parsingData in self.pData:
            query = [
                ("apiKey", self.apiKey),
                ("startStationID", parsingData.startStationID),
                ("endStationID", parsingData.endStationID),
            ]
            data = self.openURL(query)
            rDD = self.checkError(json.loads(data))
            startStationName = rDD["result"]["startStationName"]
            startStationID = rDD["result"]["startStationID"]
            endStationName = rDD["result"]["endStationName"]
            endStationID = rDD["result"]["endStationID"]
            results = rDD["result"]["station"]
            for result in results:
                TrainTimeTableModel(
                    id=count,
                    startStationName=startStationName,
                    startStationID=startStationID,
                    endStationName=endStationName,
                    endStationID=endStationID,
                    railName=result["railName"],
                    trainClass=self.trainTypeCode.index(result["trainClass"]),
                    departureTime=result["departureTime"],
                    schedule=result["arrivalTime"],
                    wasteTime=result["wasteTime"],
                    dailyTypeCode=self.dailyTypeCode.index(result["runDay"]),
                ).save()
                count += 1


if __name__ == "__main__":
    if __package__ is None:
        import sys
        from os import path

        sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
        from TrainStationParsing import TrainStationParsing
    else:
        from .TrainStationParsing import TrainStationParsing
    sample = TrainTimeTableParsing(TrainStationParsing().parsing()).parsing()
","repo_name":"ptuBus/ptuBus_Server","sub_path":"ptuBusCrawling/Crawler/Trains/TrainTimeTableParsing.py","file_name":"TrainTimeTableParsing.py","file_ext":"py","file_size_in_byte":3551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"280710326","text":"'''
─────────────────
 HELLTERHEAD corp.
 © 2020 MO-BANZU
─────────────────
'''
# (banner repaired: the original decorative box art was mojibake-garbled)

from linepy import *
from akad.ttypes import LiffChatContext, LiffContext, LiffSquareChatContext, LiffNoneContext, LiffViewRequest
from template import Mobanzu
import requests, uvloop, json, threading, asyncio, livejson

client = LINE("EMAIL", "PASSWORD") #USE_YOUR_EMAIL_AND_PASSWORD
client.log("Auth Token : " + str(client.authToken))

poll = OEPoll(client)
mobflex = Mobanzu(client)
loop = asyncio.get_event_loop()

def allow_liff():
    url = 'https://access.line.me/dialog/api/permissions'
    data = {'on': ['P', 'CM'], 'off': []}
    headers = {
        'X-Line-Access': client.authToken,
        'X-Line-Application': client.server.APP_NAME,
        'X-Line-ChannelId': '1655425084',
        'Content-Type': 'application/json'
    }
    requests.post(url, json=data, headers=headers)

def sendTemplate(to, data):
    drex = LiffChatContext(to)
    mobz = LiffContext(chat=drex)
    view = LiffViewRequest('1655425084-3OQ8Mn9J', mobz)
    token = client.liff.issueLiffView(view)
    url = 'https://api.line.me/message/v3/share'
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer %s' % token.accessToken
    }
    data = {"messages": [data]}
    requests.post(url, headers=headers, data=json.dumps(data))

def sendFlex(to, alt, flex):
    data = {"type": "flex", "altText": alt, "contents": flex}
    sendTemplate(to, data)

async def clientBot(op):
    try:
        if op.type == 0:
#            print("[ 0 ] END OF OPERATION")
            return
        if op.type == 26:
            print("[ 26 ] RECEIVE MESSAGE")
            msg = op.message
            text = msg.text
            id = msg.id
            to = msg.to
            receiver = msg.to
            sender = msg._from
            if msg.toType == 0 or msg.toType == 1 or msg.toType == 2:
                if msg.toType == 0:
                    if sender != client.getProfile().mid: to = sender
                    else: to = receiver
                if msg.toType == 1 or msg.toType == 2: to = msg.to
                if msg.contentType == 0:
                    if None == msg.text: return
                    cmd = msg.text.lower()
                    if cmd == "allowliff":
                        try: allow_liff(); client.sendReplyMessage(id, to, "Access Granted For Flex Message.")
                        except: client.sendReplyMessage(id, to, "line://app/LIFF_ID?type=text&text=Done") #USE_YOUR_LIFF_ID
                    try:
                        if cmd == "help":
                            contact = client.getContact(sender)
                            name = contact.displayName
                            status = contact.statusMessage if contact.statusMessage != '' else ' '
                            try: picture = "https://obs.line-scdn.net/" + contact.pictureStatus
                            except: picture = "https://i.ibb.co/tczXyp1/hlth-Img-Not-Found.jpg"
                            sendFlex(to, "Help Menu", mobflex.helpMenu(picture, name, status))
                        if cmd == "profile": sendFlex(to, "Profile Menu", mobflex.profileMenu())
                        if cmd == "group": sendFlex(to, "Group Menu", mobflex.groupMenu())
                        if cmd == "media": sendFlex(to, "Media Menu", mobflex.mediaMenu())
                        if cmd == "service": sendFlex(to, "Service Menu", mobflex.serviceMenu())
                        if cmd == "system": sendFlex(to, "System Menu", mobflex.systemMenu())
                        if cmd == "forum": sendFlex(to, "Forum Menu", mobflex.forumMenu())
                    except: client.sendReplyMessage(id, to, "Access LIFF Required\nPlease Type 'Allowliff' First.")
    except Exception as error: print(error)

def run():
    while True:
        try:
            ops = poll.singleTrace(count=50)
            if ops != None:
                for op in ops:
                    loop.run_until_complete(clientBot(op))
                    poll.setRevision(op.revision)
        except Exception as error: print(error)

if __name__ == "__main__":
    run()
","repo_name":"Mobanzu/mobflex","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":4248,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"31"}
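For reference, a minimal payload that sendFlex() above can forward — a sketch of the standard Flex "bubble" shape from LINE's public Messaging API (the text content is arbitrary):

minimal_bubble = {
    "type": "bubble",
    "body": {
        "type": "box",
        "layout": "vertical",
        "contents": [
            {"type": "text", "text": "Hello from Flex"}
        ]
    }
}
# usage: sendFlex(to, "Hello", minimal_bubble)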
{"seq_id":"36530676891","text":"# read music from music.json

import json

def big2Small(song):
    return {"title": song['song']['title'],
            "artist": song['artist']['name'] }

def songListToHTMLTable(songs):
    # note: the HTML tags in this file were stripped from the source;
    # they are reconstructed below from the function and variable names
    answer = "<table>\n" + songTableHeader()
    for song in songs:
        answer += song2HTMLTableRow(song)
    answer += "</table>\n"
    return answer

def songTableHeader():
    return ("<tr>\n" +
            "<th> Song Title </th>" +
            "<th> Artist Name </th>" + 
            "</tr>\n")


def song2HTMLTableRow(song):
    return ("<tr>\n" +
            "<td>" + song['song']['title'] + "</td>" +
            "<td>" + song['artist']['name'] + "</td>" + 
            "</tr>\n")


def writeWebPage(filename,content,title):
    with open(filename,'w') as webpage:
        webpage.write("<!DOCTYPE html>\n")
        webpage.write("<html>\n")
        
        webpage.write("<head>\n")
        styleElem = """
        <style>
        </style>
        """
        # (any CSS originally inside styleElem was lost to the tag stripping;
        # only the empty element is restored here)
        webpage.write(styleElem)
        
        webpage.write("<title>" + title + "</title>\n")
        webpage.write("</head>\n")

        webpage.write("<body>\n")
        webpage.write(content)
        webpage.write("</body>\n")

        webpage.write("</html>\n")
    

if __name__=="__main__":
    with open('music.json') as json_data:
        songs = json.load(json_data)

    print("The variable songs now contains all the data")

    print("The variable songs is a list of big complicated dictionaries")
    print("We'd like a list of something easier to work with")

    print("The function big2Small(song) will turn a big song dictionary")
    print("into a smaller one")
    
    writeWebPage("fiveSongs.html",songListToHTMLTable(songs[5:10]),"five songs")
    
","repo_name":"ucsb-cs8-s18/LECTURE_05_08","sub_path":"generateMusicWebPage.py","file_name":"generateMusicWebPage.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"6069773247","text":"from models import *
from forms import *
from inventory.models import BatchLoad
from sales.models import *
from common.views import DecimalEncoder

from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.core import serializers
from django.utils import simplejson
from django.db.models import Q
from django.http import HttpResponse
from urllib import unquote

def get_name_query(term):
    words = term.split(" ")
    if len(words) > 1:
        words = filter(lambda x: None or x.strip(), words)
    word_query = Q()
    for word in words:
        word_query &= Q(name__istartswith = word) | Q(name__icontains = " " + word)
    return word_query

#
# FORNITORI
#

def find_supplier(request):
    if request.is_ajax():
        term = unquote(request.GET["term"])
        name_query = get_name_query(term)
        matches = Supplier.objects.filter(name_query)
        result_list = []
        for supp in matches:
            result_list.append(supp.to_dict())
        if len(result_list) == 1:
            result_list[0]["perfect_match"] = True
        return HttpResponse(simplejson.dumps(result_list, cls=DecimalEncoder), 'application/javascript')
    else:
        return render_to_response('suppliers/find.html')

def add_supplier(request):
    if request.method == 'POST':
        form = SupplierForm(request.POST)
        if form.is_valid():
            new_supplier = form.save()
            if request.is_ajax():
                return render_to_response('suppliers/ajax_add.html')
            else:
                return redirect(show_supplier, new_supplier.id)
    else:
        form = SupplierForm()
    return render_to_response('suppliers/%sadd.html' % (request.is_ajax() and 'ajax_' or ''), {'form': form})

def show_supplier(request, id):
    supplier = get_object_or_404(Supplier, pk=id)
    return render_to_response('suppliers/show.html', {'supplier': supplier})

#---- children of the show_supplier view

def supplier_info_tab(request, id):
    supplier = get_object_or_404(Supplier, pk=id)
    bad_request = False
    if request.method == "POST":
        form = SupplierForm(request.POST, instance = supplier)
        if form.is_valid():
            form.save()
        else:
            bad_request = True
    else:
        form = 
SupplierForm(instance = supplier)\n response = render_to_response('suppliers/tabs/info.html', {'form': form, 'supplier': supplier})\n if bad_request:\n response.status_code = 400\n return response\n\ndef supplier_history_tab(request, supplier_id):\n supplier = get_object_or_404(Supplier, pk=supplier_id)\n batchloads = BatchLoad.objects.filter(supplier = supplier, loaded = True).order_by(\"-date\")\n return render_to_response('suppliers/tabs/history.html', {'supplier': supplier, 'batchloads': batchloads})\n\n\n#\n# CLIENTI\n#\n\ndef find_customer(request):\n if request.is_ajax():\n term = unquote(request.GET[\"term\"])\n name_query = get_name_query(term)\n matches = Customer.objects.filter(name_query)\n result_list = []\n for cust in matches:\n result_list.append(cust.to_dict())\n if len(result_list) == 1:\n result_list[0][\"perfect_match\"] = True\n return HttpResponse(simplejson.dumps(result_list, cls=DecimalEncoder), 'application/javascript')\n else:\n return render_to_response('customers/find.html')\n\ndef add_customer(request):\n status = 200\n if request.is_ajax():\n Form = CustomerQuickForm\n else:\n Form = CustomerForm\n if request.method == 'POST':\n form = Form(request.POST)\n if form.is_valid():\n new_customer = form.save()\n if request.is_ajax():\n return HttpResponse(simplejson.dumps(new_customer.to_dict(), cls=DecimalEncoder), 'application/javascript')\n return redirect(show_customer, new_customer.id)\n else:\n status = 400\n else:\n form = Form()\n\n if request.is_ajax():\n response = render_to_response('customers/ajax_add.html', {'form': form})\n response.status_code = status\n return response\n else:\n return render_to_response('customers/add.html', {'form': form})\n\ndef add_company(request, pa=False):\n status = 200\n if request.is_ajax():\n Form = pa and PAQuickForm or CompanyQuickForm\n else:\n Form = pa and PAForm or CompanyForm\n if request.method == 'POST':\n form = Form(request.POST)\n if form.is_valid():\n new_company = form.save()\n if request.is_ajax():\n return HttpResponse(simplejson.dumps(new_company.to_dict(), cls=DecimalEncoder), 'application/javascript')\n return redirect(show_customer, new_company.id)\n else:\n status = 400\n else:\n form = Form()\n\n if request.is_ajax():\n response = render_to_response('customers/ajax_add_company.html', {'form': form, 'pa': pa})\n response.status_code = status\n return response\n else:\n return render_to_response('customers/add.html', {'form': form})\n\n\n\ndef show_customer(request, id):\n customer = get_object_or_404(Customer, pk=id)\n try:\n if customer.companycustomer:\n customer.company = customer.companycustomer\n except:\n pass\n return render_to_response('customers/show.html', {'customer': customer})\n\n\n#---- children of the show_customer view\n\ndef customer_info_tab(request, id):\n customer = get_object_or_404(Customer, pk=id)\n bad_request = False\n customer = customer.child()\n if customer.__class__ == PACustomer:\n Form = PAInfoForm\n elif customer.__class__ == CompanyCustomer:\n Form = CompanyInfoForm\n else:\n Form = CustomerInfoForm\n\n if request.method == \"POST\":\n form = Form(request.POST, instance = customer)\n if form.is_valid():\n form.save()\n else:\n bad_request = True\n else:\n form = Form(instance = customer)\n response = render_to_response('customers/tabs/info.html', {'form': form, 'customer': customer})\n if bad_request:\n response.status_code = 400\n return response\n\ndef customer_commercial_tab(request, id):\n customer = get_object_or_404(Customer, pk=id)\n bad_request = False\n try:\n 
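        # note (added): companycustomer is assumed to be the Django
        # multi-table-inheritance child row; accessing it raises the related
        # DoesNotExist when this customer is not a company, which is exactly
        # what the bare except below relies on.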
customer = customer.companycustomer\n company = True\n except:\n company = False\n if company:\n Form = CompanyCommercialForm\n else:\n Form = CustomerCommercialForm\n\n if request.method == \"POST\":\n form = Form(request.POST, instance = customer)\n if form.is_valid():\n form.save()\n else:\n bad_request = True\n else:\n form = Form(instance = customer)\n response = render_to_response('customers/tabs/commercial.html', {'form': form, 'customer': customer})\n if bad_request:\n response.status_code = 400\n return response\n\ndef customer_history_tab(request, customer_id):\n customer = get_object_or_404(Customer, pk=customer_id)\n carts = Cart.objects.filter(customer = customer, receipt__isnull = False)\n receipts = Receipt.objects.filter(cart__in = carts)\n list = []\n for r in receipts:\n list.append(r.child())\n return render_to_response('customers/tabs/history.html', {'customer': customer.child(), 'receipts': list})\n","repo_name":"marcor/silversly","sub_path":"people/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33527388559","text":"import re\n\"\"\"Module to retrieve inputs from user\"\"\"\n\ndef input_type(message : str, convert_type = str):\n \"\"\"Asks the user for input. If the program is unable to convert to the type 'convert_type', the program will ask the user again.\n \"\"\"\n while True:\n user_input = input(message)\n try:\n return convert_type(user_input)\n except:\n print(f\"That was not of the type {convert_type.__name__}. Please enter again\")\n\ndef license_input(message : str):\n \"\"\"Asks the user for input. If the program is unable to match a regular expression to '^[A-Z]{3}\\d{3}$', the program will ask the user again.\n \"\"\"\n while True:\n user_input = input_type(message, str)\n if (re.match(r'^[A-Z]{3}\\d{3}$', user_input)):\n return user_input\n else:\n print(f\"You did not enter a valid license number (ABC123). Please try again\")\n\ndef exit_input(message : str, parking_list : dict):\n \"\"\"Asks the user for input. If the program is unable to match a regular expression to '^[A-Z]{3}\\d{3}$' and find the input in a list, the program will ask the user again.\n \"\"\"\n while True:\n user_input = input_type(message, str) \n if (re.match(r'^[A-Z]{3}\\d{3}$', user_input) and user_input in parking_list):\n return user_input\n else:\n print(f\"You did not enter a valid license number (ABC123) or the car does not exist in the garage. Please try again\")\n\ndef size_input(message : str, convert_type = int):\n \"\"\"Asks the user for input. If the program is unable to find 1, 2 or 3 as the input, the program will ask the user again.\n \"\"\"\n while True:\n user_input = input_type(message, convert_type)\n if (user_input == 1 or user_input == 2 or user_input == 3):\n return user_input\n else:\n print(f\"You did not enter 1, 2 or 3. Please try again\")\n\ndef input_file(message : str, action = \"r\"):\n \"\"\"Asks the user to input a filename. If the program cannot find the file specified by the user, the program will ask the user again.\n \"\"\"\n while True:\n user_input = input(message)\n try:\n return open(user_input, action, encoding=\"utf-8\")\n except:\n print(f\"There is no file named {user_input}. Please enter a new file: \\n\")\n \ndef time_input(message : str):\n \"\"\"Asks the user to input a time. 
If the program cannot match the time to HH:MM, the program will ask the user again.\n \"\"\"\n while True:\n user_input = input(message)\n if (re.match(r'^([0-9]|0[0-9]|1[0-9]|2[0-3]):[0-5][0-9]$', user_input)):\n return user_input\n else:\n print(\"You did not enter a time in the format HH:MM. Please enter a new time.\")","repo_name":"TheOnlyPhoenix/garage","sub_path":"typed_input.py","file_name":"typed_input.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14542331662","text":"from django.contrib import admin\nfrom django.contrib.auth.forms import UserCreationForm\n\nfrom user.models import User\n\nbase_list_display = ('id', 'first_name', 'last_name', 'email')\nextended_list = base_list_display + ('username', 'role')\n\n\nclass CustomUserCreationForm(UserCreationForm):\n \"\"\"\n User creation form.\n \"\"\"\n\n class Meta:\n model = User\n fields = [item for item in extended_list if item != 'id']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field in extended_list:\n if field != 'id':\n self.fields[field].required = True\n\n\n@admin.register(User)\nclass UserAdmin(admin.ModelAdmin):\n list_display = extended_list\n form = CustomUserCreationForm\n","repo_name":"hahaSK/IIS_Cinema","sub_path":"user/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30633158552","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nfrom email import encoders\nfrom email.header import Header\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart, MIMEBase\nfrom email.mime.application import MIMEApplication\nfrom email.utils import parseaddr, formataddr\n\nimport smtplib\n\ndef _format_addr(s):\n name, addr = parseaddr(s)\n return formataddr((Header(name, 'utf-8').encode(), addr))\n\nfrom_addr = 'lhr_nicelife@163.com' # input('From: ')\npassword = 'Love20160120' #input('Password: ')\nto_addr = '152668252@qq.com, 376939627@qq.com' #input('To: ')\ncc_addr = 'linhanrui2006@163.com, henry.lin@ubtrobot.com'\nsmtp_server = 'smtp.163.com' #input('SMTP server: ')\n\n\n# 邮件对象:\nmsg = MIMEMultipart()\n\nmsg['From'] = _format_addr('163邮箱木木 <%s>' % from_addr)\nmsg['To'] = to_addr #_format_addr('卓灼上神 <%s>' % to_addr)\nmsg['Subject'] = Header('测试Python的邮件', 'utf-8').encode()\n\n# 邮件正文是MIMEText:\n# msg.attach(MIMEText('send with file...', 'plain', 'utf-8'))\nmsg.attach(MIMEText('
<html><body><h1>Hello</h1><p><img src=\"cid:0\"></p></body></html>' +\n # '<p><img src=\"cid:1\"></p>
' +\n '', 'html', 'utf-8'))\n\n# 首先是xlsx类型的附件\nxlsxpart = MIMEApplication(open('good.xlsx', 'rb').read())\nxlsxpart.add_header('Content-Disposition', 'attachment', filename='better.xlsx')\nmsg.attach(xlsxpart)\n\n# jpg类型的附件\njpgpart = MIMEApplication(open('o13.jpg', 'rb').read())\njpgpart.add_header('Content-Disposition', 'attachment', filename='new.jpg')\njpgpart.add_header('Content-ID', '<0>')\njpgpart.add_header('X-Attachment-Id', '0')\nmsg.attach(jpgpart)\n\n# mp3类型的附件\nmp3part = MIMEApplication(open('aa.txt', 'rb').read())\nmp3part.add_header('Content-Disposition', 'attachment', filename='news.txt')\nmsg.attach(mp3part)\n\n# jpg类型的附件\njpgpart1 = MIMEApplication(open('眼影新包装3.jpg', 'rb').read())\njpgpart1.add_header('Content-Disposition', 'attachment', filename='aaa.jpg')\njpgpart1.add_header('Content-ID', '<1>')\nmsg.attach(jpgpart1)\n\n\nserver = smtplib.SMTP(smtp_server, 25)\nserver.set_debuglevel(1)\nserver.login(from_addr, password)\nserver.sendmail(from_addr, to_addr.split(\",\"), msg.as_string())\nserver.quit()","repo_name":"EragoGeneral/python-demo","sub_path":"network/SMTP/email_mul_attachment.py","file_name":"email_mul_attachment.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37591155591","text":"def find_min_moves(n, ladders, m, snakes):\n q1 = []\n q1.append(1) # starting node\n edges = [i for i in range(1, 7)] # possible die roles\n q2 = []\n moves = 0\n visited = [0 for i in range(101)]\n visited[1] = 1\n while (q1):\n while (q1):\n node = q1.pop()\n if (node == 100):\n return moves\n \n for edge in edges:\n new_node = node + edge\n if (new_node > 100):\n break\n if (visited[new_node] == 1): # already visited; no need to check again\n continue\n visited[new_node] = 1\n if (new_node in snakes): # go down\n new_node = snakes[new_node]\n q2.append(new_node)\n continue\n if (new_node in ladders): # go up\n new_node = ladders[new_node]\n q2.append(new_node)\n continue\n q2.append(new_node)\n q1 = q2\n q2 = []\n moves += 1\n \n return -1\n\n# driver code\nt = int(input())\nfor i in range(t):\n n = int(input())\n ladders = {}\n for j in range(n):\n s, e = map(int, input().split())\n ladders[s] = e\n m = int(input())\n snakes = {}\n for k in range(m):\n s, e = map(int, input().split())\n snakes[s] = e\n \n print(find_min_moves(n, ladders, m, snakes))\n","repo_name":"goelhardik/programming","sub_path":"hackerrank/snakes_and_ladders/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11903818182","text":"from dlvc.datasets.pets import PetsDataset\nfrom dlvc.dataset import Subset\nfrom dlvc.batches import BatchGenerator\nfrom dlvc.test import Accuracy\nimport dlvc.ops as ops\nimport numpy as np\nimport torch\n\n\n# TODO: Define the network architecture of your linear classifier.\nclass LinearClassifier(torch.nn.Module):\n def __init__(self, input_dim, num_classes):\n super(LinearClassifier, self).__init__()\n\n self.input_dim = input_dim\n self.num_classes = num_classes\n\n # define network layer\n self.layer = torch.nn.Linear(self.input_dim, self.num_classes)\n\n def forward(self, x):\n return self.layer(x)\n\n\nop = ops.chain([\n ops.vectorize(),\n ops.type_cast(np.float32),\n ops.add(-127.5),\n ops.mul(1 / 127.5),\n])\n\n\ndef train_model(linear_classifier, criterion, optimizer, epochs, train_data, valid_data):\n acc = Accuracy()\n print(\"Train 
the network\")\n best_acc = 0.0\n for epoch in range(epochs):\n running_loss = 0.0\n acc.reset()\n for data in train_data:\n # get the inputs and the labels\n inputs = data.data\n labels = data.label\n\n # convert the np.array in tensor\n t_inputs = torch.tensor(inputs)\n t_labels = torch.tensor(labels).to(torch.long) #cast labels to long so the CE works (CE throws exception with int)\n\n # zero the parameter gradients, for every batch I must compute the gradient again\n optimizer.zero_grad()\n\n # forward step\n output = linear_classifier.forward(t_inputs)\n loss = criterion(output, t_labels)\n loss.backward() # compute the gradients\n optimizer.step() # update the parameter\n\n # print statistics\n running_loss += loss.item()\n acc.update(linear_classifier.forward(torch.tensor(valid_data.data)).detach().numpy(), valid_data.label)\n print(f\"epoch {epoch + 1} \\ntrain loss: {running_loss}\\nval accuracy: {acc.accuracy()}\")\n\n # update the values\n running_loss = 0.0\n if acc.accuracy() >= best_acc:\n best_acc = acc.accuracy()\n\n acc.reset()\n print(\"Finished Training\")\n return linear_classifier, best_acc\n\n\ndef main():\n fp = 'C:/Users/admin/Desktop/10. Semester/Computer Vision/dlvc_ss22/assignments/reference/cifar10'\n\n print(\"Load data\")\n train_ds = PetsDataset(fp, Subset.TRAINING)\n valid_ds = PetsDataset(fp, Subset.VALIDATION)\n test_ds = PetsDataset(fp, Subset.TEST)\n print(\"Data Loaded\")\n\n print(\"Creating Batch Generator\")\n train = BatchGenerator(train_ds, len(train_ds), False, op)\n valid = next(iter(BatchGenerator(valid_ds, len(valid_ds), False, op)))\n test = next(iter(BatchGenerator(test_ds, len(test_ds), False, op)))\n print(\"Batch Generator created\")\n\n #define general parameters\n in_features = 3072 # size of the vector in input\n epochs = 100\n\n #Test 1\n print(\"Create Linear Classifier, Loss Function and Optimizer\")\n lc = LinearClassifier(in_features, train_ds.num_classes())\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(lc.parameters(), lr=0.001, momentum=0.9)\n\n lc_test_1, best_acc_test1 = train_model(lc, criterion, optimizer, epochs, train, valid)\n\n #Test 2, change the optimizer\n print(\"------------------------------------\")\n lc = LinearClassifier(in_features, train_ds.num_classes())\n optimizer = torch.optim.Adam(lc.parameters(), lr=0.001)\n lc_test_2, best_acc_test2 = train_model(lc, criterion, optimizer, epochs, train, valid)\n\n #find the best model\n if best_acc_test1 > best_acc_test2:\n lc = lc_test_1\n best_acc = best_acc_test1\n else:\n lc = lc_test_2\n best_acc = best_acc_test2\n\n print(\"--------------------\")\n print(f\"val accuracy (best): {best_acc}\")\n\n # compute the test accuracy\n test_acc = Accuracy()\n test_acc.update(lc.forward(torch.tensor(test.data)).detach().numpy(), test.label)\n print(f\"test accuracy: {test_acc.accuracy()}\")\n\n\nmain()","repo_name":"onliner98/dlvc_ss22","sub_path":"assignments/reference/linear_cats_and_dogs.py","file_name":"linear_cats_and_dogs.py","file_ext":"py","file_size_in_byte":4083,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"15918710597","text":"from sympy import (Symbol, symbols, Matrix, sin, cos, asin, diff, sqrt, S,\n diag, Eq, hessian, Function, flatten, Tuple, im, pi, latex,\n dsolve, solve, fraction, factorial, Subs, Number, oo, Abs,\n N, solveset)\n\nfrom sympy.physics.mechanics import dynamicsymbols, ReferenceFrame, Point\nfrom sympy.physics.vector import vpprint, vlatex\nfrom 
...dynamics import LagrangesDynamicSystem, HarmonicOscillator, mech_comp\n\nfrom ..elements import MaterialPoint, Spring, GravitationalForce, Disk, RigidBody2D, Damper, PID, Excitation, Force, base_frame, base_origin\nfrom ...continuous import ContinuousSystem, PlaneStressProblem\n\n\n\nimport base64\nimport random\nimport IPython as IP\nimport numpy as np\nimport inspect\n\nimport matplotlib.pyplot as plt\nfrom functools import cached_property, lru_cache\n\n\nREPORT_COMPONENTS_LIST = [\n mech_comp.TitlePageComponent,\n mech_comp.SchemeComponent,\n mech_comp.ExemplaryPictureComponent,\n mech_comp.KineticEnergyComponent,\n mech_comp.KineticEnergyDynPyCodeComponent,\n mech_comp.KineticEnergySymPyCodeComponent,\n mech_comp.PotentialEnergyComponent,\n mech_comp.PotentialEnergyDynPyCodeComponent,\n mech_comp.PotentialEnergySymPyCodeComponent,\n mech_comp.LagrangianComponent,\n mech_comp.GoverningEquationComponent,\n mech_comp.GoverningEquationDynpyCodeComponent,\n mech_comp.GoverningEquationSympyCodeComponent,\n mech_comp.FundamentalMatrixComponent,\n mech_comp.GeneralSolutionComponent,\n mech_comp.GeneralSolutionDynpyCodeComponent,\n mech_comp.GeneralSolutionSympyCodeComponent,\n mech_comp.SteadySolutionComponent,\n mech_comp.FreeVibrationFrequencyComponent\n ]\n\ndef plots_no():\n num = 0\n while True:\n yield num\n num += 1\n\n\n\nclass ComposedSystem(HarmonicOscillator):\n \"\"\"Base class for all systems\n\n \"\"\"\n _case_no = plots_no()\n \n scheme_name = 'damped_car_new.PNG'\n real_name = 'car_real.jpg'\n detail_scheme_name = 'sruba_pasowana.png'\n detail_real_name = 'buick_regal_3800.jpg'\n _default_args = ()\n #_default_folder_path = \"./dynpy/models/images/\"\n _path = None\n\n z = dynamicsymbols('z')\n\n m0 = Symbol('m_0', positive=True)\n k0 = Symbol('k_0', positive=True)\n F0 = Symbol('F_0', positive=True)\n Omega0 = Symbol('Omega_0', positive=True)\n ivar=Symbol('t')\n\n \n # @classmethod\n # def _scheme(cls):\n\n # path = cls._default_folder_path + cls.scheme_name\n\n # return path\n\n # @classmethod\n # def _real_example(cls):\n # path = cls._default_folder_path + cls.real_name\n\n # return path\n\n @classmethod\n def _detail_real(cls):\n path = cls._default_folder_path + cls.detail_real_name\n\n return path\n\n @classmethod\n def _detail_scheme(cls):\n path = cls._default_folder_path + cls.detail_scheme_name\n\n return path\n\n def _init_from_components(self, *args, system=None, **kwargs):\n\n if system is None:\n composed_system = self._elements_sum\n else:\n composed_system = system\n\n #print('CS',composed_system._components)\n super(HarmonicOscillator,self).__init__(None, system=composed_system)\n\n #print('self',self._components)\n if self._components is None:\n comps = {}\n else:\n comps = self._components\n\n self._components = {**comps, **self.components}\n\n def __init__(self,\n Lagrangian=None,\n m0=None,\n qs=None,\n forcelist=None,\n bodies=None,\n frame=None,\n hol_coneqs=None,\n nonhol_coneqs=None,\n label=None,\n ivar=None,\n evaluate=True,\n system=None,\n **kwargs):\n\n if ivar is not None: self.ivar = ivar\n if m0 is not None: self.m0 = m0\n\n if qs is not None:\n self.qs = qs\n else:\n self.qs = [self.z]\n\n\n self._init_from_components(system=system, **kwargs)\n\n @property\n def components(self):\n\n components = {}\n\n self._material_point = MaterialPoint(Symbol('ItIsWrongCode '), self.qs[0],\n self.qs)('Material Point')\n components['_material_point'] = self._material_point\n\n self._label = 'System seems to be wrong - method components is not 
overload'\n \n return components\n\n @property\n def elements(self):\n\n return {**super().components, **self.components}\n\n @classmethod\n def preview(cls, example=False):\n if example:\n path = cls._real_example()\n\n elif example == 'detail_scheme_name':\n path = cls._detail_scheme()\n elif example == 'detail_real_name':\n path = cls._detail_real()\n else:\n path = cls._scheme()\n print(path)\n with open(f\"{path}\", \"rb\") as image_file:\n encoded_string = base64.b64encode(image_file.read())\n image_file.close()\n\n return IP.display.Image(base64.b64decode(encoded_string))\n\n def _components_default_data(self,formatter=None):\n \n if formatter is None:\n formatter = lambda obj: obj._all_default_data(formatter=formatter)\n \n data=[formatter(elem) for elem in self.elements.values()]\n\n \n return {key:value for elem in data for key, value in elem.items()} \n \n def _components_numerical_data(self):\n \n data=[elem._all_numerical_data() for elem in self.elements.values()]\n \n \n return {key:value for elem in data for key, value in elem.items()} \n \n def _all_default_data(self,formatter=None):\n \n \n \n return {**self._components_default_data(formatter=formatter),**self.get_default_data()} \n \n def _all_numerical_data(self):\n \n return {**self._components_numerical_data(),**self.get_numerical_data()} \n \n \n def get_default_data(self,formatter=None):\n return {}\n\n def get_numerical_data(self):\n return {}\n\n def _params_summary(self):\n \n query = lambda obj: {key:'Param comes from '+obj.__class__.__name__ for key in obj._all_default_data().keys()}\n\n desc_dict={key:(val if isinstance(val,str) else 'Provided directly within class') for key,val in self._all_default_data( query ).items()}\n #desc_dict = self._all_default_data( query )\n\n return '\\n'.join([f'{key} - {value}' for key,value in desc_dict.items()])\n \n def _system_summary(self):\n \n\n\n return f'{self.system_description()}\\n {\"=\"*50} \\n {self._params_summary()}'\n \n def get_random_parameters(self):\n\n \n #print('preview for',self)\n #display(self._all_default_data())\n #display(self.get_default_data())\n \n default_data_dict = {**self._components_default_data(),**self.get_default_data()}\n\n if default_data_dict:\n parameters_dict = {\n key: random.choice(items_list)\n for key, items_list in default_data_dict.items()\n }\n else:\n parameters_dict = None\n\n return parameters_dict\n\n def get_numerical_parameters(self):\n\n default_data_dict = {**self._components_numerical_data(),**self.get_numerical_data()}\n\n if default_data_dict:\n parameters_dict = {\n key: random.choice(items_list)\n for key, items_list in default_data_dict.items()\n }\n else:\n parameters_dict = None\n\n return parameters_dict\n\n @property\n def _report_components(self):\n\n comp_list = [\n *REPORT_COMPONENTS_LIST\n ]\n\n return comp_list\n \n @lru_cache\n # def linearized(self,): #it was missing\n\n # return type(self).from_system(super().linearized())\n \n @lru_cache \n def linearized(self, x0=None, op_point=False, hint=[], label=None):\n\n #temporary workaround\n lin_sys = HarmonicOscillator(self).linearized(x0=x0,op_point=op_point,hint=hint,label=label)\n \n #old version\n #lin_sys=super().linearized(x0=x0,op_point=op_point,hint=hint,label=label)\n \n return type(self).from_system(lin_sys)\n\n def tensioner_belt_force(self):\n return self.k_tensioner * self.steady_solution()\n\n def left_belt_force(self):\n return self.k_belt * self.steady_solution()\n\n def right_belt_force(self):\n return self.k_belt * 
self.steady_solution()\n\n\n# def max_static_force_pin(self):\n# return abs(self.static_load().doit()[0])\n\n# def max_dynamic_force_pin(self):\n# return self.frequency_response_function() * self.stiffness_matrix(\n# )[0] + self.max_static_force_pin()\n\n def max_static_force_pin(self):\n return abs(self.static_load().doit()[0]) / 2\n\n def max_dynamic_force_pin(self):\n return self._frf()[0] * self.k_m + self.max_static_force_pin()\n\n def static_force_pin_diameter(self):\n kt = Symbol('k_t', positive=True)\n Re = Symbol('R_e', positive=True)\n return ((4 * self.max_static_force_pin()) / (pi * kt * Re))**(1 / 2)\n\n def dynamic_force_pin_diameter(self):\n kt = Symbol('k_t', positive=True)\n Re = Symbol('R_e', positive=True)\n return ((4 * self.max_dynamic_force_pin()) / (pi * kt * Re))**(1 / 2)\n Re = Symbol('R_e', positive=True)\n return ((4 * self.max_static_force_pin()) / (pi * kt * Re))**(1 / 2)\n\n def dynamic_force_pin_diameter(self):\n kt = Symbol('k_t', positive=True)\n Re = Symbol('R_e', positive=True)\n return ((4 * self.max_dynamic_force_pin()) / (pi * kt * Re))**(1 / 2)\n\n def _parameter_influence_analysis(self,parameter=None,param_span=None,dependencies_dict=None):\n\n from ...solvers.linear import ODESystem\n from ...utilities.adaptable import NumericalAnalysisDataFrame,TimeDataFrame,pd\n\n if parameter is None:\n parameter = self.system_parameters()[0]\n\n if param_span is None:\n param_span = [0.8,1,1.2]\n\n if dependencies_dict is None:\n dependencies_dict = {}\n\n reference_data = {**self.get_numerical_parameters()}\n display(reference_data)\n\n# eom = self._eoms[0]\n #system = ODESystem(odes=eom,dvars=self.q).as_first_ode_linear_system()\n #system = ODESystem(odes=eom,dvars=self.q,ode_order=2)#.numerized()\n system = self._ode_system\n \n \n \n Y = list(self.Y) + list(dependencies_dict.keys())\n\n index = pd.Index(np.linspace(0,100,1000),name=self.ivar)\n\n df_num = NumericalAnalysisDataFrame(index=index).from_model(system,\n parameter=parameter,\n span=param_span,\n reference_data=reference_data,\n coordinates=Y,\n index=index)\n\n results_num = df_num#.perform_simulations(model_level_name=0,dependencies=dependencies_dict)\n #results = TimeDataFrame(results_num).droplevel(0,axis=1)\n results= results_num\n \n return results\n \n def get_reference_data(self):\n \n return self.get_numerical_parameters()\n\n\nclass NonlinearComposedSystem(ComposedSystem):\n\n def frequency_response_function(self,\n frequency=Symbol('Omega', positive=True),\n amplitude=Symbol('a')):\n\n omega = (self.linearized()).natural_frequencies()[0]\n \n \n eps = self.small_parameter()\n\n exciting_force = self.external_forces()[0]\n\n comps = exciting_force.atoms(sin, cos)\n exciting_amp = sum([exciting_force.coeff(comp) for comp in comps])\n inertia = self.inertia_matrix()[0]\n\n return amplitude * (-frequency**2 + omega**2) * inertia + S(\n 3) / 4 * eps * amplitude**3 - exciting_amp\n\n def amplitude_from_frf(self, amplitude=Symbol('a')):\n\n return solveset(self.frequency_response_function(), amplitude)\n\n @property\n def _report_components(self):\n\n comp_list = [\n *REPORT_COMPONENTS_LIST\n ]\n\n return comp_list\n\n def max_static_force_pin(self):\n return abs(self.static_load().doit()[0]) / 2\n\n def max_dynamic_force_pin(self):\n lin_sys = ComposedSystem(self.linearized())\n #k_m = self._given_data[self.k_m]\n k_m = self.k_m\n # display(lin_sys.stiffness_matrix()[0])\n\n return lin_sys.frequency_response_function() * (\n lin_sys.stiffness_matrix()[0]) / 2 + self.max_static_force_pin()\n\n 
def max_dynamic_nonlinear_force_pin(self):\n lin_sys = ComposedSystem(self.linearized())\n\n amp = list(self.amplitude_from_frf())\n display(amp)\n #k_m = self._given_data[self.k_m]\n k_m = self.k_m\n\n return amp[0] * k_m + self.max_static_force_pin()\n \n \nclass SpringMassSystem(ComposedSystem):\n \"\"\"Ready to use sample Single Degree of Freedom System with mass on spring\n Arguments:\n =========\n m = Mass\n -Mass of system on spring\n\n k = Spring coefficient\n -Spring carrying the system\n\n ivar = symbol object\n -Independant time variable\n\n qs = dynamicsymbol object\n -Generalized coordinates\n\n Example\n =======\n A mass oscillating up and down while being held up by a spring with a spring constant k\n\n >>> t = symbols('t')\n >>> m, k = symbols('m, k')\n >>> qs = dynamicsymbols('z') # Generalized Coordinates\n >>> mass = SDoFHarmonicOscillator(m,k, qs=[z],) # Initialization of LagrangesDynamicSystem instance\n\n -We define the symbols and dynamicsymbols\n -Kinetic energy T and potential energy v are evaluated to calculate the lagrangian L\n -Reference frame was created with point P defining the position and the velocity determined on the z axis\n -external forces assigned\n -Next we determine the instance of the system using class LagrangeDynamicSystem\n -We call out the instance of the class\n -If necessary assign values for the default arguments\n\n\n \"\"\"\n scheme_name = 'engine.png'\n real_name = 'engine_real.PNG'\n\n m=Symbol('m', positive=True)\n k=Symbol('k', positive=True)\n ivar=Symbol('t')\n \n z=dynamicsymbols('z')\n \n def __init__(self,\n m=None,\n k=None,\n z=None,\n ivar=None,\n **kwargs):\n\n \n \n if m is not None: self.m = m\n if k is not None: self.k = k\n if ivar is not None: self.ivar = ivar\n if z is not None: self.z = z\n \n \n self.qs = [self.z]\n\n self._init_from_components(**kwargs)\n\n @property\n def components(self):\n\n components = {}\n \n self.material_point = MaterialPoint(self.m, self.z, qs=self.qs)\n self.spring = Spring(self.k, self.z, qs=self.qs)\n \n components['material_point'] = self.material_point\n components['spring'] = self.spring\n \n return components\n \n def symbols_description(self):\n self.sym_desc_dict = {\n self.m: r'mass of system on the spring',\n self.k: r'Spring coefficient ',\n }\n\n return self.sym_desc_dict\n\n\nclass LagrangeIBlocksOnInclinedPlane(ComposedSystem):\n scheme_name = 'ddof_disks_3_springs_scheme.png'\n real_name = 'nonlin_trolley_real.PNG'\n\n def __init__(self,\n m=Symbol('m', positive=True),\n m1=Symbol('m_1', positive=True),\n m2=Symbol('m_2', positive=True),\n m3=Symbol('m_3', positive=True),\n m4=Symbol('m_4', positive=True),\n R=Symbol('R', positive=True),\n g=Symbol('g', positive=True),\n alpha=Symbol('alpha',positive=True),\n beta=Symbol('beta',positive=True),\n ivar=Symbol('t'),\n x1=dynamicsymbols('x_1'),\n x2=dynamicsymbols('x_2'),\n x3=dynamicsymbols('x_3'),\n x4=dynamicsymbols('x_4'),\n phi=dynamicsymbols('\\\\varphi'),\n qs=dynamicsymbols('x_1, x_2, x_3, x_4, \\\\varphi'),\n **kwargs):\n\n self.m = m\n self.m1 = m1\n self.m2 = m2\n self.m3 = m3\n self.m4 = m\n self.R = R\n self.g = g\n self.alpha = alpha\n self.beta = beta\n self.x1 = x1\n self.x2 = x2\n self.x3 = x3\n self.x4 = x4\n self.phi = phi\n self.qs = qs\n\n\n #IMROVE\n self._init_from_components(**kwargs)\n \n @property\n def components(self):\n\n ######## ELEMENTS MOVED FROM __init__ METHOD\n self.Mass1 = MaterialPoint(self.m1, pos1=self.x1, qs=[self.x1]) + GravitationalForce(self.m1, self.g, pos1=-self.x1*sin(self.alpha), 
qs=[self.x1])\n self.Mass2 = MaterialPoint(self.m2, pos1=self.x2, qs=[self.x2]) + GravitationalForce(self.m2, self.g, pos1=-self.x2*sin(self.alpha), qs=[self.x2])\n self.Mass3 = MaterialPoint(self.m3, pos1=self.x3, qs=[self.x3]) + GravitationalForce(self.m3, self.g, pos1=-self.x3*sin(self.beta), qs=[self.x3])\n self.Mass4 = MaterialPoint(self.m4, pos1=self.x4, qs=[self.x4]) + GravitationalForce(self.m4, self.g, pos1=-self.x4*sin(self.beta), qs=[self.x4])\n self.Pulley = MaterialPoint(1/2*self.m*self.R**2, pos1=self.phi, qs=[self.phi])\n\n ####################\n\n components = {}\n\n components['Mass1'] = self.Mass1\n components['Mass2'] = self.Mass2\n components['Mass3'] = self.Mass3\n components['Mass4'] = self.Mass4\n components['Pulley'] = self.Pulley\n \n return components\n \n def get_default_data(self):\n\n m0 = symbols('m_0', positive=True)\n\n default_data_dict = {\n self.m: [S.Half * m0, 1 * m0, 2 * m0, 4 * m0, S.Half**2 * m0],\n self.m1: [S.Half * m0, 1 * m0, 2 * m0, 4 * m0, S.Half**2 * m0],\n self.m2: [S.Half * m0, 1 * m0, 2 * m0, 4 * m0, S.Half**2 * m0],\n self.m3: [S.Half * m0, 1 * m0, 2 * m0, 4 * m0, S.Half**2 * m0],\n self.m4: [S.Half * m0, 1 * m0, 2 * m0, 4 * m0, S.Half**2 * m0],\n }\n\n return default_data_dict\n\n def get_random_parameters(self):\n\n default_data_dict = self.get_default_data()\n\n parameters_dict = {\n key: random.choice(items_list)\n for key, items_list in default_data_dict.items()\n }\n\n return parameters_dict\n\n \n#TODO 159\nclass LagrangeIOnMathFunction(ComposedSystem):\n\n scheme_name = 'mat_point_parabola.PNG'\n real_name = 'tautochrone_curve_small.gif'\n\n \n \n \n def __init__(self,\n m=Symbol('m', positive=True),\n g=Symbol('g', positive=True),\n x=dynamicsymbols('x'),\n y=dynamicsymbols('y'),\n a=symbols('a',positive=True),\n R=symbols('R',positive=True),\n ivar=Symbol('t'),\n qs=dynamicsymbols('x,y'),\n **kwargs):\n\n self.m = m\n self.x = x\n self.y = y\n self.a = a\n self.R = R\n self.g = g\n\n system = HarmonicOscillator(S.Half*m*x.diff(ivar)**2+S.Half*m*y.diff(ivar)**2-m*g*y,qs=[x,y])\n\n super().__init__(system(qs),**kwargs)\n\n def get_default_data(self):\n\n\n m0 = symbols('m_0', positive=True)\n x = self.x\n a, Omega = symbols('a, Omega', positive=True)\n\n default_data_dict = {\n self.m :[S.Half * m0, 1 * m0, 2 * m0, 2**2 * m0, S.Half**2 * m0,8*m0,S.Half**3],\n self.y:[ a*x**2, a*(1-cos(x)),a*sin(x)**2,a*sin(x)**4,a*x**4]\n\n }\n\n return default_data_dict\n \n\n\n\n\n#Old MaterialPointMovement system, new system implemented.\n#This class is left just for an example.\nclass ExemplaryOldImplementedSystem(ComposedSystem):\n\n m = Symbol('m', positive=True)\n g = Symbol('g', positive=True)\n c = Symbol('c', positive=True)\n r = Symbol('r', positive=True)\n phi = dynamicsymbols('\\\\varphi')\n\n c0 = Symbol('c0', positive=True)\n r0 = Symbol('r0', positive=True)\n phi0 = dynamicsymbols('phi0')\n\n def __init__(self,\n m=None,\n g=None,\n c=None,\n r=None,\n phi=None,\n ivar=Symbol('t'),\n **kwargs):\n\n if m is not None: self.m = m\n if g is not None: self.g = g\n if c is not None: self.c = c\n if r is not None: self.r = r\n if phi is not None: self.phi = phi\n self.ivar = ivar\n\n self.qs = [self.phi]\n\n self._mass_x = MaterialPoint(self.m,\n pos1=self.r * sin(self.phi),\n qs=self.qs)\n self._mass_y = MaterialPoint(self.m,\n pos1=self.r * cos(self.phi),\n qs=self.qs)\n\n self._gravity_ = GravitationalForce(self.m,\n self.g,\n pos1=self.r * cos(self.phi),\n qs=self.qs)\n\n composed_system = self._mass_x + self._mass_y + 
self._gravity_\n\n super().__init__(composed_system, **kwargs)\n\n def symbols_description(self):\n self.sym_desc_dict = {\n self.m: r'Mass',\n self.g: r'Gravity constant',\n self.c: r'',\n }\n\n return self.sym_desc_dict\n\n def get_default_data(self):\n\n m0, c0, r0, phi0 = self.m0, self.c0, self.r0, self.phi0\n\n default_data_dict = {\n self.m: [m0 * no for no in range(1, 8)],\n self.c: [c0 * no for no in range(1, 8)],\n self.r: [r0 * no for no in range(1, 8)],\n self.phi: [phi0 * no for no in range(1, 8)],\n }\n\n return default_data_dict\n \n def get_numerical_data(self):\n\n m0, c0, r0, phi0 = self.m0, self.c0, self.r0, self.phi0\n\n default_data_dict = {\n self.m: [m0 * no for no in range(1, 8)],\n self.c: [c0 * no for no in range(1, 8)],\n self.r: [r0 * no for no in range(1, 8)],\n self.phi: [phi0 * no for no in range(1, 8)],\n }\n\n return default_data_dict\n\n def max_static_force(self):\n return S.Zero\n\n def max_dynamic_force(self):\n return S.Zero\n\n#DONE \nclass MaterialPointMovement(ComposedSystem):\n\n m = Symbol('m', positive=True)\n g = Symbol('g', positive=True)\n c = Symbol('c', positive=True)\n r = Symbol('r', positive=True)\n phi = dynamicsymbols('phi')\n\n c0 = Symbol('c0', positive=True)\n r0 = Symbol('r0', positive=True)\n phi0 = dynamicsymbols('phi0')\n\n def __init__(self,\n m=None,\n g=None,\n c=None,\n r=None,\n phi=None,\n ivar=Symbol('t'),\n **kwargs):\n\n if m is not None: self.m = m\n if g is not None: self.g = g\n if c is not None: self.c = c\n if r is not None: self.r = r\n if phi is not None: self.phi = phi\n self.ivar = ivar\n\n self.qs = [self.phi]\n\n self._init_from_components(**kwargs)\n\n @property\n def components(self):\n\n components = {}\n\n\n self._mass_x = MaterialPoint(self.m,\n pos1=self.r * sin(self.phi),\n qs=self.qs)\n self._mass_y = MaterialPoint(self.m,\n pos1=self.r * cos(self.phi),\n qs=self.qs)\n\n self._gravity = GravitationalForce(self.m,\n self.g,\n pos1=self.r * cos(self.phi),\n qs=self.qs)\n\n\n\n components['_mass_x']=self._mass_x\n components['_mass_y']=self._mass_y\n components['_gravity']=self._gravity\n\n\n return components\n \n def symbols_description(self):\n self.sym_desc_dict = {\n self.m: r'Mass',\n self.g: r'Gravity constant',\n self.c: r'',\n }\n\n return self.sym_desc_dict\n\n def get_default_data(self):\n\n m0, c0, r0, phi0 = self.m0, self.c0, self.r0, self.phi0\n\n default_data_dict = {\n self.m: [m0 * no for no in range(1, 8)],\n self.c: [c0 * no for no in range(1, 8)],\n self.r: [r0 * no for no in range(1, 8)],\n self.phi: [phi0 * no for no in range(1, 8)],\n }\n\n return default_data_dict\n \n def get_numerical_data(self):\n\n m0, c0, r0, phi0 = self.m0, self.c0, self.r0, self.phi0\n\n default_data_dict = {\n self.m: [m0 * no for no in range(1, 8)],\n self.c: [c0 * no for no in range(1, 8)],\n self.r: [r0 * no for no in range(1, 8)],\n self.phi: [phi0 * no for no in range(1, 8)],\n }\n\n return default_data_dict \n\n def max_static_force(self):\n return S.Zero\n\n def max_dynamic_force(self):\n return S.Zero\n\n \n#Kuba #poprawione \n\nclass KinematicClutchWithSprings(ComposedSystem):\n #scheme_name = ''\n #real_name = ''\n #detail_scheme_name = ''\n #detail_real_name = ''\n\n l0 = Symbol('l_0', positive=True)\n G = Symbol('G', positive=True)\n I = Symbol('I', positive=True)\n l_1 = Symbol('l_1', positive=True)\n l_2 = Symbol('l_2', positive=True)\n I_1 = Symbol('I_1', positive=True)\n I_2 = Symbol('I_2', positive=True)\n Ms = Symbol('M_s', positive=True)\n Omega = Symbol('Omega', positive=True)\n 
ivar=Symbol('t')\n theta = dynamicsymbols('theta')\n phi = dynamicsymbols('\\\\varphi')\n\n def __init__(self,\n l0=None,\n G=None,\n I=None,\n l_1=None,\n l_2=None,\n I_1=None,\n I_2=None,\n Ms=None,\n phi=None,\n theta=None,\n ivar=Symbol('t'),\n qs=None,\n **kwargs):\n \n if G is not None: self.G = G\n if I is not None: self.I = I\n if Ms is not None: self.Ms = Ms\n if l_1 is not None: self.l_1 = l_1\n if l_2 is not None: self.l_2 = l_2\n if I_1 is not None: self.I_1 = I_1\n if I_2 is not None: self.I_2 = I_2\n if phi is not None: self.phi = phi\n if theta is not None: self.theta = theta\n\n self.qs = [self.phi]\n self.ivar = ivar\n self._init_from_components(**kwargs)\n \n @cached_property\n def components(self):\n components = {}\n \n self.k_1 = (self.G * self.I_1) / self.l_1\n self.k_2 = (self.G * self.I_2) / self.l_2\n\n self.disc_1 = Disk(self.I, pos1=self.phi, qs=self.qs)\n self.spring_2 = Spring(self.k_1 * self.k_2 / (self.k_2 + self.k_1),\n pos1=self.phi,\n pos2=self.theta,\n qs=self.qs) #right spring\n self.moment = Force(self.Ms, pos1=self.phi, qs=self.qs)\n \n components['moment'] = self.moment\n components['disc_1'] = self.disc_1\n components['spring_2'] = self.spring_2\n \n return components\n\n\n def symbols_description(self):\n self.sym_desc_dict = {\n self.I: r'Moment of Inertia',\n self.k_1: r'',\n self.k_2: r'',\n }\n return self.sym_desc_dict\n def get_default_data(self):\n\n m0, l0, G, l = symbols('m_0 l_0 G l', positive=True)\n theta0, Omega = symbols('theta_0, Omega', positive=True)\n\n default_data_dict = {\n self.I: [S.Half * m0 * (l0**2) * no for no in range(1, 3)],\n self.I_1: [S.Half**(no) * (l0**4) for no in range(1, 8)],\n self.I_2: [S.Half**no * (l0**4) for no in range(1, 8)],\n self.l_1: [S.Half**(no - 6) * l0 for no in range(1, 8)],\n self.l_2: [S.Half**(no - 6) * l0 for no in range(1, 8)],\n self.theta: [theta0 * cos(Omega * self.ivar)],\n }\n\n return default_data_dict\n\n def disc_force(self):\n t = self.ivar\n return self.I * self.steady_solution().diff(t, t)\n\n def max_static_force_pin(self):\n d = Symbol('d', positive=True)\n return 2 * self.Ms / d\n\n def max_dynamic_force_pin(self):\n d = Symbol('d', positive=True)\n return self.frequency_response_function(\n self.natural_frequencies()[0]) * self.stiffness_matrix()[0]\n\n def max_static_bearing_force(self):\n d = Symbol('d', positive=True)\n return abs(2 * self.static_load()[0] / d)\n\n def max_dynamic_bearing_force(self):\n d = Symbol('d', positive=True)\n acc_amp = self.frequency_response_function() * self.Omega**2\n\n return abs(\n 2 * (self.I * acc_amp) /\n d) + self.max_static_bearing_force() #.subs(self._given_data)\n\n def static_key_length(self):\n kd = Symbol('k_d', positive=True)\n h = Symbol('h', positive=True)\n return (2 * self.max_static_bearing_force()) / (kd * h)\n\n def dynamic_key_length(self):\n\n kd = Symbol('k_d', positive=True)\n h = Symbol('h', positive=True)\n return (2 * self.max_dynamic_bearing_force()) / (kd * h) ","repo_name":"bogumilchilinski/dynpy","sub_path":"models/mechanics/principles.py","file_name":"principles.py","file_ext":"py","file_size_in_byte":30086,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"5113779442","text":"def get_data(file_name):\n with open(file_name, \"r\") as f:\n data = [int(_) for _ in f.read().split(\",\")]\n return data\n\n\ndef part_1(data):\n min_cost = max(data) * len(data)\n for end_position in range(max(data)):\n cost = 0\n for crab_position in data:\n cost += 
abs(crab_position - end_position)\n if cost < min_cost:\n min_cost = cost\n return min_cost\n\n\ndef part_2(data):\n min_cost = 9999999999999999\n cache = {}\n for end_position in range(max(data)):\n cost = 0\n for crab_position in data:\n if abs(crab_position - end_position) not in cache:\n intermediate_cost = 0\n for i in range(abs(crab_position - end_position)):\n intermediate_cost += i + 1\n cache[abs(crab_position - end_position)] = intermediate_cost\n cost += cache[abs(crab_position - end_position)]\n if cost < min_cost:\n min_cost = cost\n return min_cost\n\n\nif __name__ == \"__main__\":\n test = False\n file_name = \"sample.txt\" if test else \"input.txt\"\n data = get_data(file_name)\n\n print(\"Part 1:\", part_1(data))\n print(\"Part 2:\", part_2(data))\n","repo_name":"bassil/advent_of_code","sub_path":"2021/7/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73729604888","text":"import numpy as np\nimport Layer.FC as FC\nimport Layer.ReLU as ReLU\nimport Layer.Tanh as Tanh\nimport Layer.Sigmoid as Sigmoid\nimport Layer.Softmax as Softmax\nimport matplotlib.pyplot as plt\n\n\ndef isEqual(num1, num2):\n\tif num1+1e-4<=num2 and num1-1e-4>=num2:\n\t\treturn True\n\treturn False\n\nclass Net(object):\n\tdef __init__(self, batch_size=200,\n\t\t\tbase_lr=0.1, lr_decay_scale=1., lr_decay_num=-1,\n\t\t\tweight_decay=0.,\n\t\t\tmode=\"SGD\", show=True):\n\t\tself.__batch_size = batch_size\n\t\tself.__lr = base_lr\n\t\tself.__lr_decay_scale = lr_decay_scale\n\t\tself.__lr_decay_num = lr_decay_num\n\t\tself.__weight_decay = weight_decay\n\t\tself.__mode = mode\n\t\tself.__network = []\n\t\tself.__show = show\n\t\tself.__ite = -1\n\n\n\tdef clearStructure(self):\n\t\tself.__network[:] = []\n\n\n\tdef loadData(self, data, label):\n\t\tself.__data = data\n\t\tself.__label = label\n\t\tself.__data_num = self.__data.shape[0]\n\t\tself.__data_dim = self.__data.shape[1]\n\t\tself.__label_dim = label.shape[1]\n\t\tif self.__batch_size > self.__data_num:\n\t\t\tself.__batch_size = self.__data_num\n\n\t\t#self.__batch_data = np.zeros([self.__batch_size, self.__data_dim])\n\t\t#self.__batch_label = np.zeros([self.__batch_size, self.__label_dim])\n\n\t\tif self.__show:\n\t\t\tshow_message = \"Loading data...\\n\\tInput:[%d, %d], label:[%d, %d].\" % (self.__batch_size, self.__data_dim,\n\t\t\t\tself.__batch_size, self.__label_dim)\n\t\t\tprint(show_message)\n\n\n\tdef addFC(self, num_output):\n\t\tif len(self.__network) == 0:\n\t\t\tself.__network.append(FC.FullyConnectionLayer(self.__data_dim, num_output))\n\t\telse:\n\t\t\tlast_out_dim = self.__network[-1].getOutDim()\n\t\t\t# input_len = reduce(lambda x, y: x * y, shape[1:])\n\t\t\tself.__network.append(FC.FullyConnectionLayer(last_out_dim, num_output))\n\n\t\tif self.__show:\n\t\t\tshow_message = \"Layer %d\\n\\tFully Connection Layer, input:[%d, %d], output:[%d, %d].\" % (\n\t\t\t\t\t\tlen(self.__network),\n\t\t\t\t\t\tself.__batch_size, self.__network[-1].getInDim(),\n\t\t\t\t\t\tself.__batch_size, self.__network[-1].getOutDim())\n\t\t\tprint(show_message)\n\n\n\tdef addSigmoid(self, dim):\n\t\tself.__network.append(Sigmoid.SigmoidLayer(dim))\n\t\tif self.__show == True:\n\t\t\tshow_message = \"Layer %d\\n\\tSigmoid Layer.\" % (\n\t\t\t\t\t\tlen(self.__network))\n\t\t\tprint(show_message)\n\n\n\tdef addReLU(self, dim):\n\t\tself.__network.append(ReLU.ReLULayer(dim))\n\t\tif self.__show == True:\n\t\t\tshow_message = 
\"Layer %d\\n\\tReLU Layer.\" % (\n\t\t\t\t\t\tlen(self.__network))\n\t\t\tprint(show_message)\n\n\n\tdef addLeakyReLU(self, dim, alpha=0.1):\n\t\tself.__network.append(ReLU.LeakyReLULayer(dim, alpha))\n\t\tif self.__show:\n\t\t\tshow_message = \"Layer %d\\n\\tLeakyReLU Layer.\" % (\n\t\t\t\t\t\tlen(self.__network))\n\t\t\tprint(show_message)\n\n\n\tdef addTanh(self, dim):\n\t\tself.__network.append(Tanh.TanhLayer(dim))\n\t\tif self.__show == True:\n\t\t\tshow_message = \"Layer %d\\n\\tTanh Layer.\" % (\n\t\t\t\t\t\tlen(self.__network))\n\t\t\tprint(show_message)\n\n\n\tdef addSoftmax(self):\n\t\tself.__network.append(Softmax.SoftmaxWithLossLayer())\n\t\tif self.__show == True:\n\t\t\tshow_message = \"Layer %d\\n\\tSoftmax(With Loss) Layer.\" % (\n\t\t\t\t\t\tlen(self.__network))\n\t\t\tprint(show_message)\n\n\n\tdef getNetwork(self):\n\t\treturn self.__network\n\n\n\tdef setWeightDecay(self, lamda):\n\t\tself.__weight_decay = lamda\n\n\n\tdef setLrDecay(self, lr_decay_scale, lr_decay_num):\n\t\tself.__lr_decay_scale = lr_decay_scale\n\t\tself.__lr_decay_num = lr_decay_num\n\n\n\tdef setOptimizer(self, batch_size = 200,\n\t\t\tlr_decay_scale=1., lr_decay_num=-1,\n\t\t\tweight_decay=0., mode=\"SGD\"):\n\t\tself.__batch_size = batch_size\n\t\tself.__lr_decay_scale = lr_decay_scale\n\t\tself.__lr_decay_num = lr_decay_num\n\t\tself.__weight_decay = weight_decay\n\t\tself.__mode = mode\n\n\t# [index_start, index_end)\n\tdef _loadBatch(self):\n\t\tself.__ite += 1\n\t\tindex_start = self.__ite * self.__batch_size\n\t\tindex_end = index_start+self.__batch_size\n\t\tif index_end >= self.__data_num:\n\t\t\tindex_end = self.__data_num\n\t\t\tself.__ite = -1\n\n\t\tself.__batch_data = self.__data[index_start:index_end, :]\n\t\tself.__batch_label = self.__label[index_start:index_end, :]\n\t\tif self.__network[-1].type() == \"loss\":\n\t\t\tself.__network[-1].loadLabel(self.__batch_label)\n\t\telse:\n\t\t\tprint(\"The last layer is not loss.\")\n\n\n\tdef forward(self):\n\t\tif len(self.__network) == 0:\n\t\t\tprint(\"The network is empty.\\n\")\n\t\telse:\n\t\t\tself._loadBatch()\n\t\t\tdata_flow = self.__batch_data\n\n\t\t\tfor index in range(len(self.__network)):\n\t\t\t\tif index==(len(self.__network)-1) and self.__network[index].type()!=\"loss\":\n\t\t\t\t\t\tprint(\"The last layer is not loss layer.\")\n\t\t\t\tdata_flow = self.__network[index].forward(data_flow)\n\n\n\tdef backward(self):\n\t\t#for index in reversed(range(len(self.__network)))\n\t\tif self.__network[-1].type()==\"loss\":\n\t\t\tdelta = self.__network[-1].backward()\n\t\telse:\n\t\t\tprint(\"The last layer is not loss layer.\")\n\n\t\tfor index in range(len(self.__network)-2, -1, -1):\n\t\t\tif self.__network[index].type()==\"vision\":\n\t\t\t\tdelta = self.__network[index].backward(delta,\n\t\t\t\t\talpha=self.__lr, lamda=self.__weight_decay)\n\t\t\telse:\n\t\t\t\tdelta = self.__network[index].backward(delta)\n\n\n\tdef predict(self, data):\n\t\tif len(self.__network) == 0:\n\t\t\tprint(\"The network is empty.\\n\")\n\t\telse:\n\t\t\tpre_result = data\n\n\t\t\tfor index in range(len(self.__network)-1):\n\t\t\t\tpre_result = self.__network[index].forward(pre_result)\n\t\t\t#pre_result = np.argmax(data_flow, axis=1)\n\t\t\treturn pre_result\n\n\n\tdef accuracy(self, test_data, test_label):\n\t\tif test_data.shape[0] != test_label.shape[0]:\n\t\t\tprint(\"The row of data and label is not equal.\")\n\n\t\tpre_result = np.argmax(self.predict(test_data), axis=1)\n\t\ttest_result = np.argmax(test_label, axis=1)\n\n\t\tnum = 0.\n\t\tfor 
i in range(len(test_result)):\n\t\t\tif pre_result[i] == test_result[i]:\n\t\t\t\tnum += 1\n\t\treturn num / len(test_result)\n\n\n\tdef getLoss(self):\n\t\treturn self.__network[-1].getLoss()\n\n\n\tdef train(self):\n\t\tpass\n\n\n\tdef save_structure(self, file_path=\"./\", file_suff=\".struct\"):\n\t\tpass\n\n\n\tdef save_parameters(self, file_path=\"./\", file_suff=\".param\"):\n\t\tpass\n\n\n\nif __name__ == \"__main__\":\n\tnum_ = 100\n\n\tx1 = np.linspace(-10, -1e-5, num=num_//2)\n\tx2 = np.linspace(1e-5, 10, num=num_//2)\n\tx = np.array([x1, x2]).reshape(num_, -1)\n\n\ty1 = [1., 0.]*(num_//2)\n\ty2 = [0., 1.]*(num_//2)\n\ty = np.array([y1, y2]).reshape(num_, -1)\n\n\tnet = Net(base_lr=0.1, lr_decay_scale=0.9, lr_decay_num=-1,\n\t\t\tweight_decay=0., batch_size=1000000,\n\t\t\tmode=\"SGD\", show=True)\n\tnet.clearStructure()\n\tnet.loadData(x, y)\n\n\tnet.addFC(y.shape[1])\n\tnet.addLeakyReLU(y.shape[1])\n\tnet.addSoftmax()\n\tITE = 2000 + 1\t\t#2000\n\tfor ite in range(1, ITE):\n\t\tnet.forward()\n\t\tnet.backward()\n\t\tif ite % 200==0:\n\t\t\tprint(\"%d:\\n loss:%.10f\" % (ite, net.getLoss()))\n\t\t\tprint(\" accuracy:%.2f%%\" % (net.accuracy(x, y)*100))\n\t#print(net.getNetwork()[-3].getW())\n\t#print(net.getNetwork()[-3].getB())\n\t#print(np.exp(net.predict(x)))","repo_name":"xiaohuihuichao/Net_Numpy","sub_path":"Net.py","file_name":"Net.py","file_ext":"py","file_size_in_byte":6635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11411593997","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nRECORD_TIME = 0.2\nRATE = 44100\nCHUNK = RATE/30\n\nstorage = np.load('Data.npy')\n#for i in range(100):\n# print(storage[i])\nplt.figure(1)\nplt.plot(storage)\n#plt.plot((np.arange(storage.size/2)*(CHUNK/(RATE*RECORD_TIME))),storage)\nplt.show()\n","repo_name":"mdasil44/CapstoneRaspberryPiCode","sub_path":"Plot.py","file_name":"Plot.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9045733696","text":"# pip install backtrader pandas\r\n# https://finance.yahoo.com/quote/GAZP.ME/history?p=GAZP.ME\r\n\r\nfrom __future__ import (absolute_import, division, print_function,\r\n unicode_literals)\r\n\r\nimport backtrader as bt\r\nimport pandas as pd\r\nimport datetime # For datetime objects\r\nimport os.path # To manage paths\r\nimport sys # To find out the script name (in argv[0])\r\n\r\n\r\n# Create a Stratey\r\nclass TestStrategy(bt.Strategy):\r\n\r\n def log(self, txt, dt=None):\r\n ''' Logging function fot this strategy'''\r\n dt = dt or self.datas[0].datetime.date(0)\r\n print('%s, %s' % (dt.isoformat(), txt))\r\n\r\n def __init__(self):\r\n # Keep a reference to the \"close\" line in the data[0] dataseries\r\n self.dataclose = self.datas[0].close\r\n\r\n # To keep track of pending orders\r\n self.order = None\r\n\r\n def notify_order(self, order):\r\n if order.status in [order.Submitted, order.Accepted]:\r\n # Buy/Sell order submitted/accepted to/by broker - Nothing to do\r\n return\r\n\r\n # Check if an order has been completed\r\n # Attention: broker could reject order if not enough cash\r\n if order.status in [order.Completed]:\r\n if order.isbuy():\r\n self.log('BUY EXECUTED, %.2f' % order.executed.price)\r\n elif order.issell():\r\n self.log('SELL EXECUTED, %.2f' % order.executed.price)\r\n\r\n self.bar_executed = len(self)\r\n\r\n elif order.status in [order.Canceled, order.Margin, order.Rejected]:\r\n 
self.log('Order Canceled/Margin/Rejected')\r\n\r\n # Write down: no pending order\r\n self.order = None\r\n\r\n def notify_trade(self, trade):\r\n if not trade.isclosed:\r\n return\r\n\r\n self.log('OPERATION PROFIT, GROSS %.2f, NET %.2f' %\r\n (trade.pnl, trade.pnlcomm))\r\n\r\n def next(self):\r\n # Simply log the closing price of the series from the reference\r\n self.log('Close, %.2f' % self.dataclose[0])\r\n # print(\"*\", self.datas[0].close[0])\r\n\r\n # Check if an order is pending ... if yes, we cannot send a 2nd one\r\n if self.order:\r\n return\r\n\r\n # Check if we are in the market\r\n if not self.position:\r\n\r\n # Not yet ... we MIGHT BUY if ...\r\n if self.dataclose[0] < self.dataclose[-1]:\r\n # current close less than previous close\r\n\r\n if self.dataclose[-1] < self.dataclose[-2]:\r\n # previous close less than the previous close\r\n\r\n # BUY, BUY, BUY!!! (with default parameters)\r\n self.log('BUY CREATE, %.2f' % self.dataclose[0])\r\n\r\n # Keep track of the created order to avoid a 2nd order\r\n self.order = self.buy()\r\n\r\n else:\r\n\r\n # Already in the market ... we might sell\r\n if len(self) >= (self.bar_executed + 50):\r\n # SELL, SELL, SELL!!! (with all possible default parameters)\r\n self.log('SELL CREATE, %.2f' % self.dataclose[0])\r\n\r\n # Keep track of the created order to avoid a 2nd order\r\n self.order = self.sell()\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n modpath = os.path.dirname(os.path.abspath(sys.argv[0]))\r\n datapath = os.path.join(modpath, 'GAZP_D1.csv')\r\n\r\n # print(datapath)\r\n # exit(1)\r\n\r\n data = pd.read_csv(datapath, sep=',', index_col='Date') # this data is from metatrader 5\r\n print(data)\r\n data = data.reset_index()\r\n data.rename(columns={'Date': 'datetime', 'Open': 'open', 'High': 'high',\r\n 'Low': 'low', 'Close': 'close', 'Volume': 'volume'},\r\n inplace=True) # Чтобы получить дату/время переименовываем колонки\r\n data.index = pd.to_datetime(data['datetime'])\r\n print(data)\r\n\r\n cerebro = bt.Cerebro()\r\n\r\n # Add a strategy\r\n cerebro.addstrategy(TestStrategy)\r\n\r\n cerebro.broker.setcash(100000.0)\r\n\r\n # Set the commission - 0.1% ... 
divide by 100 to remove the %\r\n cerebro.broker.setcommission(commission=0.001)\r\n\r\n # modpath = os.path.dirname(os.path.abspath(sys.argv[0]))\r\n # datapath = os.path.join(modpath, 'GAZP.ME (1).csv')\r\n # Create a Data Feed\r\n # data = bt.feeds.YahooFinanceCSVData(\r\n # dataname=datapath, # yahoo data downloaded from site\r\n # # Do not pass values before this date\r\n # fromdate=datetime.datetime(2022, 1, 1),\r\n # # Do not pass values after this date\r\n # todate=datetime.datetime(2022, 4, 1),\r\n # reverse=False)\r\n\r\n # Pass it to the backtrader datafeed and add it to the cerebro\r\n data = bt.feeds.PandasData(dataname=data)\r\n\r\n # Add the Data Feed to Cerebro\r\n cerebro.adddata(data)\r\n\r\n print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())\r\n\r\n cerebro.run()\r\n\r\n print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())","repo_name":"WISEPLAT/Learn-BackTrader","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5010,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"31"} +{"seq_id":"31828903725","text":"import argparse\nfrom db_session import insert_object, get_user_id\nfrom db_model import User\nimport datetime\nimport os\n\nFOLDER_CURR = os.path.dirname(os.path.abspath(__file__))\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--id\", type=str,\n\thelp=\"id of user\")\nap.add_argument(\"-n\", \"--name\", type=str,\n\thelp=\"name of user\")\n\nargs = vars(ap.parse_args())\nid = args[\"id\"]\nname = args[\"name\"]\nrecords = get_user_id(name)\nif len(records) > 0:\n print('user is exist!')\nelse:\n folder = os.path.join(FOLDER_CURR, '..', 'datasets')\n path = os.path.join(folder, name)\n avatar = '/images/avatar/{0}/0.jpg'.format(name)\n if not os.path.isdir(path):\n os.mkdir(path)\n user = User(face_id=id, password=id, date_created = str(datetime.datetime.now()), level=1, fullname=name, avatar= avatar)\n insert_object(user)","repo_name":"tuanvnext/face_recognize","sub_path":"src/backend/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39026265205","text":"from BlinkyTape import BlinkyTape\n \nbb = BlinkyTape('/dev/tty.usbmodemfa131')\n\nwhile True:\n\n for x in range(0, 60):\n bb.sendPixel(10,10,10)\n bb.show();\n\n for x in range(0, 60):\n bb.sendPixel(0,0,0)\n bb.show()\n","repo_name":"asmaps/BlinkyTape_Python","sub_path":"flash_example.py","file_name":"flash_example.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"cs","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"16762797543","text":"# -*- coding: utf-8 -*-\n\nimport datetime\nimport csv\nimport math\nimport matplotlib\nimport matplotlib.pyplot as pyplot\nimport matplotlib.dates as dates\n\nmatplotlib.rc('font', family='Arial')\nformatter = dates.DateFormatter('%d. %m. 
%Y')\n\ndata1 = {}\ndata2 = {}\nwith open('../data/processed/answer_time.csv', 'rb') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n for row in reader:\n if int(row[0]) == 1:\n if not (int(row[1])/1000) in data1:\n data1[int(row[1])/1000] = 0\n data1[int(row[1])/1000] += 1\n else:\n if not (int(row[1])/1000) in data2:\n data2[int(row[1])/1000] = 0\n data2[int(row[1])/1000] += 1\n\nx = range(0,max(data1.keys())+1)\n\ny1 = [0 for _ in range (max(data1.keys())+1)]\nfor key, value in data1.items():\n y1[key] = value\n\ny2 = [0 for _ in range (max(data1.keys())+1)]\nfor key, value in data2.items():\n y2[key] = value\n\n\npyplot.ylabel(u'Počet odpovědí')\npyplot.xlabel(u'Čas (v sekundách)')\nax = pyplot.subplot()\npyplot.plot(x,y1, label=u'Otázky rozpoznání reprezentace')\nax = pyplot.subplot()\npyplot.plot(x,y2, label=u'Otázky rozpoznání obrázku')\npyplot.axis([0, 30 , 0, 400])\n\nhandles, labels = ax.get_legend_handles_labels()\ndisplay = (0,1)\nax.legend([handle for i,handle in enumerate(handles) if i in display], [label for i,label in enumerate(labels) if i in display], loc='upper right')\n\npyplot.show()","repo_name":"repli2dev/nature-quizzer-analysis","sub_path":"visualizations/answer_time_distribution.py","file_name":"answer_time_distribution.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20979359181","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport random\nimport requests\n\n\ndef get_city_id(city_name):\n '''获取城市的id,为后边的做准备'''\n city_url = 'http://apis.baidu.com/baidunuomi/openapi/cities'\n headers = {'apikey': '14cdd85738c717e546a5b6852c3e1631'}\n\n r = requests.get(city_url, headers=headers)\n cities = r.json()['cities']\n for city in cities:\n # 注意此处没有使用`==`,而是使用了`in`\n if city_name in city['city_name']:\n return city['city_id']\n # 如果找到输入的城市,则返回城市的id,如果没找到,就退出\n print('city not found')\n assert 0\n\n\ndef get_shops_list(city_id, keyword, location):\n shops_url = 'http://apis.baidu.com/baidunuomi/openapi/searchshops'\n headers = {'apikey': '14cdd85738c717e546a5b6852c3e1631'}\n payload = {'city_id': city_id, 'location': location,\n 'keyword': keyword, 'sort': 4}\n ''' 获取给定关键词搜索到的店铺的名称,并按照销量排序,\n 此处输入了位置信息,输入自己所在的坐标即返回自己附近的餐馆\n 还有很多可选参数,请参考:\n http://apistore.baidu.com/apiworks/servicedetail/508.html\n '''\n r = requests.get(shops_url, params=payload, headers=headers)\n return r.json()['data']['shops']\n\n\ndef get_all_deals(shop_list):\n deal_list = []\n for shop in shop_list:\n for deal in shop['deals']:\n deal_list.append([deal['title'],\n deal['description'],\n deal['promotion_price'] / 100,\n deal['score']])\n # 在一大堆信息中,我们只选取了餐馆的名字、描述、价格和评分\n return deal_list\n\nif __name__ == '__main__':\n city_id = get_city_id('南京')\n shop_list = get_shops_list(city_id, '黄焖鸡', '32.0219605,118.7987918')\n deal_list = get_all_deals(shop_list)\n # 搜索南京市给定位置附近销量大的黄焖鸡\n print(random.choice(deal_list))\n # 随机选一个团购单下单吧!\n","repo_name":"crossin/py-practice","sub_path":"src/nuomi/nuomi.py","file_name":"nuomi.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"71923649689","text":"from typing import Any, Mapping, Optional, List, Tuple\nfrom absl import logging\n\nimport os\nimport gin\nimport functools\nimport tensorflow as tf\n\nfrom orbit.core import 
performance\nfrom orbit.config import ExperimentConfig\nfrom orbit.config import RuntimeConfig\nfrom orbit.config import OptimizationConfig\nfrom orbit.config import DifferentialPrivacyConfig\nfrom orbit.core.checkpoint import BestCheckpointExporter\nfrom orbit.launch import action\nfrom orbit.launch.task import Task\nfrom orbit.launch.controller import Controller, Action\nfrom orbit.launch.trainer import AbstractTrainer, AbstractValidator, Trainer\nfrom orbit.optimization import ExponentialMovingAverage\nfrom orbit.optimization.factory import OptimizerFactory\n\n\ndef _clip_l2_norm(grads_vars: List[Tuple[tf.Tensor, tf.Tensor]],\n l2_norm_clip: float) -> List[Tuple[tf.Tensor, tf.Tensor]]:\n gradients = []\n variables = []\n for g, v in grads_vars:\n gradients.append(g)\n variables.append(v)\n clipped_gradients = tf.clip_by_global_norm(gradients, l2_norm_clip)[0]\n return list(zip(clipped_gradients, variables))\n\n\ndef _add_noise(grads_vars: List[Tuple[tf.Tensor, tf.Tensor]],\n noise_stddev: float) -> List[Tuple[tf.Tensor, tf.Tensor]]:\n ret = []\n for g, v in grads_vars:\n noise = tf.random.normal(tf.shape(g), stddev=noise_stddev)\n ret.append(g + noise, v)\n return ret\n\n\nclass OrbitExperimentRunner:\n def __init__(self,\n distribution_strategy: tf.distribute.Strategy,\n task: Task,\n mode: str,\n params: ExperimentConfig,\n model_dir: str,\n run_post_validation: bool = False,\n save_summary: bool = True,\n train_actions: Optional[List[Action]] = None,\n validation_actions: Optional[List[Action]] = None,\n controller_cls=Controller) -> None:\n self._strategy = distribution_strategy or tf.distribute.get_strategy()\n self._params = params\n self._mode = mode\n self._model_dir = model_dir\n self._run_post_validation = run_post_validation\n\n with self._strategy.scope():\n self._trainer = self._build_trainer(\n params, task,\n train=('train' in mode),\n validation=('validation' in mode) or run_post_validation)\n assert self._trainer is not None\n\n self._checkpoint_manager = self._maybe_build_checkpoint_manager()\n\n self._controller = self._build_controller(\n trainer=self._trainer if 'train' in mode else None,\n validator=self._trainer,\n save_summary=save_summary,\n train_actions=train_actions,\n validation_actions=validation_actions,\n controller_cls=controller_cls)\n\n def _build_controller(self,\n trainer: AbstractTrainer,\n validator: AbstractValidator,\n save_summary: bool = True,\n train_actions: Optional[List[Action]] = None,\n validation_actions: Optional[List[Action]] = None,\n controller_cls=Controller):\n train_actions = [] if not train_actions else train_actions\n if trainer:\n train_actions += self._get_train_actions(\n self._params, trainer, self._model_dir,\n checkpoint_manager=self._checkpoint_manager)\n validation_actions = [] if not validation_actions else validation_actions\n if validator:\n validation_actions += self._get_validation_actions(\n self._params, trainer, self._model_dir)\n\n summary_dir = os.path.join(self._model_dir, 'train')\n validation_summary_dir = os.path.join(\n self._model_dir, self._params.trainer.validation_summary_subdir)\n summary_interval = self._params.trainer.summary_interval\n\n controller = controller_cls(\n strategy=self._strategy,\n trainer=trainer,\n validator=validator,\n global_step=self._trainer.global_step,\n steps_per_loop=self._params.trainer.steps_per_loop,\n checkpoint_manager=self._checkpoint_manager,\n summary_dir=summary_dir if save_summary else None,\n validation_summary_dir=validation_summary_dir if save_summary else 
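A note on the differential-privacy helpers above: `_add_noise` calls `list.append` with two positional arguments, which raises `TypeError` at runtime, and it accepts no `noise_seed` keyword even though the runner later binds one with `functools.partial`. A corrected sketch of the presumed intent (the optional seed parameter is my assumption about what was meant):

```python
from typing import List, Optional, Tuple
import tensorflow as tf

def _add_noise(grads_vars: List[Tuple[tf.Tensor, tf.Tensor]],
               noise_stddev: float,
               noise_seed: Optional[int] = None) -> List[Tuple[tf.Tensor, tf.Tensor]]:
    ret = []
    for g, v in grads_vars:
        noise = tf.random.normal(tf.shape(g), stddev=noise_stddev, seed=noise_seed)
        ret.append((g + noise, v))   # append one (gradient, variable) pair
    return ret
```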
None,\n summary_interval=summary_interval if save_summary else None,\n train_actions=train_actions,\n validation_actions=validation_actions)\n return controller\n\n def _build_trainer(self,\n params: ExperimentConfig,\n task: Task,\n train: bool,\n validation: bool):\n logging.info('Running default trainer.')\n\n # Build the model and optimizer\n model = task.build_model()\n optimizer = self._create_optimizer(task, params)\n\n return Trainer(\n self._strategy,\n params, task,\n model=model,\n optimizer=optimizer,\n train=train,\n validation=validation,\n checkpoint_exporter=self._build_best_checkpoint_exporter())\n\n def _create_optimizer(self, task, params):\n gradient_transformers = None\n if hasattr(params.task, 'differential_privacy_config'):\n dp_config = params.task.differential_privacy_config\n logging.info('Adding differential privacy transform with config %s.',\n dp_config.as_dict())\n noise_stddev = dp_config.clipping_norm * dp_config.noise_multiplier\n gradient_transformers = [\n functools.partial(\n _clip_l2_norm, l2_norm_clip=dp_config.clipping_norm),\n functools.partial(\n _add_noise, noise_stddev=noise_stddev, noise_seed=dp_config.noise_seed)\n ]\n\n opt_factory = OptimizerFactory(params.trainer.optimizer_config)\n optimizer = opt_factory.create_optimizer(\n opt_factory.learning_rate(),\n gradient_transformers=gradient_transformers,\n use_legacy_optimizer=params.trainer.use_legacy_optimizer)\n # Configuring optimizer when loss_scale is set in runtime config. This helps\n # avoiding overflow/underflow for float16 computations.\n if params.runtime:\n optimizer = performance.configure_optimizer(\n optimizer,\n use_float16=params.runtime.mixed_precision_dtype == 'float16',\n loss_scale=params.runtime.loss_scale)\n return optimizer\n\n def _build_best_checkpoint_exporter(self):\n export_subdir = self._params.trainer.best_checkpoint_export_subdir\n metric_name = self._params.trainer.best_checkpoint_validation_metric\n metric_comp = self._params.trainer.best_checkpoint_metric_comp\n if self._model_dir and metric_name:\n if export_subdir:\n best_ckpt_dir = os.path.join(self._model_dir, export_subdir)\n else:\n best_ckpt_dir = self._model_dir\n best_ckpt_exporter = BestCheckpointExporter(\n best_ckpt_dir, metric_name, metric_comp)\n logging.info('Created the best checkpoint exporter. 
'\n 'model_dir: %s, export_subdir: %s, metric_name: %s',\n self._model_dir, export_subdir, metric_name)\n else:\n best_ckpt_exporter = None\n return best_ckpt_exporter\n\n def _maybe_build_checkpoint_manager(self):\n assert self._trainer is not None\n if self._trainer.checkpoint:\n if self._model_dir is None:\n raise ValueError('model_dir must be specified, but got None.')\n checkpoint_manager = tf.train.CheckpointManager(\n self._trainer.checkpoint,\n directory=self._model_dir,\n max_to_keep=self._params.trainer.max_to_keep,\n step_counter=self._trainer.global_step,\n checkpoint_interval=self._params.trainer.checkpoint_interval,\n init_fn=self._trainer.initialize)\n else:\n checkpoint_manager = None\n return checkpoint_manager\n\n @gin.configurable\n def _get_train_actions(self, params: ExperimentConfig,\n trainer: Trainer,\n model_dir: str,\n checkpoint_manager: tf.train.CheckpointManager):\n train_actions = []\n if hasattr(params.task, 'pruning') and params.task.pruning:\n train_actions.append(action.PruningAction(\n exprt_dir=model_dir,\n model=trainer.model,\n optimizer=trainer.optimizer))\n\n if params.trainer.recovery_max_trials >= 0:\n recovery_condition = action.RecoveryActionCondition(\n global_step=trainer.global_step,\n loss_upper_bound=params.trainer.loss_upper_bound,\n recovery_begin_steps=params.trainer.recovery_begin_steps,\n recovery_max_trials=params.trainer.recovery_max_trials)\n recovery_action = action.ConditionAction(\n condition=recovery_condition,\n action=action.RecoveryAction(checkpoint_manager))\n train_actions.append(recovery_action)\n return train_actions\n\n @gin.configurable\n def _get_validation_actions(self, params: ExperimentConfig, trainer: Trainer, model_dir: str):\n validation_actions = []\n if trainer is not None and isinstance(trainer.optimizer, ExponentialMovingAverage):\n validation_actions.append(action.EMACheckpointAction(\n export_dir=model_dir,\n optimizer=trainer.optimizer,\n checkpoint=trainer.checkpoint,\n max_to_keep=params.trainer.max_to_keep))\n return validation_actions\n\n def run(self):\n self._controller.boost(\n self._mode, self._params.trainer, self._run_post_validation)\n","repo_name":"exogeny/rocket","sub_path":"orbit/launch/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":9282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34428646242","text":"#!/usr/bin/python3\r\n\r\n# ------- Parser vyrokove logiky ----------\r\n# formule zadavejte v reverse polish notaci\r\n# zapis: negace -> '!'\r\n# konjunkce -> '*'\r\n# disjunkce -> '+'\r\n# implikace -> 'i'\r\n# ekvivalence -> 'e'\r\n# literal -> jakekoliv velke pismeno z ASCII\r\n\r\nclass LinkedList:\r\n def __init__(self):\r\n self.head = None\r\n\r\n # when print is called\r\n def __repr__(self):\r\n node = self.head\r\n nodes = []\r\n while node is not None:\r\n nodes.append(node.name)\r\n node = node.next\r\n nodes.append(\"None\")\r\n return \"->\".join(nodes)\r\n\r\n # makes list iterable\r\n def __iter__(self):\r\n node = self.head\r\n while node is not None:\r\n yield node\r\n node = node.next\r\n\r\n def add_first(self, node):\r\n node.next = self.head\r\n self.head = node\r\n\r\n def add_last(self, node):\r\n curr_node = Node\r\n if not self.head:\r\n self.head = node\r\n return\r\n\r\n for curr_node in self:\r\n pass\r\n curr_node.next = node\r\n\r\n def remove_first(self):\r\n if not self.head:\r\n raise Exception(\"List is empty\")\r\n self.head = self.head.next\r\n\r\n def draw_table(self, 
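`_maybe_build_checkpoint_manager` above hands the trainer's checkpoint to `tf.train.CheckpointManager` so that old checkpoints are rotated automatically. The same machinery in isolation, with a hypothetical toy model standing in for the trainer state:

```python
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1)])   # stand-in model
step = tf.Variable(0, dtype=tf.int64)

ckpt = tf.train.Checkpoint(model=model, step=step)
manager = tf.train.CheckpointManager(ckpt, directory='/tmp/ckpts', max_to_keep=3)
for _ in range(5):
    step.assign_add(1)
    manager.save()                 # only the 3 newest checkpoints are kept
print(manager.latest_checkpoint)
```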
node, literals):\r\n if not node.ohodnoceni or not literals:\r\n return\r\n o_base = self.generate_literals_eval(len(literals))\r\n o_len = 2**len(literals)\r\n\r\n for i in range(o_len):\r\n if node.ohodnoceni[i]:\r\n node.ohodnoceni[i] = 1\r\n else:\r\n node.ohodnoceni[i] = 0\r\n\r\n for i in range(len(o_base)):\r\n for j in range(o_len):\r\n if o_base[i][j]:\r\n o_base[i][j] = 1\r\n else:\r\n o_base[i][j] = 0\r\n\r\n for i in range(len(o_base)):\r\n print(\"{} \".format(literals[i]), end='')\r\n print(node.name)\r\n\r\n for i in range(o_len):\r\n for j in range(len(o_base)):\r\n print(\"{} |\".format(o_base[j][i]), end=' ')\r\n print(\" {} \".format(node.ohodnoceni[i]))\r\n\r\n # vygeneruje pocatecni hodnoty pro zadany pocet literalu\r\n def generate_literals_eval(self, count):\r\n eval = []\r\n size = 2**count\r\n repeat = size\r\n for i in range(count):\r\n o = []\r\n in_true = True\r\n repeat //= 2\r\n for j in range(size):\r\n o.append(in_true)\r\n if (j + 1) % repeat == 0:\r\n if in_true:\r\n in_true = False\r\n else:\r\n in_true = True\r\n eval.append(o)\r\n return eval\r\n\r\nclass Node:\r\n def __init__(self, name=None, ohodnoceni=None):\r\n self.name = name\r\n self.ohodnoceni = ohodnoceni\r\n self.next = None\r\n\r\n def __repr__(self):\r\n return self.name\r\n\r\n def copy(self):\r\n return self.name, self.ohodnoceni\r\n\r\n def negate(self):\r\n self.name = '!' + self.name\r\n neg = []\r\n lenA = len(self.ohodnoceni)\r\n for i in range(lenA):\r\n if self.ohodnoceni[i]:\r\n neg.append(False)\r\n else:\r\n neg.append(True)\r\n self.ohodnoceni = neg\r\n\r\n\r\ndef get_ohodnoceni(a, b, spojka):\r\n result = []\r\n if spojka == \"*\":\r\n for a, b in zip(a, b):\r\n result.append(a and b)\r\n\r\n if spojka == \"+\":\r\n for a, b in zip(a, b):\r\n result.append(a or b)\r\n\r\n if spojka == \"i\":\r\n for a, b in zip(a, b):\r\n if not a:\r\n result.append(True)\r\n continue\r\n if a and b:\r\n result.append(True)\r\n continue\r\n result.append(False)\r\n\r\n if spojka == \"e\":\r\n for a, b in zip(a, b):\r\n if a and b or (not a) and (not b):\r\n result.append(True)\r\n else:\r\n result.append(False)\r\n\r\n return result\r\n\r\n\r\ndef count_literals(str):\r\n count = 0\r\n literals = []\r\n for char in str:\r\n if char.isupper() and char.isalpha() and char not in literals:\r\n count += 1\r\n literals.append(char)\r\n return count\r\n\r\n\r\ndef read_input():\r\n literals = \"\"\r\n llist = LinkedList()\r\n inputstr = input()\r\n c = count_literals(inputstr)\r\n ohodnoceni = llist.generate_literals_eval(c)\r\n for char in inputstr:\r\n if char == '*' or char == '+' or char == '!' 
or char == 'i' or char == 'e':\r\n node = Node(char)\r\n llist.add_last(node)\r\n else:\r\n if char.isupper() and char.isalpha():\r\n if char not in literals:\r\n literals += char\r\n node = Node(char, ohodnoceni[literals.find(char)])\r\n llist.add_last(node)\r\n\r\n return llist, literals\r\n\r\n\r\n# main program\r\nllist, literals = read_input()\r\no = []\r\n#print(llist)\r\n\r\nwhile llist.head:\r\n if not llist.head.next:\r\n break\r\n\r\n while llist.head.ohodnoceni:\r\n o.append(llist.head.copy())\r\n llist.remove_first()\r\n\r\n if llist.head.name == '!':\r\n llist.remove_first()\r\n data = o.pop()\r\n node = Node(data[0], data[1])\r\n node.negate()\r\n llist.add_first(node)\r\n\r\n while o:\r\n data = o.pop()\r\n node = Node(data[0], data[1])\r\n llist.add_first(node)\r\n\r\n else:\r\n if len(o) < 2:\r\n llist.head = None\r\n break\r\n data1 = o.pop()\r\n data2 = o.pop()\r\n node = Node(\"({}{}{})\".format(data2[0], llist.head.name, data1[0]),\r\n get_ohodnoceni(data2[1], data1[1], llist.head.name))\r\n\r\n llist.remove_first()\r\n llist.add_first(node)\r\n\r\n while o:\r\n data = o.pop()\r\n node = Node(data[0], data[1])\r\n llist.add_first(node)\r\n\r\n # print(llist)\r\n\r\n\r\nif llist.head:\r\n print(\"Tabulka:\")\r\n llist.draw_table(llist.head, literals)\r\n\r\n count = 0\r\n total = len(llist.head.ohodnoceni)\r\n for item in llist.head.ohodnoceni:\r\n if item:\r\n count += 1\r\n\r\n if not count:\r\n print(\"Formule je kontradikce\")\r\n elif count == total:\r\n print(\"Formule je tautologie\")\r\n else:\r\n print(\"Formule je splnitelna pro {} ohodnoceni z {}\".format(count, total))\r\n\r\nelse:\r\n print(\"Spatne zadana formule\")\r\n","repo_name":"Pato-99/Propositional-calculus-parser","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":6467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21447131265","text":"# This is a world wide know game. This challenge need's to ask a position from the user and show\n# graphically the results. It's necessary to inform to the players when they win. Each player needs \n# a different charactere in this game.\n\nimport os\n\ndef choose_player():\n\n validFunctions = ['q', 'r', 'x', 'o']\n validOptions = range(1, 10)\n player = \"\"\n roundPlayer = 1\n listPositions = {1:\" \", 2:\" \", 3:\" \", 4:\" \", 5:\" \", 6:\" \", 7:\" \", 8:\" \", 9:\" \"}\n \n while player not in validFunctions:\n\n # Clear the screen\n os.system('cls')\n\n print(\"\\nPress Q anytime to quit the game...\\n\")\n\n # Ask if the user want to be the X or the O\n player = input(\"Do you want to start as X or O? \")\n\n # Clear the screen\n os.system('cls')\n\n # Ending the game\n if player.lower() == 'q':\n\n quit_game()\n\n # Wrong option selected\n if player not in validFunctions:\n\n print(\"\\nWrong option selected. 
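`generate_literals_eval` in the parser above builds the 2**n truth-assignment columns by repeatedly halving a repeat interval; `itertools.product` yields the same assignments row-wise, which is often easier to reason about. A sketch, not part of the original parser:

```python
from itertools import product

def truth_rows(n):
    """All 2**n rows of True/False assignments for n literals."""
    return list(product([True, False], repeat=n))

rows = truth_rows(2)
assert rows == [(True, True), (True, False), (False, True), (False, False)]
```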
Try again\\n\")\n\n continue\n \n # Identifing chosen option\n if player.lower() == 'x':\n\n print(\"Starting game with X\\n\")\n roundPlayer = \"X\"\n \n elif player.lower() == 'o':\n\n print(\"Starting game with O\\n\")\n roundPlayer = \"O\"\n\n # Print the default board\n defaultBoard = {1:\" 1 | 2 | 3 \",\n 2:\"-----------------------\",\n 3:\" 4 | 5 | 6 \",\n 4:\"-----------------------\",\n 5:\" 7 | 8 | 9 \"\n }\n\n for line in defaultBoard:\n print(defaultBoard[line])\n\n print(\"\\n\")\n\n # Let's play\n if player in validFunctions:\n\n selectedOption = 0\n\n while selectedOption not in validOptions:\n\n # Check the end of game\n count = 0\n\n for pos in listPositions:\n\n count += 1 if listPositions[pos] == \"X\" or listPositions[pos] == \"O\" else 0\n\n if count == 9:\n\n print(\"Game Over!\")\n listPositions = restart_game()\n choose_player()\n\n continue\n\n # Winner check\n if check_winner(listPositions):\n\n listPositions = restart_game()\n choose_player()\n\n continue\n\n else:\n\n # Inform who's the next player\n print(\"It's the \"+roundPlayer+\" turn\")\n\n # Choose an option 1-9\n selectedOption = input(\"Select a number between 1-9 \")\n\n # Clear the screen\n os.system('cls')\n\n print(\"Press Q anytime to quit the game...\")\n\n # Ending the game\n if selectedOption.lower() == 'q':\n\n quit_game()\n\n if selectedOption.lower() == 'r':\n\n listPositions = restart_game()\n\n choose_player()\n\n continue\n\n # Check a valid option selected\n if (not selectedOption.isdigit()) or (int(selectedOption) not in validOptions):\n \n print(\"\\nWrong option selected! Choose a number between 1-9\\n\")\n \n # Printing the board\n print_board(listPositions)\n \n continue\n\n else:\n # Parse to int\n selectedOption = int(selectedOption)\n\n if (listPositions[selectedOption] != \" \"):\n\n print(\"\\nOption already taken! 
Please choose another unchosen option\")\n \n pass\n\n else:\n\n # Selected positions\n listPositions[selectedOption] = roundPlayer\n\n # Next time will be for the next player\n roundPlayer = \"O\" if roundPlayer == \"X\" else \"X\"\n\n # Printing the board\n print_board(listPositions)\n\n # Reset selected option to keep the game alive\n selectedOption = 0\n\ndef print_board(listPositions):\n\n print(\"\\n\")\n\n printBoard = {1:\" \"+listPositions[1]+\" | \"+listPositions[2]+\" | \"+listPositions[3]+\" \",\n 2:\"-----------------------\",\n 3:\" \"+listPositions[4]+\" | \"+listPositions[5]+\" | \"+listPositions[6]+\" \",\n 4:\"-----------------------\",\n 5:\" \"+listPositions[7]+\" | \"+listPositions[8]+\" | \"+listPositions[9]+\" \"\n }\n\n for line in printBoard:\n print(printBoard[line])\n\n print(\"\\n\")\n\ndef check_winner(listPositions):\n \n win = False\n\n # Win check\n if listPositions[1] == listPositions[2] and listPositions[1] == listPositions[3] and listPositions[1] != \" \":\n print(\"Player \"+listPositions[1]+\" Won!\")\n win = True\n if listPositions[4] == listPositions[5] and listPositions[4] == listPositions[6] and listPositions[4] != \" \":\n print(\"Player \"+listPositions[4]+\" Won!\")\n win = True\n if listPositions[7] == listPositions[8] and listPositions[7] == listPositions[9] and listPositions[7] != \" \":\n print(\"Player \"+listPositions[7]+\" Won!\")\n win = True\n if listPositions[1] == listPositions[4] and listPositions[1] == listPositions[7] and listPositions[1] != \" \":\n print(\"Player \"+listPositions[1]+\" Won!\")\n win = True\n if listPositions[2] == listPositions[5] and listPositions[2] == listPositions[8] and listPositions[2] != \" \":\n print(\"Player \"+listPositions[2]+\" Won!\")\n win = True\n if listPositions[3] == listPositions[6] and listPositions[3] == listPositions[9] and listPositions[3] != \" \":\n print(\"Player \"+listPositions[3]+\" Won!\")\n win = True\n if listPositions[1] == listPositions[5] and listPositions[1] == listPositions[9] and listPositions[1] != \" \":\n print(\"Player \"+listPositions[1]+\" Won!\")\n win = True\n if listPositions[7] == listPositions[5] and listPositions[7] == listPositions[3] and listPositions[7] != \" \":\n print(\"Player \"+listPositions[7]+\" Won!\")\n win = True\n\n return win\n\ndef restart_game():\n\n input(\"\\nPress [ENTER] to play again! \")\n\n # Clear the screen\n os.system('cls')\n\n return {1:\" \", 2:\" \", 3:\" \", 4:\" \", 5:\" \", 6:\" \", 7:\" \", 8:\" \", 9:\" \"}\n\ndef quit_game():\n\n # Clear the screen\n os.system('cls')\n\n print(\"Thank you for comming. 
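`check_winner` in the tic-tac-toe script above spells out all eight winning lines as separate `if` blocks; the same test can be table-driven. A sketch with a hypothetical `positions` dict in the same 1-9 layout the game uses:

```python
WIN_LINES = [(1, 2, 3), (4, 5, 6), (7, 8, 9),   # rows
             (1, 4, 7), (2, 5, 8), (3, 6, 9),   # columns
             (1, 5, 9), (3, 5, 7)]              # diagonals

def check_winner(positions):
    """positions maps squares 1-9 to 'X', 'O' or ' '; returns the winner or None."""
    for a, b, c in WIN_LINES:
        if positions[a] != ' ' and positions[a] == positions[b] == positions[c]:
            return positions[a]
    return None
```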
See ya!\")\n\n quit() \n \n \n# Starting the game\nchoose_player()","repo_name":"Maksoud/py-exercises-002","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":6773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73227597529","text":"from sys import stdin\r\n\r\nINF = float('inf')\r\n\r\ndef frod(g):\r\n\r\n\tfor k in range(len(g)):\r\n\t\tfor i in range(len(g)):\r\n\t\t\tfor j in range(len(g)):\r\n\t\t\t\tif g[i][j] > g[i][k] + g[k][j]:\r\n\t\t\t\t\tg[i][j] = g[i][k] + g[k][j]\r\n\r\n\r\ndef solve(g1,g2,a,b):\r\n\tfrod(g1)\r\n\tfrod(g2)\r\n\tfor i in range(len(g1)):\r\n\t\tfor j in range(len(g1)):\r\n\t\t\tif g2[i][j] > a*(g1[i][j]) + b: \r\n\t\t\t\treturn False\r\n\r\n\treturn True\r\n\r\ndef main():\r\n\tn = int(stdin.readline())\r\n\twhile n != 0:\r\n\t\tGA = list()\r\n\t\tGS = list()\r\n\t\tfor k in range(n):\r\n\t\t\tt = list()\r\n\t\t\tfor h in range(n):\r\n\t\t\t\tif k == h:\r\n\t\t\t\t\tt.append(0)\r\n\t\t\t\telse:\r\n\t\t\t\t\tt.append(INF)\r\n\r\n\t\t\tGA.append(t)\r\n\t\t\tGS.append(t.copy())\r\n\r\n\t\tfor i in range(n):\r\n\t\t\tline = list(map(int,stdin.readline().split()))\r\n\t\t\tc = 1\r\n\t\t\twhile c < len(line):\r\n\t\t\t\tGA[line[0]-1][line[c]-1] = 1\r\n\t\t\t\tc+=1\r\n\r\n\t\tfor j in range(n):\r\n\t\t\tline = list(map(int,stdin.readline().split()))\r\n\t\t\tc = 1\r\n\t\t\twhile c < len(line):\r\n\t\t\t\tGS[line[0]-1][line[c]-1] = 1\r\n\t\t\t\tc+=1\r\n\t\tA,B = map(int,stdin.readline().split())\r\n\t\tband = solve(GA,GS,A,B)\r\n\t\tif band:\r\n\t\t\tprint(\"Yes\")\r\n\t\telse:\r\n\t\t\tprint(\"No\")\r\n\t\tn = int(stdin.readline())\r\n\r\nmain()","repo_name":"ivandbs2000/Uva","sub_path":"12319.py","file_name":"12319.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43546272211","text":"__author__ = 'tuerke'\n\nfrom .. 
import data, util\n\ndef swap_axes( image, axis1, axis2, adapt_orientation = True ):\n\t'''Swaps 'axis1' and 'axis2' of the image 'image' in image space.\n\n\tIf 'adapt_orientation' is set to 'True' it will also swap:\n\n\tindexOrigin[axis1] <--> indexOrigin[axis2]\n\n\tand the respective orientation vectors represented by 'axis1' and 'axis2'.\n\tThat means if the orientation was correct before using 'swap_axes'\n\tit is recommended to set 'adapt_orientation' to 'True'.\n\n\tIf 'adapt_orientation' is set to 'False' the orientation will\n\tbe preserved.\n\n\tReturns the swapped image.\n\t'''\t\n\tndarray = image.getArray()\n\tswapped_ndarray = ndarray.swapaxes( axis1, axis2 ).copy()\n\tswapped_image = data.Image( swapped_ndarray, image )\n\t\n\t#adapt orientation\n\tif( adapt_orientation == True and axis1 != data.dimensions.TIME_DIM and axis2 != data.dimensions.TIME_DIM ):\n\t\tvectors = [ image.getProperty(\"rowVec\"), image.getProperty(\"columnVec\"), image.getProperty(\"sliceVec\") ]\n\t\tindex_origin = image.getProperty( \"indexOrigin\" )\n\n\t\tvectors[axis1], vectors[axis2] = vectors[axis2], vectors[axis1]\n\n\t\tindex_origin[axis1], index_origin[axis2] = index_origin[axis2], index_origin[axis1]\n\n\t\tswapped_image.setProperty( \"rowVec\", vectors[0] )\n\t\tswapped_image.setProperty( \"columnVec\", vectors[1] )\n\t\tswapped_image.setProperty( \"sliceVec\", vectors[2] )\n\t\tswapped_image.setProperty( \"indexOrigin\", index_origin )\n\treturn swapped_image\t","repo_name":"isis-group/python-isis","sub_path":"isis/tools/swap_axes.py","file_name":"swap_axes.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"70132913048","text":"import math\nimport argparse\nimport os\nimport random\nfrom os import path\n\nrandom.seed(42)\n\nALPHABETS = \"ACGT\"\n\ndef main(output_location, number_of_cases):\n\n # If the location does not exists, create it\n if not path.isdir(output_location):\n os.mkdir(output_location)\n\n len_of_zero_padding = len(str(number_of_cases))\n\n # Create test files as much as the number of cases\n for i in range(number_of_cases):\n\n # Create a file with input cases\n with open(f\"{output_location}/input{i:0>{len_of_zero_padding}}.txt\", \"w\", encoding=\"utf-8\") as file:\n\n for _ in range(2):\n # Write first string\n file.write(f\"{''.join(random.sample(ALPHABETS, len(ALPHABETS)))}\\n\")\n\n # Write indices\n current = len(ALPHABETS)\n\n for _ in range(0, random.randint(3, 7)):\n index = random.randint(0, current)\n current += current\n\n # Write index to the file\n file.write(f\"{index}\\n\")\n # We're done\n\n print(f\"Generation of test files done at :: {output_location}\")\n\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-ol\", \"--output-location\", type=str, default=\"./../input\", help=\"Location of the direrctory where to output the test files. 
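`swap_axes` above copies the result of `ndarray.swapaxes` because NumPy returns a view that still aliases the source image buffer; a small demonstration of why that `.copy()` matters:

```python
import numpy as np

a = np.zeros((2, 3, 4))
view = a.swapaxes(0, 2)            # shape (4, 3, 2), shares memory with a
view[0, 0, 0] = 7.0
assert a[0, 0, 0] == 7.0           # writing through the view mutates a

independent = a.swapaxes(0, 2).copy()
independent[0, 0, 0] = 9.0
assert a[0, 0, 0] == 7.0           # the copy is detached, as used above
```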
Do not end with a slash.\")\n parser.add_argument(\"-n\", \"--number-of-cases\", type=int, default=16, help=\"Number of test files to generate.\")\n args = parser.parse_args()\n \n main(args.output_location, args.number_of_cases)","repo_name":"daveaditya/CSCI_570_ALGO","sub_path":"src/main/resources/python/test_generator.py","file_name":"test_generator.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14718394771","text":"from optibook.synchronous_client import Exchange\r\n# from basic_trader import start_trading\r\nfrom smarter_trader import start_trading\r\nfrom IPython.display import clear_output\r\nfrom social_feeds import get_social_feed\r\n# from testing_feeds import get_mood_for_news\r\n\r\nimport asyncio\r\nimport time\r\nimport logging\r\nlogger = logging.getLogger('client')\r\nlogger.setLevel('ERROR')\r\n\r\nprint(\"Setup was successful.\")\r\n\r\nexchange = Exchange()\r\nexchange.connect()\r\n\r\nINSTRUMENTS = exchange.get_instruments()\r\n\r\nQUOTED_VOLUME = 10\r\nFIXED_MINIMUM_CREDIT = 0.15\r\nPRICE_RETREAT_PER_LOT = 0.005\r\nPOSITION_LIMIT = 100\r\n\r\nhold = {\r\n 'CSCO': {'value': 0, 'mood': None},\r\n 'PFE': {'value': 0, 'mood': None},\r\n 'SAN': {'value': 0, 'mood': None},\r\n 'ING': {'value': 0, 'mood': None},\r\n 'NVDA': {'value': 0, 'mood': None},\r\n}\r\n\r\nasync def trader():\r\n global hold\r\n while True:\r\n start_trading(exchange, INSTRUMENTS, QUOTED_VOLUME, FIXED_MINIMUM_CREDIT, PRICE_RETREAT_PER_LOT, POSITION_LIMIT, hold)\r\n \r\n await asyncio.sleep(2)\r\n \r\n for k, v in hold.items():\r\n if v['value'] > 0:\r\n hold[k]['value'] = v['value'] - 1\r\n \r\n # Clear the displayed information after waiting\r\n clear_output(wait=True)\r\n \r\nasync def newsChecker():\r\n global hold\r\n while True:\r\n social_feeds = get_social_feed(exchange)\r\n \r\n if social_feeds:\r\n for feed, mood in social_feeds.items():\r\n if not feed:\r\n continue\r\n \r\n if mood < 0.45:\r\n hold[feed] = {'value': 12, 'mood': 'ask'}\r\n exchange.delete_orders(feed)\r\n print(f'Someting bad happened to {feed}')\r\n \r\n elif mood > 0.55:\r\n hold[feed] = {'value': 12, 'mood': 'bid'}\r\n exchange.delete_orders(feed)\r\n print(f'Someting good happened to {feed}')\r\n else:\r\n exchange.delete_orders(feed)\r\n continue\r\n else:\r\n print(f'\\n --- No news --- \\n')\r\n \r\n await asyncio.sleep(5)\r\n \r\n \r\nloop = asyncio.get_event_loop()\r\ntask1 = asyncio.ensure_future(trader())\r\ntask2 = asyncio.ensure_future(newsChecker())\r\n\r\nloop.run_until_complete(asyncio.gather(task1, task2))","repo_name":"LeonsBuntis/hackzurich2023","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29831783248","text":"import argparse\n\n\ndef TestOptions():\n parser = argparse.ArgumentParser(description='PyTorch hand pose Training')\n \n #dataset\n #model structure\n parser.add_argument('--target-weight', dest='target_weight',\n action='store_true',\n help='Loss with target_weight')\n parser.add_argument('--is-train', type=bool, default=False,\n help='is train')\n parser.add_argument('--gpu-ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. 
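The trading bot above drives two infinite coroutines, `trader` and `newsChecker`, via `asyncio.ensure_future` plus `loop.run_until_complete(asyncio.gather(...))`; on Python 3.7+ the same shape is usually written with `asyncio.run`. A sketch of the concurrency skeleton only, with the exchange logic stubbed out:

```python
import asyncio

async def trader():
    while True:
        # place/refresh quotes here
        await asyncio.sleep(2)

async def news_checker():
    while True:
        # poll the social feed here
        await asyncio.sleep(5)

async def main():
    await asyncio.gather(trader(), news_checker())

# asyncio.run(main())   # uncomment to run; both loops run until interrupted
```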
use -1 for CPU')\n parser.add_argument('--hpe-enabled', type=bool, default=False, help='is hpe')\n \n\n \n # data preprocessing\n \n \n #checkpoint\n \n return parser.parse_args()","repo_name":"baeckgoo/ir-hand","sub_path":"HPE2/src/options/test_options.py","file_name":"test_options.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"31"} +{"seq_id":"24813916432","text":"import argparse\nfrom glob import glob\nimport math\nimport os\nimport subprocess\n\nimport cv2\nfrom matplotlib import pyplot as plt\nimport torch\nimport numpy as np\nfrom decord import VideoReader\nfrom omegaconf import OmegaConf\n\nfrom pytorch_grad_cam import ActivationsAndGradients\n\nimport models\n\ndef load_model(url, map_location='cpu'):\n if url.startswith('https'):\n checkpoint = torch.hub.load_state_dict_from_url(\n url, map_location=map_location)\n else:\n checkpoint = torch.load(url, map_location=map_location)\n return checkpoint\n\n\ndef make_divisible(x, divisor=32):\n # Returns x evenly divisible by divisor\n return math.ceil(x / divisor) * divisor\n\n\nclass Detector:\n\n def __init__(self, weights, device='cpu', img_size=640) -> None:\n self.device = device\n self.img_size = img_size\n self.model = load_model(weights, device)['model'].float().fuse().eval()\n\n def __call__(self, imgs):\n h0, w0 = imgs[0].shape[:2]\n r = self.img_size / max(h0, w0) # resize image to img_size\n w, h = make_divisible(w0 * r), make_divisible(h0 * r)\n if r != 1:\n interp = cv2.INTER_AREA if r < 1 else cv2.INTER_LINEAR\n imgs = [cv2.resize(img, (w, h), interpolation=interp) for img in imgs]\n scale = torch.tensor([w0 / w, h0 / h] * 2)\n img = np.stack(imgs).transpose(0, 3, 1, 2)\n img = np.ascontiguousarray(img)\n img = torch.from_numpy(img).to(self.device, non_blocking=True)\n img = img.float() / 255.\n\n # Inference\n pred = self.model(img)[0]\n\n # Process detections\n index = pred[..., 4].max(1).indices\n return pred[range(index.size(0)), index, :4].cpu().mul(scale).numpy()\n\n\nclass FaceVideo:\n\n def __init__(self, src, detector, n_frames=16, img_size=224) -> None:\n self.src = src\n self.n_frames = n_frames\n if isinstance(img_size, int):\n img_size = (img_size, img_size)\n self.img_size = img_size\n self.detector = detector\n self.mean = np.float32([0.485, 0.456, 0.406]) * 255\n self.std = np.float32([0.229, 0.224, 0.225]) * 255\n self._frames = None\n self._boxes = None\n\n @property\n def frames(self):\n if self._frames is None:\n vr = VideoReader(self.src)\n sampled_idxs = np.linspace(0, len(vr) - 1, self.n_frames, dtype=int).tolist()\n self._frames = list(vr.get_batch(sampled_idxs).asnumpy())\n return self._frames\n \n @property\n def boxes(self):\n if self._boxes is None:\n self._boxes = self.detector(self.frames)\n return self._boxes\n\n def crop(self, margin=1.3):\n cx, cy = self.boxes[:, 0], self.boxes[:, 1]\n hw = self.boxes[:, 2:].max(-1) * margin\n rois = np.stack([cx - hw / 2, cy - hw /2, cx + hw / 2, cy + hw / 2], 1).clip(0)\n clip = []\n for frame, roi in zip(self.frames, rois.tolist()):\n x0, y0, x1, y1 = map(int, roi)\n clip.append(cv2.resize(frame[y0:y1, x0:x1], self.img_size, interpolation=cv2.INTER_LINEAR))\n return clip\n\n def load_cropped_frames(self, margin=1.3):\n cx, cy = self.boxes[:, 0], self.boxes[:, 1]\n hw = self.boxes[:, 2:].max(-1) * margin\n rois = np.stack([cx - hw / 2, cy - hw /2, cx + hw / 2, cy + hw / 2], 1).clip(0)\n clip = []\n for frame, roi in zip(self.frames, rois.tolist()):\n x0, y0, x1, 
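One caveat about the option parser above: `type=bool` does not parse booleans, since `bool('False')` is `True`, so `--is-train False` would still yield `True`. The `action='store_true'` form already used for `--target-weight` is the reliable spelling; a sketch:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--is-train', action='store_true', help='is train')

assert parser.parse_args(['--is-train']).is_train is True
assert parser.parse_args([]).is_train is False
assert bool('False') is True   # why type=bool misbehaves for flags
```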
y1 = map(int, roi)\n clip.append(cv2.resize(frame[y0:y1, x0:x1], self.img_size, interpolation=cv2.INTER_LINEAR))\n clip = (np.float32(clip) - self.mean) / self.std\n clip = np.ascontiguousarray(clip.transpose(0, 3, 1, 2))\n return torch.from_numpy(clip)\n\n\nclass VideoWriter:\n def __init__(self, filename, fps=24) -> None:\n self.filename = filename\n if self.filename:\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n self.fps = fps\n self.p = None\n \n def write(self, frame):\n if not self.filename:\n return\n if self.p is None:\n h, w, _ = frame.shape\n self.p = subprocess.Popen([\n \"ffmpeg\",\n '-y', # overwrite output file if it exists\n '-f', 'rawvideo',\n '-vcodec','rawvideo',\n '-s', f'{w}x{h}', # size of one frame\n '-pix_fmt', 'bgr24',\n '-r', f'{self.fps}', # frames per second\n '-i', '-', # The imput comes from a pipe\n '-s', f'{w}x{h}',\n '-an', # Tells FFMPEG not to expect any audio\n '-loglevel', 'error',\n '-b:v', '800k',\n '-pix_fmt', 'yuv420p',\n self.filename\n ], stdin=subprocess.PIPE)\n self.p.stdin.write(frame.tobytes())\n\n def close(self):\n if self.p:\n self.p.stdin.flush()\n self.p.stdin.close()\n self.p.wait()\n\n\n@torch.no_grad()\ndef main():\n parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument('-i', '--input', type=str,\n default='data/ffpp_videos/manipulated_sequences/Deepfakes/c40/videos/000_003.mp4')\n parser.add_argument('-c', '--config', type=str, default='configs/ffpp_x3d_inference.yaml')\n parser.add_argument('-d', '--device', type=str,\n default='cuda' if torch.cuda.is_available() else 'cpu')\n parser.add_argument('--n_frames', type=int, default=16)\n parser.add_argument('--detector', type=str,\n default='https://github.com/zyayoung/oss/releases/download/rdd/yolov5s-face.pt')\n parser.add_argument('--resume', type=str,\n default='https://github.com/zyayoung/oss/releases/download/rdd/ffpp_x3d.pth')\n args = parser.parse_args()\n\n oc_cfg = OmegaConf.load(args.config)\n oc_cfg.merge_with(vars(args))\n args = oc_cfg\n\n device = torch.device(args.device)\n print(\"Loading face detection model...\", end=' ', flush=True)\n detector = Detector(args.detector, device)\n print(\"Done\")\n\n print(\"Loading fogery detection model...\", end=' ', flush=True)\n model = models.__dict__[args.model.name](**args.model.params)\n state_dict = load_model(args.resume, map_location='cpu')\n if isinstance(state_dict, dict) and 'state_dict' in state_dict:\n state_dict = state_dict['state_dict']\n model.load_state_dict({\n k.replace('module.', ''): v for k, v in state_dict.items()})\n model.set_segment(args.n_frames)\n model.to(device).eval()\n print(\"Done\")\n target_layers = [model.rgb_blocks[-2], model.blocks[-2]]\n\n os.makedirs('figs/df_det', exist_ok=True)\n os.makedirs('figs/face_det', exist_ok=True)\n os.makedirs('figs/cropped', exist_ok=True)\n os.makedirs('figs/cam', exist_ok=True)\n for src in glob(\"data/ffpp_videos/*/*/c40/videos/00*.mp4\"):\n print(\"Detecting...\", end=' ', flush=True)\n video = FaceVideo(src, detector, n_frames=args.n_frames)\n frames = video.load_cropped_frames()\n frames = frames.flatten(0, 1).to(device, non_blocking=True)\n\n cam_model = ActivationsAndGradients(model, target_layers, None)\n pred = model(frames[None])[0]\n real_prob = pred.softmax(-1)[0].item()\n print(\"Done\")\n\n label = 'Fake' if real_prob < 0.5 else 'Real'\n confidence = 1 - real_prob if real_prob < 0.5 else real_prob\n print(f'Result: {label}; Confidence: {confidence:.2f}')\n _, _, _, method, _, _, name = src.split('/')\n\n vw_cam = 
VideoWriter(os.path.join('figs/cam', f'{method}_{name}'), 2)\n with torch.enable_grad():\n cam_model(frames[None])[0, 1 if real_prob < 0.5 else 0].backward(retain_graph=True)\n\n # pull the gradients out of the cam_model\n heat_map_sum = 0\n for gradient, activation in zip(cam_model.gradients, cam_model.activations):\n activation *= gradient.mean((2, 3, 4), True)\n heat_map = activation[0].mean(0).relu()\n heat_map_sum = heat_map_sum + heat_map\n heat_map_sum.div_(heat_map_sum.max())\n for heat_map, frame in zip(heat_map_sum.cpu().numpy(), video.crop()):\n img = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n heatmap1 = cv2.resize(heat_map, (frame.shape[1], frame.shape[0]))\n heatmap1 = np.uint8(255 * (heatmap1))\n heatmap1 = cv2.applyColorMap(heatmap1, cv2.COLORMAP_JET)\n vw_cam.write(heatmap1 // 2 + img // 2)\n vw_cam.close()\n cam_model.release()\n\n h, w = video.frames[0].shape[:2]\n tl = max(1, round(0.002 * (h + w)) ) # line/font thickness\n video = FaceVideo(src, detector, n_frames=128)\n vw_df = VideoWriter(os.path.join('figs/df_det', f'{method}_{name}'), 16)\n vw_face = VideoWriter(os.path.join('figs/face_det', f'{method}_{name}'), 16)\n vw_crop = VideoWriter(os.path.join('figs/cropped', f'{method}_{name}'), 16)\n for i, (frame, cropped, box) in enumerate(zip(video.frames, video.crop(), video.boxes)):\n x, y, w, h = box\n x1, y1, x2, y2 = map(int, (x-w/2, y-h/2, x+w/2, y+h/2))\n color = (0, 0, 255) if real_prob < 0.5 else (0, 255, 0)\n img = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n img_df = cv2.rectangle(img.copy(), (x1, y1), (x2, y2), color, tl)\n img_df = cv2.putText(img_df, label, (x1, y1 - tl * 2), 0, tl, color, tl, cv2.LINE_AA)\n vw_df.write(img_df)\n cropped = cv2.cvtColor(cropped, cv2.COLOR_RGB2BGR)\n vw_crop.write(cropped)\n img_face = cv2.rectangle(img.copy(), (x1, y1), (x2, y2), (255, 255, 255), tl)\n vw_face.write(img_face)\n vw_df.close()\n vw_face.close()\n vw_crop.close()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ThreeCatsLoveFish/RDD","sub_path":"model/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":9843,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"31"} +{"seq_id":"7646758848","text":"#! /usr/bin/python\n\n#import numpy as np\n#import re\n\n#from scipy.optimize import leastsq\n#from scipy import constants\n#import matplotlib.pyplot as plt\nimport numpy as np\nimport collections\nimport re\n\nclass hc_dos:\n \"\"\"to to: if input dimensions do not match, e.g. T is Nx1 and B is Mx1, understand it as mx1 per element of T\"\"\"\n def __init__(self, histfile = None, expfile = None, datasource = 'PPMS'):\n \"\"\"\n Utility class for calculating the heat capacity of a gapped triplonic (Schottky-term) system based on \n - a given density of states\n - a multilevel-system with given energies\n \n and of a phononic system based on the Einstein and Debye-approximation\n Initializes the DOS-histogram by pickle-loading it from path. 
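The deepfake demo above blends the Grad-CAM activation map into each cropped frame by color-mapping it and averaging with the image (`heatmap1 // 2 + img // 2`). The overlay step in isolation, with an adjustable blend weight; `overlay_heatmap` is a hypothetical helper assuming a uint8 BGR frame and a heat map scaled to [0, 1]:

```python
import cv2
import numpy as np

def overlay_heatmap(frame_bgr, heat, alpha=0.5):
    """Blend a [0, 1] heat map onto a uint8 BGR frame."""
    h, w = frame_bgr.shape[:2]
    heat = cv2.resize(heat.astype(np.float32), (w, h))           # match frame size
    colored = cv2.applyColorMap(np.uint8(255 * heat), cv2.COLORMAP_JET)
    return cv2.addWeighted(colored, alpha, frame_bgr, 1.0 - alpha, 0)
```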
Later, we will try to include simple\n string files, if needed\n \n The DoS has to be given as a function of energy in meV\n\n The heat capacity is calculated in J per mole (Joule per 6.022e23 particles/atoms/unit cell)\n Input:\n\n path_to_dos_hist: String, leading to the histogram file\n \n \n \"\"\"\n from scipy import constants \n\n self.fac_meVtoK = constants.e * 1e-3 / constants.k #factor to get energies in K from energies in meV\n self.muB = constants.physical_constants['Bohr magneton in K/T'][0] #muB divided by kB in K/T\n self.kmol = constants.k * constants.N_A\n if not histfile == None:\n self.load_dos(histfile)\n if not expfile == None:\n self.load_data(expfile)\n\n def load_dos(self, path):\n \"\"\"loading the dos histogram, expects plain text\n 'energies'\n energies\n 'count'\n count\n \"\"\"\n #load the histogram\n with open(path) as datei:\n dos_hist = []\n for line in datei:\n if 'energies' in line or 'count' in line:\n dos_hist.append(np.array([]))\n else:\n dos_hist[-1] = np.append(dos_hist[-1], float(line))\n\n #find the lower and upper bound of the unperturbed dos\n self.Emin_raw, self.Emax_raw = dos_hist[0].min() * self.fac_meVtoK, dos_hist[0].max() * self.fac_meVtoK \n #get the energy spacing\n self.deltaE = (dos_hist[0][1:] - dos_hist[0][:-1]).mean() * self.fac_meVtoK \n #get the distribution\n self.dos_pdf = np.asarray(dos_hist[1], dtype = float)\n #and normalize it\n self.dos_pdf /= self.dos_pdf.sum()\n self.Emin = self.Emin_raw\n self.Emax = self.Emax_raw\n\n def load_data(self, path, datasource = 'PPMS', Nmol = 1, mass =1, method = 'replace'):\n \"\"\" takes the files in path and loads the data from there\n INPUT:\n path: string or iterable list of strings, not nested\n \"\"\"\n# pdb.set_trace()\n if isinstance(path, basestring):\n path = np.array([path])\n for p in path:\n try:\n data = np.append(data, np.atleast_2d(self._load_data_single(p)), axis = 0)\n except:\n data = np.atleast_2d(self._load_data_single(p))\n data[:,2] /= Nmol #convert from Joule per K to Joule per mole and K\n\n if method == 'replace':\n self.T_exp = data[:,1]\n self.B_exp = data[:,0]\n self.C_exp = data[:,2] \n\n elif method == 'add':\n self.T_exp = np.append(self.T_exp, data[:,1])\n self.B_exp = np.append(self.B_exp, data[:,0])\n self.C_exp = np.append(self.C_exp, data[:,2])\n\n def _load_data_single(self, path, datasource = 'PPMS'):\n \"\"\"loads the data from one file\n INPUT:\n path: string, path to file\n OUTPUT:\n data: N x 3 array with\n data[:,0] : field in T\n data[:,1] : temperature in K\n data[:,2] : heat capacity in J/K\n \"\"\"\n C_factors = {'\\xb5J/K' : 1e-6, #microJoule per Kelvin\n }\n if datasource == 'PPMS':\n #we expect a sample heat capacity in microJoule per K\n with open(path, 'r') as f:\n for line in f:\n if 'Time Stamp' in line:\n self.unit = line.split(',')[9].split()[-1].strip('()')\n break\n\n with open(path, 'r') as f:\n lines = (line for line in f if not re.search('[a-zA-DF-Z]', line))\n data = np.genfromtxt(lines, delimiter = ',', usecols = (5, 4, 9))\n data[:,0] /= 1e4 #convert from Oe to T\n data[:,2] *= C_factors[self.unit] #convert from microJoule to Joule\n return data\n\n def _dos_raw(self, energy, S):\n \"\"\"returns the density of states for states with a total spin of S\n Input:\n energy: float or array of floats, energy at which the DoS should be given\n S int or array of ints, total spin of the state to be examined\n \n Output:\n dos: array of float, density of states\n \"\"\"\n energy = np.atleast_1d(energy)\n S = np.atleast_1d(S)\n# else: #if 
energy is a list or an array etc. (iterable) \n energy = np.atleast_1d(energy) #make sure the energy is an array\n dos = np.zeros(energy.shape[0])\n #the positions in the dos-histogram are the differences of energy and minimum energy in units of deltaE\n maske = np.where((S==0) & (energy < self.deltaE/2) & (energy >- self.deltaE/2)) #S=0 and energies close to zero\n dos[maske] = 1.0 #where S=0, the dos is 1.0 close to zero (N states for N particles) (also outside the band)\n\n# pdb.set_trace()\n maske = np.where((S==1) & (energy > self.Emin) & (energy < self.Emax)) #all energies with S=1 in the band\n indices = np.asarray((energy - self.Emin) / self.deltaE, dtype=int) #\n dos[maske] = self.dos_pdf[indices[maske]] #the dos is given by the values of the histogram at the \"indices\"-positions\n\n return dos\n\n def dos(self, energy, spinstate, field, g=1.94):\n \"\"\"returns the density of states for the given energy, field, and involved spinstates\n Input:\n energy: n x 1 float array of energies, unit: K (energy divided by kB)\n spinstate: n x 4 x 2 float array of involved spin states S,m\n field: n x 1 float array of fields, unit: T\n\n Output:\n dos: n x 1 array, dtype=float, unit: 1/K\n \"\"\"\n\n n = energy.shape[0]\n energy = np.atleast_1d(energy)\n energy = energy.reshape(n, 1) #build a (n, 1)-array as it may be a (n,) array \n energy = np.tile(energy, spinstate.shape[1])\n\n field = np.atleast_1d(field)\n field = field.reshape(field.shape[0], 1)\n field = np.tile(field, spinstate.shape[1])\n \n energy = energy + spinstate[:, :, 1] * g * self.muB * field #transform the energy back to the fieldless case for each m\n\n S = spinstate[:,:,0].reshape(energy.shape[0] * energy.shape[1]) #1-dim array of the first spin quantum number\n energy = energy.reshape(energy.shape[0] * energy.shape[1]) #convert it to a 1-dim array\n \n dos = self._dos_raw(energy, S)\n dos = dos.reshape(n, dos.shape[0]/n)\n dos = dos.sum(axis=1)\n return dos\n \n def C_el_dos(self, B, T, g = 1.94):\n \"\"\"\n calculates the specific heat of the system for the given fields and temperatures\n Input:\n B: n x 1 array, dtype = float, unit: T (Tesla)\n T: n x 1 array, dtype = float, unit: K (Kelvin)\n \"\"\"\n# pdb.set_trace()\n Emin = min(0, self.Emin - g * self.muB * max(B)) #get the global minimum of the energy for all fields\n Emax = self.Emax + g * self.muB * max(B) #get the global maximum of the energy for all fields\n \n E = np.arange(Emin, Emax, self.deltaE)\n N = E.shape[0]\n e = np.tile(E, B.shape[0])\n\n n = B.shape[0]\n b = np.repeat(B, N)\n\n d = np.array([[0, 0], [1, 1], [1, 0], [1, -1]])\n d = d.reshape(1, d.shape[0], d.shape[1])\n d = np.tile(d, (b.shape[0], 1, 1))\n\n# return E.shape, B.shape, d.shape\n dos = self.dos(e, d, b) \n dos = dos.reshape(n,N).T \n\n E = E.reshape(E.shape[0], 1)\n\n bes_fak = - np.outer(E, 1/T)\n bes_fak = np.exp(bes_fak)\n\n A = dos * E**2 * bes_fak\n A = A.sum(axis = 0)\n\n B = dos * E * bes_fak\n B = B.sum(axis = 0)\n\n Z = dos * bes_fak\n Z = Z.sum(axis = 0)\n \n C = (A*Z - B**2) / Z**2\n\n return self.kmol * C/T**2\n \n def c_level(self, e, m, T, B = np.array([0])):\n \"\"\"\n calculates the heat capacity of a multi-level system with a Zeeman-splitting\n \n input:\n e: M x 1 - array of energies for the level, given in units of K (E/kB), \n d: M x 1 - array of degeneracies of the levels\n m: M x 1 - array of magnetic moments of the levels (-1, 1, 1)\n \n T: N x 1 - array of sample temperatures in K\n B: N x 1 - array of applied magnetic fields in Tesla\n \"\"\"\n g = 1.94 #lande 
g-factor\n# muB = constants.physical_constants['Bohr magneton in K/T'][0] #muB devided by kB\n e = np.asarray(e, dtype = float) #making sure that the energies are a floating point array\n T = np.asarray(T, dtype = float) #making sure that the temperatures are a floating point array\n B = np.asarray(B, dtype = float) #making sure that the fields are a floating point array\n B = g * self.muB * B \n energies = e[:, np.newaxis] * np.ones((e.shape[0], B.shape[0])) #initialize the energies for each measurement point\n energies -= np.outer(m, B) \n boltz_fac = np.exp( -energies / T )\n \n A = energies * energies * boltz_fac\n A = A.sum(axis=0)\n \n B = energies * boltz_fac\n B = B.sum(axis=0)\n \n Z = boltz_fac\n Z = Z.sum(axis=0)\n \n return self.kmol * (A * Z - B * B) / (Z * Z) / T**2\n\n def c_schottky(self, T, deltaE):\n \"\"\"convenience function to be able to simulate a 2-level system without to much input\"\"\"\n e = np.array([0, deltaE])\n m = np.array([0, 0])\n return self.c_level(e, m, T)\n\n def C_ph_Debye(self, T, T_D):\n T = np.asarray(T, dtype=float)\n if isinstance(T, (collections.Sequence, np.ndarray)):\n stepsize = 1e-3\n c = np.zeros(T.shape[0])\n for index, t in enumerate(T):\n x = np.arange(1e-3, T_D / t, stepsize)\n y = x**4 * np.exp(x) / (np.exp(x) - 1)**2\n c[index] = 9 * sum(y) * stepsize * ( t / T_D )**3\n else:\n x = np.arange(1e-3, T_D / T, 1e-3)\n y = x**4 * np.exp(x) / (np.exp(x) - 1)**2\n c = 9 * sum(y) * (x[1:]-x[:-1]).mean() * ( T / T_D )**3\n return c * self.kmol\n \n def C_ph_Einstein(self, T, T_E):\n T = np.asarray(T, dtype=float)\n return 3 * self.kmol * (T_E / T)**2 * np.exp(T_E/T) / (np.exp(T_E/T) - 1)**2\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"henrikgrundmann/heatcapacity","sub_path":"hc_dos.py","file_name":"hc_dos.py","file_ext":"py","file_size_in_byte":11195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28176158275","text":"__author__ = \"El Amo\"\r\n\r\nclass Angajat:\r\n\r\n marire = 1.04\r\n nr_angajati = 0\r\n\r\n def __init__(self, first, last, pay):\r\n self.first = first\r\n self.last = last\r\n self.pay = pay\r\n self.email = first +'.' + last + '@gmail.com'\r\n\r\n Angajat.nr_angajati += 1\r\n\r\n def nume_Angajat(self):\r\n return '{} {}'.format(self.first, self.last)\r\n\r\n\r\n def aplica_marire(self):\r\n self.pay = int(self.pay * self.marire)\r\n\r\n @classmethod\r\n def regleaza_marire(cls, marirea):\r\n cls.marire = marirea\r\n\r\n\r\nan1 = Angajat('Sebastian', 'Bach', 60000)\r\nan2 = Angajat('test', 'user', 10000)\r\n\r\nAngajat.regleaza_marire(1.05) # poti pune si an1.regleaza..... 
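For the two-level case, `c_schottky` above (built on the general `c_level`) reduces to the closed-form Schottky anomaly C(T) = R (Δ/T)² e^{Δ/T} / (1 + e^{Δ/T})², with Δ the gap in kelvin and R the gas constant. A direct NumPy sketch of that formula, useful for cross-checking the multilevel routine:

```python
import numpy as np
from scipy import constants

R = constants.k * constants.N_A   # gas constant, J / (mol K)

def c_schottky_closed_form(T, delta):
    """Two-level Schottky heat capacity; T and the gap delta in kelvin."""
    x = delta / np.asarray(T, dtype=float)
    return R * x**2 * np.exp(x) / (1.0 + np.exp(x))**2
```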
si va fi acelasi lucru\r\n\r\nprint(Angajat.marire)\r\nprint(an1.marire)\r\nprint(an2.marire)","repo_name":"MihaiDinca1000/Game","sub_path":"Exercitii/tests3.py","file_name":"tests3.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12576840209","text":"#!/usr/bin/env python\n# encoding: utf-8\nimport elementary\nimport evas\n\ndef thumb_clicked(obj):\n if not elementary.need_ethumb():\n print(\"Ethumb not available!\")\n return\n\n images = (\n \"panel_01.jpg\",\n \"plant_01.jpg\",\n \"rock_01.jpg\",\n \"rock_02.jpg\",\n \"sky_01.jpg\",\n \"sky_02.jpg\",\n \"sky_03.jpg\",\n \"sky_04.jpg\",\n \"wood_01.jpg\",\n \"mystrale.jpg\",\n \"mystrale_2.jpg\"\n )\n\n win = elementary.StandardWindow(\"thumb\", \"Thumb\")\n win.autodel_set(True)\n if obj is None:\n win.callback_delete_request_add(lambda o: elementary.exit())\n\n tb = elementary.Table(win)\n tb.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)\n\n n = 0\n for j in range(12):\n for i in range(12):\n th = elementary.Thumb(win)\n n = (n + 1) % 11\n th.file = \"images/%s\" % (images[n])\n th.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)\n th.size_hint_align_set(evas.EVAS_HINT_FILL, evas.EVAS_HINT_FILL)\n tb.pack(th, i, j, 1, 1)\n th.editable = True\n th.show()\n\n sc = elementary.Scroller(win)\n sc.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)\n win.resize_object_add(sc)\n\n sc.content_set(tb)\n tb.show()\n sc.show()\n\n win.resize(600, 600)\n win.show()\n\nif __name__ == \"__main__\":\n elementary.init()\n\n thumb_clicked(None)\n\n elementary.run()\n elementary.shutdown()\n","repo_name":"kakaroto/e17","sub_path":"BINDINGS/python/python-elementary/tests/test_thumb.py","file_name":"test_thumb.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"31"} +{"seq_id":"16516135319","text":"import json\nimport jsonschema\nimport octobot_commons.logging\n\n\nLOGGER_NAME = \"json_util\"\n\n\ndef validate(config, schema_file) -> None:\n \"\"\"\n Validate a config file, raise upon validation error\n :param config: the config\n :param schema_file: the config schema\n :return: None\n \"\"\"\n with open(schema_file) as json_schema:\n loaded_schema = json.load(json_schema)\n jsonschema.validate(instance=config, schema=loaded_schema)\n\n\ndef read_file(\n file_path: str,\n raise_errors: bool = True,\n on_error_value: dict = None,\n open_mode=\"r\",\n) -> dict:\n \"\"\"\n Read a load the given file with json.load()\n :param file_path: file to read\n :param raise_errors: when True will forward errors. 
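The Romanian comment in the employee example above translates to: you can also call `an1.regleaza_marire(...)` and it is the same thing. That works because the `@classmethod` rebinds the attribute on the class itself, so the change is visible through every instance. A condensed English illustration of the same behaviour:

```python
class Employee:
    raise_amount = 1.04

    @classmethod
    def set_raise(cls, amount):
        cls.raise_amount = amount   # mutates the class, not one instance

a, b = Employee(), Employee()
a.set_raise(1.05)                   # same effect as Employee.set_raise(1.05)
assert Employee.raise_amount == b.raise_amount == 1.05
```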
Will just log errors otherwise\n :param on_error_value: return this value when raise_errors is False and an error occurs\n :param open_mode: the file open mode to give to open()\n :return: the parsed file or default value on error if possible\n \"\"\"\n try:\n with open(file_path, open_mode) as open_file:\n return json.load(open_file)\n except PermissionError as err:\n if raise_errors:\n raise\n octobot_commons.logging.get_logger(LOGGER_NAME).error(\n f\"Permission error when reading {file_path} file: {err}.\"\n )\n except Exception as err:\n if raise_errors:\n raise\n octobot_commons.logging.get_logger(LOGGER_NAME).exception(\n f\"Unexpected error when reading {file_path} file: {err}.\"\n )\n if on_error_value is None:\n raise ValueError(\"on_error_value is unset\")\n return on_error_value\n","repo_name":"techfreaque/octane","sub_path":"octobot-packages/OctoBot-Commons/octobot_commons/json_util.py","file_name":"json_util.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"33608427505","text":"# Vigenere File Hack\n# Wrapper to use files for bruteforce hack\n\nimport argparse # https://docs.python.org/3/library/argparse.html\nimport textwrap # https://docs.python.org/3/library/textwrap.html\n\nimport vigenere_hack\n\n\ndef main():\n # Get and parse the arguments\n options = get_args()\n\n input_file = open(options.input_filename.name)\n ciphertext = input_file.read()\n input_file.close()\n\n try:\n hacked_message = vigenere_hack.hack_vigenere(ciphertext)\n if hacked_message is not None:\n print(\"Writing decrypted text to %s.\" % options.output_filename.name)\n\n output_file = open(options.output_filename.name, \"w\")\n output_file.write(hacked_message)\n output_file.close()\n else:\n print(\"Failed to hack encryption.\")\n except KeyboardInterrupt:\n print(\"\\n[+] Detected CTRL+C ... 
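The `validate` helper above delegates to `jsonschema.validate`, which raises on the first violation; a self-contained sketch of that call with an inline schema instead of a schema file (the port schema is an invented example):

```python
import jsonschema

schema = {'type': 'object',
          'properties': {'port': {'type': 'integer'}},
          'required': ['port']}

jsonschema.validate(instance={'port': 8080}, schema=schema)       # passes silently
try:
    jsonschema.validate(instance={'port': '8080'}, schema=schema)  # wrong type
except jsonschema.ValidationError as err:
    print('invalid config:', err.message)
```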
\")\n print(\"[+] Done\")\n\n\ndef get_args():\n parser = argparse.ArgumentParser(\n description=\"Vigenere File Hack\",\n formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog=textwrap.dedent(\n \"\"\"Example:\n vigenere_file_hack.py -i input_filename.txt -o output_filename.txt\n \"\"\"\n ),\n )\n parser.add_argument(\n \"-i\",\n \"--input_filename\",\n action=\"store\",\n dest=\"input_filename\",\n type=argparse.FileType(\"r\"),\n help=\"Takes input from a file name of your choice\",\n )\n parser.add_argument(\n \"-o\",\n \"--output_filename\",\n action=\"store\",\n dest=\"output_filename\",\n type=argparse.FileType(\"w\"),\n help=\"Directs the output to a name of your choice\",\n )\n values = parser.parse_args()\n return values\n\n\n# main() function.\nif __name__ == \"__main__\":\n main()\n","repo_name":"tymyrddin/scripts-classical-ciphers","sub_path":"vigenere/vigenere_file_hack.py","file_name":"vigenere_file_hack.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21743900235","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom .regularizer import _Regularizer\n\n\nclass Conv2dWithMask(nn.Conv2d):\n def __init__(self, in_channels, out_channels, kernel_size, stride=1,\n padding=0, dilation=1, groups=1, bias=True):\n\n super(Conv2dWithMask, self).__init__(\n in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride,\n padding=padding, dilation=dilation, groups=groups, bias=bias)\n\n self.test_mask = None\n self.p_mask = 1.0\n self.frequency = 16\n\n def forward(self, input):\n if self.training:\n self.frequency -= 1\n if self.frequency == 0:\n sample = np.random.binomial(n=1, p=self.p_mask, size=self.out_channels)\n param = self.weight\n l1norm = param.detach().view(param.size(0), -1).norm(p=1, dim=1)\n mask = torch.tensor(sample)\n mask = mask.expand(param.size(1) * param.size(2) * param.size(3), param.size(0)).t().contiguous()\n mask = mask.view(self.weight.shape).to(param.device)\n mask = mask.type(param.type())\n masked_weights = self.weight * mask\n masked_l1norm = masked_weights.detach().view(param.size(0), -1).norm(p=1, dim=1)\n pruning_factor = (masked_l1norm.sum() / l1norm.sum()).item()\n pruning_factor = max(0.2, pruning_factor)\n weight = masked_weights / pruning_factor\n self.frequency = 16\n else:\n weight = self.weight\n else:\n weight = self.weight\n return F.conv2d(input, weight, self.bias, self.stride,\n self.padding, self.dilation, self.groups)\n\n\n# replaces all conv2d layers in target`s model with 'Conv2dWithMask'\ndef replace_conv2d(container):\n for name, module in container.named_children():\n if (isinstance(module, nn.Conv2d)):\n print(\"replacing: \", name)\n new_module = Conv2dWithMask(in_channels=module.in_channels,\n out_channels=module.out_channels,\n kernel_size=module.kernel_size, padding=module.padding,\n stride=module.stride, bias=module.bias)\n setattr(container, name, new_module)\n replace_conv2d(module)\n\n\nclass DropFilterRegularizer(_Regularizer):\n def __init__(self, name, model, reg_regims, threshold_criteria=None):\n super().__init__(name, model, reg_regims, threshold_criteria)\n 
replace_conv2d(model)\n","repo_name":"IntelLabs/distiller","sub_path":"distiller/regularization/drop_filter.py","file_name":"drop_filter.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","stars":4246,"dataset":"github-code","pt":"32"} +{"seq_id":"17660453250","text":"'''\nCreated on Nov 1, 2009\n\n@author: jecortez\n'''\nimport web\n#from controllers import charts\nfrom controllers import workflow\n#from controllers import benchmark\n#from controllers import benchmarkQueries\n#from controllers import benchmarkMySQLQueries\n\n#web.config.debug = False\n\nurls = (\n '/(.*)', 'index'\n)\n\ncontrollers = {'workflow':workflow\n #'charts':charts,\n #'benchmark':benchmark,\n #'benchmarkQueries':benchmarkQueries, \n #'benchmarkMySQLQueries':benchmarkMySQLQueries\n }\n\napp = web.application(urls, globals())\n\n\noptions = {\"render_plain\": web.template.render('views/'),\n \"render\": web.template.render('views/', base='layout'),\n \"dbhost\": \"bass02\",\n \"dbname\": \"pegasusLigoCombined2\",\n \"dbeventtable\": \"netlogger\"\n }\n\nclass index:\n def GET(self, query):\n query = str(web.webapi.ctx.path)\n \n #strip trailing /'s\n if query[-1] == \"/\":\n query = query[:-1]\n \n splitQuery = query.split(\"/\", 2)\n controller = splitQuery[1]\n action=\"\"\n if len(splitQuery)>2:\n action = splitQuery[2]\n controllerClass=controllers[controller]\n return controllerClass.invokeAction(action, options)\n def POST(self, query):\n return self.GET(query)\n\n#if __name__ == \"__main__\": app.run()\napplication = web.application(urls, globals()).wsgifunc()\n","repo_name":"elainenaomi/stampedewebapi","sub_path":"WebApi-Mongo/trunk/dispatcher.py","file_name":"dispatcher.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30797705864","text":"#coding:utf-8\nimport requests\n\nwith open(\"link.txt\", \"r\") as f:\n count = 1\n for line in f.readlines():\n imgUrl = line.strip('\\n') #去掉列表中每一个元素的换行符\n #imgUrl = 'https://res.qxueyou.com/img/2020/09/02/'+line\n image_name = str(count) + '.PNG'\n print(image_name,imgUrl)\n count+=1\n\n imgresponse = requests.get(imgUrl, stream=True) #以流的方式打开\n image = imgresponse.content\n address=\"C:\\\\Users\\Administrator\\Desktop\\PMP\"+\"\\\\\"\n try:\n with open(address+image_name ,\"wb\") as jpg:\n jpg.write(image)\n except IOError:\n print(\"IO Error\\n\")\n finally:\n jpg.close\n\n\n","repo_name":"yangle92/Exercise","sub_path":"Spider/Download_PMP_ppt.py","file_name":"Download_PMP_ppt.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"46119690232","text":"#!/usr/bin/env python\n\nfrom setuptools import setup\n\nREQUIRES = [\n 'wiringpi'\n]\n\nsetup(name='Servo Control',\n version='1.0',\n description='Servo Script',\n author='Juan Pablo Samper',\n author_email='jpsamper2009@gmail.com',\n url='https://github.com/pereza77/decisionMakerServer',\n install_requires=REQUIRES\n )\n","repo_name":"angelicaperez37/decisionMakerServer","sub_path":"servo/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33418424829","text":"import random\ndef findUniq(a):\n c = sorted([[x,a.count(x)] for x in set(a)])\n for i in c:\n if(i[1] == 1):\n return i[0]\n\nn = int(input())\na = [random.randint(1,20) for i 
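The PMP image downloader above has two slips worth noting: `jpg.close` in the `finally` block lacks parentheses, so it never actually runs, and `jpg` is unbound there if `open` itself fails; the `with` block already closes the file in any case. A corrected streaming-download sketch (`download_image` is a hypothetical helper name):

```python
import requests

def download_image(url, dest_path, timeout=10):
    resp = requests.get(url, stream=True, timeout=timeout)
    resp.raise_for_status()                       # surface HTTP errors early
    with open(dest_path, 'wb') as fh:             # with-block handles closing
        for chunk in resp.iter_content(chunk_size=8192):
            fh.write(chunk)
```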
in range(0,n)]\nprint(findUniq(a))","repo_name":"tuan261220/python","sub_path":"baitap/findUniq.py","file_name":"findUniq.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71549110490","text":"\"\"\"tile.py:\nModule for filling tiles\nThis module provides all solutions to \nfill the assigned tiles using given bricks \n__author__ = 'Friedrich Foo'\n__pkuid__ = '1800011746'\n__email__ = '1800011746@pku.edu.cn'\n\"\"\"\n\n# Part initial: Input\nimport math\nans = []\nfinal = []\nmstr = input('The lenth of the tile: ')\nnstr = input('The width of the tile: ')\nastr = input('One lenth of the brick: ')\nbstr = input('Another lenth of the brick: ')\n\n# Part A: Using Recursion to Give Solutions\ndef judge(x0, y0, x1, y1):\n \"\"\"Function for judging whether bricks from one \n brick with coordinate(x0,y0) to another with \n coordinate(x1,y1) are filled.\n Return True if not filled.\n \"\"\"\n if x0 < 0 or y0 < 0:\n return False\n if x1 >= m or y1 >= n:\n return False\n for i in range(x0, x1 + 1):\n for j in range(y0, y1 + 1):\n if vis[i][j]:\n return False\n return True\n\ndef search(step):\n \"\"\"Main function to find all solutions:\n step means the really depth for recursion \n equaling (m * n) // (a * b)\n \"\"\"\n if step == 0:\n \"\"\"Recursion basement\n Return the final answer found\n \"\"\"\n final.append(ans[:])\n return\n for i in range(m):\n \"\"\"To find a nearest brick which \n is not filled\n \"\"\"\n if False in vis[i]:\n x, y = i, vis[i].index(False)\n break\n\n # a x b filling\n if judge(x, y, x + a - 1, y + b - 1):\n \"\"\"First way to fill\n Recursion part1\n \"\"\"\n x0, y0, x1, y1 = x, y, x + a - 1, y + b - 1\n now = []\n for i in range(x0, x1 + 1):\n for j in range(y0, y1 + 1):\n vis[i][j] = True\n now.append(idx[i][j])\n now.sort()\n ans.append(now)\n search(step - 1)\n ans.pop()\n for i in range(x0, x1 + 1):\n for j in range(y0, y1 + 1):\n vis[i][j] = False\n # b x a filling\n if judge(x, y, x + b - 1, y + a - 1) and a!=b:\n \"\"\"Second way to fill\n while a == b, skip this process\n Recursion part2\n \"\"\"\n x0, y0, x1, y1 = x, y, x + b - 1, y + a - 1\n now = []\n for i in range(x0, x1 + 1):\n for j in range(y0, y1 + 1):\n vis[i][j] = True\n now.append(idx[i][j])\n now.sort()\n ans.append(now)\n search(step - 1)\n ans.pop()\n for i in range(x0, x1 + 1):\n for j in range(y0, y1 + 1):\n vis[i][j] = False\n \n# Part B: Visualization Using Turtle\nimport turtle as tt\n\ndef draw_line1(x0, y0, x1, y1):\n \"\"\"Function to draw frame of the tile\n from coordinate(x0, y0) to (x1, y1)\n \"\"\"\n tt.speed(0)\n tt.pensize(5)\n tt.pencolor('black')\n tt.penup()\n tt.goto(x0, y0)\n tt.pendown()\n tt.begin_fill()\n tt.goto(x1, y1)\n tt.end_fill()\n tt.penup()\n\ndef draw_line2(x0, y0, x1, y1):\n \"\"\"Function to draw frame of filling bricks\n from coordinate(x0, y0) to (x1, y1)\n \"\"\"\n tt.speed(0)\n tt.pensize(1)\n tt.pencolor('blue')\n tt.penup()\n tt.goto(x0, y0)\n tt.pendown()\n tt.begin_fill()\n tt.goto(x1, y1)\n tt.end_fill()\n tt.penup()\n\ndef draw_num(number, x, y):\n \"\"\"Function to write down a corresponding number \n in the matrix\n Adjust the size of number \n to make it easy to be seen\n \"\"\"\n tt.speed(0)\n tt.penup()\n tt.pencolor('black')\n tt.goto(x, y)\n tt.pendown()\n tt.write(str(number),align=\"center\",\\\n font=(\"Arial\", size, \"normal\"))\n tt.penup()\n\ndef get_ij(block_id):\n \"\"\"Function to build connection \n between block_id to \n 
corresponding number\n \"\"\"\n for i in range(m):\n for j in range(n):\n if idx[i][j] == block_id:\n return i, j\n\ndef get_xy(block_id, width=50):\n \"\"\"Function to get the (x, y) \n to draw frame of bricks\n \"\"\"\n tt.speed(0)\n i, j = get_ij(block_id)\n x = (i + 1) * width\n y = (j - 1) * width\n return [x, x + width], [y, y + width]\n\ndef draw(answer, width=50):\n \"\"\"Main function to draw one answer from final\n Adjust the screen to make it easy to be seen\n tt as turtle\n bg as the screen\n \"\"\"\n tt.speed(0)\n bg = tt.Screen()\n total_x = m * width\n total_y = n * width\n hx = total_x / 2\n hy = total_y / 2\n sizelenth = max(hx+width,hy+width)\n bg.setworldcoordinates(-sizelenth,-sizelenth,\\\n sizelenth,sizelenth)\n for i in range(m + 1):\n x0, x1 = i * width, i * width\n y0, y1 = 0, total_y\n draw_line2(x0 - hx, y0 - hy, x1 - hx, y1 - hy)\n for j in range(n + 1):\n y0, y1 = j * width, j * width\n x0, x1 = 0, total_x\n draw_line2(x0 - hx, y0 - hy, x1 - hx, y1 - hy)\n for i in range(m):\n for j in range(n):\n x = i * width + 0.5 * width\n y = j * width + 0.5 * width\n draw_num(idx[i][j], x - hx, y - hy)\n for block in answer:\n x = []\n y = []\n for block_id in block:\n tx, ty = get_xy(block_id)\n x += tx\n y += ty\n x0 = min(x) - width\n x1 = max(x) - width\n y0 = min(y) + width\n y1 = max(y) + width\n draw_line1(x0 - hx, y0 - hy, x0 - hx, y1 - hy)\n draw_line1(x0 - hx, y0 - hy, x1 - hx, y0 - hy)\n draw_line1(x1 - hx, y1 - hy, x0 - hx, y1 - hy)\n draw_line1(x1 - hx, y1 - hy, x1 - hx, y0 - hy)\n tt.done()\n\n# Part C: Main Process to Finish the Assign\ndef main():\n \"\"\"Main function\n Test whether the input is valid\n Error if not\n Then to recursion part to find solution\n After which to visualize one solution\n \"\"\"\n global m, n, a, b, size, vis, idx\n try:\n m = int(mstr)\n n = int(nstr)\n a = int(astr)\n b = int(bstr)\n except Exception as e:\n print('Invalid Input')\n print('Reason:',e)\n else:\n m = int(mstr)\n n = int(nstr)\n a = int(astr)\n b = int(bstr)\n size = 12 - int(math.log1p(m*n+1))\n if (m * n) % (a * b) != 0:\n print('Invalid Lenth')\n else:\n idx = [[m * j + i for j in range(n)] for\\\n i in range(m)]\n vis = [[False for j in range(n)] for\\\n i in range(m)]\n search((n * m) // (a * b))\n print('Follows are all solutions to fill: ')\n for i in range(len(final)):\n print(final[i])\n print('There are '+str(len(final))+' ways to fill')\n numk = tt.numinput('Choose an Approach',\\\n 'Ranging from 1-'+\\\n str(len(final)),1,minval=1,\\\n maxval=len(final))\n num = int(numk)\n draw(final[num-1])\n\nif __name__ == '__main__':\n main()\n","repo_name":"FriedrichFoo/ichw","sub_path":"pyassign3/tile.py","file_name":"tile.py","file_ext":"py","file_size_in_byte":6864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8070646373","text":"\nimport io\nimport re\nimport datetime\nclass TextCleaner:\n\n text = \"\"\n f_name = \"txt\"+datetime.datetime.now().strftime(\"%d-%m %H %M\")\n links_list =[]\n special_chars = []\n dates_list = []\n\n #TODO adding one list of removed chars, add deleted matches position(low priority)\n def read_file(self):\n try:\n f = io.open(self.f_name, \"r\", encoding=\"utf-8\")\n self.text = f.read()\n except FileNotFoundError:\n print(\"File Not Found\")\n else:\n f.close()\n\n def write_file(self):\n with io.open(self.f_name, \"w\", encoding=\"utf-8\") as f:\n f.write(self.text)\n f.close()\n\n # Clears binded wrods\n def clear_binded_words(self):\n\n # Function for detecting 
binded words 'XXYy' splitting them\n\n def clean_XXYy():\n re_pattern_split = '(([A-Z]+([a-z])))'\n re_matches_split = re.findall(re_pattern_split, self.text)\n for match in re_matches_split:\n if match[0].__len__() > 2:\n fixed_match = match[0][:-2] + \" \" + match[0][-2:]\n self.text = self.text.replace(match[0], fixed_match)\n\n # pattern for detecting binded words 'xxYy' splitting them\n\n def clean_xxYy():\n re_pattern_split = '(([a-z])+([A-Z]))'\n re_matches_split = re.findall(re_pattern_split, self.text)\n for match in re_matches_split:\n fixed_match = match[0][:-1] + \" \" + match[0][-1]\n self.text = self.text.replace(match[0], fixed_match)\n\n clean_XXYy()\n clean_xxYy()\n\n # Clears links\n def clear_links(self):\n # pattern for main site names\n re_pattern = '(http|ftp|https)\\:\\/\\/([\\w_-]+(?:(?:\\.[\\w_-]+)+))([\\w.,@?^=%&:/~+#-]*[\\w@?^=%&/~+#-])?'\n re_match = re.findall(re_pattern, self.text)\n for match in re_match:\n match_str = ''.join(match[0]) + '://'\n match_str += ''.join(match[1])\n match_str += ''.join(match[2])\n self.text = self.text.replace(match_str, \" \")\n self.links_list.append(match_str)\n\n # Clears special chars\n\n def find_special_chars(self):\n\n # Finding special characters\n\n for char in self.text:\n if not char.isalnum() and not char.isspace():\n if not char in self.special_chars:\n self.special_chars.append(char)\n else:\n pass\n\n # Find dates using regex\n\n def clear_dates(self):\n\n re_pattern_date = '(([0-9]{2,4})(\\/|\\-|\\s)([0-9]{1,2})(\\/|\\-|\\s)([0-9]{1,4}))'\n re_match = re.findall(re_pattern_date, self.text)\n\n for match in re_match:\n self.dates_list.append(match[0])\n self.text = self.text.replace(match[0], \" \")\n\n # Clears special characters\n\n def clear_special_char(self):\n for char in self.text:\n if char in self.special_chars:\n self.text = self.text.replace(char, \" \")\n\n def clear_special_char_no_punctuation(self):\n for char in self.text:\n if char in self.special_chars and not (char=='.' 
or char==',' or char=='/'):\n self.text= self.text.replace(char,\" \")\n # Clears digitis\n\n def clear_digits(self):\n for char in self.text:\n if char.isdigit():\n self.text = self.text.replace(char, \" \")\n\n # Clears multispace\n\n def clear_multispace(self):\n self.text = ' '.join(self.text.split())\n\n # text words to list\n # return words_list\n\n def text_to_list(self):\n words= self.text.split()\n return words\n\n def lower_text(self):\n self.text = self.text.lower()\n\n def clear_text(self):\n self.clear_binded_words()\n self.clear_links()\n self.find_special_chars()\n self.clear_dates()\n self.clear_special_char()\n self.clear_digits()\n self.clear_multispace()\n self.lower_text()\n\n def clear_text_base(self):\n self.clear_binded_words()\n self.clear_links()\n self.find_special_chars()\n self.clear_dates()\n self.clear_digits()\n self.clear_multispace()\n\n\n def get_text(self):\n return self.text\n def get_trash(self):\n trash = [self.links_list,self.special_chars,self.dates_list]\n return trash\n def __init__(self, file_name=None,txt=None):\n\n if file_name is not None:\n self.f_name=file_name\n\n if txt is None:\n self.read_file()\n else:\n self.text =txt\n\ndef main():\n pass\n\nif __name__==\"__main__\":\n main()","repo_name":"45tooclose/python-new","sub_path":"classes/TextCleaner.py","file_name":"TextCleaner.py","file_ext":"py","file_size_in_byte":4632,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"17689042568","text":"from allhub.response import Response\n\n\nclass TagsMixin:\n def tag(self, owner, repo, tag_sha):\n url = \"/repos/{owner}/{repo}/git/tags/{tag_sha}\".format(\n owner=owner, repo=repo, tag_sha=tag_sha\n )\n self.response = Response(self.get(url), \"Tag\")\n return self.response.transform()\n\n def create_tag(self, owner, repo, tag, message, object, type, tagger):\n url = \"/repos/{owner}/{repo}/git/tags\".format(owner=owner, repo=repo)\n params = {\n \"tag\": tag,\n \"message\": message,\n \"object\": object,\n \"type\": type,\n \"tagger\": tagger,\n }\n self.response = Response(self.post(url, params=params), \"Tag\")\n return self.response.transform()\n","repo_name":"srinivasreddy/allhub","sub_path":"allhub/git_data/tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"70039857053","text":"class Solution:\n def __init__(self):\n self.hundred = \"Hundred\"\n self.split = [\"INVALID\", \"Thousand\", \"Million\", \"Billion\"]\n self.tens = [\"INVALID\", \"INVALID\", \"Twenty\", \"Thirty\", \"Forty\", \"Fifty\", \"Sixty\", \"Seventy\", \"Eighty\", \"Ninety\"]\n self.teens = [\"Ten\", \"Eleven\", \"Twelve\", \"Thirteen\", \"Fourteen\",\n \"Fifteen\", \"Sixteen\", \"Seventeen\", \"Eighteen\", \"Nineteen\"]\n self.ones = [\"Zero\", \"One\", \"Two\", \"Three\", \"Four\", \"Five\", \"Six\", \"Seven\", \"Eight\", \"Nine\"]\n\n def get_100_999(self, num):\n hundred = num // 100\n remain = self.get_10_99(num % 100)\n if hundred > 0:\n return [self.ones[hundred], self.hundred] + remain\n return remain\n\n def get_10_99(self, num):\n if num < 10:\n return self.get_0_9(num)\n elif num < 20:\n return [self.teens[num % 10]]\n else:\n return [self.tens[num // 10]] + self.get_0_9(num % 10)\n\n def get_0_9(self, num):\n if num == 0:\n return []\n return [self.ones[num]]\n\n def numberToWords(self, num: int) -> str:\n if num == 0:\n return self.ones[0]\n\n result = []\n for split in 
range(0, 4):\n remain = num % 1000\n num //= 1000\n if remain:\n result = self.get_100_999(remain) + ([self.split[split]] if split > 0 else []) + result\n return \" \".join(result)\n","repo_name":"forewing/lc","sub_path":"python/p273.py","file_name":"p273.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39006248754","text":"import gymnasium as gym\nimport gym_examples\n\nenv = gym.make('gym_examples/PhilEnv-v1')\nobservation, info = env.reset()\nfor i in range(1000):\n env.render()\n action = env.action_space.sample()\n observation, reward, terminated, truncated, info = env.step(action)\n print(f'Observation is {observation} \\n'\n f'Distance is {info} \\n'\n f'Reward is {reward}')\n if terminated or truncated:\n print(f'Number of timesteps: {i}')\n break\nenv.close()\n","repo_name":"philiprxwang/Visual-Servoing","sub_path":"gym-examples/test_script.py","file_name":"test_script.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"42489963026","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nQSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems\n\nThis module is developed by:\n Yalin Li \n\nThis module is under the University of Illinois/NCSA Open Source License.\nPlease refer to https://github.com/QSD-Group/QSDsan/blob/master/LICENSE.txt\nfor license details.\n'''\n\n\n# %%\n\nfrom ._units_of_measure import parse_unit\nfrom .utils.loading import load_data, data_path\ndata_path += '_impact_indicator.tsv'\n\n__all__ = ('ImpactIndicator', )\n\n\nclass ImpactIndicator:\n '''\n To handle different impact indicators in life cycle assessment.\n \n Parameters\n ----------\n ID : str\n ID of the ImpactIndicator.\n synonym : str\n Alternative ID of the ImpactIndicator.\n method : str\n Impact assessment method, e.g., 'TRACI'.\n category : str\n Category of the ImpactIndicator, e.g., 'human healt'.\n unit : str\n Unit of the ImpactIndicator, e.g., 'kg CO2-eq'.\n description : str\n Supplementary explanation.\n \n '''\n \n _indicators = {}\n _default_data = None\n \n __slots__ = ('_ID', '_synonym', '_method', '_category', '_unit', '_ureg_unit',\n '_unit_remaining', '_description')\n\n def __init__(self, ID, synonym='', method='', category='', unit='', description=''):\n \n if ID in ImpactIndicator._indicators.keys():\n raise ValueError(f'The ID \"{ID}\" is currently in use.')\n self._ID = ID\n self._unit = str(unit)\n self._ureg_unit, self._unit_remaining = parse_unit(unit)\n self._method = method\n self._category = category\n self._description = description\n ImpactIndicator._indicators[ID] = self\n if synonym and str(synonym) != 'nan':\n self.set_synonym(synonym)\n\n def __repr__(self):\n return f''\n\n def show(self):\n '''Show basic information about this indicator.'''\n if self.unit:\n info = f'ImpactIndicator: {self.ID} as {self.unit}'\n else:\n info = f'ImpactIndicator: {self.ID}'\n line = '\\n Synonyms : '\n synonyms = self.get_synonym()\n if synonyms:\n for synonym in synonyms[:-1]:\n line += synonym + '; '\n line += synonyms[-1]\n if len(line) > 40: line = line[:40] + '...'\n info += line\n info += f'\\n Method : {self.method or None}'\n info += f'\\n Category : {self.category or None}'\n line = f'\\n Description: {self.description or None}'\n if len(line) > 40: line = line[:40] + '...'\n info += line\n print(info)\n \n _ipython_display_ = 
show\n \n def set_synonym(self, synonym):\n '''\n Give the indicator a synonym.\n\n Parameters\n ----------\n ID : str\n Original ID.\n synonym : str\n New synonym of the indicator.\n\n '''\n dct = ImpactIndicator._indicators\n if synonym in dct.keys() and dct[synonym] is not self:\n raise ValueError(f'The synonym \"{synonym}\" already in use.')\n else:\n dct[synonym] = self\n \n def get_synonym(self):\n '''Return all synonyms of the indicator as a list.'''\n return tuple(i for i, j in ImpactIndicator._indicators.items()\n if j==self and i != self.ID)\n\n\n @classmethod\n def load_default_indicators(cls):\n '''Load all default indicators as in /data/_impact_indicator.xlsx.'''\n if cls._default_data is not None:\n data = cls._default_data\n else: data = load_data(path=data_path)\n for indicator in data.index:\n if indicator in cls._indicators.keys():\n continue\n else:\n new = cls.__new__(cls)\n new.__init__(ID=indicator,\n synonym=data.loc[indicator]['synonym'],\n unit=data.loc[indicator]['unit'],\n method=data.loc[indicator]['method'],\n category=data.loc[indicator]['category'],\n description=data.loc[indicator]['description'])\n cls._indicators[indicator] = new\n cls._default_data = data\n\n\n @classmethod\n def get_indicator(cls, ID):\n '''Get an indicator by its ID.'''\n return cls._indicators[ID]\n\n @classmethod\n def get_all_indicators(cls):\n '''Get all defined indicators.'''\n return tuple(i for i in set([i for i in ImpactIndicator._indicators.values()]))\n\n @property\n def ID(self):\n '''ID of the impact indicator.''' \n return self._ID\n\n @property\n def unit(self):\n '''Unit of the impact indicator.''' \n return self._unit\n @unit.setter\n def unit(self, i):\n self._unit = str(i)\n self._ureg_unit, self._unit_remaining = parse_unit(i)\n\n @property\n def method(self):\n '''Impact assessment method of the indicator.''' \n return self._method\n @method.setter\n def method(self, i):\n self._method = i\n\n @property\n def category(self):\n '''Impact category of the indicator.''' \n return self._category\n @category.setter\n def category(self, i):\n self._category = i\n\n @property\n def description(self):\n '''Description of the impact indicator.''' \n return self._description\n @description.setter\n def description(self, i):\n self._description = i\n\n\n\n\n# ImpactIndicator.load_default_indicators()\n\n\n\n","repo_name":"stetsonrowles/QSDsan","sub_path":"qsdsan/_impact_indicator.py","file_name":"_impact_indicator.py","file_ext":"py","file_size_in_byte":5646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"12274675217","text":"from txboto.gs.user import User\nfrom txboto.exception import InvalidAclError\n\nACCESS_CONTROL_LIST = 'AccessControlList'\nALL_AUTHENTICATED_USERS = 'AllAuthenticatedUsers'\nALL_USERS = 'AllUsers'\nDISPLAY_NAME = 'DisplayName'\nDOMAIN = 'Domain'\nEMAIL_ADDRESS = 'EmailAddress'\nENTRY = 'Entry'\nENTRIES = 'Entries'\nGROUP_BY_DOMAIN = 'GroupByDomain'\nGROUP_BY_EMAIL = 'GroupByEmail'\nGROUP_BY_ID = 'GroupById'\nID = 'ID'\nNAME = 'Name'\nOWNER = 'Owner'\nPERMISSION = 'Permission'\nSCOPE = 'Scope'\nTYPE = 'type'\nUSER_BY_EMAIL = 'UserByEmail'\nUSER_BY_ID = 'UserById'\n\n\nCannedACLStrings = ['private', 'public-read', 'project-private',\n 'public-read-write', 'authenticated-read',\n 'bucket-owner-read', 'bucket-owner-full-control']\n\"\"\"A list of Google Cloud Storage predefined (canned) ACL strings.\"\"\"\n\nSupportedPermissions = ['READ', 'WRITE', 'FULL_CONTROL']\n\"\"\"A list of supported ACL 
permissions.\"\"\"\n\n\nclass ACL(object):\n\n def __init__(self, parent=None):\n self.parent = parent\n self.entries = Entries(self)\n\n @property\n def acl(self):\n return self\n\n def __repr__(self):\n # Owner is optional in GS ACLs.\n if hasattr(self, 'owner'):\n entries_repr = ['Owner:%s' % self.owner.__repr__()]\n else:\n entries_repr = ['']\n acl_entries = self.entries\n if acl_entries:\n for e in acl_entries.entry_list:\n entries_repr.append(e.__repr__())\n return '<%s>' % ', '.join(entries_repr)\n\n # Method with same signature as txboto.s3.acl.ACL.add_email_grant(), to allow\n # polymorphic treatment at application layer.\n def add_email_grant(self, permission, email_address):\n entry = Entry(type=USER_BY_EMAIL, email_address=email_address,\n permission=permission)\n self.entries.entry_list.append(entry)\n\n # Method with same signature as txboto.s3.acl.ACL.add_user_grant(), to allow\n # polymorphic treatment at application layer.\n def add_user_grant(self, permission, user_id):\n entry = Entry(permission=permission, type=USER_BY_ID, id=user_id)\n self.entries.entry_list.append(entry)\n\n def add_group_email_grant(self, permission, email_address):\n entry = Entry(type=GROUP_BY_EMAIL, email_address=email_address,\n permission=permission)\n self.entries.entry_list.append(entry)\n\n def add_group_grant(self, permission, group_id):\n entry = Entry(type=GROUP_BY_ID, id=group_id, permission=permission)\n self.entries.entry_list.append(entry)\n\n def startElement(self, name, attrs, connection):\n if name.lower() == OWNER.lower():\n self.owner = User(self)\n return self.owner\n elif name.lower() == ENTRIES.lower():\n self.entries = Entries(self)\n return self.entries\n else:\n return None\n\n def endElement(self, name, value, connection):\n if name.lower() == OWNER.lower():\n pass\n elif name.lower() == ENTRIES.lower():\n pass\n else:\n setattr(self, name, value)\n\n def to_xml(self):\n s = '<%s>' % ACCESS_CONTROL_LIST\n # Owner is optional in GS ACLs.\n if hasattr(self, 'owner'):\n s += self.owner.to_xml()\n acl_entries = self.entries\n if acl_entries:\n s += acl_entries.to_xml()\n s += '' % ACCESS_CONTROL_LIST\n return s\n\n\nclass Entries(object):\n\n def __init__(self, parent=None):\n self.parent = parent\n # Entries is the class that represents the same-named XML\n # element. 
entry_list is the list within this class that holds the data.\n self.entry_list = []\n\n def __repr__(self):\n entries_repr = []\n for e in self.entry_list:\n entries_repr.append(e.__repr__())\n return '' % ', '.join(entries_repr)\n\n def startElement(self, name, attrs, connection):\n if name.lower() == ENTRY.lower():\n entry = Entry(self)\n self.entry_list.append(entry)\n return entry\n else:\n return None\n\n def endElement(self, name, value, connection):\n if name.lower() == ENTRY.lower():\n pass\n else:\n setattr(self, name, value)\n\n def to_xml(self):\n if not self.entry_list:\n return ''\n s = '<%s>' % ENTRIES\n for entry in self.entry_list:\n s += entry.to_xml()\n s += '' % ENTRIES\n return s\n\n\n# Class that represents a single (Scope, Permission) entry in an ACL.\nclass Entry(object):\n\n def __init__(self, scope=None, type=None, id=None, name=None,\n email_address=None, domain=None, permission=None):\n if not scope:\n scope = Scope(self, type, id, name, email_address, domain)\n self.scope = scope\n self.permission = permission\n\n def __repr__(self):\n return '<%s: %s>' % (self.scope.__repr__(), self.permission.__repr__())\n\n def startElement(self, name, attrs, connection):\n if name.lower() == SCOPE.lower():\n # The following if statement used to look like this:\n # if not TYPE in attrs:\n # which caused problems because older versions of the\n # AttributesImpl class in the xml.sax library neglected to include\n # a __contains__() method (which Python calls to implement the\n # 'in' operator). So when you use the in operator, like the if\n # statement above, Python invokes the __getiter__() method with\n # index 0, which raises an exception. More recent versions of\n # xml.sax include the __contains__() method, rendering the in\n # operator functional. The work-around here is to formulate the\n # if statement as below, which is the legal way to query\n # AttributesImpl for containment (and is also how the added\n # __contains__() method works). 
At one time gsutil disallowed\n # xmlplus-based parsers, until this more specific problem was\n # determined.\n if TYPE not in attrs:\n raise InvalidAclError('Missing \"%s\" in \"%s\" part of ACL' %\n (TYPE, SCOPE))\n self.scope = Scope(self, attrs[TYPE])\n return self.scope\n elif name.lower() == PERMISSION.lower():\n pass\n else:\n return None\n\n def endElement(self, name, value, connection):\n if name.lower() == SCOPE.lower():\n pass\n elif name.lower() == PERMISSION.lower():\n value = value.strip()\n if value not in SupportedPermissions:\n raise InvalidAclError('Invalid Permission \"%s\"' % value)\n self.permission = value\n else:\n setattr(self, name, value)\n\n def to_xml(self):\n s = '<%s>' % ENTRY\n s += self.scope.to_xml()\n s += '<%s>%s' % (PERMISSION, self.permission, PERMISSION)\n s += '' % ENTRY\n return s\n\n\nclass Scope(object):\n\n # Map from Scope type.lower() to lower-cased list of allowed sub-elems.\n ALLOWED_SCOPE_TYPE_SUB_ELEMS = {\n ALL_AUTHENTICATED_USERS.lower(): [],\n ALL_USERS.lower(): [],\n GROUP_BY_DOMAIN.lower(): [DOMAIN.lower()],\n GROUP_BY_EMAIL.lower(): [\n DISPLAY_NAME.lower(), EMAIL_ADDRESS.lower(), NAME.lower()],\n GROUP_BY_ID.lower(): [DISPLAY_NAME.lower(), ID.lower(), NAME.lower()],\n USER_BY_EMAIL.lower(): [\n DISPLAY_NAME.lower(), EMAIL_ADDRESS.lower(), NAME.lower()],\n USER_BY_ID.lower(): [DISPLAY_NAME.lower(), ID.lower(), NAME.lower()]\n }\n\n def __init__(self, parent, type=None, id=None, name=None,\n email_address=None, domain=None):\n self.parent = parent\n self.type = type\n self.name = name\n self.id = id\n self.domain = domain\n self.email_address = email_address\n if self.type.lower() not in self.ALLOWED_SCOPE_TYPE_SUB_ELEMS:\n raise InvalidAclError('Invalid %s %s \"%s\" ' %\n (SCOPE, TYPE, self.type))\n\n def __repr__(self):\n named_entity = None\n if self.id:\n named_entity = self.id\n elif self.email_address:\n named_entity = self.email_address\n elif self.domain:\n named_entity = self.domain\n if named_entity:\n return '<%s: %s>' % (self.type, named_entity)\n else:\n return '<%s>' % self.type\n\n def startElement(self, name, attrs, connection):\n if (not name.lower() in\n self.ALLOWED_SCOPE_TYPE_SUB_ELEMS[self.type.lower()]):\n raise InvalidAclError('Element \"%s\" not allowed in %s %s \"%s\" ' %\n (name, SCOPE, TYPE, self.type))\n return None\n\n def endElement(self, name, value, connection):\n value = value.strip()\n if name.lower() == DOMAIN.lower():\n self.domain = value\n elif name.lower() == EMAIL_ADDRESS.lower():\n self.email_address = value\n elif name.lower() == ID.lower():\n self.id = value\n elif name.lower() == NAME.lower():\n self.name = value\n else:\n setattr(self, name, value)\n\n def to_xml(self):\n s = '<%s type=\"%s\">' % (SCOPE, self.type)\n if (self.type.lower() == ALL_AUTHENTICATED_USERS.lower()\n or self.type.lower() == ALL_USERS.lower()):\n pass\n elif self.type.lower() == GROUP_BY_DOMAIN.lower():\n s += '<%s>%s' % (DOMAIN, self.domain, DOMAIN)\n elif (self.type.lower() == GROUP_BY_EMAIL.lower()\n or self.type.lower() == USER_BY_EMAIL.lower()):\n s += '<%s>%s' % (EMAIL_ADDRESS, self.email_address,\n EMAIL_ADDRESS)\n if self.name:\n s += '<%s>%s' % (NAME, self.name, NAME)\n elif (self.type.lower() == GROUP_BY_ID.lower()\n or self.type.lower() == USER_BY_ID.lower()):\n s += '<%s>%s' % (ID, self.id, ID)\n if self.name:\n s += '<%s>%s' % (NAME, self.name, NAME)\n else:\n raise InvalidAclError('Invalid scope type \"%s\" ', self.type)\n\n s += '' % SCOPE\n return 
s\n","repo_name":"silveregg/txboto","sub_path":"txboto/gs/acl.py","file_name":"acl.py","file_ext":"py","file_size_in_byte":10389,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"20778919138","text":"import timeit\r\nimport time\r\nimport random\r\nfrom prettytable import PrettyTable\r\n\r\ndef is_sorted(data) -> bool:\r\n return all(data[i] <= data[i + 1] for i in range(len(data) - 1))\r\n\r\ndef bogosort(data) -> list:\r\n while not is_sorted(data):\r\n random.shuffle(data)\r\n return data\r\n\r\nmult= [10, 100, 1000, 10000, 100000]\r\ntemps = []\r\nfor i in range(len(mult)):\r\n a = []\r\n for j in range(mult[i]):\r\n a.append(random.randint(-1000,1000))\r\n temps.append(timeit.timeit(\"bogosort(a)\",setup=\"from __main__ import bogosort, a\", number=1))\r\n \r\ntable= PrettyTable([\"Nombre d'éléments\", \"Temps de tri\"], padding_width=5)\r\ntable.title = \"Quantum Bogosort\"\r\nfor k in range(len(mult)):\r\n table.add_row([mult[k], temps[k]]) #notation scientifique a 3chiffres après la virgule\r\ntable.align[\"Nombre d'éléments\"] = \"l\"\r\ntable.align[\"Temps de tri\"] = \"r\"\r\n\r\nprint(table)\r\n","repo_name":"BeowolfK/PEIP","sub_path":"rapid_sort.py","file_name":"rapid_sort.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"16950126377","text":"# -*- coding: utf-8 -*-\nimport operator\nfrom functools import partial\nfrom logging import getLogger\nfrom typing import Callable\nfrom typing import Dict\nfrom moneybot.clients import Poloniex\nfrom moneybot.market.adapters import MarketAdapter\nfrom moneybot.market.history import MarketHistory\nfrom moneybot.market.state import MarketState\nfrom moneybot.strategy import ProposedTrade\n\n\nlogger = getLogger(__name__)\n\n\nclass LiveMarketAdapter(MarketAdapter):\n\n def __init__(\n self,\n market_history: MarketHistory,\n fiat: str,\n ) -> None:\n self.polo = Poloniex.get_client()\n self.market_history = market_history\n self.balances = self.get_balances()\n self.fiat = fiat\n\n def get_balances(self) -> Dict[str, float]:\n bals = self.polo.returnCompleteBalances()\n all_balances = {}\n for coin, bal, in bals.items():\n avail = float(bal['available'])\n if avail > 0:\n all_balances[coin] = avail\n return all_balances\n\n def execute(\n self,\n proposed_trade: ProposedTrade,\n ) -> Dict[str, float]:\n self._place_order(proposed_trade, self.market_state)\n return self.get_balances()\n\n '''\n Private methods\n '''\n\n def _adjust(\n self,\n val: float,\n operator: Callable,\n tweak: float = 0.001,\n ) -> float:\n '''\n Pass in `operator.__add__`\n or `operator.__sub__`\n to move `val` up or down by `tweak`.\n '''\n return operator(val, (val * tweak))\n\n def _adjust_up(self, val: float, **kwargs) -> float:\n return self._adjust(val, operator.__add__, **kwargs)\n\n def _adjust_down(self, val: float, **kwargs) -> float:\n return self._adjust(val, operator.__sub__, **kwargs)\n\n def _proposed_trade_measurement(\n self,\n direction: str,\n market: str,\n price: float,\n amount: float,\n order_status: str,\n ) -> Dict:\n return {\n 'measurement': 'proposedTrade',\n 'tags': {\n 'order_status': order_status,\n },\n 'fields': {\n 'direction': direction,\n 'market': market,\n 'price': price,\n 'amount': amount,\n }\n }\n\n def _purchase_helper(\n self,\n direction: str,\n market: str,\n price: float,\n amount: float,\n purchase_fn: Callable,\n adjust_fn: Callable,\n ) -> Dict:\n 
make_measurement = partial(self._proposed_trade_measurement,\n direction, market, price, amount)\n try:\n res = purchase_fn(\n market,\n price,\n amount,\n # Cancel order if not fulfilled in entirity at this price\n orderType='fillOrKill',\n )\n measurement = make_measurement('filled')\n logger.debug(str(measurement))\n # If we can't fill the order at this price,\n except:\n measurement = make_measurement('killed')\n logger.debug(str(measurement))\n # recursively again at a (higher / lower) price\n adjusted_price = adjust_fn(price)\n return self._purchase_helper(\n direction,\n market,\n adjusted_price,\n amount,\n purchase_fn,\n adjust_fn\n )\n return res\n\n def _place_order(\n self,\n proposed_trade: ProposedTrade,\n market_state: MarketState,\n ) -> Dict:\n\n # in the language of poloniex,\n # buying a market's quote currency is a \"buy\"\n if proposed_trade.buy_coin == proposed_trade.market_quote_currency:\n return self._purchase_helper(\n 'buy',\n proposed_trade.market_name,\n proposed_trade.market_price,\n proposed_trade.buy_amount,\n self.polo.buy,\n # We try to buy low,\n # But don't always get to,\n # so we adjust up if we must.\n self._adjust_up,\n )\n\n # in the language of poloniex,\n # buying a market's base currency is a \"sell\"\n elif proposed_trade.buy_coin == proposed_trade.market_base_currency:\n return self._purchase_helper(\n 'sell',\n proposed_trade.market_name,\n proposed_trade.market_price,\n proposed_trade.sell_amount,\n self.polo.sell,\n # We try to sell high,\n # But don't always get to,\n # so we adjust down if we must.\n self._adjust_down,\n )\n\n return {}\n","repo_name":"JakeHartnell/moneybot","sub_path":"moneybot/market/adapters/live.py","file_name":"live.py","file_ext":"py","file_size_in_byte":4800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16292251909","text":"import math\nfrom display import *\n\n\n # IMPORANT NOTE\n\n # Ambient light is represeneted by a color value\n\n # Point light sources are 2D arrays of doubles.\n # - The fist index (LOCATION) represents the vector to the light.\n # - The second index (COLOR) represents the color.\n\n # Reflection constants (ka, kd, ks) are represened as arrays of\n # doubles (red, green, blue)\n\nAMBIENT = 0\nDIFFUSE = 1\nSPECULAR = 2\nLOCATION = 0\nCOLOR = 1\nSPECULAR_EXP = 4\n\n#lighting functions\ndef get_lighting(normal, view, ambient, light, areflect, dreflect, sreflect ):\n lhat = [light[0][:], light[1]]\n nhat = normal[:]\n vhat = view[:]\n normalize(lhat[0])\n normalize(nhat)\n normalize(vhat)\n amb = calculate_ambient(ambient, areflect)\n diff = calculate_diffuse(lhat, dreflect, nhat)\n spec = calculate_specular(lhat, sreflect, vhat, nhat)\n limit_color(amb)\n limit_color(diff)\n limit_color(spec)\n output = [amb[x] + diff[x] + spec[x] for x in range(3)]\n limit_color(output)\n return output\n\ndef calculate_ambient(alight, areflect):\n # return [0,0,0]\n return [alight[x] * areflect[x] for x in range(3)]\n\ndef calculate_diffuse(light, dreflect, normal):\n # return [0,0,0]\n return [light[1][x] * dreflect[x] * dot_product(normal, light[0]) for x in range(3)]\n\ndef calculate_specular(light, sreflect, view, normal):\n # return [0,0,0]\n ndotl = dot_product(normal, light[0])\n phatmult2 = [2 * ndotl * x for x in normal]\n rhat = [phatmult2[x] - light[0][x] for x in range(3)]\n rdotv = dot_product(rhat, view)\n if rdotv < 0: return [0,0,0]\n rdotvton = rdotv ** SPECULAR_EXP\n return [light[1][x] * sreflect[x] * rdotvton for x in 
range(3)]\n\ndef limit_color(color):\n for i in range(3):\n if color[i] < 0:\n color[i] = 0\n elif color[i] > 255:\n color[i] = 255\n else:\n color[i] = int(color[i])\n\n#vector functions\n#normalize vetor, should modify the parameter\ndef normalize(vector):\n magnitude = math.sqrt( vector[0] * vector[0] +\n vector[1] * vector[1] +\n vector[2] * vector[2])\n for i in range(3):\n vector[i] = vector[i] / magnitude\n\n#Return the dot porduct of a . b\ndef dot_product(a, b):\n return a[0] * b[0] + a[1] * b[1] + a[2] * b[2]\n\n#Calculate the surface normal for the triangle whose first\n#point is located at index i in polygons\ndef calculate_normal(polygons, i):\n\n A = [0, 0, 0]\n B = [0, 0, 0]\n N = [0, 0, 0]\n\n A[0] = polygons[i+1][0] - polygons[i][0]\n A[1] = polygons[i+1][1] - polygons[i][1]\n A[2] = polygons[i+1][2] - polygons[i][2]\n\n B[0] = polygons[i+2][0] - polygons[i][0]\n B[1] = polygons[i+2][1] - polygons[i][1]\n B[2] = polygons[i+2][2] - polygons[i][2]\n\n N[0] = A[1] * B[2] - A[2] * B[1]\n N[1] = A[2] * B[0] - A[0] * B[2]\n N[2] = A[0] * B[1] - A[1] * B[0]\n\n return N\n","repo_name":"Kebin-Lin/mks66-lighting","sub_path":"gmath.py","file_name":"gmath.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41277109278","text":"def oneEditAway(first,second):\n if (len(first) == len(second)):\n return oneEditReplace(first,second)\n elif (len(first) +1 == len(second)):\n return oneEditInsert(first,second)\n elif (len(first) -1 == len(second)):\n return oneEditInsert(second, first)\n \n return False\n\n\ndef oneEditReplace(s1, s2):\n foundDifference = False\n i = 0\n\n for i in range(len(s1)):\n if (s1[i] != s2[i]):\n if (foundDifference):\n return False\n foundDifference = True\n \n return True\n\ndef oneEditInsert(s1,s2):\n index1 = 0\n index2 = 0\n\n while index2 < len(s2) and index1 < len(s1):\n if(s1[index1] != s2[index2]):\n if index1 != index2:\n return False\n index2+=1\n else:\n index1+=1\n index2+=1\n \n return True\n\nif __name__ == \"__main__\":\n print(oneEditAway(\"pale\",\"ple\"))\n\n","repo_name":"TravisEEng/MyCTCI","sub_path":"Python/Ch1/OneAway.py","file_name":"OneAway.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19203197923","text":"arr = ['1','-1']\n\nopp = ['-','+']\n\n'''\ncho i chạy từ 2 đến 10\nupdate toán tử và số phần từ theo j,số chữ số tăng : 1,12,123,1234... 
\narr[j] += str(i) -> gắn lại gtri cho các phần tử cũ\n'''\n\n\nfor i in range (2,10):\n for j in range(len(arr)):\n for k in opp:\n arr.append(arr[j]+k+str(i))\n arr[j] += str(i)\n\nfor i in arr:\n if eval(i) ==100:\n print(i)","repo_name":"linh269/litextension_excerises","sub_path":"Algorithms/ex1_AG.py","file_name":"ex1_AG.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"69926018013","text":"from flask import Flask, render_template, request, redirect, url_for\nimport os\nimport pymongo\nfrom dotenv import load_dotenv\n\n# to use ObjectId to access an item in mongo db\nfrom bson.objectid import ObjectId\n\n# To allow the use of .env file\nload_dotenv()\n\napp = Flask(__name__)\n\n# this is to retrieve info/setting from .\n# env file via the arg in os.environ.get()\nMONGO_URI = os.environ.get('MONGO_URI')\n\nDB_NAME = 'tgc10_new_shelter'\nclient = pymongo.MongoClient(MONGO_URI)\ndb = client[DB_NAME]\n\n\n@app.route('/animals')\ndef show_all_animals():\n animals = db.animals.find()\n return render_template('show_animals.template.html',\n htmlanimals=animals)\n\n\n@app.route('/animals/create')\ndef show_create_animals():\n return render_template('create_animals.template.html')\n\n\n@app.route('/animals/create', methods=['POST'])\ndef process_create_animals():\n name = request.form.get('name')\n breed = request.form.get('breed')\n age = request.form.get('age')\n animal_type = request.form.get('type')\n\n # insert only ONE new document\n db.animals.insert_one(\n {\n \"name\": name,\n \"age\": age,\n \"breed\": breed,\n \"type\": animal_type\n }\n )\n\n return redirect(url_for('show_all_animals'))\n\n# this route is to get the id to be deleted\n# and prompt the user for confirmation\n\n\n@app.route('/animals//delete')\ndef delete_animal(animal_id):\n animal = db.animals.find_one(\n {\n \"_id\": ObjectId(animal_id)\n }\n )\n\n return render_template('confirm_delete_animal.template.html',\n animal_to_delete=animal)\n\n\n@app.route('/animals//delete', methods=[\"POST\"])\ndef process_delete_animal(animal_id):\n db.animals.remove({\n \"_id\": ObjectId(animal_id)\n })\n\n return redirect(url_for('show_all_animals'))\n\n\n@app.route('/animals//update')\ndef show_update_animal(animal_id):\n animal_to_edit = db.animals.find_one(\n {\n \"_id\": ObjectId(animal_id)\n }\n )\n\n return render_template('show_update_animal.template.html',\n html_animal_to_edit=animal_to_edit)\n\n\n@app.route('/animals//update', methods=['POST'])\ndef process_update_animals(animal_id):\n db.animals.update_one(\n {\n \"_id\": ObjectId(animal_id)\n },\n {\n \"$set\": request.form\n }\n )\n\n return redirect(url_for('show_all_animals'))\n\n\nif __name__ == '__main__':\n app.run(host=os.environ.get('IP'), port=os.environ.get('PORT'),\n debug=True)\n","repo_name":"simplyedwin/tgc10-flask-mongodb","sub_path":"shelter/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38881516327","text":"from selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\nclass PageManager(object):\n driver = None\n page = None\n\n def __init__(self, driver, page=1):\n self.driver = driver\n self.page = page\n\n def next(self):\n try:\n self.page += 1\n 
self.driver.find_element_by_xpath('//*[@class=\"pagi_nav\"]//a[contains(text(), \"next\")]').click()\n except:\n print(f'Failed to reach page {self.page + 1}')\n self.page -= 1\n raise\n\n\nif __name__ == '__main__':\n from scraper import Scraper\n scraper = Scraper(base_url='http://romhustler.net/roms/atari2600')\n page_manager = PageManager(driver=scraper.driver)\n while True:\n try:\n page_manager.next()\n except:\n break\n print('Done!')\n","repo_name":"cwalters492/retropie","sub_path":"scrapers/romhustler/page_manager.py","file_name":"page_manager.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33751777701","text":"from qgis.PyQt.QtCore import QSettings, QTranslator, QCoreApplication\r\nfrom qgis.PyQt.QtGui import QIcon\r\nfrom qgis.PyQt.QtWidgets import QAction, QFileDialog\r\nfrom qgis.core import QgsProject, QgsVectorLayer, QgsMapLayer\r\nimport csv\r\n\r\n\r\nclass Node(QgsVectorLayer):\r\n \"\"\"A node class for A* Pathfinding\"\"\"\r\n\r\n def __init__(self, parent=None, position=None):\r\n\r\n self.parent = parent\r\n self.position = position\r\n # if statement to see if the node is defined \r\n if self.tot_Layer.defined(position.x, position.y):\r\n self.defined = True\r\n else:\r\n self.defined = False\r\n\r\n self.g = 0\r\n self.h = 0\r\n self.f = 0\r\n\r\n def __eq__(self, other):\r\n return self.position == other.position\r\n\r\n\r\nclass Astar:\r\n @staticmethod\r\n def path(self, tot_layer, start_point, end_point):\r\n \"\"\"Returns a list of tuples as a path from the given start to the given end in the given maze\"\"\"\r\n\r\n # Create start and end node\r\n start_node = Node(None, start_point)\r\n start_node.g = start_node.h = start_node.f = 0\r\n end_node = Node(None, end_point)\r\n end_node.g = end_node.h = end_node.f = 0\r\n\r\n # Initialize both open and closed list\r\n open_list = []\r\n closed_list = []\r\n\r\n # Add the start node\r\n open_list.append(start_node)\r\n\r\n # Loop until you find the end\r\n while len(open_list) > 0:\r\n\r\n # Get the current node\r\n current_node = open_list[0]\r\n current_index = 0\r\n for index, item in enumerate(open_list):\r\n if item.f < current_node.f:\r\n current_node = item\r\n current_index = index\r\n\r\n # Pop current off open list, add to closed list\r\n open_list.pop(current_index)\r\n closed_list.append(current_node)\r\n\r\n # Found the goal\r\n if current_node == end_node:\r\n path = []\r\n current = current_node\r\n while current is not None:\r\n path.append(current.position)\r\n current = current.parent\r\n return path[::-1]\r\n\r\n # Generate children\r\n children = []\r\n for new_position in [(0, -1), (0, 1), (-1, 0), (1, 0), (-1, -1), (-1, 1), (1, -1),\r\n (1, 1)]: # Adjacent squares this needs to be adjusted\r\n\r\n # Get node position\r\n node_position = (current_node.position[0] + new_position[0], current_node.position[1] + new_position[1])\r\n\r\n # Make sure within range\r\n if node_position[0] > (len(tot_layer) - 1) or node_position[0] < 0 or node_position[1] > (\r\n len(tot_layer[len(tot_layer) - 1]) - 1) or node_position[1] < 0:\r\n continue\r\n\r\n # Make sure walkable terrain\r\n if tot_layer[node_position[0]][node_position[1]] != 0:\r\n continue\r\n\r\n # Create new node\r\n new_node = Node(current_node, node_position)\r\n\r\n # Append\r\n children.append(new_node)\r\n\r\n # Loop through children\r\n for child in children:\r\n\r\n # checking if child is defined\r\n\r\n # Child is on the closed 
list\r\n for closed_child in closed_list:\r\n if child == closed_child:\r\n continue\r\n\r\n # Create the f, g, and h values\r\n child.g = current_node.g + 1\r\n child.h = ((child.position[0] - end_node.position[0]) ** 2) + (\r\n (child.position[1] - end_node.position[1]) ** 2)\r\n child.f = child.g + child.h\r\n\r\n # Child is already in the open list\r\n for open_node in open_list:\r\n if child == open_node and child.g > open_node.g:\r\n continue\r\n\r\n # Add the child to the open list\r\n open_list.append(child)\r\n\r\n\r\nclass Totdet:\r\n def __init__(self, pos_layer, wind_layer, water_layer):\r\n self.initialPos = pos_layer # incomplete this should reference the\r\n self.finalPos = pos_layer\r\n self.windLayer = wind_layer\r\n self.waterLayer = water_layer\r\n\r\n def math_model(self):\r\n # insert Math model with wind and water data\r\n # initial position and final position describe only the two adjacent points not the goal\r\n\r\n return 1\r\n\r\n def point_passed(self, point):\r\n x = 8 # size of grid in km^2\r\n number_to_define = (x / 2 + 1) ** 2 # number of nodes to define\r\n step = 2 # unit step of grid usually 2 km\r\n index = [0.0, 0.0]\r\n offset = [-x / (step * 2), -x / (step * 2)] # determines where the starting node is in the x,x grid\r\n relative_position = []\r\n defined_points = []\r\n\r\n # defines all nodes relative to the given node with the determined offset\r\n i = int(number_to_define)\r\n while i != 0:\r\n\r\n if index[0] < (x / 2 + 1):\r\n relative_position.append([index[0] + offset[0], index[1] + offset[1]])\r\n index[0] += 1\r\n i -= 1\r\n else:\r\n index[0] = 0\r\n index[1] += 1\r\n\r\n positions_to_define = [point for y in relative_position]\r\n\r\n # i is already zero from previous loop\r\n while i != number_to_define:\r\n positions_to_define[i] = [positions_to_define[i][0] + relative_position[i][0],\r\n positions_to_define[i][1] + relative_position[i][1]]\r\n i += 1\r\n\r\n i = 0\r\n while i != number_to_define:\r\n defined_points.append(self.math_model(positions_to_define[i]))\r\n i += 1\r\n return defined_points\r\n # need to define the points still\r\n\r\n @staticmethod\r\n def line_passed(self, geometry_linspace, r):\r\n # Calculation with self dot mathModel in r around the line\r\n return 1\r\n","repo_name":"dbtheuerkauf/ThalesProject","sub_path":"path_gen.py","file_name":"path_gen.py","file_ext":"py","file_size_in_byte":6126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2001844974","text":"\"\"\"Menu options models.\"\"\"\n\n# Django\nfrom django.db import models\nfrom django.db.backends.base.operations import BaseDatabaseOperations\nfrom django.utils.translation import gettext_lazy as _\n\n# Backend test\nfrom backend_test.menus.models.meals import Meal\nfrom backend_test.menus.models.menus import Menu\nfrom backend_test.utils.models import TimeStampedModel\n\n\nclass MenuOption(TimeStampedModel):\n \"\"\"\n Menu option model class.\n\n Extend from TimeStampedModel for default timestamp fields. 
Additionally\n add some extra fields.\n \"\"\"\n\n menu = models.ForeignKey(\n Menu,\n verbose_name=_(\"menu\"),\n on_delete=models.PROTECT,\n related_name=\"menu_options\",\n related_query_name=\"menu_option\",\n )\n meal = models.ForeignKey(\n Meal,\n verbose_name=_(\"meal\"),\n on_delete=models.PROTECT,\n related_name=\"menu_options\",\n related_query_name=\"menu_option\",\n )\n option = models.PositiveSmallIntegerField(\n _(\"option\"),\n default=BaseDatabaseOperations.integer_field_ranges[\n models.PositiveSmallIntegerField.__name__\n ][1],\n help_text=_(\"Option number in menu (useful for ordering).\"),\n )\n\n class Meta(TimeStampedModel.Meta):\n \"\"\"Meta options.\"\"\"\n\n verbose_name = _(\"menu option\")\n verbose_name_plural = _(\"menu options\")\n constraints = [\n models.UniqueConstraint(\n fields=[\"menu\", \"meal\"],\n name=\"menu_option_menu_meal_unique\",\n )\n ]\n ordering = [\"option\", \"-created\"]\n db_table = \"menus_menu_option\"\n\n def __str__(self) -> str:\n \"\"\"Return instance string representation\"\"\"\n return f\"{self.option}\"\n","repo_name":"atahualpasf/backend-test-silva","sub_path":"cornershop-backend-test/backend_test/menus/models/menu_options.py","file_name":"menu_options.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36572186294","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 18 13:03:46 2017\n\n@author: u300844\n\"\"\"\n\nimport typhon as tp\nimport numpy as np\nfrom os.path import join\nimport os\nimport matplotlib.pyplot as plt\n\n\nfile = \"/scratch/uni/u237/users/tmachnitzki/Bachelor_Thesis/Verschiedenes/sadata_stefan_kinne.csv\"\n#file = \"/Users/u300844/Downloads/sadata_stefan_kinne.d\"\n\nwith open(file, 'r') as f:\n sk = f.readlines()\n \ntropical,midsum,midwin,subsum,subwin = [{} for x in range(5)]\natms = [tropical,midsum,midwin,subsum,subwin]\n\n#atms = [tropical]\n \ncounter = 0\nfor atm in atms:\n add = counter*80\n atm['read'] = sk[6+add:80+add]\n counter += 1\n \n \n atm['z'] = np.array([])\n atm['p'] = np.array([])\n atm['t'] = np.array([])\n atm['H2O'] = np.array([])\n \n for line in atm['read']:\n atm['z'] = np.append(atm['z'],float(line.split()[0])*1000)\n atm['p'] = np.append(atm['p'],float(line.split()[1])*100)\n atm['t'] = np.append(atm['t'],float(line.split()[2]))\n atm['H2O'] = np.append(atm['H2O'],float(line.split()[4])*1e-6)\n \n \n H2O_integrated_stefan = tp.atmosphere.iwv(atm['H2O'][:11],atm['p'][:11],atm['t'][:11],atm['z'][:11])\n print('Value from Stefans data: %f' %(H2O_integrated_stefan))\n\n\n#%%\n\ndef get_fascod_atmosphere(fascod_path, season):\n \"\"\"Returns the temperature profile and mixing ratio profiles for H2O, O3,\n N2O, CO, CH4 from a standard sounding or any other giving sounding.\n Instead of returning specific values, interpolated functions are returned.\n\n Parameters:\n fascod_path (str):\n season (str):\n\n Returns:\n dict:\n \"\"\"\n columns = ['t', 'z', 'H2O', 'CO2', 'O3', 'N2O', 'CO', 'CH4']\n atmosphere = {}\n\n for name in columns:\n path = join(fascod_path, season, '{}.{}.xml'.format(season, name))\n f = tp.arts.xml.load(path)\n pres = f.grids[0]\n atmosphere[name] = f.data.reshape(-1)\n\n atmosphere['p'] = pres\n\n return atmosphere\n\nfas_path = '/scratch/uni/u237/users/tlang/arts-xml-data/planets/Earth/Fascod/'\n\natm_names = sorted((os.listdir('/scratch/uni/u237/users/tlang/arts-xml-data/planets/Earth/Fascod') ))\ndel 
atm_names[atm_names.index('README')]\n#atm_names = [\"tropical\"]\n\nfor atm in atm_names:\n fascod_atm = get_fascod_atmosphere(fas_path,season=atm)\n \n H2O_integrated = tp.atmosphere.iwv(fascod_atm['H2O'][1:12],fascod_atm['p'][1:12],fascod_atm['t'][1:12],fascod_atm['z'][1:12])\n print('H2O from fascod %s: %f' %(atm,H2O_integrated))\n \n \n#%% \n#fig = plt.figure(figsize =(16,9)) \n#plt.suptitle(\"Tropical atmosphere compare\") \n#ax1 = fig.add_subplot(2,2,1)\n#ax2 = fig.add_subplot(2,2,2)\n#ax3 = fig.add_subplot(2,2,3)\n#ax4 = fig.add_subplot(2,2,4)\n#\n#ax1.plot(fascod_atm['H2O'],fascod_atm['z'],color='b',label='fascod')\n#ax1.plot(tropical['H2O'],tropical['z'],color='red', ls = \"--\",label='Stefan')\n#ax1.set_xlabel(\"IWV\")\n#ax1.set_ylabel(\"Height\")\n#ax1.legend(loc='best')\n#ax1.grid(True)\n#\n#ax2.plot(fascod_atm['p'],fascod_atm['z'],color='b',label='fascod')\n#ax2.plot(tropical['p'],tropical['z'],color='red', ls = \"--\",label='Stefan')\n#ax2.set_xlabel(\"Pressure\")\n#ax2.set_ylabel(\"Height\")\n#ax2.legend(loc='best')\n#ax2.grid(True)\n#\n#\n#ax3.plot(fascod_atm['t'],fascod_atm['z'],color='b',label='fascod')\n#ax3.plot(tropical['t'],tropical['z'],color='red', ls = \"--\",label='Stefan')\n#ax3.set_xlabel(\"Temperature\")\n#ax3.set_ylabel(\"Height\")\n#ax3.legend(loc='best')\n#ax3.grid(True)\n#\n#ax4.set_visible = False\n#ax4.axis('off')\n#\n#\n#\n#plt.show()\n","repo_name":"tmachnitzki/IWVfromLW","sub_path":"SK_read.py","file_name":"SK_read.py","file_ext":"py","file_size_in_byte":3539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38302980635","text":"#!/Users/ian/anaconda/bin/python\n\nimport json, glob, CoolProp\n\nfor fluid in glob.glob('../fluids/*.json'):\n with open(fluid, 'r') as fp:\n jj = json.load(fp)\n\n pL = jj['ANCILLARIES'].pop('pL')\n pV = jj['ANCILLARIES'].pop('pV')\n # Keep the one with the lower error\n if pL['max_abserror_percentage'] < pV['max_abserror_percentage']:\n pS = pL\n else:\n pS = pV\n\n pseudo_pure = jj['EOS'][0]['pseudo_pure']\n if pseudo_pure:\n print('-----------------PSEUDO (SKIPPING !!!) 
%s' % fluid)\n else:\n print(fluid)\n jj['ANCILLARIES']['pS'] = pS\n with open(fluid, 'w') as fp:\n json.dump(jj, fp)\n","repo_name":"CoolProp/CoolProp","sub_path":"dev/scripts/replace_ancillaries.py","file_name":"replace_ancillaries.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":664,"dataset":"github-code","pt":"32"} +{"seq_id":"73819110812","text":"import os\nimport requests\nfrom bs4 import BeautifulSoup\n\nresource_path = r'./res_gossiping'\nif not os.path.exists(resource_path):\n os.mkdir(resource_path)\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'\n}\nss = requests.session()\nss.cookies['over18'] = '1'\n\nurl = 'https://www.ptt.cc/bbs/Gossiping/index.html'\n\nn = 30\nfor i in range(0, n):\n res = ss.get(url, headers=headers)\n soup = BeautifulSoup(res.text, 'html.parser')\n article_title_html = soup.select('div[class=\"title\"]')\n\n for each_article in article_title_html:\n try:\n print(each_article.a.text)\n print('https://www.ptt.cc' + each_article.a['href'])\n\n article_url = 'https://www.ptt.cc' + each_article.a['href']\n article_text = each_article.a.text\n article_res = ss.get(article_url, headers=headers)\n article_soup = BeautifulSoup(article_res.text, 'html.parser')\n\n push_up = 0\n push_down = 0\n score = 0\n author = ''\n title = ''\n datetime = ''\n article_content = article_soup.select('div#main-content')[0].text.split(\n '--'\n )[0]\n push_info_list = article_soup.select('div[class=\"push\"] span')\n for info in push_info_list:\n if '推' in info.text:\n push_up += 1\n if '噓' in info.text:\n push_down += 1\n article_info_list = article_soup.select(\n 'div[class=\"article-metaline\"] span'\n )\n for n, info in enumerate(article_info_list):\n if (n + 1) % 6 == 2:\n author = info.text\n if (n + 1) % 6 == 4:\n title = info.text\n if (n + 1) % 6 == 0:\n datetime = info.text\n score = push_up - push_down\n article_content += '\\n---split---\\n'\n article_content += '推: %s\\n' % (push_up)\n article_content += '噓: %s\\n' % (push_down)\n article_content += '分數: %s\\n' % (score)\n article_content += '作者: %s\\n' % (author)\n article_content += '標題: %s\\n' % (title)\n article_content += '時間: %s\\n' % (datetime)\n try:\n new_article_text = article_text\n for iw in '[\\/:*?\"<>|]':\n new_article_text = new_article_text.replace(iw, '_')\n with open(\n r'%s/%s.txt' % (resource_path, new_article_text),\n 'w',\n encoding='utf-8',\n ) as w:\n w.write(article_content)\n print()\n except FileNotFoundError as e:\n print('==========')\n print(article_url)\n print(e.args)\n print('==========')\n except OSError as e:\n print('==========')\n print(article_url)\n print(e.args)\n print('==========')\n\n except AttributeError as e:\n print('==========')\n print(each_article)\n print(e.args)\n print('==========')\n\n url = (\n 'https://www.ptt.cc'\n + soup.select('div[class=\"btn-group btn-group-paging\"]')[0].select('a')[1][\n 'href'\n ]\n )\n","repo_name":"uuboyscy/course-PyETL","sub_path":"part02_pttArticleWithCookie/05_pttGossipingWithCookies.py","file_name":"05_pttGossipingWithCookies.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"32"} +{"seq_id":"70997525850","text":"from bs4 import BeautifulSoup\r\n\r\nwith open(\"C:/Users/john_j_o'neill/DataRepresentation/week03-webScrapping/carviewer02.html\") as fp:\r\n soup = 
BeautifulSoup(fp,'html.parser')\r\n\r\n\r\n#print (soup.tr)\r\n#print all the rows under each tr\r\nrows= soup.findAll('tr')\r\nfor row in rows:\r\n# print(row)\r\n dataList =[]\r\n cols =row.findAll(\"td\")\r\n for col in cols:\r\n dataList.append(col.text)\r\n print(dataList)\r\n","repo_name":"JohnONeillGMIT/dataRepresentation","sub_path":"week03-webScrapping/PY03-readOurFile.py","file_name":"PY03-readOurFile.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25771865712","text":"import re\nfrom unidecode import unidecode\nfrom pykakasi import kakasi\nfrom jamo import hangul_to_jamo\nfrom .numbers import normalize_numbers\n\n\n_kks = kakasi()\n_kks.setMode('H', 'a')\n_kks.setMode('K', 'a')\n_kks.setMode('J', 'a')\n_kks.setMode('E', 'a')\n_kks.setMode('s', True)\n_conv = _kks.getConverter()\n\n#White space\n_whitespace_re = re.compile(r'\\s+')\n\n#訳語処理のためのリスト\n_abbreviations = [(re.compile('\\\\b%s\\\\.' % x[0], re.IGNORECASE), x[1]) for x in [\n ('mrs', 'misess'),\n ('mr', 'mister'),\n ('dr', 'doctor'),\n ('st', 'saint'),\n ('co', 'company'),\n ('jr', 'junior'),\n ('maj', 'major'),\n ('gen', 'general'),\n ('drs', 'doctors'),\n ('rev', 'reverend'),\n ('lt', 'lieutenant'),\n ('hon', 'honorable'),\n ('sgt', 'sergeant'),\n ('capt', 'captain'),\n ('esq', 'esquire'),\n ('ltd', 'limited'),\n ('col', 'colonel'),\n ('ft', 'fort'),\n]]\n\ndef replace_abbreviations(text):\n for abbr, replacement in _abbreviations:\n text = re.sub(abbr, replacement, text)\n return text\n\ndef expand_numbers(text):\n return normalize_numbers(text)\n\ndef lowercase(text):\n return text.lower()\n\ndef collapse_whitespace(text):\n return re.sub(_whitespace_re, ' ', text)\n\ndef convert_to_ascii(text):\n return unidecode(text)\n\ndef english_cleaners(text):\n text = convert_to_ascii(text)\n text = lowercase(text)\n text = expand_numbers(text)\n text = replace_abbreviations(text)\n text = collapse_whitespace(text)\n return text\n\ndef japanese_cleaners(text):\n text = _conv.do(text)\n text = lowercase(text)\n text = expand_numbers(text)\n text = collapse_whitespace(text)\n return text\n\ndef korean_cleaners(text):\n text = ''.join(list(hangul_to_jamo(text)))\n return text\n\ndef transliteration_cleaners(text):\n text = convert_to_ascii(text)\n text = lowercase(text)\n text = collapse_whitespace(text)\n return text","repo_name":"kgy94329/Tacotorn","sub_path":"Tacotron/text/text_cleaner.py","file_name":"text_cleaner.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30138827847","text":"\"\"\"\r\n\n\nCreate a function that converts Celsius to Fahrenheit and vice versa.\n\n### Examples\n\n convert(\"35*C\") ➞ \"95*F\"\n \n convert(\"19*F\") ➞ \"-7*C\"\n \n convert(\"33\") ➞ \"Error\"\n\n### Notes\n\n * Round to the nearest integer.\n * If the input is incorrect, return `\"Error\"`.\n * For the formulae to convert back and forth, check the **Resources** tab.\n\n\"\"\"\r\n\ndef convert(deg):\n x = deg.replace('C', '').replace('F', '').replace('*', '')\n if 'C' in deg:\n t = round(int(x) * 9 / 5 + 32)\n return str(t) + '*F'\n elif 'F' in deg:\n t = round(5 / 9 * (int(x) - 32))\n return str(t) + '*C'\n else:\n return 
'Error'\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"pSrCZFim6Y8HcS9Yc_1.py","file_name":"pSrCZFim6Y8HcS9Yc_1.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29864076804","text":"import numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import LinearSVC\nfrom sklearn.datasets import make_classification\nfrom sklearn.metrics import classification_report, confusion_matrix\nimport pandas as pd\nfrom sklearn.linear_model import LogisticRegression\nfrom joblib import dump, load\nfrom gensim.models import KeyedVectors #borrar prueba\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import cross_val_predict\n\nfrom vectorizer_data import get_vector_tfidf, create_tf_idf, w2v_vec, words_to_vec\n\n\n# DIMENCION VECTOR\ndim_vec=100\n\n#kidssas\npath_save_movel=\"./modelo/\" #Directorio donde se guardan los modelos entranados\nurl_data_csv='./modelo/suicidio_notacion.csv'\nurl_sw='./modelo/stopwords.txt'\n\n#Stopwords\nf = open(url_sw) # Open file on read mode\nstopwords = f.read().split(\"\\n\") # Create a list containing all lines\nf.close() # Close file\n#stopwords= open(url_sw).readlines()\n#print(stopwords)\n# Elimina stopwords \ndef delete_sw(frase):\n\tsin=[word for word in frase.split() if word not in stopwords]\n\t#print('Stopwords')\n\tif len(sin)==0:\n\t\treturn frase\n\telse:\n \t\treturn ' '.join(sin)\n\n\n# Lee el archivo csv y carga las listas que seran usadas.\ndef load_data(data_csv=url_data_csv):\n\tdata_g = pd.read_csv(data_csv) # Convierte csv en formato pandas\n\tfrase_sw=[]\n\tfor i in list(data_g.tweet_clean):\n\t\tfrase_sw.append(delete_sw(i))\n\treturn frase_sw, list(data_g.suicidio)\n\n\n# Retorna un dataframe con la data seleccionada\ndef get_random_data():\n\ttexto, clase = load_data(url_data_csv)\n\t#print(texto) \n\tdata_select = pd.DataFrame({'text': texto,'clase': clase})\n\tprint(data_select.shape)\n\tbalanced = data_select.groupby('clase').apply(sampling_k_elements).reset_index(drop=True)\n\treturn balanced\n\n# Toma 'k' datos random del dataset\ndef sampling_k_elements(group, k=500): \n if len(group) < k:\n return group\n return group.sample(k)\n\n\n# Separa los datos, para el entrenamiento y las pruebas\ndef split_data(vectores, clases): \n\t# tipo_vec='tf-idf', 'w2v'\n\t#print('vec',len(vectores),len(list(balanced['text'])))\n\tX_train, X_test, y_train, y_test = train_test_split(vectores, clases, test_size=0.2, random_state=42)\n\treturn X_train, X_test, y_train, y_test\n\ndef split_data2(frases, clases): \n\tk_train, k_test, l_train, l_test = train_test_split(frases, clases, test_size=0.2, random_state=42)\n\tfor i,j in zip(k_test,l_test):\n\t\tprint(j,i)\n\t\n\n# Clasificador Suport Vector Machine sin Kernel\ndef SVM(X_train, X_test, y_train, y_test,tipo_vec): \n\t#X_train, X_test, y_train, y_test=split_data(tipo_vec)\n\tclf = LinearSVC(random_state=0, tol=1e-5)\n\tclf.fit(X_train, y_train) \n\tLinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,\n\t intercept_scaling=1, loss='squared_hinge', max_iter=1000,\n\t multi_class='ovr', penalty='l2', random_state=0, tol=1e-05, verbose=0)\n\t#scores = cross_val_score(clf, X_train, y_train, cv=5)\n\t#print(\"Accuracy: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))\n\tpred = clf.predict(X_test)\n\tprint(tipo_vec+'_SVM',' Accuracy: ' +str(clf.score(X_test, y_test)))\n\n\tprint(confusion_matrix(pred, 
y_test))\n\tprint(classification_report(pred, y_test))\n\tdump(clf, path_save_movel+tipo_vec+'_SVM.joblib')\n\n\n# Logistic Regression classifier (probability distribution)\ndef RL(X_train, X_test, y_train, y_test, tipo_vec):\n\t#X_train, X_test, y_train, y_test=split_data(tipo_vec)\n\tprint(tipo_vec+'_RL')\n\tclf = LogisticRegression(random_state=0, solver='lbfgs', multi_class='multinomial').fit(X_train, y_train)\n\t#y_pre = cross_val_predict(clf, X_train, y_train, cv=5)\n\t#print(classification_report(y_train, y_pre))\n\t#scores = cross_val_score(clf, X_train, y_train, cv=5)\n\t#print(\"Accuracy: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))\n\tpred = clf.predict(X_test)\n\tprint(tipo_vec+'_RL',' Accuracy: ' +str(clf.score(X_test, y_test)))\n\tprint(confusion_matrix(pred, y_test))\n\tprint(classification_report(pred, y_test))\n\tdump(clf, path_save_movel+tipo_vec+'_RL.joblib')\n\n\n# Load the classifier model\ndef load_classifier(tipo_model):\n\tif tipo_model=='tf-idf':\n\t\treturn load(path_save_movel+'tf-idf_RL.joblib') \n\telif tipo_model=='w2v':\n\t\treturn load(path_save_movel+'w2v_RL.joblib') \n\telse:\n\t\treturn 'Seleccione tipo de modelo'\n\n\n# Return an intent for the phrase, used for the bot's reply\ndef get_intent(frase,tipo_model='w2v',modelo_w2v=None):\n\t\n\tv=np.array([0]*dim_vec)#E\n\tmodelo_cl=load_classifier(tipo_model)\n\tif tipo_model=='w2v':\n\t\tfrase_vec=words_to_vec(frase,modelo_w2v)\n\t\treturn 4 if (frase_vec==v).all() else modelo_cl.predict(frase_vec.reshape(1,-1))[0]#E\n\telse:\n\t\treturn modelo_cl.predict(get_vector_tfidf(frase))[0]\n\n# Return an intent for the phrase together with the class-membership probability distribution\ndef get_intent_prob(frase,tipo_model='w2v',modelo_w2v=None):\n\t\n\tmodelo_cl=load_classifier(tipo_model)\n\tif tipo_model=='w2v':\n\t\tv=np.array([0]*dim_vec) # vector dimension\n\t\tfrase_vec, words_nw2v=words_to_vec(frase,modelo_w2v)\n\t\tprop=modelo_cl.predict_proba(frase_vec.reshape(1,-1))[0]\n\t\tdis_prob=[round(x*100,1) for x in prop]\n\t\treturn ([], words_nw2v) if (frase_vec==v).all() else (dis_prob, words_nw2v)\n\telse:\n\t\tfrase_vec, words_ntf=get_vector_tfidf(frase)# returns the single vector\n\t\tprint('SHAPE TF-IDF',frase_vec.shape)\n\t\tfrase_vec=frase_vec[0].reshape(1,-1)\n\t\tl,d=frase_vec.shape\n\t\tv=np.array([0]*d)\n\t\tprop=modelo_cl.predict_proba(frase_vec)[0]\n\t\tdis_prob=[round(x*100,1) for x in prop]\n\t\treturn ([], words_ntf) if (frase_vec==v).all() else (dis_prob, words_ntf)\n\n\n# Train classification models on the same data\ndef training_models(model_w2v):\n\t#path_w2v=\"/root/w2v/SBW-vectors-300-min5.bin\" \n\tbalanced=get_random_data()# shuffle and select 'k' samples per class\n\tvectores=[]\n\tvectores_tf=create_tf_idf(list(balanced['text']))\n\tvec_words=w2v_vec(list(balanced['text']),model_w2v)\n\t#test\n\tsplit_data2(list(balanced['text']), list(balanced['clase']))\n\tvectores_w2v=[a[0] for a in vec_words]\n\tX_train, X_test, y_train, y_test=split_data(vectores_tf, list(balanced['clase']))\n\tX_train2, X_test2, y_train2, y_test2=split_data(vectores_w2v, list(balanced['clase']))\n\tprint('Entrenando Modelos...')\n\tSVM(X_train, X_test, y_train, y_test,'tf-idf')\n\tRL(X_train, X_test, y_train, y_test,'tf-idf')\n\tSVM(X_train2, X_test2, y_train2, y_test2,'w2v')\n\tRL(X_train2, X_test2, y_train2, y_test2,'w2v')\n\nif __name__== 
\"__main__\":\n\ttraining_models()\n\t#chat()\n","repo_name":"kvvaldez/spanish_suicide","sub_path":"Code/intent_classification.py","file_name":"intent_classification.py","file_ext":"py","file_size_in_byte":6452,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"920317023","text":"\"\"\"\nABC051-B \"Sum of Three Integers\"\nABC085-C \"Otoshidama\"\nABC095-C \"Half and Half\"\nABC112-C \"Pyramid\"\n\"\"\"\n\nimport sys\ninput = sys.stdin.readline\nsys.setrecursionlimit(10**9)\n\nN = int(input())\nA,B,C = map(int,input().split())\nLimit = 10000\nz = Limit\n\nminL = 10000\n\nfor x in range(Limit):\n for y in range(Limit):\n if N-A*x-B*y < 0:break\n mod = (N-A*x-B*y)%C\n # print(x,y)\n if mod ==0:\n z = int((N-A*x-B*y)/C)\n minL = min(x+y+z,minL)\n\nprint(minL)\n","repo_name":"Eggngineer/atcoder","sub_path":"template90/star_1to3/016_Minimum_Coins.py","file_name":"016_Minimum_Coins.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23036012057","text":"from functools import wraps\nfrom inspect import Parameter, signature\n\nimport pytest\n\nfrom rpdk.core.contract.interface import HandlerErrorCode\n\n\ndef _rebind(decorator, func, *args, **kwargs):\n \"\"\"Helper function to construct decorated arguments\n\n This works only with positional and likely positional arguments\n strongly keyword arguments are in **kwargs. It constructs kwargs'\n from positional values\n \"\"\"\n parameters = signature(func).parameters.values()\n decorated_parameters = set(signature(decorator).parameters.keys())\n\n positional_kwargs = dict(\n zip(\n [\n parameter.name\n for parameter in parameters\n if parameter.kind\n in (Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD)\n and parameter.name not in kwargs\n ],\n args,\n )\n )\n return {k: kwargs.get(k) or positional_kwargs[k] for k in decorated_parameters}\n\n\ndef decorate(after=True):\n \"\"\"Helper function to construct decorator from a simple function\n\n arg: after means that decorated check should be run after the\n target function\n\n This is a 'decorate' meta function that wraps new decorator around\n target function and merges decorated arguments with target arguments\n convention: each new decorator should have a 'response' argument,\n which is an output of a target function\n \"\"\"\n\n def inner_decorator(decorator: object):\n def new_decorator(func: object):\n @wraps(func)\n def function(*args, **kwargs):\n response_arg = {}\n if after: # running function before the decorated check\n response = func(*args, **kwargs) # calling target function\n response_arg = {\"response\": response}\n\n kvargs = _rebind(decorator, func, *args, **{**kwargs, **response_arg})\n decorated_sig = signature(decorator)\n bound_arguments = decorated_sig.bind(**kvargs)\n decorator(\n *bound_arguments.args, **bound_arguments.kwargs\n ) # calling a decorated function to execute check\n\n # this allows to make a pre-execution check\n # e.g. 
skipping a test when a precondition is not met\n                if not after: # running function after the decorated check\n                    response = func(*args, **kwargs) # calling target function\n                return response\n\n            return function\n\n        return new_decorator\n\n    return inner_decorator\n\n\n@decorate()\ndef response_does_not_contain_write_only_properties(resource_client, response):\n    resource_client.assert_write_only_property_does_not_exist(response[\"resourceModel\"])\n\n\n@decorate()\ndef response_contains_resource_model_equal_updated_model(\n    response, current_resource_model, update_resource_model\n):\n    assert response[\"resourceModel\"] == {\n        **current_resource_model,\n        **update_resource_model,\n    }, \"All properties specified in the update request MUST be present in the \\\n        model returned, and they MUST match exactly, with the exception of \\\n        properties defined as writeOnlyProperties in the resource schema\"\n\n\n@decorate()\ndef response_contains_primary_identifier(resource_client, response):\n    resource_client.assert_primary_identifier(\n        resource_client.primary_identifier_paths, response[\"resourceModel\"]\n    )\n\n\n@decorate()\ndef response_contains_unchanged_primary_identifier(\n    resource_client, response, current_resource_model\n):\n    assert resource_client.is_primary_identifier_equal(\n        resource_client.primary_identifier_paths,\n        current_resource_model,\n        response[\"resourceModel\"],\n    ), \"PrimaryIdentifier returned in every progress event must match \\\n        the primaryIdentifier passed into the request\"\n\n\n@decorate(after=False)\ndef skip_not_writable_identifier(resource_client):\n    if not resource_client.has_only_writable_identifiers():\n        pytest.skip(\"No writable identifiers. Skipping test.\")\n\n\ndef failed_event(error_code, msg=\"\"):\n    def decorator_wrapper(func: object):\n        @wraps(func)\n        def wrapper(*args, **kwargs):\n            response_error = func(*args, **kwargs)\n            if response_error is not None:\n                if isinstance(error_code, HandlerErrorCode):\n                    error_code_tuple = (error_code,)\n                assert response_error in error_code_tuple, msg\n            return response_error\n\n        return wrapper\n\n    return decorator_wrapper\n","repo_name":"inyamkacg/cloudformation-CLI","sub_path":"src/rpdk/core/contract/suite/contract_asserts.py","file_name":"contract_asserts.py","file_ext":"py","file_size_in_byte":4562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28477607769","text":"def game(): \n    print(\"\t\t\tWELCOME TO PEG SOLITAIRE\")\n    print(\"RULES:\")\n    print(\"PEG - 'O'\tHOLE - '_'\")\n    print(\"1.A PEG IS MOVED TO A HOLE\")\n    print(\"2.A PEG MUST JUMP OVER AN ADJACENT PEG\")\n    print(\"3.A PEG SHOULD NOT JUMP OVER A HOLE\")\n    print(\"4.A PEG CAN MOVE EITHER VERTICALLY OR HORIZONTALLY\")\n    print(\"5.NO CROSS MOVES ARE ALLOWED\")\n\ndef initial_board():\n    a = [[' ', 1, 2, 3, 4, 5, 6, 7], [1, ' ',' ','O','O','O',' ',' '], [2, ' ',' ','O','O','O',' ',' '], [3, 'O','O','O','O','O','O','O'], [4, 'O','O','O','_','O','O','O'], [5, 'O','O','O','O','O','O','O'], [6, ' ',' ','O','O','O',' ',' '], [7, ' ',' ','O','O','O',' ',' ']]\n    return a\n\ndef print_board(board):\n    for i in board:\n        print(' ', end = \"\")\n        print(' '.join([str(elem) for elem in i]))\n\ndef win(board):\n    c = 0\n    for i in board:\n        for j in i:\n            if (j == 'O'):\n                c = c + 1\n    if(c == 1 and board[4][4] == 'O'):\n        return True\n    return False\n\ndef can_move(board, row, col):\n    if(col <= 5 and board[row][col + 1] == 'O' and board[row][col + 2] == '_'):\n        return 1\n    if(col >= 3 and board[row][col - 1] == 'O' and board[row][col - 2] == '_'):\n        return 
1\n    if(row <= 5 and board[row + 1][col] == 'O' and board[row + 2][col] == '_'):\n        return 1\n    if(row >= 3 and board[row - 1][col] == 'O' and board[row - 2][col] == '_'):\n        return 1\n    return 0\n\ndef lose(board):\n    for i in range(1, 8):\n        for j in range(1, 8):\n            if(board[i][j] == 'O'):\n                if(can_move(board, i, j)):\n                    return 0\n    return 1\n\ndef invalid():\n    print(\"INVALID MOVE! Only one peg can be jumped over.\")\n\ndef not_peg():\n    print(\"INVALID MOVE! A peg can't jump over a hole.\")\n\ndef same_row(board, s_row, s_col, d_row, d_col):\n    if(s_col - d_col == -2):\n        if(board[s_row][s_col + 1] == 'O'):\n            board[d_row][d_col] = 'O'\n            board[s_row][s_col] = '_'\n            board[s_row][s_col + 1] = '_'\n        else:\n            not_peg()\n    elif(s_col - d_col == 2):\n        if(board[s_row][s_col - 1] == 'O'):\n            board[d_row][d_col] = 'O'\n            board[s_row][s_col] = '_'\n            board[s_row][s_col - 1] = '_'\n        else:\n            not_peg()\n    else:\n        invalid()\n    return board\n\ndef same_column(board, s_row, s_col, d_row, d_col):\n    if(s_row - d_row == -2):\n        if(board[s_row + 1][s_col] == 'O'):\n            board[d_row][d_col] = 'O'\n            board[s_row][s_col] = '_'\n            board[s_row + 1][s_col] = '_'\n        else:\n            not_peg()\n    elif(s_row - d_row == 2):\n        if(board[s_row - 1][s_col] == 'O'):\n            board[d_row][d_col] = 'O'\n            board[s_row][s_col] = '_'\n            board[s_row - 1][s_col] = '_'\n        else:\n            not_peg()\n    else:\n        invalid()\n    return board\n\ndef update_board(board, s_row, s_col, d_row, d_col):\n    if(s_row < 1 or s_row > 7 or s_col < 1 or s_col > 7 or d_row < 1 or d_row > 7 or d_col < 1 or d_col > 7):\n        print(\"OOPS! YOU ARE OUT OF THE BOARD!\")\n    elif(board[s_row][s_col] != 'O'):\n        print(\"ONLY A PEG CAN BE MOVED!\")\n    elif(board[d_row][d_col] != '_'):\n        print(\"PEG SHOULD BE MOVED TO A HOLE\")\n    else:\n        if(s_row == d_row and s_col == d_col):\n            print(\"THE PEG DID NOT JUMP OVER A HOLE!\")\n        elif(s_row == d_row):\n            board = same_row(board, s_row, s_col, d_row, d_col)\n        elif(s_col == d_col):\n            board = same_column(board, s_row, s_col, d_row, d_col)\n        else:\n            print(\"CROSS MOVES ARE NOT ALLOWED!\")\n    print_board(board)\n\ngame()\nin_board = initial_board()\nprint_board(in_board)\nwhile(win(in_board) is False):\n    if(lose(in_board) == 1):\n        print(\"OOPS! NO MOVES LEFT! YOU LOST!\")\n        exit(0)\n    s_row = int(input(\"SOURCE ROW:\"))\n    s_col = int(input(\"SOURCE COLUMN:\"))\n    d_row = int(input(\"DEST ROW:\"))\n    d_col = int(input(\"DEST COLUMN:\"))\n    update_board(in_board, s_row, s_col, d_row, d_col)\nprint(\"HURRAY! YOU WON!\")\n","repo_name":"sravanthidl/Peg-Solitaire","sub_path":"PegSolitaire.py","file_name":"PegSolitaire.py","file_ext":"py","file_size_in_byte":3588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71113386330","text":"# 2022. 05. 
09.\r\n# 문제 1.\r\n\"\"\"\r\nfor문을 사용하여 50부터 100까지의 정수 중에서\r\n3으로 나누어 떨어지거나 5로 나누어 떨어지는 수들의 합계를\r\n구하는 프로그램을 작성하시오.\r\n\"\"\"\r\n\"\"\"\r\ntot=0\r\nfor x in range(50,101):\r\n #print(x,end=' ')\r\n if x%3==0 or x%5==0: # 3,6,9,5,10\r\n print(x,end=' ')\r\n tot = tot + x\r\n\r\nprint('50-100까지 합: %d'%tot)\r\n\"\"\"\r\n\r\n#문제 2.\r\n\"\"\"\r\n양의 정수를 입력 받아 구구단 n단을 작성하는 프로그램을 작성하시오.\r\n범위(2 <= n <= 9)를 벗어나는 정수가 입력되면\r\n잘못된 값이 입력되었다고 안내를 하고 프로그램을 종료한다.\r\n그렇지 않을 경우는 계속해서 입력을 받아 구구단을 출력한다.\r\n\"\"\"\r\n\"\"\"\r\nwhile True:\r\n dan = int(input('구구단 입력:'))\r\n if 2<= dan <=9:\r\n for x in range(1,10):\r\n print(dan, 'x',x, '=',dan*x)\r\n else:\r\n print('잘못된 입력입니다')\r\n break\r\n\"\"\"\r\n\r\n#문제 3.\r\n\"\"\"\r\n'곱셈의 제왕' 프로그램을 만들려고 한다.\r\n이 게임은 두 개의 숫자를 입력 받은 후\r\n사용자가 올바른 곱셈 값을 입력할 때까지 반복하는 게임이다.\r\n'곱셈의 제왕' 게임 프로그램을 만들어보세요.\r\n\"\"\"\r\n\"\"\"\r\nnum1 = int(input('숫자 1:'))\r\nnum2 = int(input('숫자 2:'))\r\n곱셈 = num1*num2\r\nwhile True:\r\n 결과값 = int(input('곱셈 결과 값 입력:'))\r\n if 곱셈 == 결과값:\r\n print('잘 했습니다')\r\n break\r\n else:\r\n print('새로 입력하세요..')\r\n \r\n \r\nprint('프로그램 종료')\r\n\"\"\"\r\n\r\n##a = [10,20,30]\r\n##b = [40,50,60]\r\n##c = a+b\r\n##print(c)\r\n##tot = a[0] + a[1] + a[2]\r\n##avg = tot/3\r\n##print('avg:',avg)\r\n\r\n##tot=0\r\n##for x in a:\r\n## tot = tot + x\r\n## print(x,tot)\r\n\r\n##a[1:2]=[200,201]\r\n##print(a)\r\n##b[1] = [200, 201]\r\n##print(b)\r\n\r\n\r\n##a = [1,2,'a','b','c','a']\r\n##print(a.index('a'))\r\n##\r\n##a.remove('b')\r\n##print(a)\r\n##del(a[1])\r\n##print(a)\r\n\r\n##a.reverse()\r\n##print(a)\r\n\r\n##a = [6,3,7,9,2,5]\r\n###a.sort()\r\n##a.sort(reverse=True)\r\n##print(a)\r\n\r\n##a = ['kim','lee','hong','park','song']\r\n##print(len(a))\r\n##for x in a:\r\n## print(x)\r\n##for x in range(len(a)):\r\n## print(a[x])\r\n\r\n##list1 = []\r\n##list2 = []\r\n##val=1\r\n##for x in range(3):\r\n## for y in range(4):\r\n## list1.append(val)\r\n## val = val + 1\r\n## list2.append(list1)\r\n## list1=[]\r\n##for x in range(3):\r\n## for y in range(4):\r\n## print('%3d'%list2[x][y], end='')\r\n## print('')\r\n###print(list2)\r\n##a=[]\r\n##for x in range(1,51):\r\n## #print(x, end=' ')\r\n## if x % 3 == 0:\r\n## a.append(x)\r\n##\r\n##print(a)\r\n\r\n##myTuple = (10,20,30,40,50)\r\n##result = 30 in myTuple\r\n##print(myTuple)\r\n##print(len(myTuple))\r\n##print(result)\r\n##for x in myTuple:\r\n## print(x)\r\n#myTuple.append(60)\r\n#myTuple.insert(2,60)\r\n#myTuple.remove(30)\r\n#del myTuple(2)\r\n\r\n\r\n##myTuple = (10,20,30,40,50)\r\n##myList = list(myTuple)\r\n##print(myList)\r\n##myList.append(60)\r\n##print(myList)\r\n##myTuple = tuple(myList)\r\n##print(myTuple)\r\n##tp1 = (1,2,3)\r\n##tp2 = (4,5,6)\r\n##tp = tp1+tp2\r\n##print(tp)\r\n##sports = ('태권도','축구','수영','야구','등산','권투','농구','양궁')\r\n##for x in range(len(sports)):\r\n## if x % 2 ==1:\r\n## print(sports[x])\r\n\r\n##dict = {'강아지':'dog','고양이':'cat','새':'bird'}\r\n##print(dict)\r\n##print(dict.keys())\r\n##print(dict.values())\r\n##print(dict['강아지'])\r\n##print(dict.get('고양이'))\r\n##dict['고래']='whale'\r\n##print(dict)\r\n##del dict['고양이']\r\n##print(dict)\r\n##dict.clear()\r\n##print(dict)\r\n\r\n##dict = {'이름':'홍길동', '나이':'25','주소':'안동','취미':['축구','수영','야구','등산'], '혈액형':'A'}\r\n##for x in dict.keys():\r\n## print(x,'\\t:',dict[x])\r\n##\r\n\r\n# Membership\r\n\"\"\"\r\nMembership = {}\r\nID = input('멤버 ID:')\r\nPW = input('멤버 PW:')\r\nmember = input('1. 회원가입, 2. 프로그램종료')\r\nif member == '1': #회원가입\r\nelse: #종료\r\n break\r\n\"\"\"\r\nMembership = {}\r\nwhile True:\r\n member = input('1. 회원가입(1), 2. 
프로그램종료(2)')\r\n if member == '1': #회원가입\r\n ID = input('멤버 ID:')\r\n PW = input('멤버 PW:')\r\n Membership[ID]=PW\r\n #print(Membership)\r\n else: #종료\r\n print('프로그램 종료')\r\n break\r\n\r\nprint(Membership)\r\n \r\n","repo_name":"yhyi8400/Class_2022_1","sub_path":"Class_02_Mon/2022_05_09.py","file_name":"2022_05_09.py","file_ext":"py","file_size_in_byte":4355,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73409273371","text":"import re\nimport random\n\n# German dictionary from http://sourceforge.net/projects/germandict/\ndic = open('german-utf8.dic')\nresult = open('result.markdown', 'w')\nwords = dic.read()\n\n\ndef randomize_list(original_list):\n \"\"\" Randomizes the list of words\n :param List original_list:\n :return: List new_list\n \"\"\"\n new_list = []\n for i in range(len(original_list)):\n # Pick a random element\n element = random.choice(original_list)\n # Remove it afterwards from the list\n original_list.remove(element)\n # Put it in the new list\n new_list.append(element)\n return new_list\n\n\ndef main():\n # Find all words containing 'dis' but not when followed by 'ch'\n reg_ex = re.compile(r'^.*dis[^ch].*$', re.IGNORECASE | re.MULTILINE)\n found = reg_ex.findall(words)\n\n new = []\n for f in found:\n # Replace the character sequence\n new.append(re.sub(r'dis', '**DISS**', f, flags=re.IGNORECASE))\n\n # Mix the list in random order\n random_list = randomize_list(new)\n\n # Bild a long string separated by blanks\n text = ' '.join(random_list)\n\n result.write(text)\n result.close()\n\nif __name__ == '__main__':\n main()","repo_name":"xldrkp/DisReplace","sub_path":"DisReplacement.py","file_name":"DisReplacement.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16943979908","text":"\nimport sys\nimport os\nimport csv\nimport pandas as pd\nimport argparse\n\n'''\nThis scripts creates a csv table from the master alignment fasta file.\n\nRecommended usage:\n\ncat data/sh2_master_edited.fasta | python table_from_master_alignment_with_numbers_intendation_20210316.py ../data/SH2_domain_containing_prot_right_resnum_fixed.csv > table_alignment.csv\n'''\n\n\nparser = argparse.ArgumentParser(\n formatter_class=argparse.RawTextHelpFormatter\n )\nparser.add_argument(\n type=str, \n dest='inputfile2'\n )\nargs = parser.parse_args()\n\ninputfile2 = args.inputfile2\n\n# with open(sys.stdin, \"r\") as fasta:\n# line = fasta.readline()\n#print(line, \"\\n\", \"Succesful fasta file read.\")\n\n#table = pd.read_csv('/home/takacsg/Documents/SH2DB/SH2_domain_containing_prot_new_uniprot_start_stop_filled.csv')\n#print(table.head())\n\n#with open('/home/takacsg/Documents/SH2DB/SH2_domain_containing_prot_new_uniprot_start_stop_filled.csv') as table:\nwith open(inputfile2) as table:\n numbers = {}\n for line in table:\n line = line.split(',')\n #print(\"this is the line\", line)\n ID = line[6]\n start = line[11]\n stop = line[12]\n numbers[ID] = [start, stop]\n \n \nn = 0\nlista = []\n# with open(sys.stdin, \"r\") as fasta:\nfor x in sys.stdin:\n x = x.rstrip()\n n += 1\n if n % 2 != 0:\n current_ID = x.rstrip() # x = eg.: STAT6 \n current_ID = current_ID[1:] #.split('>')[1]\n current_ID = current_ID.split('|')[0]\n #print(current_ID)\n else:\n #lista = len(x) # x is the original sequence (with gaps)\n print(current_ID + ',' + ','.join(x)) # this will give the comma separated sequence with gaps\n lista2 = [] # that will give the 
residue numbers\n non_gap = False # It should remain False until y = \"-\" (gap)\n counter = 0\n lista2.append(\" \") # Added because ID-s are the first cells of rows\n try: \n counter_target = int(numbers[current_ID][1]) - int(numbers[current_ID][0])\n for y in x:\n if non_gap:\n if counter == counter_target:\n lista2.append(numbers[current_ID][1].rstrip())\n break\n if y != \"-\":\n num = str(int(numbers[current_ID][0]) + counter)\n lista2.append(num)\n counter += 1\n #continue\n else:\n lista2.append(\" \")\n elif y != '-':\n non_gap = True\n counter += 1\n try:\n lista2.append(numbers[current_ID][0])\n except: continue\n elif y == '-':\n lista2.append(\" \")\n continue\n print(','.join(lista2))\n except KeyError: \n #counter_target = 10000\n print(\" No structure found \")\n \n","repo_name":"keserulab/SH2db","sub_path":"shared/sh2db/Scripts/table_process_B1__table_from_master_alignment.py","file_name":"table_process_B1__table_from_master_alignment.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32912879767","text":"from django.conf.urls import url\r\nfrom . import views\r\nfrom .import Code\r\nurlpatterns = [\r\n #卖家选择\r\n url(r'^manage_shop_home/', views.manage_shop_home, name='manage_shop_home'),\r\n #登录路由\r\n url(r'^manage_login/',views.manage_login,name='manage_login'),\r\n url(r'^manage_login_han/',views.manage_login_han,name='manage_login_han'),\r\n url(r'^manage_home/(?P[0-9]+)?',views.manage_home,name='manage_home'),\r\n url(r'^manage_login_out/',views.manage_login_out,name='manage_login_out'),\r\n url(r'^manage_Code',Code.tu),\r\n #商品路由\r\n url(r'^manage_add/',views.manage_add,name='manage_add'),\r\n url(r'^manage_add_han/',views.manage_add_han,name='manage_add_han'),\r\n url(r'^manage_add_list/', views.manage_add_list, name='manage_add_list'),\r\n url(r'^manage_add_list_del/(?P[0-9]+)?', views.manage_add_list_del, name='manage_add_list_del'),\r\n url(r'^manage_add_list_modify/(?P[0-9]+)?', views.manage_add_list_modify, name='manage_add_list_modify'),\r\n url(r'^manage_add_modify_han/', views.manage_modify_han, name='manage_modify_han'),\r\n #商品上架/下架\r\n url(r'^manage_add_up/(?P[0-9]+)?',views.manage_add_up, name='manage_add_up'),\r\n url(r'^manage_add_dowm/(?P[0-9]+)?',views.manage_add_dowm, name='manage_add_dowm'),\r\n #类别路由\r\n url(r'^manage_leibie/',views.manage_leibie,name='manage_leibie'),\r\n url(r'^manage_leibie_han/',views.manage_leibie_han,name='manage_leibie_han'),\r\n url(r'^manage_leibie_del/(?P[0-9]+)?',views.manage_leibie_del,name='manage_leibie_del'),\r\n url(r'^leibie_modify/(?P[0-9]+)?',views.leibie_modify,name='leibie_modify'),\r\n url(r'^leibie_modify_han/',views.leibie_modify_han,name='leibie_modify_han'),\r\n url(r'^manage_leibie_list/', views.manage_leibie_list, name='manage_leibie_list'),\r\n #商家订单管理\r\n url(r'^manage_order_list/', views.manage_order_list, name='manage_order_list'),\r\n url(r'^manage_order_page/(?P[0-9]+)?', views.manage_order_page, name='manage_order_page'),\r\n url(r'^manage_order_comment/(?P[0-9]+)?', views.manage_order_comment, name='manage_order_comment'),\r\n #发货操作\r\n url(r'^manage_logistics/(?P[0-9]+)?', views.manage_logistics, name='manage_logistics'),\r\n url(r'^manage_logistics_han/', views.manage_logistics_han, name='manage_logistics_han'),\r\n #会员管理\r\n url(r'^manage_member_list/', views.manage_member_list, name='manage_member_list'),\r\n url(r'^manage_member_email/(?P[0-9]+)?', views.manage_member_email, 
name='manage_member_email'),\r\n url(r'^manage_show_email/(?P[0-9]+)?', views.manage_show_email, name='manage_show_email'),\r\n url(r'^manage_member_han/', views.manage_member_han, name='manage_member_han'),\r\n #权限管理\r\n url(r'^manage_power_list/', views.manage_power_list, name='manage_power_list'),\r\n url(r'^manage_power_add/', views.manage_power_add, name='manage_power_add'),\r\n url(r'^manage_power_han/', views.manage_power_han, name='manage_power_han'),\r\n url(r'^manage_power_modify/(?P[0-9]+)', views.manage_power_modify, name='manage_power_modify'),\r\n url(r'^manage_power_modify_han/', views.manage_power_modify_han, name='manage_power_modify_han'),\r\n url(r'^manage_power_del/', views.manage_power_del, name='manage_power_del'),\r\n #角色管理\r\n url(r'^manage_role_list/', views.manage_role_list, name='manage_role_list'),\r\n url(r'^manage_role_add/', views.manage_role_add, name='manage_role_add'),\r\n url(r'^manage_role_modify/', views.manage_role_modify, name='manage_role_modify'),\r\n url(r'^manage_role_del/', views.manage_role_del, name='manage_role_del'),\r\n]","repo_name":"MyUncle1997/python_django_shop","sub_path":"manageuser/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37106756364","text":"import sys\ninput=sys.stdin.readline\n\nn,k=map(int,input().split())\nboard=[list(map(int,input().split())) for _ in range(n)]\nboard_hole=[[[] for _ in range(n)] for _ in range(n)]\npiece=[]\nfor i in range(k):\n a,b,c=map(int,input().split())\n a,b,c=a-1,b-1,c-1\n board_hole[a][b].append(i)\n piece.append([a,b,c])\n\ndi=[0,0,-1,1]\ndj=[1,-1,0,0]\nresult=0\ndef move_forward(i,j,ni,nj,index,reverse=False):\n global result,k\n idx=board_hole[i][j].index(index)\n remain=board_hole[i][j][:idx]\n moving=board_hole[i][j][idx:]\n for l in moving:\n piece[l][0]=ni\n piece[l][1]=nj\n if reverse:\n moving.reverse()\n board_hole[ni][nj].extend(moving)\n if len(board_hole[ni][nj])>=4:\n print(result)\n exit()\n board_hole[i][j]=remain\n\nwhile result<=1000:\n result+=1\n # for v in range(n):\n # for b in range(n):\n # temp_str=\"\"\n # for s in board_hole[v][b]:\n # temp_str+=str(s)\n # if piece[s][2]==0:\n # temp_str+=\">\"\n # elif piece[s][2]==1:\n # temp_str+=\"<\"\n # elif piece[s][2]==2:\n # temp_str+=\"^\"\n # elif piece[s][2]==3:\n # temp_str+=\"-\"\n # print(board[v][b],end=\"\")\n # print(\"{0:>10}\".format(temp_str),end=\" | \")\n # print(\"\\n----------------------------------------------------------------------------\")\n # print()\n # print(\"===================================================================================\")\n # print()\n for index,value in enumerate(piece):\n i,j,d=value\n ni=i+di[d]\n nj=j+dj[d]\n if 0<=ni tuple[list[str], dict[str, tuple[int, int]]]:\n keys = []\n rewrite_map = {}\n\n for spec in channel_specs:\n parts = spec.split(\":\")\n if not parts:\n continue\n keys.append(parts[0])\n if len(parts) < 3:\n continue\n rewrite_map[parts[0]] = int(parts[1]), int(parts[2])\n\n return keys, rewrite_map\n","repo_name":"stefanistrate/drivendata-stac-overflow","sub_path":"stac_overflow/submission/inputs.py","file_name":"inputs.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32202633600","text":"\"\"\"\nWrite a function that takes directory path, a file extension and an optional tokenizer.\nIt will count lines in all files with 
that extension when no tokenizer is given.\nIf the tokenizer is not None, it will count tokens.\n\n# For dir with two files from hw1.py:\n# >>> universal_file_counter(test_dir, \"txt\")\n# 6\n# >>> universal_file_counter(test_dir, \"txt\", str.split)\n# 6\n\n\"\"\"\nimport os\nfrom pathlib import Path\nfrom typing import Callable, Optional\n\n\ndef tokenizer_processing(line, tokenizer):\n    if tokenizer is None:\n        return 1\n    else:\n        if type(tokenizer) is list:\n            return sum([len(line.split(entry[0])) for entry in tokenizer])\n        else:\n            return len(tokenizer(line))\n\n\ndef universal_file_counter(\n    dir_path: Path, file_extension: str, tokenizer: Optional[Callable] = None\n) -> int:\n    counter = 0\n    for file in os.listdir(dir_path):\n        file_path = os.path.join(dir_path, file)\n        if file.endswith(file_extension):\n            with open(file_path) as fh:\n                for line in fh:\n                    counter += tokenizer_processing(line, tokenizer)\n    return counter\n","repo_name":"Nadya7n/epam_homework_2021","sub_path":"homework9/hw3.py","file_name":"hw3.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"25173281523","text":"import numpy as np\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n\nclass Spectrum:\n    def __init__(\n        self, mz: np.array = None, intensity: np.array = None, num_fragments: int = 10\n    ):\n        \"\"\"\n        Creates a mass spectrometry spectrum, defined by its m/z bins and corresponding intensity values.\n        Parameters\n        ----------\n        mz : np.array\n            Array of mass:charge ratios.\n        intensity : np.array\n            Array of intensity values at corresponding indices of m/z.\n        num_fragments : int\n            Number of most intense fragments to extract.\n        \"\"\"\n        self.mz = mz\n        self.intensity = intensity\n        self.extracted_intensity = None\n        self.extracted_mz = None\n        self._extract_most_intense(num_fragments=num_fragments)\n        return\n\n    def _extract_most_intense(self, num_fragments: int = 10):\n        \"\"\"\n        Extracts the most intense m/z bins, and stores the intensity and m/z sorted values in self.extracted_mz and\n        self.extracted_intensity\n        Parameters\n        ----------\n        spectrum_intensity : np.array\n            Array containing the spectrum intensities to extract.\n        spectrum_mz : np.array\n            Array containing the m/z values corresponding to the spectrum intensities. If None, the index corresponding to\n            the intensities is returned. Default: None.\n        num_fragments : int\n            Number of fragments to extract. Default: 10.\n\n        Returns\n        -------\n        tuple\n            Tuple of np.arrays sorted in decreasing order: (mz, intensity) corresponding to the most intense peaks. I.e.,\n            the most intense peak is first.\n        \"\"\"\n        if len(self.mz) == 0:\n            self.mz = np.arange(len(self.intensity))\n\n        # Sort fragments\n        arg_idx = np.argsort(self.intensity)\n        # Reverse list; take only top num_fragments\n        # Check against np.inf\n        if num_fragments == np.inf:\n            # Take all fragments\n            arg_idx = arg_idx[::-1]\n        else:\n            arg_idx = arg_idx[-1 : -(num_fragments + 1) : -1]\n        self.extracted_mz = self.mz[arg_idx]\n        self.extracted_intensity = self.intensity[arg_idx]\n        return\n\n    def get_matching_mz_indices(\n        self, spectrum_to_match: \"Spectrum\", match_tolerance_ppm: int = 30\n    ) -> tuple:\n        \"\"\"\n        Checks whether the extracted fragments from this spectrum match those of another. 
Matches are defined as two\n spectra having high intensity values at the same m/z bin, within tolerance.\n Parameters\n ----------\n spectrum_to_match : Spectrum\n Other Spectrum object to compare.\n min_fragment : int\n Minimum number of fragments to consider a match\n match_tolerance_ppm : int\n The margin of error given to bins.\n\n Returns\n -------\n tuple\n Pair of lists corresponding to the indices that are matched across the spectra. (self_idx, spectrum_to_match_idx)\n \"\"\"\n\n # Spectra might be of different length; no clean way to compare them in bulk\n # First sort by mz, then crawl through each. Use argsort to restore original indices later\n self_arg_sorted = np.argsort(self.mz)\n sorted_self_mz = self.mz[self_arg_sorted]\n other_arg_sorted = np.argsort(spectrum_to_match.extracted_mz)\n sorted_other_mz = spectrum_to_match.extracted_mz[other_arg_sorted]\n\n self_start_idx = 0\n self_idx = []\n other_idx = []\n for other_mz_idx, other_mz in enumerate(sorted_other_mz):\n smallest_difference = np.inf\n for self_mz_idx, self_mz in zip(\n range(self_start_idx, len(sorted_self_mz)),\n sorted_self_mz[self_start_idx:],\n ):\n # Since they're sorted, we can skip the ones we've already checked\n # Check whether we're within tolerance\n # (ref_val - val) * (1e6) / ref_val\n if np.isclose(self_mz, other_mz, rtol=match_tolerance_ppm):\n # Mz values are sorted; once the difference increases we've found the minimum\n if abs(self_mz - other_mz) < smallest_difference:\n smallest_difference = abs(self_mz - other_mz)\n closest_self_idx = self_mz_idx\n closest_other_idx = other_mz_idx\n continue\n self_idx.append(closest_self_idx)\n other_idx.append(closest_other_idx)\n self_start_idx = (\n self_mz_idx - 1\n ) # Previous one might also be closest to the next bin\n break\n # Check whether other_mz is larger; indicates we should increment self_mz\n elif self_mz > other_mz:\n # Get next self_mz; ignore previously-checked mz from input spectra in next round\n self_start_idx = self_mz_idx\n break\n # Need to remap sorted idx to original idx:\n return self_arg_sorted[self_idx], other_arg_sorted[other_idx]\n\n def extracted_cosine_similarity(self, spectrum_to_compare: \"Spectrum\") -> float:\n \"\"\"\n Computes the cosine similarity between the extracted fragments of this spectrum and the input.\n\n Parameters\n ----------\n spectrum_to_compare : Spectrum\n Spectrum against which to compare\n Returns\n -------\n float\n Cosine similarity between this spectrum and the input.\n \"\"\"\n\n # Get fragment idx\n self_fragment_idx, other_fragment_idx = self.get_matching_mz_indices(\n spectrum_to_match=spectrum_to_compare, match_tolerance_ppm=30\n )\n # if len(self_fragment_idx) == 0:\n # return cosine_similarity(np.ones((1,1)), np.zeros((1,1)))\n return cosine_similarity(\n np.expand_dims(self.extracted_intensity[self_fragment_idx], axis=0),\n np.expand_dims(\n spectrum_to_compare.extracted_intensity[other_fragment_idx], axis=0\n ),\n )\n","repo_name":"xomicsdatascience/zoDIAq","sub_path":"src/zodiaq/plotting/spectrum.py","file_name":"spectrum.py","file_ext":"py","file_size_in_byte":6217,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"37806608283","text":"\nfrom schemas.mongo_models.device_models import MongoDevice, GeoJson2DPoint, MongoDeviceDataEntry\nfrom schemas.mongo_models.account_models import MongoCompanyAccount, MongoCompany\nfrom passlib.context import CryptContext\nfrom beanie import init_beanie\nimport motor\nimport asyncio\nimport 
time\nimport math\nimport os\nimport sys\nimport random\ncwd = os.getcwd()\nsys.path.append(cwd)\n\n\npwd_context = CryptContext(schemes=[\"bcrypt\"], deprecated=\"auto\")\n\n\nasync def main():\n client = motor.motor_asyncio.AsyncIOMotorClient(\n os.environ['mongo_database_url'])\n await init_beanie(database=client['test'] if os.environ['ENV'] == 'DEV' else client['main'], document_models=[MongoCompany, MongoCompanyAccount, MongoDevice])\n print('making')\n # mongo_company = MongoCompany.construct()\n # mongo_company.name = 'test'\n # await mongo_company.save()\n\n # mongo_account = MongoCompanyAccount.construct()\n # mongo_account.email = 'test'\n # mongo_account.password_hash = pwd_context.hash('test')\n # mongo_account.company_id = mongo_company.id\n # await mongo_account.save()\n\n company = await MongoCompany.find_one(MongoCompany.name == 'test')\n # company.labels = {\n # }\n # await company.save()\n\n device: MongoDevice = MongoDevice.construct()\n device.device_id = 10\n device.device_secret = 20\n device.aes_key = b'\\x12!\\xfbLT\\xf6\\xd1YY}\\xc9\\xd4i\\xdb\\xb9\\x92'\n device.data = []\n device.past_day_data = []\n device.past_week_data = []\n device.past_month_data = []\n device.past_year_data = []\n date = int(time.time()) - 24*60*60\n # for i in range(24*2):\n # entry = MongoDeviceDataEntry.construct()\n # entry.time_s = date + i*30*60\n # entry.distance_mm = 50 * math.sin(i*math.pi*2/24-2)\n # device.past_day_data.append(entry)\n device.company_id = company.id\n device.creation_date = int(time.time())\n device.location = GeoJson2DPoint(coordinates=(51.500 + (random.randint(-500, 500) / 10000),\n -0.1743 + (random.randint(-500, 500) / 10000)))\n device.warning_level = 5\n device.warning_level_percentage = 50\n device.installation_comment = ''\n device.comments = ''\n device.pinned = False\n await device.save()\n print(device)\n\n for i in range(20):\n i = i * 10 + 50\n device: MongoDevice = MongoDevice.construct()\n device.device_id = i\n device.device_secret = 30\n device.aes_key = b'\\x12!\\xfbLT\\xf6\\xd1YY}\\xc9\\xd4i\\xdb\\xb9\\x92'\n device.data = []\n device.past_day_data = []\n device.past_week_data = []\n device.past_month_data = []\n device.past_year_data = []\n date = int(time.time()) - 24*60*60\n for i in range(24*2):\n entry = MongoDeviceDataEntry.construct()\n entry.time_s = date + i*30*60\n entry.distance_mm = 50 * math.sin(i*math.pi*2/24)\n device.past_day_data.append(entry)\n device.company_id = company.id\n device.creation_date = int(time.time())\n device.location = GeoJson2DPoint(\n coordinates=(51.498 + (random.randint(-3000, 3000) / 10000),\n -0.1832 + (random.randint(-3000, 3000) / 10000))\n )\n device.warning_level = 5\n device.setup_complete = True\n device.warning_level_percentage = 50\n device.installation_comment = ''\n device.comments = ''\n device.pinned = False\n await device.save()\n print('made')\n\n\nasyncio.run(main())\n","repo_name":"pikachunerdy/fastapi-demo","sub_path":"tests/create_data/create_devices.py","file_name":"create_devices.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2920329336","text":"import os\nimport json\nimport time\nimport subprocess\nimport datetime\nimport signal\nimport argparse\nimport logging\nimport pandas as pd\n\ndef process(file_path, start, end):\n '''\n frame = pd.read_json(file_path, lines=True)\n files = list(frame['file_name'])\n timeout = 5\n '''\n i = start\n timeout = 5\n files = 
os.listdir(file_path)\n    print(len(files))\n    if end > len(files):\n        end = len(files)\n    while i < end:\n        slicer = \"bash ./slicer.sh \" + file_path + \" \" + str(files[i]) + \" 1 \" + \"parsed/\" + str(files[i])\n        start0 = datetime.datetime.now()\n        process1 = subprocess.Popen(slicer, shell = True)\n        while process1.poll() is None:\n            time.sleep(0.2)\n        end0 = datetime.datetime.now()\n        if (end0-start0).seconds > timeout:\n            os.kill(process1.pid, signal.SIGKILL)\n            os.waitpid(-1, os.WNOHANG)\n        i += 1\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--file_path', help='directory containing the functions to slice', default='../devign_dataset')\n    parser.add_argument('--start', help='index of the first function to parse', type=int, default=0)\n    parser.add_argument('--end', help='index of the last function to parse', type=int, default=4500)\n    args = parser.parse_args()\n    file_path = args.file_path\n    start = args.start\n    end = args.end\n    process(file_path, start, end)\n\n","repo_name":"AMPLE001/AMPLE","sub_path":"data_processing/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"32"} +{"seq_id":"12711493553","text":"import os\nimport numpy as np \nfrom PIL import Image\nimport cv2\n\n\nrecog = cv2.face.LBPHFaceRecognizer_create()\npath = 'dataset'\n\ndef getImageswithID(path):\n\timagePaths = [os.path.join(path,f) for f in os.listdir(path)]\n\tfaces = []\n\tNames = []\n\n\tfor imgpath in imagePaths:\n\t\tfaceImg = Image.open(imgpath)\n\t\tfaceNp = np.array(faceImg,'uint8')\n\t\tName = (os.path.split(imgpath)[-1].split('_')[0])\n\t\t\n\t\tfaces.append(faceNp)\n\t\tNames.append(Name)\n\t\tcv2.imshow('training',faceNp)\n\t\tcv2.waitKey(10)\n\treturn Names, faces\n\nNames, faces = getImageswithID(path)\n# LBPH needs integer labels; every image is given label 0 here, i.e. the\n# recognizer is trained for a single subject (the collected Names are unused)\nids = [0]*len(faces)\nrecog.train(faces,np.array(ids))\nrecog.write('trainingData1.yml')\ncv2.destroyAllWindows()\n\n","repo_name":"InternityFoundation/Perceptron_3038","sub_path":"Sprint Cycles/Navjot Singh/Face detection/face_recog_train.py","file_name":"face_recog_train.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"} +{"seq_id":"21096578633","text":"from Views.testExplorer import TestExplorer # the test explorer\nfrom Model.test import Test # the test class (used to initialize the test explorer)\nfrom PyQt5.QtWidgets import * # this module contains classes that provide a set of UI elements to create classic desktop-style user interfaces\nimport sys\nimport os\nimport re\nimport getopt\n\ndef main():\n    currentDir = os.getcwd()\n    blacklistPath = \"%s%s%s\" % (currentDir, os.path.sep, \"MessageBlacklist.txt\")\n    blacklist = parseBlacklistFile(blacklistPath)\n    tests = getTestsRelativeToDirectory(currentDir, \"..,Impact\", blacklist) # generate tests based on impact folder\n\n\n    try:\n        opts, args = getopt.getopt(sys.argv[1:], \"a\", [\"runall\"])\n    except getopt.GetoptError:\n        print('Could not parse command line arguments')\n        sys.exit(2)\n\n    for opt, arg in opts:\n        if opt in (\"-a\", \"--runall\"):\n            [test.run() for test in tests]\n            [test.assertMatchingCounts() for test in tests]\n            return\n\n    app = QApplication(sys.argv) # create instance of Q application. 
Should only have one even if you have multiple windows\n testExplorer = TestExplorer(tests) # generate the test explorer with the tests\n testExplorer.show() # show the test explorer\n app.exec_() # execute the application\n\ndef getTestsRelativeToDirectory(directory, relativePathString, blacklist):\n\n testsPath = os.path.abspath(os.path.join(directory, *relativePathString.split(',')))\n\n subFolders = os.listdir(testsPath)\n\n # remove ImpactCommon if it exists. This folder contains the common impact messages\n if (\"ImpactCommon\" in subFolders):\n subFolders.remove(\"ImpactCommon\")\n\n testDirs = [os.path.abspath(os.path.join(testsPath, folder)) for folder in subFolders]\n # generate all the tests\n tests = [Test(testDir, blacklist) for testDir in testDirs]\n\n return tests\n\ndef parseBlacklistFile( fullyQualifiedPath ):\n #read file\n blacklistFile = open(fullyQualifiedPath, 'r')\n contents = blacklistFile.read()\n # remove any unnecessary characters\n charsToRemove = [' ', '\\n', '\\r', '\\t']\n # a regular expression to search for characters in remove characters list\n regEx = \"[\" + re.escape(''.join(charsToRemove)) + ']'\n #substitute the found items with an empty character in the contents\n contents = re.sub(regEx,'',contents)\n #[contents.replace(char, '') for char in removeChars]\n #split the contents by commas\n blacklistMessageHints = contents.split(\",\")\n #return list of blacklist messages\n return blacklistMessageHints\n\nif __name__ == '__main__':\n main()\n","repo_name":"afrl-rq/UxAS_TestData","sub_path":"Test/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23031401312","text":"from flask import Flask, render_template, request\nfrom PIL import Image\nimport numpy as np\nimport pickle\nimport tensorflow\nimport keras\nfrom keras.utils import load_img, img_to_array\nfrom keras.preprocessing import image\nfrom keras.layers import GlobalMaxPooling2D\nfrom keras.applications.resnet import ResNet50,preprocess_input\nfrom sklearn.neighbors import NearestNeighbors\nfrom numpy.linalg import norm\n\nfeature_list = np.array(pickle.load(open('embedded.pkl','rb')))\nfilenames = pickle.load(open('filenames.pkl','rb'))\n\nmodel = ResNet50(weights='imagenet',include_top=False,input_shape=(224,224,3))\nmodel.trainable = False\n\nmodel = tensorflow.keras.Sequential([\n model,\n GlobalMaxPooling2D()\n])\n\n\n\ndef feature_extraction(img_path,model):\n img = load_img(img_path, target_size=(224, 224))\n img_array = img_to_array(img)\n expanded_img_array = np.expand_dims(img_array, axis=0)\n preprocessed_img = preprocess_input(expanded_img_array)\n result = model.predict(preprocessed_img).flatten()\n normalized_result = result / norm(result)\n\n return normalized_result\n\ndef recommend(features,feature_list):\n neighbors = NearestNeighbors(n_neighbors=5, algorithm='brute', metric='euclidean')\n neighbors.fit(feature_list)\n\n distances, indices = neighbors.kneighbors([features])\n print(indices)\n return indices\n\n\napp = Flask(__name__)\n\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/', methods = ['post'])\ndef imageWork():\n imagefile = request.files['image_input']\n imagePath = \"./static/uploads/\" + imagefile.filename\n imagefile.save(imagePath) \n image_sample = feature_extraction(imagePath,model)\n indices = recommend(image_sample,feature_list)\n list_add = []\n for file in indices[0][0:8]:\n 
list_add.append(filenames[file])\n list_add\n display_image = list_add[0]\n return render_template('index.html', imagelist = list_add, display_image = display_image)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"anoopjoshi015/Product-Recommendation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"15223722220","text":"from fusil.project_agent import ProjectAgent\nfrom fusil.session import Session\nfrom fusil.mas.agent_list import AgentList\nfrom fusil.project_directory import ProjectDirectory\nfrom time import time\nfrom fusil.aggressivity import AggressivityAgent\nfrom ptrace.os_tools import RUNNING_LINUX, RUNNING_PYPY\nfrom fusil.process.debugger import Debugger\nfrom shutil import copyfile\nif RUNNING_PYPY:\n from gc import collect as gc_collect\nif RUNNING_LINUX:\n from fusil.system_calm import SystemCalm\n\nclass Project(ProjectAgent):\n \"\"\"\n A fuzzer project runs fuzzing sessions until we get enough successes or the\n user interrupts the project. Initialize all agents before a session starts,\n and cleanup agents at the session end.\n\n Before a session start, the project sleeps until the system load is under\n 50% (may change with command line options).\n \"\"\"\n def __init__(self, application):\n ProjectAgent.__init__(self, self, \"project\", mta=application.mta(), application=application)\n self.config = application.config\n options = application.options\n self.agents = AgentList()\n if RUNNING_LINUX:\n if options.fast:\n self.system_calm = None\n elif not options.slow:\n self.system_calm = SystemCalm(\n self.config.fusil_normal_calm_load,\n self.config.fusil_normal_calm_sleep)\n else:\n self.system_calm = SystemCalm(\n self.config.fusil_slow_calm_load,\n self.config.fusil_slow_calm_sleep)\n else:\n self.warning(\"SystemCalm class is not available\")\n self.system_calm = None\n\n # Configuration\n self.max_session = options.sessions\n self.success_score = self.config.fusil_success_score\n self.error_score = self.config.fusil_error_score\n self.max_success = options.success\n\n # Session\n self.step = None\n self.nb_success = 0\n self.session = None\n self.session_index = 0\n self.session_timeout = None # in second\n\n # Statistics\n self.session_executed = 0\n self.session_total_duration = 0\n self.total_duration = None\n self._destroyed = False\n\n # Add Application agents, order is important:\n # MTA have to be the first agent\n for agent in application.agents:\n self.registerAgent(agent)\n self.registerAgent(self)\n\n # Create aggressivity agent\n self.aggressivity = AggressivityAgent(self)\n\n # Initial aggresssivity value\n if options.aggressivity is not None:\n self.aggressivity.setValue(options.aggressivity / 100)\n self.error(\"Initial aggressivity: %s\" % self.aggressivity)\n\n # Create the debugger\n self.debugger = Debugger(self)\n\n # Create the working directory\n self.directory = ProjectDirectory(self)\n self.directory.activate()\n self.error(\"Use directory: %s\" % self.directory.directory)\n\n # Initilize project logging\n self.initLog()\n\n def registerAgent(self, agent):\n self.agents.append(agent)\n\n def unregisterAgent(self, agent, destroy=True):\n if agent not in self.agents:\n return\n self.agents.remove(agent, destroy)\n\n def init(self):\n \"\"\"\n Function called once on project creation: create the project working\n directory, prepare the logging and create the first session.\n 
\"\"\"\n self.project_start = time()\n self.createSession()\n\n def initLog(self):\n # Move fusil.log into run-xxx/project.log: copy fusil.log content\n # and then remove fusil.log file and log handler)\n logger = self.application().logger\n filename = self.createFilename(\"project.log\")\n if logger.filename:\n copyfile(logger.filename, filename)\n logger.unlinkFile()\n mode = 'a'\n else:\n mode = 'w'\n logger.file_handler = logger.addFileHandler(filename, mode=mode)\n logger.filename = filename\n\n def deinit(self):\n if self.session_executed:\n self.summarize()\n\n def destroy(self):\n if self._destroyed:\n return\n self._destroyed = True\n\n # Destroy all project agents\n self.aggressivity = None\n self.debugger = None\n for agent in self.application().agents:\n self.agents.remove(agent, False)\n self.agents.clear()\n\n # Keep project directory?\n keep = self.directory.keepDirectory()\n if not keep:\n # Don't keep the directory: destroy log file\n logger = self.application().logger\n logger.unlinkFile()\n # And then remove the whole directory\n self.directory.rmtree()\n self.directory = None\n\n if RUNNING_PYPY:\n gc_collect()\n\n def createSession(self):\n \"\"\"\n Create a new session:\n - make sure that system load is under 50%\n - activate all project agents\n - send project_start (only for the first session)\n and session_start messages\n \"\"\"\n # Wait until system is calm\n if self.system_calm:\n self.system_calm.wait(self)\n\n self.info(\"Create session\")\n self.step = 0\n self.session_index += 1\n self.use_timeout = bool(self.session_timeout)\n self.session_start = time()\n\n # Enable project agents\n for agent in self.agents:\n if not agent.is_active:\n agent.activate()\n\n # Create session\n self.session = Session(self)\n\n # Send 'project_start' and 'session_start' message\n if self.session_index == 1:\n self.send('project_start')\n self.send('session_start')\n text = \"Start session\"\n if self.max_session:\n percent = self.session_index * 100.0 / self.max_session\n text += \" (%.1f%%)\" % percent\n self.error(text)\n\n\n def destroySession(self):\n \"\"\"\n Destroy the current session:\n - deactive all project agents\n - clear agents mailbox\n \"\"\"\n # Update statistics\n if not self.application().exitcode:\n self.session_executed += 1\n self.session_total_duration += (time() - self.session_start)\n\n # First deactivate session agents\n self.session.deactivate()\n\n # Deactivate project agents\n application_agents = self.application().agents\n for agent in self.agents:\n if agent not in application_agents:\n agent.deactivate()\n\n # Clear session variables\n self.step = None\n self.session = None\n\n # Remove waiting messages\n for agent in application_agents:\n agent.mailbox.clear()\n self.mta().clear()\n\n def on_session_done(self, session_score):\n self.send('project_session_destroy', session_score)\n\n def on_project_stop(self):\n self.send('univers_stop')\n\n def on_univers_stop(self):\n if self.session:\n self.destroySession()\n\n def on_project_session_destroy(self, session_score):\n # Use session score\n self.session.score = session_score\n duration = time() - self.session_start\n if self.success_score <= session_score:\n log = self.error\n else:\n log = self.warning\n log(\"End of session: score=%.1f%%, duration=%.3f second\" % (\n session_score*100, duration))\n\n # Destroy session\n self.destroySession()\n\n # Session success? 
project is done\n if self.success_score <= session_score:\n self.nb_success += 1\n text = \"#%s\" % self.nb_success\n if 0 < self.max_success:\n percent = self.nb_success * 100.0 / self.max_success\n text += \"/%s (%.1f%%)\" % (self.max_success, percent)\n self.error(\"Success %s!\" % text)\n if 0 < self.max_success \\\n and self.max_success <= self.nb_success:\n self.error(\"Stop! Limited to %s successes, use --success option for more\" % self.max_success)\n self.send('univers_stop')\n return\n\n # Hit maximum number of session?\n if 0 < self.max_session \\\n and self.max_session <= self.session_index:\n self.error(\"Stop! Limited to %s sessions, use --sessions option for more\" % self.max_session)\n self.send('univers_stop')\n return\n\n # Otherwise: start new session\n self.createSession()\n\n def live(self):\n if self.step is not None:\n self.step += 1\n if not self.session:\n return\n if not self.use_timeout:\n return\n duration = time() - self.session_start\n if self.session_timeout <= duration:\n self.error(\"Project session timeout!\")\n self.send('session_stop')\n self.use_timeout = False\n\n def summarize(self):\n \"\"\"\n Display a summary of all executed sessions\n \"\"\"\n count = self.session_executed\n info = []\n if count:\n duration = self.session_total_duration\n info.append(\"%s sessions in %.1f seconds (%.1f ms per session)\"\n % (count, duration, duration * 1000 / count))\n duration = time() - self.project_start\n info.append(\"total %.1f seconds\" % duration)\n info.append(\"aggresssivity: %s\" % self.aggressivity)\n self.error(\"Project done: %s\" % \", \".join(info))\n self.error(\"Total: %s success\" % self.nb_success)\n\n def createFilename(self, filename, count=None):\n \"\"\"\n Create a filename in the project working directory: add directory\n prefix and make sure that the generated filename is unique.\n \"\"\"\n return self.directory.uniqueFilename(filename, count=count)\n\n","repo_name":"clem1/segvault","sub_path":"fusil/fusil/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":9982,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"32"} +{"seq_id":"26105848037","text":"from __future__ import annotations\n\nfrom typing import List, Union, Iterable, Tuple\n\nimport discord\n\nfrom redbot.core.i18n import Translator\nfrom redbot.core.utils.chat_formatting import humanize_number, escape\nfrom redbot.core.utils._dpy_menus_utils import SimpleHybridMenu\nfrom redbot.vendored.discord.ext import menus\n\n_ = Translator(\"CustomCommands\", __file__)\n\n\nclass CCListSource(menus.ListPageSource):\n def __init__(self, custom_commands: Iterable[Tuple[str, dict]]):\n super().__init__(custom_commands, per_page=5)\n\n async def format_page(\n self, menu: SimpleHybridMenu, entries: Iterable[Tuple[str, dict]]\n ) -> Union[discord.Embed, str]:\n current_entry = menu.current_page + 1\n total_entries = self._max_pages\n\n results = []\n for command, body in entries:\n responses = body[\"response\"]\n\n if isinstance(responses, list):\n result = \", \".join(map(discord.utils.escape_markdown, responses))\n elif isinstance(responses, str):\n result = discord.utils.escape_markdown(responses)\n else:\n continue\n # Cut preview to 52 characters max\n if len(result) > 52:\n result = result[:49] + \"...\"\n # Replace newlines with spaces\n result = result.replace(\"\\n\", \" \")\n # Escape markdown and mass mentions\n result = escape(result, formatting=True, mass_mentions=True)\n 
results.append((f\"{menu.ctx.clean_prefix}{command}\", result))\n\n if await menu.ctx.embed_requested():\n # We need a space before the newline incase the CC preview ends in link (GH-2295)\n content = \" \\n\".join(map(\"**{0[0]}** : {0[1]}\".format, results))\n embed = discord.Embed(\n title=_(\"Custom Command List\"),\n description=content,\n colour=await menu.ctx.embed_colour(),\n )\n if total_entries > 1:\n text = _(\"Page: {page_num}/{total_pages}\\n\").format(\n page_num=humanize_number(current_entry),\n total_pages=humanize_number(max(1, total_entries)),\n )\n embed.set_footer(text=text)\n return embed\n else:\n return \"\\n\".join(map(\"{0[0]:<12} : {0[1]}\".format, results))\n\n\nclass CCRawSource(menus.ListPageSource):\n def __init__(self, custom_commands: List[str]):\n super().__init__(custom_commands, per_page=1)\n\n async def format_page(self, menu: SimpleHybridMenu, entry: str) -> Union[discord.Embed, str]:\n raw = discord.utils.escape_markdown(entry)\n current_entry = menu.current_page + 1\n total_entries = self._max_pages\n if await menu.ctx.embed_requested():\n colour = await menu.ctx.embed_colour()\n if len(raw) > 2048:\n raw = f\"{raw[:2045]}...\"\n embed = discord.Embed(\n title=_(\"Response #{num}\").format(num=current_entry),\n description=raw,\n colour=colour,\n )\n if total_entries > 1:\n text = _(\"Page: {page_num}/{total_pages}\\n\").format(\n page_num=humanize_number(current_entry),\n total_pages=humanize_number(max(1, total_entries)),\n )\n embed.set_footer(text=text)\n return embed\n else:\n msg = _(\"Response #{num}/{total}:\\n{raw}\").format(\n num=humanize_number(current_entry),\n total=humanize_number(max(1, total_entries)),\n raw=raw,\n )\n if len(msg) > 2000:\n msg = f\"{msg[:1997]}...\"\n return msg\n","repo_name":"Scuffed-Guard/Draper-Red-Edge","sub_path":"redbot/cogs/customcom/menus.py","file_name":"menus.py","file_ext":"py","file_size_in_byte":3749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"17336305643","text":"import pickle\n\nMODEL_PATH = \"model1.bin\"\nDV_PATH = \"dv.bin\"\n\nwith open(DV_PATH, \"rb\") as dv_file, open(MODEL_PATH, \"rb\") as model_file:\n dv = pickle.load(dv_file)\n model = pickle.load(model_file)\n\nclient = {\"job\": \"retired\", \"duration\": 445, \"poutcome\": \"success\"}\n\nif __name__ == \"__main__\":\n X = dv.transform([client])\n y_pred = model.predict_proba(X)[0, 1]\n print(f\"Credit probability: {y_pred:.3f}\")","repo_name":"jjovalle99/mlzoomcamp","sub_path":"week5/homework/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38919246182","text":"# https://programmers.co.kr/learn/courses/30/lessons/72412\n# 순위 검색\n\nfrom itertools import *\nfrom bisect import *\n\ndef solution(info, query):\n answer = []\n qq = []\n qd = {}\n kindof = [[\"java\", \"python\", \"cpp\", \"-\"], [\"backend\", \"frontend\", \"-\"], [\"junior\", \"senior\", \"-\"], [\"pizza\", \"chicken\", \"-\"]]\n\n temp = list(map(\"\".join, product(*kindof)))\n for i in temp:\n qd[i] = [-1]\n for i in range(len(info)):\n parc = info[i].split(\" \")\n tmp = list(map(\"\".join, product(*[[parc[0], \"-\"], [parc[1], \"-\"], [parc[2], \"-\"], [parc[3], \"-\"]])))\n for j in tmp:\n qd[j].append(int(parc[4]))\n for i in qd:\n qd[i].sort()\n\n for i in range(len(query)):\n qq.append(query[i].split(\" and \"))\n tmp = qq[i].pop(3).split(\" \")\n qq[i].append(tmp[0])\n 
qq[i].append(tmp[1])\n key = qq[i][0] + qq[i][1] + qq[i][2] + qq[i][3]\n value = qd[key]\n score = int(qq[i][4])\n cnt = (len(value) - 1) - (bisect_left(value, score) - 1)\n answer.append(cnt)\n\n return answer","repo_name":"miche715/Programmers-Algorithm","sub_path":"python/p_72412.py","file_name":"p_72412.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"72479020250","text":"import os\nimport json\nimport yaml\n\nimport SpendedTime\n\n\nlogs_directory = './'\nlog_file_name = 'statistics.json'\nconfig_file_name = 'used_config.yaml'\n\ndef get_config_file(file_name):\n with open(file_name, 'r') as stream:\n return yaml.load(stream)\n\ndef get_log_file(file_name):\n with open(file_name) as content:\n return json.load(content)\n\ndef is_result_directory(path):\n return (os.path.isdir(path) and log_file_name in os.listdir(path) and config_file_name in os.listdir(path))\n\ndef main():\n for file_or_foldername in os.listdir(logs_directory):\n file_or_folderpath = os.path.join(logs_directory, file_or_foldername)\n if is_result_directory(file_or_folderpath):\n log_file = get_log_file(os.path.join(file_or_folderpath, log_file_name))\n config_file = get_config_file(os.path.join(file_or_folderpath, config_file_name))\n\n for product_id in [i['id'] for i in config_file['productionLine']['products']]:\n SpendedTime.generate_chart(file_or_folderpath, config_file, log_file, product_id)\n\nif __name__ == '__main__':\n main()","repo_name":"Claypuppet/OSM-factorysim","sub_path":"scripts/python/Graphs.py","file_name":"Graphs.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"16869708816","text":"# O(n) solution. Worst case, we have to parse through a list only to find out we need to reverse it. Reversing a list in python is an O(n) operation, \n# and so is parsing through a list.\n\nclass Solution:\n def nextPermutation(self, nums: List[int]) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n \n # find i, the first non-sequential element from the back\n i = len(nums) - 2\n while i != -1 and nums[i] >= nums[i + 1]:\n i -= 1\n\n # nums is in reverse order, and thus is the final permutation\n if i == -1:\n return nums.reverse()\n\n # find rightmost successor to i\n successorIndex = i + 1\n current = i + 1\n while (current < len(nums)):\n if nums[current] > nums[i]:\n successorIndex = current\n current += 1 \n\n # swap rightmost successor with i\n nums[i], nums[successorIndex] = nums[successorIndex], nums[i]\n\n # reverse everything after i\n nums[i + 1:len(nums)] = reversed(nums[i + 1:len(nums)])\n \n\n\n\n\n","repo_name":"Shayan-Bathaee/LeetCode-Problems","sub_path":"Medium/31. 
Next Permutation/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"34527154039","text":"import socket\nfrom send_recv_protocol import send, recv\nimport threading\nimport os\nimport sys\n\n\nif __name__ == \"__main__\":\n os.system('cls')\n print(\"\\n\\n\\n\")\n try:\n if len(sys.argv) != 3:\n print(\"Usage: server_chat.py ip port\")\n sys.exit(0)\n else:\n ip = sys.argv[1]\n port = int(sys.argv[2])\n server = socket.socket()\n server.bind((ip, port))\n server.listen()\n print(f\"Server is Waiting for clients at {ip}:{port}\")\n except ValueError as e:\n print(\"Port numbers must be integers\")\n sys.exit(0)\n except OSError as e:\n print(\"!!Error in Creating Socket!!\", e)\n sys.exit(0)\n \n client, (cip, cport) = server.accept()\n print(f\"\\nConnected to a client at {cip}:{cport}\")\n \n send_th = threading.Thread(target=send, args=(client, ))\n recv_th = threading.Thread(target=recv, args=(client, ))\n \n send_th.start()\n recv_th.start()\n \n send_th.join()\n recv_th.join()\n \n print(\"Connection Closed\")\n client.close()\n server.close()\n sys.exit(0)\n","repo_name":"sachinyadav3496/InternshipBatch2020","sub_path":"Techno_Intern_Batch/mytkapps/server_chat.py","file_name":"server_chat.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"}
{"seq_id":"10949957799","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Created by wind on 2021/4/21\n\nimport requests\nimport json\nimport time\n\n\nclass Utils:\n '''\n @staticmethod\n def fetchJsonToList(url, root, subitem):\n\n response = requests.get(url)\n\n try:\n json_items = json.loads(response.text)\n return json_items[root][subitem]\n\n except Exception,ex:\n items = []\n\n return items\n '''\n\n @staticmethod\n def fetchJsonToListItems(url, root, subitem):\n response = requests.get(url)\n try:\n json_items = json.loads(response.text)\n return json_items[root][subitem]\n except Exception as ex:\n items = []\n return items\n\n @staticmethod\n def fetchJsonItems(url, root, subitem):\n response = requests.get(url)\n try:\n json_items = json.loads(response.text)\n items = json_items[root][subitem]\n except Exception as ex:\n items = []\n return items\n\n @staticmethod\n def fetchJsonKvItems(url, root):\n response = requests.get(url)\n try:\n json_items = json.loads(response.text)\n items = json_items[root]\n except Exception as ex:\n items = {}\n return items\n\n @staticmethod\n def fetchJsonToList(url):\n response = requests.get(url)\n try:\n json_items = json.loads(response.text)\n items = json_items\n except Exception as ex:\n items = []\n return items\n\n @staticmethod\n def fetchRMJsonKvItems(url, root, key, value):\n result = {}\n response = requests.get(url)\n text = json.loads(response.text)\n items = text[root]\n for item in items:\n if \"modelerType\" in item:\n if (item[\"modelerType\"].find(\"user\") != -1): continue\n if key in item and value in item:\n result[item[key]] = item[value]\n return result\n\n @staticmethod\n def fetchJsonKvsItems(url, root, key, value):\n result = {}\n response = requests.get(url)\n text = json.loads(response.text)\n items = text[root]\n for item in items:\n if \"modelerType\" in item:\n if (item[\"modelerType\"].find(\"user\") != -1): continue\n if key in item:\n result[item[key]] = 
[item[i] for i in value.split(\",\")]\n return result\n\n @staticmethod\n def timeToDatetime(timestamp):\n t = time.localtime(timestamp)\n return time.strftime('%Y-%m-%d %H:%M:%S', t)\n\n @staticmethod\n def currentTimeToDatetime():\n t = time.localtime(time.time())\n return time.strftime('%Y-%m-%d %H:%M:%S', t)\n\n @staticmethod\n def getOrDefault(map, key, default):\n if map.has_key(key): return map[key]\n return default\n\n @staticmethod\n def getOrDefaultByList(list, key, value, default):\n for item in list:\n if item[key] == value: return item\n return default\n","repo_name":"windgeek/bigdata_cus","sub_path":"yarn_list_utils/Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4452254642","text":"import tensorflow as tf\nfrom tensorflow.contrib import rnn\nimport input_data\nimport time\n\n# 開始時刻\nstart_time = time.time() # unixタイム\nprint(\"開始\")\n\n# MNISTのデータをダウンロードしてローカルへ\nprint(\"--- MNISTデータの読み込み開始 ---\")\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\nprint(\"--- MNISTデータの読み込み完了 ---\")\n\nn_input = 28\nn_output = 10\nn_steps = 28\nn_hidden = 128\n\nbatch_size = 128\n\n# [入力データ数、シーケンス数、特徴数]\nx = tf.placeholder(tf.float32, [None, n_steps, n_input])\ny = tf.placeholder(tf.float32, [None, n_output])\n\ndef RNN(x, model_name):\n\n ## 時系列データをTensorFlowのRNNで利用できる形式に変換\n # 1. [シーケンス数、入力データ数、特徴数]に転置\n # x = tf.transpose(self.x, [1, 0, 2])\n\n # 2. [入力データ数 x シーケンス数、特徴数]にreshape。シーケンスを縦につなげたイメージ\n # x = tf.reshape(x, [-1, n_input])\n\n # 3. [入力データ、特徴数]のtensorをシーケンス個に分割する。\n # x = tf.split(x, n_steps, 0)\n\n # 1, 2, 3をすべてやってくれるAPI\n x = tf.unstack(x, n_steps, 1)\n\n def get_cell(model_name):\n if model_name == \"lstm\":\n return rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)\n elif model_name == \"gru\":\n return rnn.GRUCell(n_hidden)\n else:\n return rnn.BasicRNNCell(n_hidden)\n\n cell = get_cell(model_name)\n initial_state = cell.zero_state(batch_size, dtype=tf.float32)\n outputs, states = rnn.static_rnn(cell, x, dtype=tf.float32, initial_state=initial_state)\n\n w = tf.Variable(tf.random_normal([n_hidden, n_output]))\n b = tf.Variable(tf.random_normal([n_output]))\n\n return tf.matmul(outputs[-1], w) + b\n\nprediction = RNN(x, \"gru\")\n\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))\nstep = tf.train.AdagradOptimizer(0.01).minimize(cost)\n\ncorrect_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n# セッションを作成する。\nsession = tf.Session()\n\ninit_op = tf.global_variables_initializer()\nsession.run(init_op)\n\nn_epoch = 10000\n\nvalid_len = 128\nvalid_data = mnist.test.images[:valid_len].reshape((-1, 28, 28))\nvalid_label = mnist.test.labels[:valid_len]\n\nfor epoch in range(n_epoch):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n\n # next_batchで返されるbatch_xは[batch_size, 784]のテンソルなので、batch_size×28×28に変換する。\n batch_xs = batch_xs.reshape((batch_size, 28, 28))\n session.run(step, feed_dict={x: batch_xs, y: batch_ys})\n\n if epoch % 100 == 0:\n acc = session.run(accuracy, feed_dict={x: batch_xs, y:batch_ys})\n loss = session.run(cost, feed_dict={x: batch_xs, y:batch_ys})\n print('TRAIN: epoch: {} / loss: {:.6f} / acc: {:.5f}'.format(epoch, loss, acc))\n\n valid_acc = session.run(accuracy, feed_dict={x: valid_data, y: valid_label})\n valid_loss = session.run(cost, feed_dict={x: valid_data, y: valid_label})\n 
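# note: accuracy and cost are fetched with separate session.run calls; they could be combined into one call to save a forward pass\n 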
print('VALID: epoch: {} / loss: {:.6f} / acc: {:.5f}'.format(epoch, valid_loss, valid_acc))\n\ntest_len = 128\ntest_data = mnist.test.images[:test_len].reshape((-1, 28, 28))\ntest_label = mnist.test.labels[:test_len]\ntest_acc = session.run(accuracy, feed_dict={x: test_data, y: test_label})\nprint(\"Test Accuracy: {}\".format(test_acc))\n\n# End time\nend_time = time.time()\nprint(\"End\")\nprint(\"Elapsed time: \" + str(end_time - start_time))","repo_name":"HayatoIshimura/tensorflow-practice","sub_path":"rnn.py","file_name":"rnn.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"36555231041","text":"import pandas\nimport matplotlib.pyplot as plt\nfrom pyquaternion import Quaternion\n\nfilename = \"/home/joshua/programming/whycode_flight_data/data_135432000000.csv\"\ndata = pandas.read_csv(filename)\n\ndata[\"inverse_orientation_w\"] = \"\"\ndata[\"inverse_orientation_x\"] = \"\"\ndata[\"inverse_orientation_y\"] = \"\"\ndata[\"inverse_orientation_z\"] = \"\"\ndata[\"is_flipped\"] = \"\"\n\n# set default orientation to be normal\nprevious_orientation = Quaternion(1, 0, 0, 0)\nfor index, row in data.iterrows():\n\n    # create objects for orientation and inverse orientation\n    orientation = Quaternion(row[\"orientation_w\"], row[\"orientation_x\"], row[\"orientation_y\"], row[\"orientation_z\"])\n    inverse_orientation = orientation.inverse\n\n    data.at[index, \"inverse_orientation_w\"] = inverse_orientation.w\n    data.at[index, \"inverse_orientation_x\"] = inverse_orientation.x\n    data.at[index, \"inverse_orientation_y\"] = inverse_orientation.y\n    data.at[index, \"inverse_orientation_z\"] = inverse_orientation.z\n\n    # determine if the orientation is assumed to be flipped\n    data.at[index, \"is_flipped\"] = Quaternion.distance(inverse_orientation, previous_orientation) < 0.05\n\n    # set iteration values\n    if(data.at[index, \"is_flipped\"]):\n        orientation = inverse_orientation\n    previous_orientation = orientation\n\n    data.at[index, \"corrected_orientation_w\"] = orientation.w\n    data.at[index, \"corrected_orientation_x\"] = orientation.x\n    data.at[index, \"corrected_orientation_y\"] = orientation.y\n    data.at[index, \"corrected_orientation_z\"] = orientation.z\n\nfilename_elements = filename.split(\".\")\nnew_filename = filename_elements[0] + \"_fixed.csv\"\ndata.to_csv(new_filename)\n\nprint(\"Saved to: %s\" % new_filename)\n\n# orientation constrained\nfigure = plt.figure()\naxes = figure.gca()\nplt.title(\"Pose estimate sequence number vs. WhyCode orientation.\")\nplt.xlabel(\"Pose estimate number\")\nplt.ylabel(\"Orientation quaternion in camera frame\")\naxes.plot(data[\"orientation_x\"].iloc[500:600], label=\"X\")\naxes.plot(data[\"orientation_y\"].iloc[500:600], label=\"Y\")\naxes.plot(data[\"orientation_z\"].iloc[500:600], label=\"Z\")\naxes.plot(data[\"orientation_w\"].iloc[500:600], label=\"W\")\naxes.legend()\n\n# orientation constrained\nfigure = plt.figure()\naxes = figure.gca()\nplt.title(\"Pose estimate sequence number vs. 
corrected WhyCode orientation.\")\nplt.xlabel(\"Pose estimate number\")\nplt.ylabel(\"Orientation quaternion in camera frame\")\naxes.plot(data[\"corrected_orientation_x\"].iloc[500:600], label=\"X\")\naxes.plot(data[\"corrected_orientation_y\"].iloc[500:600], label=\"Y\")\naxes.plot(data[\"corrected_orientation_z\"].iloc[500:600], label=\"Z\")\naxes.plot(data[\"corrected_orientation_w\"].iloc[500:600], label=\"W\")\naxes.legend()\n\nplt.show()","repo_name":"uzgit/flight_analysis","sub_path":"whycode_orientation_fix.py","file_name":"whycode_orientation_fix.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12835284651","text":"import logging\nimport pysam\nfrom Bio import SeqIO\n\nfrom src.long_read_assigner import *\nfrom src.long_read_simple_assigner import *\n\nfrom src.long_read_profiles import *\nfrom src.polya_finder import *\nfrom src.polya_verification import *\n\nlogger = logging.getLogger('CSA')\n\n\nclass LongReadSimpleAlignmentProcessor:\n \"\"\" class for aggregating all assignment information\n\n Parameters\n ----------\n gene_info\n bams\n params\n printer\n counter\n \"\"\"\n\n def __init__(self, gene_info, bams, params, chr_record=None):\n self.gene_info = gene_info\n self.bams = bams\n self.params = params\n self.chr_record = chr_record\n\n self.assigner = LongReadSimpleAssigner(self.gene_info, self.params)\n self.profile_constructor = CombinedProfileConstructor(gene_info, params)\n self.polya_finder = PolyAFinder(self.params.polya_window, self.params.polya_fraction)\n self.polya_fixer = PolyAFixer(self.params)\n self.assignment_storage = []\n self.gene_region = (gene_info.start, gene_info.end)\n\n def process(self, intron_printer):\n self.assignment_storage = []\n self.gene_info.all_read_region_start = self.gene_info.start\n self.gene_info.all_read_region_end = self.gene_info.end\n\n for b in self.bams:\n self.process_single_file(b, intron_printer)\n\n if self.params.needs_reference:\n self.gene_info.all_read_region_start -= self.params.upstream_region_len\n self.gene_info.all_read_region_end += self.params.upstream_region_len\n self.gene_info.reference_region = \\\n str(self.chr_record[self.gene_info.all_read_region_start - 1:self.gene_info.all_read_region_end + 1].seq)\n self.gene_info.canonical_sites = {}\n return self.assignment_storage\n\n def process_single_file(self, bamfile_in, intron_printer):\n processed_reads = set()\n for genic_region in self.gene_info.genic_regions:\n for alignment in bamfile_in.fetch(self.gene_info.chr_id, genic_region[0], genic_region[1]):\n read_id = alignment.query_name\n if alignment.reference_id == -1:\n self.assignment_storage.append(ReadAssignment(read_id, None))\n continue\n if alignment.is_supplementary:\n continue\n if self.params.no_secondary and alignment.is_secondary:\n continue\n\n logger.debug(\"=== Processing read \" + read_id + \" ===\")\n\n # concat indels\n concat_blocks = concat_gapless_blocks(sorted(alignment.get_blocks()), alignment.cigartuples)\n if not concat_blocks:\n logger.warning(\"Read %s has no aligned exons\" % read_id)\n continue\n # correct coordinates to GTF style (inclusive intervals)\n sorted_blocks = correct_bam_coords(concat_blocks)\n read_start = sorted_blocks[0][0]\n read_end = sorted_blocks[-1][1]\n read_tuple = (read_id, read_start, read_end)\n if read_tuple in processed_reads:\n continue\n processed_reads.add(read_tuple)\n logger.debug(\"Read exons: \" + str(sorted_blocks))\n if 
self.params.needs_reference:\n if read_start < self.gene_info.all_read_region_start:\n self.gene_info.all_read_region_start = read_start\n if read_end > self.gene_info.all_read_region_end:\n self.gene_info.all_read_region_end = read_end\n\n #polya_info = self.polya_finder.detect_polya(alignment)\n #sorted_blocks, polya_info, exon_changed = self.polya_fixer.correct_read_info(sorted_blocks, polya_info)\n polya_info = PolyAInfo(-1, -1, -1, -1)\n\n combined_profile = self.profile_constructor.construct_profiles(sorted_blocks, polya_info)\n read_assignment = self.assigner.assign_to_isoform(read_id, combined_profile)\n\n #if exon_changed:\n # read_assignment.add_match_attribute(MatchEvent(MatchEventSubtype.aligned_polya_tail))\n read_assignment.polyA_found = (polya_info.external_polya_pos != -1 or\n polya_info.external_polyt_pos != -1 or\n polya_info.internal_polya_pos != -1 or\n polya_info.internal_polyt_pos != -1)\n read_assignment.polya_info = polya_info\n read_assignment.exons = sorted_blocks\n read_assignment.mapped_strand = \"-\" if alignment.is_reverse else \"+\"\n read_assignment.chr_id = self.gene_info.chr_id\n read_assignment.multimapper = alignment.is_secondary\n\n if intron_printer and self.chr_record and not alignment.is_secondary:\n chr_id = self.gene_info.chr_id\n read_start = sorted_blocks[0][0] - 10\n read_end = sorted_blocks[-1][1] + 10\n ref_region = str(self.chr_record[read_start - 1:read_end + 1].seq)\n gene_profile_index = 0\n gene_profile = combined_profile.read_intron_profile.gene_profile\n\n for i, intron in enumerate(combined_profile.read_intron_profile.read_features):\n if combined_profile.read_intron_profile.read_profile[i] != 1:\n intron_printer.add_intron_info(read_id, chr_id, \".\", intron, (0, 0), \"novel\", 0, 0, 0, 0, 0,\n 0)\n continue\n\n while gene_profile_index < len(gene_profile) and (\n gene_profile[gene_profile_index] != 1 or not overlaps(intron,\n self.gene_info.intron_profiles.features[\n gene_profile_index])):\n gene_profile_index += 1\n if gene_profile_index >= len(gene_profile) or gene_profile[gene_profile_index] != 1:\n logger.info(\"profile %s, index %d\" % (str(gene_profile), gene_profile_index))\n logger.info(\"read profile %s, index %d\" % (\n str(combined_profile.read_intron_profile.read_profile), i))\n\n reference_intron = self.gene_info.intron_profiles.features[gene_profile_index]\n gene_profile_index += 1\n is_consistent = reference_intron == intron\n left_diff = intron[0] - reference_intron[0]\n right_diff = intron[1] - reference_intron[1]\n\n ref_strand = self.check_canonical(reference_intron, ref_region, read_start)\n read_strand = self.check_canonical(intron, ref_region, read_start)\n if ref_strand is None:\n # reference intron is non-canonical\n if read_strand is None:\n # read intron is non-canonical as well\n intron_printer.add_intron_info(read_id, chr_id, \".\", intron, reference_intron,\n \"both_noncanonical\", 0, 0, 0, 0, left_diff, right_diff)\n else:\n # read is canonical\n strand, donor_up, donor_down, acceptor_up, acceptor_down = \\\n self.analyse_intron_sites(intron, ref_region, read_start, read_strand)\n if read_strand == '+':\n donor_diff = left_diff\n acc_diff = right_diff\n else:\n donor_diff = - right_diff\n acc_diff = - left_diff\n intron_printer.add_intron_info(read_id, chr_id, read_strand, intron, reference_intron,\n \"reference_noncanonical\",\n donor_up, donor_down, acceptor_up,\n acceptor_down, donor_diff, acc_diff)\n else:\n strand, donor_up, donor_down, acceptor_up, acceptor_down = \\\n 
self.analyse_intron_sites(intron, ref_region, read_start, ref_strand)\n if ref_strand == '+':\n donor_diff = left_diff\n acc_diff = right_diff\n else:\n donor_diff = - right_diff\n acc_diff = - left_diff\n if ref_strand != read_strand:\n intron_printer.add_intron_info(read_id, chr_id, ref_strand, intron, reference_intron,\n \"read_noncanonical\",\n donor_up, donor_down, acceptor_up,\n acceptor_down, donor_diff, acc_diff)\n else:\n intron_type = \"consistent\" if is_consistent else \"inconsistent\"\n intron_printer.add_intron_info(read_id, chr_id, strand, intron, reference_intron,\n intron_type,\n donor_up, donor_down, acceptor_up, acceptor_down,\n donor_diff, acc_diff)\n\n if self.params.sqanti_output:\n indel_count, junctions_with_indels = self.count_indel_stats(alignment)\n read_assignment.set_additional_info(\"indel_count\", indel_count)\n read_assignment.set_additional_info(\"junctions_with_indels\", junctions_with_indels)\n read_assignment.introns_match = \\\n all(e == 1 for e in combined_profile.read_intron_profile.read_profile)\n\n self.assignment_storage.append(read_assignment)\n logger.debug(\"=== Finished read \" + read_id + \" ===\")\n\n def check_canonical(self, intron, ref_region, read_start, strand=None):\n intron_left_pos = intron[0] - read_start\n intron_right_pos = intron[1] - read_start\n left_site = ref_region[intron_left_pos:intron_left_pos + 2]\n right_site = ref_region[intron_right_pos - 1:intron_right_pos + 1]\n if (left_site == \"GT\" and right_site == \"AG\") and strand != '-':\n return '+'\n elif (left_site == \"CT\" and right_site == \"AC\") and strand != '+':\n return '-'\n else:\n return None\n\n def analyse_intron_sites(self, intron, ref_region, read_start, strand=None):\n seq_size = 10\n intron_left_pos = intron[0] - read_start\n intron_right_pos = intron[1] - read_start\n\n if strand is None:\n left_site = ref_region[intron_left_pos:intron_left_pos + 2]\n right_site = ref_region[intron_right_pos - 1:intron_right_pos + 1]\n if left_site == \"GT\" and right_site == \"AG\":\n strand = '+'\n elif left_site == \"CT\" and right_site == \"AC\":\n strand = '-'\n else:\n return None, None, None, None, None\n\n if strand not in ['+', '-']:\n return None, None, None, None, None\n\n left_upper = ref_region[intron_left_pos - seq_size:intron_left_pos]\n left_lower = ref_region[intron_left_pos + 2:intron_left_pos + seq_size + 2]\n right_upper = ref_region[intron_right_pos - seq_size - 1:intron_right_pos - 1]\n right_lower = ref_region[intron_right_pos + 1:intron_right_pos + seq_size + 1]\n\n # upstream and downstream here are relative to the genome\n if strand == \"+\":\n donor_upstream = left_upper.rfind(\"GT\")\n donor_downstream = left_lower.find(\"GT\")\n acc_upstream = right_upper.rfind(\"AG\")\n acc_downstream = right_lower.find(\"AG\")\n else:\n acc_upstream = left_upper.rfind(\"CT\")\n acc_downstream = left_lower.find(\"CT\")\n donor_upstream = right_upper.rfind(\"AC\")\n donor_downstream = right_lower.find(\"AC\")\n\n donor_upstream = seq_size - donor_upstream if donor_upstream != -1 else 0\n donor_downstream = 2 + donor_downstream if donor_downstream != -1 else 0\n acc_upstream = seq_size - acc_upstream if acc_upstream != -1 else 0\n acc_downstream = 2 + acc_downstream if acc_downstream != -1 else 0\n\n if strand == '+':\n return strand, donor_upstream, donor_downstream, acc_upstream, acc_downstream\n else:\n return strand, donor_downstream, donor_upstream, acc_downstream, acc_upstream\n\n def count_indel_stats(self, alignment):\n cigar_event_count = 
len(alignment.cigartuples)\n indel_events = [1, 2]\n indel_count = 0\n intron_cigar_positions = []\n for i in range(cigar_event_count):\n cigar = alignment.cigartuples[i]\n if cigar[0] in indel_events:\n indel_count += 1\n elif cigar[0] == 3:\n intron_cigar_positions.append(i)\n\n junctions_with_indels = 0\n for i in intron_cigar_positions:\n # indel right near intron\n if (i > 0 and alignment.cigartuples[i - 1][0] in indel_events) or \\\n (i < cigar_event_count - 1 and alignment.cigartuples[i + 1][0] in indel_events):\n junctions_with_indels += 1\n\n # indel separated by at most 'indel_near_splice_site_dist' matches from intron\n if (i > 1 and alignment.cigartuples[i - 2][0] in indel_events and\n alignment.cigartuples[i - 1][0] in [0, 7, 8] and\n alignment.cigartuples[i - 1][1] <= self.params.indel_near_splice_site_dist) or \\\n (i < cigar_event_count - 2 and alignment.cigartuples[i + 2][0] in indel_events and\n alignment.cigartuples[i + 1][0] in [0, 7, 8] and\n alignment.cigartuples[i + 1][1] <= self.params.indel_near_splice_site_dist):\n junctions_with_indels += 1\n\n return indel_count, junctions_with_indels\n","repo_name":"ablab/platform_comparison","sub_path":"src/alignment_processor_simple.py","file_name":"alignment_processor_simple.py","file_ext":"py","file_size_in_byte":14892,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"}
{"seq_id":"43590283131","text":"# -*- coding: utf-8 -*-\n\nimport time\nimport logging\nimport logging.config\nfrom concurrent_log_handler import ConcurrentRotatingFileHandler\n\nlogging.config.fileConfig('logging.conf')\n\nlogger = logging.getLogger(__file__)\n\ndef test():\n i = 1\n while True:\n print(f'{i}')\n logger.error(i)\n i += 1\n time.sleep(5)\n\nif __name__ == '__main__':\n test()","repo_name":"Sunny-wong/python-supervisor-win","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"26619492853","text":"\"\"\" Plots the workflow's graph\n\"\"\"\nimport os\nfrom graphviz import Digraph\n\nEXPLORE_URL = os.getenv('EXPLORE_URL', \"https://dev-www.materialscloud.org/explore/curated-cofs\")\n\n\ndef get_aiida_link(mat_dict, extra_tag):\n return \"{}/details/{}\".format(EXPLORE_URL, mat_dict[extra_tag].uuid)\n\n\ndef get_graph(mat_dict):\n \"\"\"Sketch a graph of the CO2 capture workflow (appl_pecoal).\n NOTE: dropped link to ProcessNodes because they are different depending on the workflow version, are not included \n in groups, and would be difficult to maintain.\n \"\"\"\n\n link_paper = \"https://doi.org/\" + mat_dict['doi_ref']\n link_github = \"https://github.com/danieleongari/CURATED-COFs/blob/master/cifs/{}.cif\".format(mat_dict['mat_id'])\n\n g = Digraph(\"Workflow's graph\")\n\n g.attr(rankdir='TB')\n g.attr(\"node\", style='filled', fillcolor='white:gray', gradientangle='45')\n\n g.node(\"Reference\\npublication\", shape=\"oval\", href=link_paper)\n g.node(\"GitHub\", shape=\"oval\", href=link_github)\n g.node(\"Original\\nstructure\", shape=\"oval\", href=get_aiida_link(mat_dict, \"orig_cif\"))\n g.node(\"geo1\", label=\"Geometric\\nproperties\", shape=\"oval\", href=get_aiida_link(mat_dict, \"orig_zeopp\"))\n g.node(\"DFT optimization\", shape=\"box\")\n g.node(\"DFT output details\", shape=\"oval\", href=get_aiida_link(mat_dict, \"dftopt\"))\n g.node(\"DDEC charges evaluation\", shape=\"box\")\n g.node(\"Optimized structure\\n W/DDEC charges\", 
shape=\"oval\", href=get_aiida_link(mat_dict, \"opt_cif_ddec\"))\n g.node(\"geo2\", label=\"Geometric\\nproperties\", shape=\"oval\", href=get_aiida_link(mat_dict, \"opt_zeopp\"))\n g.node(\"Adsorption calculation\\nCO2\", shape=\"box\")\n g.node(\"Adsorption calculation\\nN2\", shape=\"box\")\n g.node(\"Results CO2\", shape=\"oval\", href=get_aiida_link(mat_dict, \"isot_co2\"))\n g.node(\"Results N2\", shape=\"oval\", href=get_aiida_link(mat_dict, \"isot_n2\"))\n g.node(\"CCS process\\nperformances\", shape=\"oval\", href=get_aiida_link(mat_dict, \"appl_pecoal\"))\n\n g.edge(\"Reference\\npublication\", 'GitHub')\n g.edge('GitHub', 'Original\\nstructure')\n g.edge('Original\\nstructure', \"geo1\")\n g.edge('Original\\nstructure', \"DFT optimization\")\n g.edge(\"DFT optimization\", \"DDEC charges evaluation\")\n g.edge(\"DFT optimization\", \"DFT output details\")\n g.edge(\"DDEC charges evaluation\", \"Optimized structure\\n W/DDEC charges\")\n g.edge(\"Optimized structure\\n W/DDEC charges\", \"geo2\")\n g.edge(\"Optimized structure\\n W/DDEC charges\", \"Adsorption calculation\\nCO2\")\n g.edge(\"Optimized structure\\n W/DDEC charges\", \"Adsorption calculation\\nN2\")\n g.edge(\"Adsorption calculation\\nCO2\", \"Results CO2\")\n g.edge(\"Adsorption calculation\\nN2\", \"Results N2\")\n g.edge(\"Results CO2\", \"CCS process\\nperformances\")\n g.edge(\"Results N2\", \"CCS process\\nperformances\")\n return g\n","repo_name":"materialscloud-org/discover-curated-cofs","sub_path":"detail/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"30851048547","text":"from __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport time\n\nimport numpy as np\nimport scipy.sparse as sp\nimport torch\nfrom torch import optim\nimport warnings\nimport os\nimport json\nfrom model import GCNModelVAE\nfrom optimizer import loss_function\nfrom utils import load_data, mask_test_edges, preprocess_graph, get_roc_score, show_graph_with_labels\nfrom torch.utils.tensorboard import SummaryWriter\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport networkx as nx\nfrom torch import nn\nimport manifolds\nimport json\nfrom synthetic import SyntheticDataset\n\nfrom geoopt.manifolds.poincare.math import dist\n\ndef get_freer_gpu():\n os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')\n memory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]\n return 'cuda:'+str(np.argmax(memory_available))\n\n\ndevice = torch.cuda.is_available()\nparser = argparse.ArgumentParser()\nparser.add_argument('--model', type=str, default='gcn_vae', help=\"models used\")\nparser.add_argument('--seed', type=int, default=123456789, help='Random seed.')\nparser.add_argument('--epochs', type=int, default=400, help='Number of epochs to train.')\nparser.add_argument('--hidden1', type=int, default=32, help='Number of units in hidden layer 1.')\nparser.add_argument('--hidden2', type=int, default=2, help='Number of units in hidden layer 2.')\nparser.add_argument('--gamma', type=float, default=1, help='coefficient for the information term')\nparser.add_argument('--lr', type=float, default=0.0005, help='Initial learning rate.')\nparser.add_argument('--dropout', type=float, default=0., help='Dropout rate (1 - keep probability).')\nparser.add_argument('--dataset-str', type=str, default='synthetic', help='type of 
dataset.')\nparser.add_argument('--device', type=str, default=get_freer_gpu() if device else 'cpu')\nparser.add_argument('--noise_dim', type=int, default=1)\nparser.add_argument('--K', type=int, default=18)\nparser.add_argument('--J', type=int, default=3)\nparser.add_argument('--c', type=float, default=1., help='constant of curvature')\nparser.add_argument('--warmup_de', type=float, default=30.)\nparser.add_argument('--final_latent', type=str, default=True)\nparser.add_argument('--start_latent_display', type=int, default=0)\nparser.add_argument('--reduced_latent_size', type=int, default=1000)\nparser.add_argument('--latent_display_show', type=int, default=50)\nparser.add_argument('--latent_animation', type=str, default=False)\nparser.add_argument('--syn_dim', type=list, default=[64, 64])\nparser.add_argument('--syn_depth', type=int, default=6)\nparser.add_argument('--new_generation', type=bool, default=True)\nargs = parser.parse_args()\n\nwarnings.filterwarnings('ignore')\n\n\nclass ExpZero(nn.Module):\n def __init__(self, manifold):\n super(ExpZero, self).__init__()\n self.manifold = manifold\n\n def forward(self, input):\n return self.manifold.expmap0(input)\n\n\nclass LogZero(nn.Module):\n def __init__(self, manifold):\n super(LogZero, self).__init__()\n self.manifold = manifold\n\n def forward(self, input):\n return self.manifold.logmap0(input)\n\nclass Discriminator(nn.Module):\n def __init__(self, feature_dim=2, z_dim=2):\n super(Discriminator, self).__init__()\n self.z_dim = z_dim\n self.feature_dim = feature_dim\n self.net = nn.Sequential(\n nn.Linear(self.z_dim + self.feature_dim, 1000),\n nn.ReLU(False),\n nn.Linear(1000, 400),\n nn.ReLU(False),\n nn.Linear(400, 100),\n nn.ReLU(False),\n nn.Linear(100, 1),\n\n )\n\n def forward(self, x, z):\n x = x.view(-1, 64*64)\n x = torch.cat((x, z), 1)\n return self.net(x).squeeze()\n\n\ndef permute_dims(z):\n assert z.dim() == 2\n B, _ = z.size()\n perm = torch.randperm(B).to(args.device)\n perm_z = z[perm]\n return perm_z\n\ndef gae_for(args):\n torch.manual_seed(args.seed + 1)\n print(\"Using {} dataset\".format(args.dataset_str))\n if args.dataset_str in ['cora', 'citeseer', 'pubmed']:\n adj, features, labels = load_data(args.dataset_str)\n print(adj.shape, features.shape)\n exit()\n\n elif args.dataset_str == 'synthetic':\n if args.new_generation:\n dict_adj, adj_array, features = SyntheticDataset(args.syn_dim, args.syn_depth).__getitem__()\n adj = nx.adjacency_matrix(nx.from_dict_of_lists(dict_adj))\n features = (255 - features) / 255.\n features = torch.Tensor(features)\n else:\n with open('adj_dict.json', 'r') as fp:\n dict_adj = json.load(fp)\n\n adj_dict = {}\n for a in dict_adj:\n adj_dict[int(a)] = dict_adj[a]\n\n adj_array, features = np.load('adjacancy.npy'), np.load('features.npy')\n adj = nx.adjacency_matrix(nx.from_dict_of_lists(adj_dict))\n features = torch.Tensor(features)\n else:\n raise ValueError('not exist!!!')\n\n features = features.to(args.device).unsqueeze(1)\n n_nodes, _, feat_dim_hight, feat_dim_length = features.shape\n\n # Store original adjacency matrix (without diagonal entries) for later\n adj_orig = adj\n adj_orig = adj_orig - sp.dia_matrix((adj_orig.diagonal()[np.newaxis, :], [0]), shape=adj_orig.shape)\n adj_orig.eliminate_zeros()\n\n adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false = mask_test_edges(adj=adj,\n args=args)\n adj = adj_train\n\n # Some preprocessing\n adj_norm = preprocess_graph(adj)\n adj_norm = adj_norm.to(args.device)\n\n adj_label = adj_train + 
sp.eye(adj_train.shape[0])\n # adj_label = sparse_to_tuple(adj_label)\n adj_label = torch.FloatTensor(adj_label.toarray())\n adj_orig_tile = adj_label.unsqueeze(2).repeat(1, 1, args.K)\n adj_orig_tile = adj_orig_tile.to(args.device)\n\n pos_weight = torch.tensor(float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum()).float().to(\n device=args.device)\n norm = adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0] - adj.sum()) * 2)\n\n psi_input_dim = args.noise_dim + feat_dim_hight + feat_dim_length\n logv_input_dim = feat_dim_hight + feat_dim_length\n\n model = GCNModelVAE(psi_input_dim, logv_input_dim, args.hidden1, args.hidden2, args.dropout, args.K, args.J, args.noise_dim, args.device, args.c).to(args.device)\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n\n D = Discriminator(feature_dim=64*64, z_dim=args.hidden2).to(args.device)\n optimizer_D = optim.Adam(D.parameters(), lr=0.0005)\n manifold = getattr(manifolds, 'PoincareBall')(args.hidden2, args.c)\n\n latent_img = []\n fig = plt.figure()\n ax = fig.add_subplot(111)\n mapper = LogZero(manifold)\n\n for epoch in range(args.epochs):\n warm_up = torch.min(torch.FloatTensor([epoch/args.warmup_de, 1])).to(args.device)\n\n t = time.time()\n model.train()\n\n reconstruct_iw, log_prior_iw, log_H_iw, psi_iw_vec, psi_iw = model(features, adj_norm)\n hidden_emb = psi_iw[:, 1, :].data.contiguous().cpu().numpy()\n z_vec = mapper(psi_iw)\n\n loss1 = loss_function(reconstructed_iw=reconstruct_iw, log_prior_iw=log_prior_iw, log_H_iw=log_H_iw,\n adj_orig_tile=adj_orig_tile, nodes=n_nodes, K=args.K, pos_weight=pos_weight, norm=norm,\n warm_up=warm_up, device=args.device)\n for i in range(int(args.K/2)):\n z = z_vec[:, i]\n D_xz = D(features, z)\n z_perm = permute_dims(z)\n D_x_z = D(features, z_perm)\n output_ = -(D_xz.mean() - (torch.exp(D_x_z - 1).mean()))\n if i == 0:\n output = output_.unsqueeze(0)\n else:\n output = torch.cat((output, output_.unsqueeze(0)), dim=0)\n\n Info_xz = output.mean()\n\n loss = loss1 + args.gamma + Info_xz\n\n optimizer.zero_grad()\n loss.backward(retain_graph=True)\n optimizer_D.zero_grad()\n Info_xz.backward()\n\n optimizer.step()\n optimizer_D.step()\n\n cur_loss = loss.item()\n print('Epoch:', '%04d ---> ' % (epoch + 1), 'training_loss = {:.5f} '.format(cur_loss),\n 'time = {:.5f} '.format(time.time() - t))\n\n writer.add_scalar('Loss/train_loss', cur_loss, epoch)\n\n #print(\"Optimization Finished!\")\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n for i in range(adj_array.shape[0]):\n for j in range(adj_array.shape[0]):\n if adj_array[i, j] == 1:\n x_vals = [hidden_emb[i, 0], hidden_emb[j, 0]]\n y_vals = [hidden_emb[i, 1], hidden_emb[j, 1]]\n ax.plot(x_vals, y_vals, color='blue', linewidth=0.8)\n\n for i in range(adj_array.shape[0]):\n\n ax.scatter(hidden_emb[i, 0],\n hidden_emb[i, 1],\n cmap='jet', c='black', edgecolors=None, s=20)\n\n\n ax.set_xlim(\n [-1 / np.sqrt(args.c) - 0.2 * (1 / np.sqrt(args.c)), 1 / np.sqrt(args.c) + 0.2 * (1 / np.sqrt(args.c))])\n ax.set_ylim(\n [-1 / np.sqrt(args.c) - 0.2 * (1 / np.sqrt(args.c)), 1 / np.sqrt(args.c) + 0.2 * (1 / np.sqrt(args.c))])\n patch = plt.Circle((0, 0), radius=1 / np.sqrt(args.c), color='black', fill=False)\n ax.add_patch(patch)\n if epoch > 60:\n plt.show()\n fig.savefig('moreeps/reduced_latent_more_{}.pdf'.format(epoch), format='pdf', dpi=500)\n #hidden_emb = torch.from_numpy(hidden_emb)\n #A = torch.zeros(hidden_emb.shape[0], hidden_emb.shape[0])\n #for i in range(hidden_emb.shape[0]):\n # for j in range(hidden_emb.shape[0]):\n # 
A[i, j] = dist(hidden_emb[i], hidden_emb[j], c=args.c)\n\n #print(A)\n #exit()\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n for i in range(adj_array.shape[0]):\n for j in range(adj_array.shape[0]):\n if adj_array[i, j] == 1:\n x_vals = [hidden_emb[i, 0], hidden_emb[j, 0]]\n y_vals = [hidden_emb[i, 1], hidden_emb[j, 1]]\n ax.plot(x_vals, y_vals, color='blue', linewidth=0.8)\n\n for i in range(adj_array.shape[0]):\n\n ax.scatter(hidden_emb[i, 0],\n hidden_emb[i, 1],\n cmap='jet', c='black', edgecolors=None, s=20)\n\n\n for i in range(adj_array.shape[0]):\n ax.annotate(str(i), (hidden_emb[i, 0], hidden_emb[i, 1]))\n\n ax.set_xlim(\n [-1 / np.sqrt(args.c) - 0.2 * (1 / np.sqrt(args.c)), 1 / np.sqrt(args.c) + 0.2 * (1 / np.sqrt(args.c))])\n ax.set_ylim(\n [-1 / np.sqrt(args.c) - 0.2 * (1 / np.sqrt(args.c)), 1 / np.sqrt(args.c) + 0.2 * (1 / np.sqrt(args.c))])\n patch = plt.Circle((0, 0), radius=1 / np.sqrt(args.c), color='black', fill=False)\n ax.add_patch(patch)\n fig.savefig('moreeps/reduced_latent_{}.pdf'.format(epoch), format='pdf', dpi=500)\n\n\nif __name__ == '__main__':\n print('New_Experiment', 'c:{}'.format(args.c), 'K:{}'.format(args.K), 'J:{}'.format(args.J),\n 'learning_rate:{}'.format(args.lr),\n 'warm_up:{}'.format(args.warmup_de), 'hidden1:{}'.format(args.hidden1), 'hidden2:{}'.format(args.hidden2),\n 'dropout:{}'.format(args.dropout))\n tensorboard_file_name = '___Run_ID___' + '__c' + str(args.c) + '__K' + str(args.K) + '__J' + str(args.J) + \\\n '__lr' + str(args.lr) + '__warm_up' + str(args.warmup_de) + '__hidden1_' + str(\n args.hidden1) + \\\n '__hidden2_' + str(args.hidden2) + '__dropout' + str(args.dropout)\n writer = SummaryWriter(log_dir='./logs', filename_suffix=tensorboard_file_name)\n gae_for(args)\n","repo_name":"esihge/esihge","sub_path":"main_synthetic.py","file_name":"main_synthetic.py","file_ext":"py","file_size_in_byte":11922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"41346121122","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nfrom PySide import QtCore, QtGui\n\nclass FlatProxyModel(QtGui.QAbstractProxyModel):\n @QtCore.Slot(QtCore.QModelIndex, QtCore.QModelIndex)\n def sourceDataChanged(self, topLeft, bottomRight):\n self.dataChanged.emit(self.mapFromSource(topLeft), \\\n self.mapFromSource(bottomRight))\n def buildMap(self, model, parent = QtCore.QModelIndex(), row = 0):\n if row == 0:\n self.m_rowMap = {}\n self.m_indexMap = {}\n rows = model.rowCount(parent)\n for r in range(rows):\n index = model.index(r, 0, parent)\n print('row', row, 'item', model.data(index))\n self.m_rowMap[index] = row\n self.m_indexMap[row] = index\n row = row + 1\n if model.hasChildren(index):\n row = self.buildMap(model, index, row)\n return row\n def setSourceModel(self, model):\n QtGui.QAbstractProxyModel.setSourceModel(self, model)\n self.buildMap(model)\n print(flush = True)\n model.dataChanged.connect(self.sourceDataChanged)\n def mapFromSource(self, index):\n if index not in self.m_rowMap: return QtCore.QModelIndex()\n #print('mapping to row', self.m_rowMap[index], flush = True)\n return self.createIndex(self.m_rowMap[index], index.column())\n def mapToSource(self, index):\n if not index.isValid() or index.row() not in self.m_indexMap:\n return QtCore.QModelIndex()\n #print('mapping from row', index.row(), flush = True)\n return self.m_indexMap[index.row()]\n def columnCount(self, parent):\n return QtGui.QAbstractProxyModel.sourceModel(self)\\\n 
.columnCount(self.mapToSource(parent))\n def rowCount(self, parent):\n #print('rows:', len(self.m_rowMap), flush=True)\n return len(self.m_rowMap) if not parent.isValid() else 0\n def index(self, row, column, parent):\n #print('index for:', row, column, flush=True)\n if parent.isValid(): return QtCore.QModelIndex()\n return self.createIndex(row, column)\n def parent(self, index):\n return QtCore.QModelIndex()\n def __init__(self, parent = None):\n super(FlatProxyModel, self).__init__(parent)\n\nif __name__ == \"__main__\":\n app = QtGui.QApplication(sys.argv)\n\n model = QtGui.QStandardItemModel()\n names = ['Foo', 'Bar', 'Baz']\n for first in names:\n row = QtGui.QStandardItem(first)\n for second in names:\n row.appendRow(QtGui.QStandardItem(first+second))\n model.appendRow(row)\n\n proxy = FlatProxyModel()\n proxy.setSourceModel(model)\n\n nestedProxy = FlatProxyModel()\n nestedProxy.setSourceModel(proxy)\n\n w = QtGui.QWidget()\n layout = QtGui.QHBoxLayout(w)\n view = QtGui.QTreeView()\n view.setModel(model)\n view.expandAll()\n view.header().hide()\n layout.addWidget(view)\n view = QtGui.QListView()\n view.setModel(proxy)\n layout.addWidget(view)\n view = QtGui.QListView()\n view.setModel(nestedProxy)\n layout.addWidget(view)\n w.show()\n\n sys.exit(app.exec_())\n","repo_name":"KubaO/stackoverflown","sub_path":"questions/pyside-flatmodel-21564976/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"31"} +{"seq_id":"33201543562","text":"from sympy import *\r\nfrom scipy import sparse\r\nfrom numpy import empty\r\nfrom scipy.sparse.linalg import dsolve\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport scipy.integrate as integrate\r\nimport matplotlib.animation as animation\r\nimport time\r\n\r\n\r\ndef phi(xi, yi, di):\r\n\r\n return Matrix([xi**2+yi**2-di**2])\r\n\r\n\r\ndef cm_rhs(ci, bi, xi):\r\n\r\n \"\"\"Coefficient Matrix Right Hand Side: The function takes the coefficient\r\n matrix of the Lagrangian equations of motion of first kind, the second\r\n time derivative of the geometrical constraints, and the vectors of the\r\n unknowns. 
It provides the coefficient matrix of the linear algebraic\r\n equation system, and the right hand side of the equation.\"\"\"\r\n\r\n z = zeros(ci.shape[0], ci.shape[1]) #Makes zeros matrix of the same shape as the coefficient matrix\r\n n_b = zeros(bi.shape[0], bi.shape[1]) # Makes zeros matrix(column vector) as the right hand side\r\n s = 0\r\n for i, bi_val in enumerate(bi):\r\n for j, xi_val in enumerate(xi):\r\n z[-bi.shape[0]+i, j] = bi_val.coeff(xi_val)\r\n s += bi_val.coeff(xi_val)*xi_val\r\n n_b[i] = bi_val - s\r\n s = 0\r\n\r\n return ci - z, n_b\r\n\r\n\r\ndef n_sys_eq(ai, bi, ri, ri_t, r0i, r0i_t):\r\n\r\n \"\"\"The function replaces the symbolic variables with their numerical\r\n values and returns sparse matrices.\"\"\"\r\n\r\n for j in range(len(ri)):\r\n ai = ai.subs(ri_t[j], r0i_t[j])\r\n ai = ai.subs(ri[j], r0i[j])\r\n bi = bi.subs(ri_t[j], r0i_t[j])\r\n bi = bi.subs(ri[j], r0i[j])\r\n return matrix2sparse(N(ai)), matrix2sparse(N(bi))\r\n\r\n\r\ndef st_space(ri, ri_t):\r\n\r\n \"\"\"The function interweaves the position and velocity vectors into a\r\n state space vector\"\"\"\r\n\r\n w = zeros(2*len(ri), 1)\r\n for i in range(len(w)):\r\n if i % 2 == 0:\r\n w[i] = ri[int(i/2)]\r\n else:\r\n w[i] = ri_t[int((i-1)/2)]\r\n return w\r\n\r\n\r\ndef cauchy_form(ai, bi, ri_t):\r\n\r\n \"\"\"The function rewrites the differential equation system into its\r\n Cauchy-form\"\"\"\r\n\r\n eye_m = eye(len(ri_t))\r\n z_m = zeros(ai.shape[0], len(ri_t))\r\n coeff_m = eye_m.row_join(zeros(len(ri_t), ai.shape[1]))\r\n a_c = coeff_m.col_join(z_m.row_join(ai))\r\n b_c = ri_t.col_join(bi)\r\n return a_c, b_c\r\n\r\n\r\ndef sys_rk4(ai, qi, r, r_t, ic, ic_t, h):\r\n\r\n nai, nqi = n_sys_eq(ai, qi, r, r_t, ic, ic_t)\r\n\r\n len_ict = len(ic_t)\r\n len_ic = len(ic)\r\n xi_1 = dsolve.spsolve(nai, nqi, use_umfpack=False)\r\n lbd = xi_1[len_ic+len_ict:]\r\n k_1 = h*xi_1\r\n\r\n ictk_1 = ic_t + 0.5 * k_1[len_ict:len_ic + len_ict]\r\n ick_1 = ic + 0.5*k_1[0:len_ict]\r\n nai_2, nqi_2 = n_sys_eq(ai, qi, r, r_t, ick_1, ictk_1)\r\n xi_2 = dsolve.spsolve(nai_2, nqi_2, use_umfpack=False)\r\n k_2 = h*xi_2\r\n\r\n ictk_2 = ic_t + 0.5 * k_2[len_ict:len_ic + len_ict]\r\n ick_2 = ic + 0.5 * k_2[0:len_ict]\r\n nai_3, nqi_3 = n_sys_eq(ai, qi, r, r_t, ick_2, ictk_2)\r\n xi_3 = dsolve.spsolve(nai_3, nqi_3, use_umfpack=False)\r\n k_3 = h*xi_3\r\n\r\n ictk_3 = ic_t + k_3[len_ict:len_ic + len_ict]\r\n ick_3 = ic + k_3[0:len_ict]\r\n nai_4, nqi_4 = n_sys_eq(ai, qi, r, r_t, ick_3, ictk_3)\r\n xi_4 = dsolve.spsolve(nai_4, nqi_4, use_umfpack=False)\r\n k_4 = h*xi_4\r\n\r\n y_sol = ic + (k_1[0:len_ict] + 2*(k_2[0:len_ict] + k_3[0:len_ict]) +\r\n k_4[0:len_ict])/6\r\n\r\n yt_sol = ic_t + (k_1[len_ict:len_ic + len_ict] +\r\n 2*(k_2[len_ict:len_ic + len_ict] +\r\n k_3[len_ict:len_ic + len_ict]) +\r\n k_4[len_ict:len_ic + len_ict])/6\r\n lbd_sol = lbd + (k_1[len_ic+len_ict:] + 2*(k_2[len_ic+len_ict:] +\r\n k_3[len_ic+len_ict:]) +\r\n k_4[len_ic+len_ict:])/6\r\n return y_sol, yt_sol, lbd_sol\r\n\r\n\r\ndef matrix2sparse(mi):\r\n \"\"\"Converts SymPy's matrix to a SciPy sparse matrix.\"\"\"\r\n a = empty(mi.shape, dtype=float)\r\n for i in range(mi.rows):\r\n for j in range(mi.cols):\r\n a[i, j] = mi[i, j]\r\n return sparse.csr_matrix(a)\r\n\r\n\r\nt = Symbol('t')\r\nlbd = Symbol('lbd')\r\nx = Function('x')(t)\r\ny = Function('y')(t)\r\nm = 1\r\ng = -9.81\r\nd_l = 2\r\nalpha = 10\r\nh = 0.01\r\nR = 0.5\r\nbeta = 10\r\n\r\nphi_r = Matrix([phi(x, y, d_l).diff(x), phi(x, y, d_l).diff(y)])\r\nphi_t = Matrix([phi(x, y, 
d_l).diff(t)])\r\nunknowns = Matrix([x.diff(t, t), y.diff(t, t), lbd])\r\nr = Matrix([x, y])\r\nr_t = r.diff(t)\r\n\r\nM = Matrix([[m, 0], [0, m]])\r\nF = Matrix([0, m*g])\r\nb = Matrix([- (phi_r.diff(t)).T*r.diff(t) - phi_t.diff(t) -\r\n 2*alpha*(phi_r.T*r.diff(t) + phi_t) -\r\n (beta**2)*phi(x, y, d_l)])\r\nZ = zeros(phi_r.shape[1])\r\nA = M.row_join(phi_r).col_join(phi_r.T.row_join(Z))\r\n\r\nC, Nb = cm_rhs(A, b, unknowns)\r\nQ = F.col_join(Nb)\r\n\r\nC_c, Q_c = cauchy_form(C, Q, r_t)\r\n\r\nsimulation_time = 4\r\nsteps = int(simulation_time/h)\r\nic = [N(sqrt(2)/2), -N(sqrt(2)/2)]\r\nic_t = [0, 0]\r\n\r\ny = [None]*int(steps)\r\ny_t = [None]*int(steps)\r\nt = [None]*int(steps)\r\n\r\ny[0] = ic\r\ny_t[0] = ic_t\r\nt[0] = 0\r\nstart = time.clock()\r\ny_test = sys_rk4(C_c, Q_c, r, r_t, y[0], y_t[0], h)[0]\r\nend = time.clock()\r\n#for i in range(steps-1):\r\n# y[i+1] = sys_rk4(C_c, Q_c, r, r_t, y[i], y_t[i], h)[0]\r\n# y_t[i+1] = sys_rk4(C_c, Q_c, r, r_t, y[i], y_t[i], h)[1]\r\n# t[i+1] = i*h\r\n\r\nfig = plt.figure()\r\nax = fig.add_subplot(111, autoscale_on=False, projection='3d')\r\nplt.gca().set_aspect('equal', adjustable='box')\r\nax.grid()\r\nax.set_xlim3d(-1.5, 1.5)\r\nax.set_ylim3d(-1.5, 1.5)\r\nax.set_zlim3d(-1.5, 1.5)\r\nax.view_init(30,60)\r\n\r\nline, = ax.plot([], [], [], 'o-', lw=2)\r\n\r\nx1 = [None]*int(steps)\r\ny1 = [None]*int(steps)\r\nz1 = [0]*int(steps)\r\n\r\nfor i in range(len(y)):\r\n x1[i] = y[i][0]\r\n y1[i] = y[i][1]\r\n\r\n\r\ndef init():\r\n\r\n line.set_data([], [])\r\n line.set_3d_properties([])\r\n return line,\r\n\r\n\r\ndef animate(i):\r\n\r\n thisx = [0, x1[i]]\r\n thisy = [0, y1[i]]\r\n thisz = [0, z1[i]]\r\n\r\n\r\n line.set_data(thisx, thisz)\r\n line.set_3d_properties(thisy)\r\n\r\n return line,\r\n\r\nani = animation.FuncAnimation(fig, animate, np.arange(1, len(y)),\r\n interval=25, blit=True, init_func=init)\r\n\r\nplt.show()\r\n","repo_name":"kungergely92/RBD","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":6173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28071511895","text":"#stair climbing -> https://leetcode.com/problems/climbing-stairs/submissions/\n#Time complexity\n\n#Brute Force:\n#O(2^n) time as you have a branch factor of 2 and a maximal tree depth of n (1s subtracted each time)\n#O(n) as stack depth is at most n \n\n#Memoized Soln:\n# O(n) as we at most compute n nodes each with at most 2 children \n#O(n) as stack depth is at most n and your memo holds at most n key value pairs so O(n) + O(n) = O(n)\nclass Solution:\n def climbStairs(self, n: int, memo = None) -> int:\n if memo is None:\n memo = {}\n if n == 2:\n return 2\n elif n == 1:\n return 1\n elif n in memo:\n return memo[n]\n else:\n memo[n] = self.climbStairs(n-1, memo) + self.climbStairs(n-2, memo)\n return memo[n]\nsoln = Solution()\nprint(soln.climbStairs(3))","repo_name":"LangLazy/codingProblemSolving","sub_path":"climbStairs.py","file_name":"climbStairs.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2104635156","text":"import sys, os\nsys.path.append(os.path.abspath(\"model/\"))\nsys.path.append(os.path.abspath(\"data_loader/\"))\nsys.path.append(os.path.abspath(\"base/\"))\nsys.path.append(os.path.abspath(\".\"))\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom os.path import expanduser\nfrom model import ResNet_AE\nfrom data_loaders import 
AcousticDataset\nimport torch\n\ndevice = torch.device(\"cuda:1\" if torch.cuda.is_available() else \"cpu\")\n\ndef save_sample(data, outfile):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n im = ax.imshow(data)\n ratio = 1.0\n xleft, xright = ax.get_xlim()\n ybottom, ytop = ax.get_ylim()\n \n # the abs method is used to make sure that all numbers are positive\n # because the x and y axes of an axes object may be inverted.\n ax.set_aspect(abs((xright-xleft)/(ybottom-ytop))*ratio)\n fig.colorbar(im)\n \n # or we can utilise the get_data_ratio method which is more concise\n # ax.set_aspect(1.0/ax.get_data_ratio()*ratio)\n plt.savefig(fname=outfile,dpi=300,format='png')\n\n\n\n\nif __name__ == '__main__':\n home = expanduser(\"~\")\n \n save_dir = home + '/acoustic/ae_outputs_batch_16/'\n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n\n model = ResNet_AE().to(device)\n \n state = torch.load('/data/acoustic_tmp/ae_acoustic_results_batch_16/models/acoustic_ae/1016_170641/model_best.pth')\n model.load_state_dict(state['state_dict'])\n model.eval()\n\n #load data\n dataset = AcousticDataset('/gsceph/adapd/acoustic/AA_10/train.pkl')\n \n idxs = np.random.randint(0, len(dataset), 10)\n print(idxs)\n data = []\n for i in idxs:\n img, label = dataset[i]\n name = dataset.data[i][0]\n data.append((name, img, label))\n data = np.array(data)\n print(data[:, 0])\n\n #generate data\n imgs = np.expand_dims(np.vstack(data[:, 1]), axis=1)\n print(imgs.shape)\n outputs = model(torch.from_numpy(imgs).to(device)).cpu().detach().numpy()\n\n #plot inputs and outputs\n for index, (full_name, _, label) in enumerate(data):\n name = full_name.split('/')[-1].replace('.pkl', '')\n #print(name)\n save_sample(np.squeeze(imgs[index], axis=0), save_dir + name + '_' + str(label) + '.png')\n save_sample(np.squeeze(outputs[index], axis=0), save_dir + 'output_' + name + '_' + str(label) + '.png')\n\n","repo_name":"dwidemann/self_supervision_for_transfer_learning","sub_path":"utils/generate_outputs_from_ae_model.py","file_name":"generate_outputs_from_ae_model.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
{"seq_id":"4020028220","text":"import gym, math, random\nimport numpy as np\n#env = gym.make('CartPole-v0')\nenv = gym.make('MountainCar-v0')\n#env = gym.make('Hopper-v1')\n#env = gym.make('MsPacman-v0')\n\n\nA_DIVS=100\nB_DIVS=100\n\n\naRange = ( -2, 2 ) #\nbRange = ( -.1, .1 ) #\n\n\nbrain = np.zeros( (A_DIVS,B_DIVS, 3) )\n#brain += 4.5\n\nobs = env.reset()\n\nLEFT = 0\nNOTHING = 1\nRIGHT = 2\n\ndiscount = .99999\n\nrunning_average_value = 0\n\ndef obs_to_index( _obs ):\n result = [ int((_obs[0]-aRange[0])/(aRange[1]-aRange[0])*A_DIVS + .5 ),\n int((_obs[1]-bRange[0])/(bRange[1]-bRange[0])*B_DIVS + .5 ),]\n\n result[0] = max(0,min(A_DIVS-1,result[0]))\n result[1] = max(0,min(B_DIVS-1,result[1]))\n\n\n return result\n\nsteps_till_crash = 0\nrun_number = 0\n\n\ncount_out = 0\n\nwhile True:\n\n \n epsilon = 1.0/((run_number+1)/1)\n\n indexes = obs_to_index( obs )\n\n left_value = brain[ indexes[0], indexes[1], LEFT ]\n no_value = brain[ indexes[0], indexes[1], NOTHING ]\n right_value = brain[ indexes[0], indexes[1], RIGHT ]\n\n\n if random.random() > epsilon:\n best_value = left_value\n picked_direction = LEFT\n if no_value > best_value:\n picked_direction = NOTHING\n best_value = no_value\n if right_value > best_value:\n picked_direction = RIGHT\n best_value = right_value\n\n else:\n picked_direction = random.choice( 
[LEFT,NOTHING,RIGHT])\n\n #if picked_direction == LEFT:\n # print( \"vl:\" + str(left_value), end=' ' )\n # else:\n # print( \"vr:\" + str(right_value), end=' ' )\n\n results = env.step(picked_direction)\n\n obs = results[0]\n reward = results[1]\n done = results[2]\n\n new_index = obs_to_index( obs )\n\n thing = max(max( brain[ new_index[0], new_index[1], LEFT], brain[new_index[0], new_index[1], RIGHT] ), brain[new_index[0], new_index[1], NOTHING])\n \n if done:\n target_value = reward\n #target_value = -.5 #-5\n \n\n #alpha = .99\n #running_average_value = alpha*running_average_value + (1-alpha)*steps_till_crash\n\n #print( \"epsilon at : \" + str( epsilon ) + \" r\" + str(run_number) + \" stepped \" + str(steps_till_crash) + \" till crash or \" + str( running_average_value ) )\n #steps_till_crash = 0\n else:\n target_value = thing*discount + reward\n #steps_till_crash += 1\n\n\n existing_value = brain[ indexes[0], indexes[1], picked_direction ]\n\n brain[ indexes[0], indexes[1], picked_direction ] = existing_value * .9 + target_value * .1\n #print( \"changing from \" + str( existing_value ) + \" closer to \" + str( target_value ) + \" indexing at \" + str( indexes[0] ) + \", \" + str( indexes[1] ) )\n\n #if existing_value * .9 + target_value * .1 > 1:\n # print( \"ahahahah\" )\n\n #steps_final = steps_till_crash_array[-1]\n\n if run_number % 2000 == 0:\n #if steps_final == steps_till_crash_max:\n env.render()\n\n \n if done:\n env.reset()\n run_number += 1\n print( \"now on run \" + str( run_number ) )\n\n #env.render()\n \n \n\n \n\n \n","repo_name":"Chadleewalker/AI","sub_path":"Mountain Car.py","file_name":"Mountain Car.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42949786145","text":"\"\"\"\ni have many samples and i have a cnn that can detect patterns in images\nso i can prepare a much bigger dataset from some source and run the cnn through it\nevery square that is not a pattern i can add to the bad class\nevery square without continuation by another square is also in the bad class\nall others could be recommended to the good class\nquestion: how to find out that a square is without continuation?\nanswer:\nAfter merging all rects, squares whose size is 32x32 will stand alone; they are in the bad class\n\nNow we try to create a concept for the needed functions\nwe need to solve several problems\nwe must prepare a new dataset\nwe must run through it with the cnn\nwe must prepare three classes, which are the bad class without patterns,\nthe bad class with a pattern but without continuation and\nthe good class with both pattern and continuation\n\nwe know how to merge rects, we have a complete class for it\nwe know how to check if a square is without continuation\n\nAll of that gives us an algorithm\n\n1) prepare a bigger dataset (probably, several million samples)\n2) decompose it by frames\n3) for each frame find its cwt and run the cnn on it\n4) find all rects and merge them if needed\n5) each square that is not a pattern we can add to the first class\n6) each rect that is only 32x32 we can add to the second class\n7) each rect that is not 32x32 we can add to the good class\n\n\"\"\"\n\n\nimport random\nimport numpy as np\nimport pandas as pd\n\n# Keras Imports - CNN\nfrom keras.models import model_from_json\n\n# My scripts import\nfrom scripts.extremumlib import *\nfrom scripts.Segmentation import *\nfrom scripts.Rect import *\n\nimport time\nimport datetime\nimport json\n\n\n# load json and create model\n\nmodel_json_path = 
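# A minimal sketch of the update rule used in the loop above: the blend
# existing_value * .9 + target_value * .1 is one-step Q-learning with a fixed
# learning rate of 0.1. In the usual notation (all names here are illustrative):
def q_update(q, state, action, reward, next_state, alpha=0.1, gamma=0.99):
    # Q(s, a) <- Q(s, a) + alpha * (r + gamma * max_a' Q(s', a') - Q(s, a))
    best_next = max(q[next_state].values())
    q[state][action] += alpha * (reward + gamma * best_next - q[state][action])

q = {'s0': {'left': 0.0, 'right': 0.0}, 's1': {'left': 1.0, 'right': 0.0}}
q_update(q, 's0', 'right', reward=0.0, next_state='s1')
assert abs(q['s0']['right'] - 0.099) < 1e-9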
'../../KerasCNN/models/model1.json' #model_nclasses_46_1\nmodel_h5_path = '../../KerasCNN/models/model1.h5'\njson_file = open(model_json_path, 'r')\nloaded_model_json = json_file.read()\njson_file.close()\ncnn = model_from_json(loaded_model_json)\n# load weights into new model\ncnn.load_weights(model_h5_path)\nprint(\"Loaded model from disk\")\n\n\nopt = 'adam'\nloss = 'categorical_crossentropy'\nmetrics = ['accuracy']\n# Compile the classifier using the configuration we want\ncnn.compile(optimizer=opt, loss=loss, metrics=metrics)\n\nwith open('temp.txt', 'r') as f:\n lines = list(f)\n price_series = [float(x) for x in lines[0][1:-2].split(',')]\n # print(line)\n n = len(price_series)\n # print(price_series)\n\nimages = []\n\nwindow_size = 256\n\nintervals = []\n\nbad_first_class = []\nbad_second_class = []\ngood_class = []\n\nfor b in range(0, len(price_series), 256):\n\n scale = 50\n\n x = np.arange(1, window_size + 1)\n y = np.arange(1, scale)\n\n X, Y = np.meshgrid(x, y)\n\n window = price_series[n - window_size - b: n - b]\n window = np.array(window)\n\n M = get_cwt_swt(window, scale=scale, mask=[1, 1, 1, 1, 1, 0, 0, 0, 0, 0])\n M = linear(M)\n # M = bound_filter(M, alpha=0.5)\n\n block_sizex = 32\n block_sizey = 32\n\n test = []\n coords = []\n\n for i in range(scale - block_sizex):\n for j in range(window_size - block_sizey):\n test.append(M[i:i + block_sizex, j:j + block_sizey])\n coords.append((i, j))\n\n test = np.array(test)\n test = test.reshape(test.shape[0], block_sizex, block_sizey, 1)\n result = cnn.predict(test, verbose=1)\n\n cnt = 0\n wow = []\n wow_coords = []\n\n for i in range(len(result)):\n if result[i, 1] > 0.80:\n cnt+=1\n wow.append(test[i, :, :, 0])\n wow_coords.append(coords[i])\n else:\n bad_first_class.append(test[i, :, :, 0])\n\n segmentations = []\n\n wow_rects = [Rect(wow_coords[i], 32, 32) for i in range(cnt)]\n\n wow_rects = sorted(wow_rects, key=lambda a: a.x[1])\n\n correct_rects = []\n\n for rect in wow_rects:\n if len(correct_rects) == 0:\n correct_rects.append(rect)\n elif not correct_rects[-1].is_crossing(rect):\n correct_rects.append(rect)\n else:\n correct_rects[-1] = correct_rects[-1].get_convex_rect(rect)\n\n for x in correct_rects:\n if x.h == x.w == 32:\n bad_second_class.append(M[x.x[0]: x.x[0] + x.h, x.x[1]: x.x[1] + x.w])\n else:\n good_class.append(M[x.x[0]: x.x[0] + 32, x.x[1]: x.x[1] + 32])\n\n wow = [M[x.x[0]: x.x[0] + x.h, x.x[1]: x.x[1] + x.w] for x in correct_rects]\n\n\ndata = []\nfor x in bad_first_class[:1000]:\n data.append((x, 0))\n\nfor x in bad_second_class:\n data.append((x, 1))\n\nfor x in good_class:\n data.append((x, 2))\n\n\ndef prepare_data(data):\n new_data = []\n for x, y in data:\n x = np.array(list(x.reshape((1, -1))[0, :]) + [y])\n # print(len(x))\n new_data.append(x)\n return pd.DataFrame(data=new_data)\n\n# size = len(data)\n# for i in range(size):\n# data.append((-data[i][0], data[i][1]))\n\nprint(len(data), len(bad_first_class), len(bad_second_class), len(good_class))\ndata = prepare_data(data)\ndata.to_csv('../../KerasCNN/input/data_cwt.csv', sep=',', header=None, index=None)\n\n\n\n","repo_name":"CyberSoftStudio/TSForecasting","sub_path":"scripts/training_data_preparation.py","file_name":"training_data_preparation.py","file_ext":"py","file_size_in_byte":4943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71560042009","text":"import os\nimport dialogflow\nimport requests\nimport json\nimport pusher\n\nfrom flask import Flask, request, jsonify, 
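# A minimal sketch of the merge step described in the docstring above, reduced
# to 1-D intervals; the Rect version in the script applies the same
# sort-then-fold after ordering rects by their x coordinate.
def merge_intervals(intervals):
    merged = []
    for lo, hi in sorted(intervals):
        if merged and lo <= merged[-1][1]:  # overlaps the last kept interval
            merged[-1][1] = max(merged[-1][1], hi)
        else:
            merged.append([lo, hi])
    return merged

assert merge_intervals([(5, 7), (0, 3), (2, 4)]) == [[0, 4], [5, 7]]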
render_template\nfrom dotenv import load_dotenv, find_dotenv\n\nload_dotenv(find_dotenv())\n\n# initialize Pusher\npusher_client = pusher.Pusher(\n\tapp_id=os.getenv('PUSHER_APP_ID'),\n\tkey=os.getenv('PUSHER_APP_KEY'),\n\tsecret=os.getenv('PUSHER_APP_SECRET'),\n\tcluster=os.getenv('PUSHER_APP_CLUSTER'),\n\tssl=True)\n\t\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n\treturn render_template('index.html')\n\n\n@app.route('/get-movie-detail', methods=['POST'])\ndef get_movie_detail():\n\tdata = request.get_json(silent=True)\n\tmovie = data['queryResult']['parameters']['movie']\n\tapi_key = os.getenv('OMDB_API_KEY')\n\n\tmovie_detail = requests.get('http://www.omdbapi.com/?t={0}&apikey={1}'.format(movie, api_key)).content\n\tmovie_detail = json.loads(movie_detail)\n\tresponse = \"\"\"\n\t\tTitle : {0}\n\t\tReleased: {1}\n\t\tActors: {2}\n\t\tPlot: {3}\n\t\"\"\".format(movie_detail['Title'], movie_detail['Released'], movie_detail['Actors'], movie_detail['Plot'])\n\n\treply = {\n\t\t'fulfillmentText': response\n\t}\n\n\treturn jsonify(reply)\n\n\ndef detect_intent_texts(project_id, session_id, text, language_code):\n\tsession_client = dialogflow.SessionsClient()\n\tsession = session_client.session_path(project_id, session_id)\n\t\n\tif text:\n\t\ttext_input = dialogflow.types.TextInput(\n\t\t\ttext=text, language_code=language_code)\n\t\tquery_input = dialogflow.types.QueryInput(text=text_input)\n\t\tresponse = session_client.detect_intent(\n\t\t\tsession=session, query_input=query_input)\n\n\t\treturn response.query_result.fulfillment_text\n\n\n@app.route('/send_message', methods=['POST'])\ndef send_message():\n\tmessage = request.form['message']\n\tproject_id = os.getenv('DIALOGFLOW_PROJECT_ID')\n\tfulfillment_text = detect_intent_texts(project_id, \"unique\", message, 'en')\n\tresponse_text = {\"message\": fulfillment_text}\n\t# socketId = request.form['socketId']\n\t# pusher_client.trigger('movie_bot', 'new_message',\n\t# \t{'human_message': message, 'bot_message': fulfillment_text},\n\t# \tsocketId)\n\n\treturn jsonify(response_text)\n\n\nif __name__ == '__main__':\n\tapp.run()\n","repo_name":"Alexmhack/flask_chatbot","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"35275283310","text":"def heap_sort(a,reverse=False):\n key=lambda a,b: bool(a<b) if not reverse else bool(a>b)\n n=len(a)\n for i in range(n//2-1,-1,-1):\n heapify(a,n,i,key)\n \n for i in range(n-1,0,-1):\n a[0],a[i]=a[i],a[0]\n heapify(a,i,0,key)\n return a \n\ndef heapify(a,n,i,key):\n largest=i\n left_child=2*i+1\n right_child=2*i+2\n if(left_child<n and key(a[largest],a[left_child])):\n largest=left_child\n if(right_child<n and key(a[largest],a[right_child])):\n largest=right_child\n if(largest!=i):\n a[i],a[largest]=a[largest],a[i]\n heapify(a,n,largest,key)\n\ndef read_json(path: str) -> dict:\n with open(path, \"r\") as fp:\n data = json.load(fp)\n return data\n\ndef get_deployments(root_dir):\n deployments = []\n deletions = []\n for root, dirs, files in os.walk(root_dir):\n print(\"Found deployments: \",dirs, \" on \", root_dir, files)\n for dir in dirs:\n print(\"Parsing Directory: \" + os.path.join(root, dir))\n d_ = Deployment()\n d_.base_path = os.path.join(root, dir)\n d_.set_base_name(os.path.basename(dir))\n subfiles = os.listdir(d_.base_path)\n subfiles = [f for f in subfiles if os.path.isfile(os.path.join(d_.base_path, f))] #Filtering only the files.\n d_.subfiles = subfiles\n if \"delete.keep\" in subfiles:\n deletions.append(d_)\n continue\n if \"completed.keep\" in subfiles:\n continue # Directory already processed so skip \n if \"config.json\" in subfiles:\n d_.config = read_json(os.path.join(d_.base_path, 
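# Quick checks of heap_sort, assuming the a<b / a>b comparator above
# (the standard choice for ascending / descending heap sort):
assert heap_sort([5, 2, 8, 1]) == [1, 2, 5, 8]
assert heap_sort([5, 2, 8, 1], reverse=True) == [8, 5, 2, 1]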
\"config.json\"))\n deployments.append(d_)\n break\n\n return deployments,deletions\n\ndef process_deployment_creation(deploy_list: list[Deployment]) -> None:\n for d in deploy_list:\n print(f\"Deploying {d.base_name}\")\n d.deploy()\n mark_changed()\n\ndef process_deployment_deletion(deploy_list: list[Deployment]) -> None:\n for d in deploy_list:\n print(f\"Deleting {d.base_name}\")\n d.delete()\n mark_changed()\n\nif __name__ == \"__main__\":\n d_list = get_deployments(\"deployments\")\n process_deployment_creation(d_list[0])\n process_deployment_deletion(d_list[1])","repo_name":"jptalukdar/GitOps-Workflows","sub_path":"src/invoke.py","file_name":"invoke.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"44046222161","text":"import pandas as pd\nimport argparse\nimport os\nimport networkx as nx\nfrom collections import Counter\nfrom scipy.stats import binom_test\nfrom helpful_functions import source_names\n\ndef readArgs ():\n\tparser = argparse.ArgumentParser (description=\"statistics about semantic leaders\")\n\tparser.add_argument (\"--src-path\", required=True, type=str, help=\"directory contains the leadership scores\")\n\tparser.add_argument (\"--leaders-file\", required=True, type=str, help=\"file contains the leaders and their scores\")\n\tparser.add_argument (\"--leader-stats-file\", required=True, type=str, help=\"output file should contain aggregated leadership stats\")\n\tparser.add_argument (\"--leader-follower-stats-file\", required=True, type=str, help=\"output file should contain aggregated leadership/followee stats\")\n\targs = parser.parse_args ()\n\treturn args\n\ndef create_net (source_names, dyads_counter):\n\tG = nx.DiGraph ()\n\n\tG.add_nodes_from(source_names)\n\tn_edges = sum([dyads_counter[item] for item in dyads_counter])\n\tfor item in dyads_counter:\n\t\t# edge should point from follower to leader\n\t\tG.add_edge(item[1], item[0], weight=dyads_counter[item]/n_edges)\n\treturn G\n\ndef main (args):\n\tdf = pd.read_csv(os.path.join (args.src_path, args.leaders_file), sep=\";\")\n\trows = df[[\"s1\", \"s2\"]].values.tolist()\n\tpairs = list ()\n\tfor row in rows:\n\t\ts1, s2 = row[0], row[1]\n\t\tpairs.append ((s1, s2))\n\n\tleaders_followers = Counter (pairs)\n\n\tG = create_net (source_names, leaders_followers)\n\tpagerank = nx.pagerank_numpy (G, alpha=0.85)\n\t# include also a personalization factor\n\t\n\tleaders = Counter([x1 for x1, x2 in pairs])\n\tfollowers = Counter ([x2 for x1, x2 in pairs])\n\t\n\t# calculate the leader follower stats\n\titems = list ()\n\ttotal_dyads = sum([item[1] for item in leaders_followers.most_common (None)])\n\tfor item in leaders_followers.most_common (None):\n\t\titems.append ([item[0][0], item[0][1], item[1], item[1]/total_dyads])\n\n\tleaders_followers_df = pd.DataFrame (items, columns=[\"Leader\", \"Follower\", \"Count\", \"Probability\"])\n\n\t# calculate the leader stats\n\titems = list ()\n\tepsilon = 1e-10\n\tfor name in sorted (source_names):\n\t\tleader_prob = leaders[name]/(leaders[name] + followers[name] + epsilon)\n\t\tfollower_prob = followers[name]/(leaders[name] + followers[name] + epsilon)\n\t\tpval = binom_test([leaders[name], followers[name]], alternative=\"greater\")\n\t\tpr = pagerank[name]\n\t\titems.append ([name, leaders[name], f\"{leader_prob:.4f}\", f\"{pval:.4f}\", followers[name], f\"{follower_prob:.4f}\", f\"{pr:.4f}\"])\n\n\tleaders_df = pd.DataFrame (items, columns=[\"Name\", \"Count(role as 
leader)\", \"P(Name=leader)\", \"Pval\", \"Count(role as follower)\", \"P(Name=follower)\", \"PageRank\"])\n\n\t## Write to files\n\tleaders_followers_df.to_csv (os.path.join (args.src_path, args.leader_follower_stats_file), sep=\",\", index=False, header=True)\t\t\n\tleaders_df.to_csv (os.path.join (args.src_path, args.leader_stats_file), sep=\",\", index=False, header=True)\n\nif __name__ == \"__main__\":\n\tmain (readArgs())\n","repo_name":"sandeepsoni/semantic-leadership-network","sub_path":"scripts/leadership_stats.py","file_name":"leadership_stats.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"31"} +{"seq_id":"4876282123","text":"#!/Users/slava/work/fb_hacker_cup/venv/bin/python\n\nimport fileinput\n\nfd = {}\n\ndef F(n, m, L):\n if L <= 0:\n return G(n, m)\n\n if n == 0:\n return 0\n\n key = (n,m,L)\n if key in fd:\n return fd[key]\n\n r = 0\n for k in range(1, m+1):\n r += F(n-1, m, L-k)\n\n fd[key] = r\n return r\n\ndef G(n, m):\n return m**n\n\ndef P(x, y, z, h):\n return F(x, y, h-z) / G(x, y)\n\n\nf = fileinput.input()\n\nt = int(f.readline())\nfor i in range(t):\n h, s = [int(w) for w in f.readline().split()]\n spells = [w for w in f.readline().split()]\n assert(len(spells) == s)\n maxp = 0\n for spell in spells:\n if '+' in spell:\n xdy, z = spell.split('+')\n z = int(z)\n elif '-' in spell:\n xdy, z = spell.split('-')\n z = -int(z)\n else:\n xdy = spell\n z = 0\n\n x, y = xdy.split('d')\n x = int(x)\n y = int(y)\n p = P(x, y, z, h)\n maxp = max(maxp, p)\n if maxp >= 1:\n break\n\n print('Case #{}: {:0.6f}'.format(i+1, maxp))\n\n","repo_name":"vchernoy/coding","sub_path":"contests/2016/fb_hacker_cup/qualification/fighting_the_zombie/fighting_the_zombie.py","file_name":"fighting_the_zombie.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32532153287","text":"import numpy as np\nfrom scipy.fft import fft\n\nfrom task import DECIMAL_PLACES\n\n\ndef checkAccuracy(x, x1):\n eps = 10**-DECIMAL_PLACES\n for i in range(x.shape[0]):\n if (abs(x[i] - x1[i]) > eps):\n return False\n \n return True\n\n\n\n\ndef calrResultRelaxMethod(matrix, vector, w):\n \n x = np.zeros(matrix.shape[0])\n step = 1\n while(True):\n x1 = np.zeros(matrix.shape[0])\n x0 = x.copy()\n for i in range(matrix.shape[0]):\n x1[i] = (vector[i] - sum(matrix[i][j]*x[j] for j in range(matrix.shape[0])) + matrix[i][i]*x[i])/matrix[i][i]\n x[i] = w*x1[i] + (1 - w)*x0[i]\n print(str(step) + \" step: \" + str(x1))\n step += 1\n if(checkAccuracy(x1, x0)):\n x = x1.copy()\n break\n x = x1.copy()\n\n return x\n\ndef relax(matrix, vector):\n\n x = calrResultRelaxMethod(matrix, vector, w = 1.25)\n return x","repo_name":"Vypsen/vichmat","sub_path":"methods/relax.py","file_name":"relax.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36169571118","text":"'''\nQuaternions and generic 3D transformations\n'''\n\n\n\nimport numpy as np\n\nclass Quaternion(object):\n def __init__(self, w=1, x=0, y=0, z=0):\n if isinstance(w, (list, tuple, np.ndarray)) and not isinstance(x, np.ndarray):\n #Allows the use of a quaternion as a vector\n self.quat = np.array([0, w[0], w[1], w[2]])\n else:\n self.quat = np.array([w, x, y, z])\n \n def __repr__(self):\n if self.quat.ndim > 1:\n return \"\"%self.quat.shape[1]\n return 
\"%g+%gi+%gj+%gk\"%tuple(self.quat)\n \n def norm(self):\n self.quat = self.quat / np.sqrt((self.quat**2).sum())\n return self\n \n def conj(self):\n return Quaternion(self.w, *(-self.vec))\n\n @property\n def H(self):\n return self.conj()\n \n def __getattr__(self, attr):\n if attr in [\"w\", \"scalar\"]:\n return self.quat[0]\n elif attr in [\"x\", \"i\"]:\n return self.quat[1]\n elif attr in [\"y\", \"j\"]:\n return self.quat[2]\n elif attr in [\"z\", \"k\"]:\n return self.quat[3]\n elif attr in [\"v\", \"vec\", \"vector\"]:\n return self.quat[1:]\n else:\n super(Quaternion, self).__getattr__(self, attr)\n \n def __mul__(self, other):\n if isinstance(other, Quaternion):\n w = self.w*other.w - (self.vec*other.vec).sum(0)\n v = self.w*other.vec + other.w*self.vec + np.cross(self.vec.T, other.vec.T).T\n return Quaternion(w, *v).norm()\n elif isinstance(other, (np.ndarray, list, tuple)):\n if isinstance(other, (list, tuple)):\n other = np.array(other)\n #rotate a vector, will need to be implemented in GLSL eventually\n cross = np.cross(self.vec.T, other) + self.w*other\n return (other + np.cross(2*self.vec.T, cross)).squeeze()\n '''\n conj = self.H\n w = -np.dot(other, conj.vec)\n vec = np.outer(conj.w, other) + np.cross(other, conj.vec.T)\n if self.quat.ndim > 1:\n return self.w*vec.T + np.\n return self.w*vec + np.outer(w, self.vec).squeeze() + np.cross(self.vec, vec)\n '''\n else:\n raise ValueError\n\n def to_mat(self):\n '''\n Convert to an augmented rotation matrix if the quaternion is of unit norm\n ??? Does this function provide a sensible result for non-unit quaternions?\n\n Parameters\n ----------\n None\n\n Returns\n -------\n np.ndarray of shape (4, 4)\n Affine transformation matrix\n '''\n a, b, c, d = self.quat\n return np.array([\n [a**2+b**2-c**2-d**2, 2*b*c-2*a*d, 2*b*d+2*a*c, 0],\n [ 2*b*c+2*a*d, a**2-b**2+c**2-d**2, 2*c*d-2*a*b, 0],\n [ 2*b*d-2*a*c, 2*c*d+2*a*b, a**2-b**2-c**2+d**2, 0],\n [ 0, 0, 0, 1]])\n\n @classmethod \n def from_mat(cls, M):\n qw = np.sqrt(1 + M[0,0] + M[1,1] + M[2,2]) / 2\n qx = (M[2,1] - M[1,2])/(4*qw)\n qy = (M[0,2] - M[2,0])/(4*qw)\n qz = (M[1,0] - M[0,1])/(4*qw)\n return Quaternion(w=qw, x=qx, y=qy, z=qz)\n\n def rotate_to(self, vec):\n svec = self.vec / np.sqrt((self.vec**2).sum())\n nvec = nvec = vec2 / np.sqrt((vec2**2).sum())\n rad = np.arccos(np.dot(svec, nvec))\n axis = np.cross(svec, nvec)\n self = self.from_axisangle(axis, rad)*self\n\n @classmethod\n def rotate_vecs(cls, vec1, vec2):\n '''\n Get the quaternion which rotates vec1 onto vec2\n\n Parameters\n ----------\n vec1: np.ndarray of shape (3,)\n Starting vector\n vec2: np.ndarray of shape (3,)\n Vector which defines the orientation that you want to rotate the first vector to\n\n Returns\n -------\n Quaternion representing the rotation\n '''\n vec1, vec2 = np.array(vec1), np.array(vec2)\n svec = vec1 / np.sqrt((vec1**2).sum())\n nvec = vec2 / np.sqrt((vec2**2).sum())\n if nvec.ndim > 1:\n if svec.ndim > 1:\n rad = (svec * nvec).sum(1)\n else:\n rad = np.arccos(np.dot(svec, nvec.T))\n else:\n rad = np.arccos(np.dot(svec, nvec))\n axis = np.cross(svec, nvec)\n return cls.from_axisangle(axis, rad)\n \n @classmethod\n def from_axisangle(cls, axis, rad):\n '''\n Convert from the Axis-angle representation of rotations to the quaternion representation\n\n Parameters\n ----------\n axis: np.ndarray of shape (3,) or ?????\n Rotation axis\n rad: float\n Angle to rotate around the specified axis in radians\n\n Returns\n -------\n Quaternion representing the rotation\n '''\n #normalize the axis 
first\n axis = np.array(axis)\n if axis.ndim > 1:\n axis = axis.T / np.sqrt((axis**2).sum(1))\n else:\n if not np.all(axis == 0):\n axis = axis / np.sqrt((axis**2).sum())\n w = np.cos(rad*0.5)\n v = axis * np.sin(rad*0.5)\n return cls(w, *v)\n\nclass Transform(object):\n '''\n Homogenous transformations ???\n '''\n def __init__(self, move=(0,0,0), scale=1, rotate=None):\n self.move = np.array(move, dtype=np.float)\n self.scale = scale\n self.rotate = rotate if rotate is not None else Quaternion()\n\n def __repr__(self):\n return \"Rotate %s, then scale %s, then translate %s\"%(self.rotate, self.scale, self.move)\n \n def __mul__(self, other):\n if isinstance(other, Transform):\n #Pre-multiply the other transform, then apply self\n move = self.move + self.rotate*other.move\n scale = self.scale * other.scale\n rot = self.rotate * other.rotate\n return Transform(move, scale, rot)\n\n elif isinstance(other, Quaternion):\n #Apply the quaternion directly to current rotation\n return Transform(self.move, self.scale, other.rotate * self.rotate)\n\n def __call__(self, vecs):\n return self.scale * (self.rotate * vecs) + self.move\n\n def translate(self, x, y, z, reset=False):\n '''\n Set the translation point of the transformation\n\n Parameters\n ----------\n x, y, z: float\n Coordinates representing how much to move\n reset: bool, optional, default=False \n If true, the new coordinates replace the old ones. If false, they are added on\n '''\n if reset:\n self.move[:] = x,y,z\n else:\n self.move += x,y,z\n return self\n\n def rotate_x(self, rad, reset=False):\n rotate = Quaternion.from_axisangle((1,0,0), rad)\n if reset:\n self.rotate = rotate\n else:\n self.rotate = (rotate * self.rotate).norm()\n return self\n\n def rotate_y(self, rad, reset=False):\n rotate = Quaternion.from_axisangle((0,1,0), rad)\n if reset:\n self.rotate = rotate\n else:\n self.rotate = (rotate * self.rotate).norm()\n return self\n\n def rotate_z(self, rad, reset=False):\n rotate = Quaternion.from_axisangle((0,0,1), rad)\n if reset:\n self.rotate = rotate\n else:\n self.rotate = (rotate * self.rotate).norm()\n return self\n \n def to_mat(self):\n scale = np.eye(4)\n scale[(0,1,2), (0,1,2)] = self.scale\n move = np.eye(4)\n move[:3, -1] = self.move\n \n return np.dot(move, np.dot(scale, self.rotate.to_mat()))\n\ndef test():\n world = Transform().rotate_x(np.radians(-90))\n eye = Transform().translate(0,35,0)\n obj = Transform().translate(0,10,5)\n assert np.allclose((world*eye*obj)((0,0,1)), [0,6,-45])\n obj.rotate_y(np.radians(-90))\n assert np.allclose((world*eye*obj)((0,0,1)), [-1, 5, -45])\n obj.rotate_z(np.radians(-90))\n assert np.allclose((world*eye*obj)((0,0,1)), [0,5,-46])\n assert np.allclose(np.dot((world*eye*obj).to_mat(), [0,0,1,1]), [0,5,-46, 1])\n","repo_name":"carmenalab/brain-python-interface","sub_path":"riglib/stereo_opengl/xfm.py","file_name":"xfm.py","file_ext":"py","file_size_in_byte":8262,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"14312078336","text":"import numpy as np\nimport scipy.io as sio\nimport time\nfrom FeatWalk import featurewalk\n\n'''################# Load data #################'''\nmat_contents = sio.loadmat('ACM.mat')\nnumber_walks = 35 # 'Number of random walks to start at each instance'\nwalk_length = 25 # 'Length of the random walk started at each instance'\nwin_size = 5 # 'Window size of skipgram model.'\n\n'''################# Experimental Settings #################'''\nd = 100 # the dimension of the embedding 
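# A short numeric check of the rotation API in the Quaternion class above:
# a quarter turn about the z axis should take the x unit vector to the y unit vector.
import numpy as np
q = Quaternion.from_axisangle((0, 0, 1), np.pi / 2)
assert np.allclose(q * np.array([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])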
representation\nX1 = mat_contents[\"Features\"]\nX2 = mat_contents[\"Network\"]\nLabel = mat_contents[\"Label\"]\ndel mat_contents\nn = X1.shape[0]\nIndices = np.random.randint(25, size=n)+1 # 5-fold cross-validation indices\n\nGroup1 = []\nGroup2 = []\n[Group1.append(x) for x in range(0, n) if Indices[x] <= 5] # 2 for 10%, 5 for 25%, 20 for 100% of training group\n[Group2.append(x) for x in range(0, n) if Indices[x] >= 21] # test group\nn1 = len(Group1) # num of instances in training group\nn2 = len(Group2) # num of instances in test group\nCombX1 = X1[Group1+Group2, :]\nCombX2 = X2[Group1+Group2, :][:, Group1+Group2]\n\n\n'''################# Large-Scale Heterogeneous Feature Embedding #################'''\nprint(\"Large-Scale Heterogeneous Feature Embedding (FeatWalk), 5-fold with 25% of training is used:\")\nprint(\"Estimated running time {} seconds\".format((n1+n2)*0.014))\nstart_time = time.time()\nH_FeatWalk = featurewalk(featur1=CombX1, alpha1=.97, featur2=None, alpha2=0, Net=CombX2, beta=0, num_paths=number_walks, path_length=walk_length, dim=d, win_size=win_size).function()\nprint(\"time elapsed: {:.2f}s\".format(time.time() - start_time))\n\n'''################# FeatWalk for a single feature matrix #################'''\nprint(\"FeatWalk for a single feature matrix:\")\nstart_time = time.time()\nH_FeatWalk_X = featurewalk(featur1=CombX1, alpha1=1, featur2=None, alpha2=0, Net=None, beta=0, num_paths=number_walks, path_length=walk_length, dim=d, win_size=win_size).function()\nprint(\"time elapsed: {:.2f}s\".format(time.time() - start_time))\n\nsio.savemat('Embedding.mat', {\"H_FeatWalk\": H_FeatWalk, \"H_FeatWalk_X\": H_FeatWalk_X})","repo_name":"DEEP-PolyU/FeatWalk_AAAI19","sub_path":"Runme.py","file_name":"Runme.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"31"} +{"seq_id":"46302704277","text":"import webapp2\nimport os\nimport random\nimport jinja2\n#import taco_model\n\nfrom google.appengine.ext import ndb\n\n\nclass TacoFillingModel(ndb.Model):\n #list here all the properties of the entity\n tacoIn = ndb.StringProperty(required=True)\n\n\n\ndef get_bill():\n # Add a list of fortunes to the empty fortune_list array\n bill_list=[10, 15.5, 20, 25.5, 30]\n # Use the random library to return a random element from the array\n random_bill = random.choice(bill_list)\n return random_bill\n\ndef get_tacos():\n # Add a list of fortunes to the empty fortune_list array\n filling_list = get_all_tacos()\n # Use the random library to return a random element from the array\n if len(filling_list) == 0:\n random_filling = 'test-filling'\n else:\n random_filling = random.choice(filling_list)\n return random_filling\n\ndef get_all_tacos():\n #fillings = ['steak', 'carnitas', 'veggie', 'chicken', 'ground beef']\n fillings = TacoFillingModel.query().filter().fetch()\n only_fillings = []\n for fil in fillings:\n only_fillings.append(str(fil.tacoIn))\n return only_fillings\n\n# Remember, you can get this by searching for jinja2 google app engine\njinja_current_directory = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),\n extensions=['jinja2.ext.autoescape'],\n autoescape=True)\n\n\nclass TacosHandler(webapp2.RequestHandler):\n def get(self):\n results_template = jinja_current_directory.get_template('template/tacos_results.html')\n self.response.write(results_template.render(tacoType = get_tacos()))\n\n #this gets executed when you use a form with POST method and /tacos route\n 
def post(self):\n # get input from the html form\n new_filling_from_form = self.request.get('new-filling')\n tacoFilling1 = TacoFillingModel( tacoIn = new_filling_from_form)\n k = tacoFilling1.put()\n results_template = jinja_current_directory.get_template('template/add_taco.html')\n self.response.write(results_template.render(filling = k.get().tacoIn))\n\n\nclass BillHandler(webapp2.RequestHandler):\n def get(self):\n results_template = jinja_current_directory.get_template('template/bill_results.html')\n self.response.write(results_template.render(total = str(get_bill())))\n\n # def post(self):\n\n\nclass WelcomeHandler(webapp2.RequestHandler):\n def get(self):\n results_template = jinja_current_directory.get_template('template/welcome.html')\n self.response.write(results_template.render())\n\n\nclass AllFillingsHandler(webapp2.RequestHandler):\n def get(self):\n #results_template = jinja_current_directory.get_template('template/welcome.html')\n #self.response.write(results_template.render())\n self.response.write(get_all_tacos())\n\n# Route mapping\napp = webapp2.WSGIApplication([\n # This line routes the main url ('/') - also know as\n # The root route - to the Fortune Handler\n ('/', WelcomeHandler),\n ('/tacos', TacosHandler), #maps '/predict' to the TacosHandler\n ('/bill', BillHandler), #maps '/farewell' to the BillHandler\n ('/fillings', AllFillingsHandler),\n], debug=True)\n","repo_name":"marianelamin/mvc-restaurant-only-tacos","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"75092159446","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport argparse\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--filename', type=str, help=\"numberofcables\")\n args = parser.parse_args() \n filename = args.filename\n\n values = open(filename, \"r\")\n # values = values.read()\n valuesStr = values.read()\n print(valuesStr)\n\n ValuesList = list(valuesStr.split(\"\\n\"))\n cablesNum = []\n runtimeAv = []\n for value in ValuesList:\n value = list(value.split(\" \"))\n cablesNum.append(float(value[0]))\n runtimeAv.append(float(value[2]))\n\n p = np.poly1d(np.polyfit(cablesNum, runtimeAv, 2))\n cablesNumSpace = np.linspace(cablesNum[0], cablesNum[-1])\n plt.plot(cablesNum, runtimeAv, 'o', cablesNumSpace, p(cablesNumSpace), '-')\n plt.show()\nif __name__ == '__main__':\n main()\n","repo_name":"IMRCLab/col-trans","sub_path":"hardware/qp-solve/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20109986150","text":"import os\nfrom datetime import datetime\n\n# Run this example with LC_TIME=[other locale] to use a different\n# locale's datetime formatting, eg:\n#\n# LC_TIME=en_US python examples/datetimecol.py\n# or\n# LC_TIME=en_GB python examples/datetimecol.py\nos.environ.setdefault('LC_TIME', 'en_GB') # noqa\n\nfrom flask_table import Table, Col, DatetimeCol\n\n\nclass Item(object):\n def __init__(self, name, dt):\n self.name = name\n self.dt = dt\n\n\nclass ItemTable(Table):\n name = Col('Name')\n dt = DatetimeCol('Datetime')\n\n\ndef main():\n items = [\n Item('Name1', datetime.now()),\n Item('Name2', datetime(2018, 1, 1, 12, 34, 56)),\n ]\n\n table = ItemTable(items)\n\n # or {{ table }} in jinja\n print(table.__html__())\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"plumdog/flask_table","sub_path":"examples/datetimecol.py","file_name":"datetimecol.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":206,"dataset":"github-code","pt":"31"} +{"seq_id":"30725958167","text":"''' import numpy as np\nimport pandas\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Input\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom keras.utils import np_utils\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import KFold\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.pipeline import Pipeline\n\ndf = pandas.read_json('json_data.json')\n# replace nan with -1000\ndf.fillna(0, inplace=True)\ndataset = df.values\nX = dataset[:, df.columns != 'location'].astype(float)\n# multiple all values by -1\nX[:, :] = X[:, :] * -1/100\nY = dataset[:, df.columns == 'location']\n\nencoder = LabelEncoder()\nencoder.fit(Y)\nencoded_Y = encoder.transform(Y)\n# convert integers to dummy variables (i.e. one hot encoded)\ndummy_y = np_utils.to_categorical(encoded_Y)\nprint(dummy_y)\n\nprint(X)\n\n# define baseline model\n\n\ndef create_model():\n # create model\n model = Sequential()\n model.add(Input(shape=np.shape(X[0]), name='input'))\n model.add(Dense(10, activation='relu'))\n model.add(Dense(10, activation='relu'))\n model.add(Dense(2, activation='softmax'))\n # Compile model\n model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop', metrics=['accuracy'])\n return model\n\n\nmodel = create_model()\nhistory_callback = model.fit(X, dummy_y, epochs=100, batch_size=50)\nscore = model.evaluate(X, dummy_y, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\ny_pred = model.predict(X)\nactual = np.argmax(dummy_y, axis=1)\npredicted = np.argmax(y_pred, axis=1)\nprint(actual)\nprint(predicted)\n# save the model to disk\nmodel.save('model.h5')\n '''\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nimport tensorflow as tf\n\ndf = pd.read_csv('data_file.csv')\ndf.fillna(0, inplace=True)\n\ndf['is_inside'] = [\n 1 if typ == 'inside' else 0 for typ in df['location']\n]\ndf.drop('location', axis=1, inplace=True)\n\nX = df.drop('is_inside', axis=1)\ny = df['is_inside']\n\nX_train, X_test, y_train, y_test = train_test_split(\n X, y,\n test_size=0.2, random_state=42\n)\n\nscaler = StandardScaler()\nX_train_scaled = scaler.fit_transform(X_train)\nX_test_scaled = scaler.transform(X_test)\n\n\ntf.random.set_seed(42)\n\n\nmodel = tf.keras.Sequential([\n tf.keras.layers.Dense(128, activation='relu'),\n tf.keras.layers.Dense(256, activation='relu'),\n tf.keras.layers.Dense(256, activation='relu'),\n tf.keras.layers.Dense(1, activation='sigmoid')\n])\n\nmodel.compile(\n loss=tf.keras.losses.binary_crossentropy,\n optimizer=tf.keras.optimizers.Adam(lr=0.03),\n metrics=[\n tf.keras.metrics.BinaryAccuracy(name='accuracy'),\n tf.keras.metrics.Precision(name='precision'),\n tf.keras.metrics.Recall(name='recall')\n ]\n)\n\nhistory = model.fit(X_train_scaled, y_train, epochs=100)\n\npredictions = model.predict(X_test_scaled)\n\nprediction_classes = [\n 1 if prob > 0.5 else 0 for prob in np.ravel(predictions)\n]\n\nprint(prediction_classes)\n\nmodel.save('model.h5')\n","repo_name":"helloparthshah/ecs172Labs","sub_path":"Lab6/python 
server/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"38434551120","text":"from typing import List\n\nfrom django.contrib.postgres.fields import ArrayField\nfrom django.db import models\n\nfrom django_elasticsearch_dsl import Document, fields\nfrom django_elasticsearch_dsl.documents import model_field_class_to_field_class\n\nfrom swp.models.fields import CombinedISBNField, LongURLField\n\nmodel_field_class_to_field_class[CombinedISBNField] = model_field_class_to_field_class[models.CharField]\nmodel_field_class_to_field_class[LongURLField] = model_field_class_to_field_class[models.URLField]\n\nANALYZERS = {\n 'ar': 'arabic',\n 'bg': 'bulgarian',\n 'bn': 'bengali',\n 'ca': 'catalan',\n 'ckb': 'sorani',\n 'cs': 'czech',\n 'da': 'danish',\n 'de': 'german',\n 'el': 'greek',\n 'en': 'english',\n 'es': 'spanish',\n 'et': 'estonian',\n 'eu': 'basque',\n 'fa': 'persian',\n 'fi': 'finnish',\n 'fr': 'french',\n 'ga': 'irish',\n 'gl': 'galician',\n 'hi': 'hindi',\n 'hu': 'hungarian',\n 'hy': 'armenian',\n 'id': 'indonesian',\n 'it': 'italian',\n 'ja': 'cjk',\n 'ko': 'cjk',\n 'lt': 'lithuanian',\n 'lv': 'latvian',\n 'nl': 'dutch',\n 'no': 'norwegian',\n 'pt': 'portuguese',\n 'ro': 'romanian',\n 'ru': 'russian',\n 'sv': 'swedish',\n 'th': 'thai',\n 'tr': 'turkish',\n 'zh': 'cjk'\n}\n\n\nclass TranslationField(fields.ObjectField):\n\n def __init__(self, attr=None, **kwargs):\n properties = {\n 'default': fields.TextField(analyzer='default'),\n }\n\n for language, analyzer in ANALYZERS.items():\n properties[language] = fields.TextField(analyzer=analyzer)\n\n super(TranslationField, self).__init__(attr, properties=properties, **kwargs)\n\n def get_value_from_instance(self, instance, field_value_to_ignore=None):\n return {\n 'default': super(fields.ObjectField, self).get_value_from_instance(\n instance=instance,\n field_value_to_ignore=field_value_to_ignore,\n ),\n }\n\n\ndef get_translation_fields(language, field_names):\n languages = [*ANALYZERS, 'default']\n\n if language in languages:\n languages.remove(language)\n languages.insert(0, language)\n\n return [f'{field}.{language}' for field in field_names for language in languages]\n\n\nclass FieldMixin:\n TRANSLATION_FIELDS: List[str] = []\n\n @classmethod\n def to_field(cls, field_name, model_field):\n if field_name in cls.TRANSLATION_FIELDS:\n return TranslationField(attr=field_name)\n\n if isinstance(model_field, ArrayField):\n base_field = Document.to_field(field_name, model_field.base_field)\n\n return fields.ListField(base_field)\n\n return Document.to_field(field_name, model_field)\n","repo_name":"swp-berlin/webmonitor","sub_path":"swp/documents/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"8121484503","text":"import functools\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport itertools\nfrom functools import reduce\n\nimport torch.nn.functional as F\nimport math\nfrom math import cos,atan\n\n# CCD camera parameters\nCCD_length = 7.7\nCCD_width = 5.5\nox = CCD_width/2\noy = CCD_length/2\na = 8.3\nk = 4\ndx = 0.00859375\ndy = 0.00859375\nf = 8\n\nclass dis_conv(nn.Module):\n def __init__(self, w, h, batch_size):\n super(dis_conv,self).__init__()\n w0, h0 = w, h\n batch_size0 = batch_size\n self.bn = np.zeros((h0,w0))\n for i in range(h0):\n for j in range(w0):\n a0 = 
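# A small illustration of get_translation_fields above: the requested language
# moves to the front, the remaining analyzer languages keep their dict order,
# and 'default' comes last (output abbreviated):
# get_translation_fields('de', ['title'])
# -> ['title.de', 'title.ar', 'title.bg', ..., 'title.zh', 'title.default']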
atan(dx*(j-ox)/f)\n self.bn[i,j] = math.floor((cos(a0)-1)*(i-oy))\n \n self.conv1 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=0,\n bias=False)\n \n def forward(self,input):\n input = input.cuda()\n shape1 = input.shape\n #print(shape1)\n \n feature1 = torch.zeros((shape1[0],shape1[1],shape1[2],(shape1[3])))\n \n \n matrix1 = torch.zeros((shape1[0],shape1[1],3,3)).cuda()\n \n tb = torch.zeros((9),dtype=torch.int)\n for i in range(shape1[2]):\n for j in range(shape1[3]):\n if i+2= 256:\n j1 = j%256\n tb[k] = int(i+int(k/3)+self.bn[int(i+k/3),int(j1+k%3)])\n else:\n tb[k] = int(i+int(k/3)+self.bn[int(i+k/3),int(j+k%3)])\n \n matrix1[:,:,int(k/3),int(k%3)] = input[:,:,tb[k],int(k%3)]\n \n \n feature1[:,:,i,j] = self.conv1(matrix1).reshape(shape1[0],shape1[1])\n \n out = feature1\n \n out = out.cuda()\n return out\n \n \n ","repo_name":"hukexiangcun/DDCNet","sub_path":"DDCNet_allV1/modeling/s2cnn/dis_convolutional.py","file_name":"dis_convolutional.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71921916887","text":"\"\"\"A proxy server that forwards requests from one port to another server.\n\nTo run this using Python 2.7:\n\n% python proxy.py\n\nIt listens on a port (`LISTENING_PORT`, below) and forwards commands to the\nserver. The server is at `SERVER_ADDRESS`:`SERVER_PORT` below.\n\"\"\"\n\n# This code uses Python 2.7. These imports make the 2.7 code feel a lot closer\n# to Python 3. (They're also good changes to the language!)\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport library\nimport optparse\n\n# Where to find the server. This assumes it's running on the smae machine\n# as the proxy, but on a different port.\nSERVER_ADDRESS = 'localhost'\nSERVER_PORT = 7777\n\n# The port that the proxy server is going to occupy. This could be the same\n# as SERVER_PORT, but then you couldn't run the proxy and the server on the\n# same machine.\nLISTENING_PORT = 8888\n\n# Cache values retrieved from the server for this long.\nMAX_CACHE_AGE_SEC = 60.0 # 1 minute\n\n\ndef ForwardCommandToServer(command, server_addr, server_port):\n \"\"\"Opens a TCP socket to the server, sends a command, and returns response.\n\n Args:\n command: A single line string command with no newlines in it.\n server_addr: A string with the name of the server to forward requests to.\n server_port: An int from 0 to 2^16 with the port the server is listening on.\n Returns:\n A single line string response with no newlines.\n \"\"\"\n socket = library.CreateClientSocket(server_addr, server_port)\n socket.send(command)\n\n # Wait to receive the data from the server socket.\n return library.ReadCommand(socket)\n\n\ndef ProxyClientCommand(sock, server_addr, server_port, cache):\n \"\"\"Receives a command from a client and forwards it to a server:port.\n\n A single command is read from `sock`. That command is passed to the specified\n `server`:`port`. The response from the server is then passed back through\n `sock`.\n\n Args:\n sock: A TCP socket that connects to the client.\n server_addr: A string with the name of the server to forward requests to.\n server_port: An int from 0 to 2^16 with the port the server is listening on.\n cache: A KeyValueStore object that maintains a temorary cache.\n max_age_in_sec: float. 
Cached values older than this are re-retrieved from\n the server.\n \"\"\"\n command_line = library.ReadCommand(sock)\n\n cmd, name, text = library.ParseCommand(command_line)\n\n if cmd == \"GET\":\n if cache.GetValue(name, MAX_CACHE_AGE_SEC):\n print('Key %s in the cache.' % name)\n sock.send(\"Key: {0}, Value: {1}\\n\".format(\n name, cache.GetValue(name, MAX_CACHE_AGE_SEC)))\n return\n\n # Get record from the server, and update the cache.\n serverResponse = ForwardCommandToServer(\n command_line, server_addr, server_port)\n cache.StoreValue(name, serverResponse)\n elif cmd == \"PUT\":\n print('Writing %s: %s to the cache' % (name, text))\n cache.StoreValue(name, text)\n serverResponse = ForwardCommandToServer(\n command_line, server_addr, server_port)\n elif cmd == \"DUMP\":\n serverResponse = ForwardCommandToServer(\n command_line, server_addr, server_port)\n else:\n return\n\n # Forward the server response to the client.\n print(\"Forwarding the response from the server to the client\")\n sock.send(serverResponse)\n\n\ndef main(records_file=None):\n # Listen on a specified port...\n server_sock = library.CreateServerSocket(LISTENING_PORT)\n if records_file:\n try:\n cache = library.KeyValueStore(\n fileName=records_file, isTimer=True)\n except library.InvalidRecordFormatException as e:\n print(e)\n print(\"Initializing an empty cache.\")\n cache = library.KeyValueStore()\n except library.InvalidRecordTypeException as e:\n print(e)\n print(\"Initializing an empty cache.\")\n cache = library.KeyValueStore()\n else:\n cache = library.KeyValueStore(isTimer=True)\n # Accept incoming commands indefinitely.\n try:\n while True:\n # Wait until a client connects and then get a socket that connects to the\n # client.\n client_sock, (address, port) = library.ConnectClientToServer(\n server_sock)\n print('Received connection from %s:%d' % (address, port))\n ProxyClientCommand(client_sock, SERVER_ADDRESS, SERVER_PORT,\n cache)\n client_sock.close()\n except KeyboardInterrupt:\n # Close server socket.\n # Write the records to a file for later use.\n server_sock.close()\n with open(\"proxy-records.txt\", 'w') as fileHandle:\n fileHandle.write(str(cache))\n\n\nparser = optparse.OptionParser()\nparser.add_option('-d', '--database', action='store',\n dest='database', default=None)\noption, args = parser.parse_args()\n\nmain(option.database)\n","repo_name":"Aayyush/Key-Value-Server-","sub_path":"proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":5064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25662713572","text":"\"\"\"\r\nsponsor_manager.py\r\nAuthor: Timothy Gist\r\nDatabase Support: Maranda Dodgson\r\n12/13/2023: The Sponsor Manager program is meant to be a utility to allow the club to manage sponsors.\r\nFunctionality includes a login function to restrict access to the information contained herein.\r\nThe main program allows the user to view entries in the database in order to better manage\r\ninteraction with those sponsors. 
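# A minimal sketch of the caching policy described above (serve GET hits that
# are younger than MAX_CACHE_AGE_SEC, otherwise re-fetch). This is illustrative
# only and is not the KeyValueStore API from library.py:
import time

class TTLCache:
    def __init__(self):
        self._data = {}  # key -> (value, stored_at)

    def store(self, key, value):
        self._data[key] = (value, time.time())

    def get(self, key, max_age_sec):
        value, stored_at = self._data.get(key, (None, 0.0))
        if value is not None and time.time() - stored_at <= max_age_sec:
            return value  # fresh hit
        return None  # miss or stale: the caller re-fetches from the server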
See the 'about' function from the menubar for more detailed\r\ninformation about the specific functions available.\r\nThere is also functionality that allows the user to add new sponsors and then return to the main program.\r\nThis is not intended to be the final version at this time, but rather a working prototype that can\r\nallow for greater functionality in the future.\r\n12/14/2023: Added formatting to the output for cleaner, more readable text.\r\n\"\"\"\r\n\r\nimport tkinter as tk\r\nimport tkinter.messagebox as tkmessagebox\r\nfrom login_module_V3 import *\r\nimport sqlite3\r\nfrom tkinter import ttk\r\nfrom new_sponsor import *\r\n\r\nlogin()\r\n\r\n\r\n# Create base window\r\nroot = tk.Tk()\r\nroot.title('Sponsor Manager')\r\nroot.geometry('700x700')\r\n\r\n\r\ndef group_members():\r\n result = tkmessagebox.showinfo('Team Members',\r\n 'Team Leader: Timothy Gist \\nDatabase Support: Maranda Dodgson', icon=\"info\")\r\n\r\n\r\ndef about():\r\n about_page = tk.Tk()\r\n about_page.title(\"How to\")\r\n about_page.geometry(\"800x800\")\r\n about_page.configure(background='grey')\r\n panel = tk.Text(about_page, height=50, width=100)\r\n f = open(\"about.txt\", \"r\")\r\n\r\n for line in f:\r\n panel.insert('end', line)\r\n\r\n f.close()\r\n\r\n panel.grid(row=0, column=0)\r\n # scrollbar = ttk.Scrollbar(panel, orient='vertical', command=panel.yview)\r\n # scrollbar.grid(row=0, column=1, sticky=tk.NS)\r\n # panel['yscrollcommand'] = scrollbar.set\r\n about_page.mainloop()\r\n\r\n\r\ndef goodbye():\r\n result = tkmessagebox.askquestion('System', 'Are you sure you want to exit?', icon=\"warning\")\r\n if result == 'yes':\r\n root.destroy()\r\n exit()\r\n\r\n\r\nroot_menubar = tk.Menu(root)\r\nroot.config(menu=root_menubar)\r\nfile_menu = tk.Menu(root_menubar, tearoff=False)\r\nfile_menu.add_command(label='Team Members', command=group_members)\r\nfile_menu.add_command(label='About', command=about)\r\nfile_menu.add_command(label=\"Exit\", command=goodbye)\r\nroot_menubar.add_cascade(label=\"File\", menu=file_menu)\r\n# Create frame to hold group member labels\r\nframe = tk.LabelFrame(root, text=\"Group 1 Team Members\", relief=tk.RAISED, padx=12, pady=12)\r\nframe.grid(row=0, column=0, columnspan=5)\r\n\r\n\r\n# Buttons for functionality\r\ndef sponsors():\r\n txt_edit.delete(1.0, tk.END)\r\n conn = sqlite3.connect('db_member.db')\r\n cur = conn.cursor()\r\n cur.execute('SELECT * FROM sponsors')\r\n records = cur.fetchall()\r\n print_records = ''\r\n for record in records:\r\n for item in record:\r\n print_records += str(item) + '\\n'\r\n txt_edit.insert('end', '----------\\n')\r\n txt_edit.insert('end', f'{print_records} \\n')\r\n print_records = ''\r\n\r\n conn.commit()\r\n conn.close()\r\n\r\n\r\ndef membership():\r\n txt_edit.delete(1.0, tk.END)\r\n conn = sqlite3.connect('db_member.db')\r\n cur = conn.cursor()\r\n cur.execute('SELECT * FROM Membership_level')\r\n records = cur.fetchall()\r\n print_records = ''\r\n item_label = ['ID', 'Level', 'Renewal', 'Cost']\r\n for record in records:\r\n count = 0\r\n for item in record:\r\n print_records += f'{item_label[count]: >10}: {str(item): <10} \\n'\r\n count += 1\r\n\r\n txt_edit.insert('end', f'{print_records} \\n')\r\n print_records = ''\r\n\r\n conn.commit()\r\n conn.close()\r\n\r\n\r\ndef member_level():\r\n txt_edit.delete(1.0, tk.END)\r\n conn = sqlite3.connect('db_member.db')\r\n cur = conn.cursor()\r\n cur.execute('SELECT Membership_level FROM Membership_level')\r\n records = cur.fetchall()\r\n print_records = ''\r\n for record in 
records:\r\n for item in record:\r\n print_records += str(item) + '\\n'\r\n txt_edit.insert('end', f'{print_records} \\n')\r\n print_records = ''\r\n\r\n conn.commit()\r\n conn.close()\r\n\r\n\r\ndef sponsor_names():\r\n txt_edit.delete(1.0, tk.END)\r\n conn = sqlite3.connect('db_member.db')\r\n cur = conn.cursor()\r\n cur.execute('SELECT Sponsor_Name FROM sponsors')\r\n records = cur.fetchall()\r\n print_records = ''\r\n for record in records:\r\n print_records += f'Sponsor Name: {str(record[0])} \\n'\r\n txt_edit.insert('end', f'{print_records} \\n')\r\n print_records = ''\r\n\r\n conn.commit()\r\n conn.close()\r\n\r\n\r\ndef payment_received():\r\n txt_edit.delete(1.0, tk.END)\r\n conn = sqlite3.connect('db_member.db')\r\n cur = conn.cursor()\r\n cur.execute('SELECT Sponsor_Name FROM sponsors WHERE Payment_Received = \"Yes\"')\r\n records = cur.fetchall()\r\n print_records = ''\r\n for record in records:\r\n for item in record:\r\n print_records += str(item) + '\\n'\r\n txt_edit.insert('end', f'{print_records} \\n')\r\n print_records = ''\r\n\r\n conn.commit()\r\n conn.close()\r\n\r\n\r\ndef payment_not_received():\r\n txt_edit.delete(1.0, tk.END)\r\n conn = sqlite3.connect('db_member.db')\r\n cur = conn.cursor()\r\n cur.execute('SELECT Sponsor_Name FROM sponsors WHERE Payment_Received = \"No\"')\r\n records = cur.fetchall()\r\n print_records = ''\r\n for record in records:\r\n for item in record:\r\n print_records += str(item) + '\\n'\r\n txt_edit.insert('end', f'{print_records} \\n')\r\n print_records = ''\r\n\r\n conn.commit()\r\n conn.close()\r\n\r\n\r\ndef sponsor_level():\r\n txt_edit.delete(1.0, tk.END)\r\n conn = sqlite3.connect('db_member.db')\r\n cur = conn.cursor()\r\n cur.execute('''SELECT Sponsors.Sponsor_Name, Membership_level.Membership_level \r\n FROM Sponsors JOIN Membership_level ON Sponsors.Membership_ID = Membership_level.Membership_ID''')\r\n records = cur.fetchall()\r\n print_records = ''\r\n for record in records:\r\n for item in record:\r\n print_records += str(item) + '\\n'\r\n txt_edit.insert('end', f'{print_records} \\n')\r\n print_records = ''\r\n\r\n conn.commit()\r\n conn.close()\r\n\r\n\r\ndef membership_cost():\r\n txt_edit.delete(1.0, tk.END)\r\n conn = sqlite3.connect('db_member.db')\r\n cur = conn.cursor()\r\n cur.execute('''SELECT Sponsors.Sponsor_Name, Membership_level.Membership_cost \r\n FROM Sponsors JOIN Membership_level ON Sponsors.Membership_ID = Membership_level.Membership_ID''')\r\n records = cur.fetchall()\r\n print_records = ''\r\n for record in records:\r\n for item in record:\r\n print_records += str(item) + '\\n'\r\n txt_edit.insert('end', f'{print_records} \\n')\r\n print_records = ''\r\n\r\n conn.commit()\r\n conn.close()\r\n\r\n\r\ndef membership_renewal():\r\n txt_edit.delete(1.0, tk.END)\r\n conn = sqlite3.connect('db_member.db')\r\n cur = conn.cursor()\r\n cur.execute('''SELECT Sponsors.Sponsor_Name, Membership_level.Membership_renewal, Membership_level.Membership_cost \r\n FROM Sponsors JOIN Membership_level ON Sponsors.Membership_ID = Membership_level.Membership_ID''')\r\n records = cur.fetchall()\r\n print_records = ''\r\n for record in records:\r\n for item in record:\r\n print_records += str(item) + '\\n'\r\n txt_edit.insert('end', f'{print_records} \\n')\r\n print_records = ''\r\n\r\n conn.commit()\r\n conn.close()\r\n\r\n\r\ndef add_sponsor():\r\n new_sponsor()\r\n\r\n\r\n# Create the text box for query output\r\ntxt_edit = tk.Text(root)\r\n# Create the frame to hold the buttons\r\nfrm_buttons = tk.Frame(root, relief=tk.RAISED)\r\n# Create the buttons\r\nbtn1 = 
tk.Button(frm_buttons, text='Sponsors', command=sponsors)\r\nbtn2 = tk.Button(frm_buttons, text=\"Membership\", command=membership)\r\nbtn3 = tk.Button(frm_buttons, text='Membership Level', command=member_level)\r\nbtn4 = tk.Button(frm_buttons, text='Sponsor Names', command=sponsor_names)\r\nbtn5 = tk.Button(frm_buttons, text='Payment Received', command=payment_received)\r\nbtn6 = tk.Button(frm_buttons, text='Payment Not received', command=payment_not_received)\r\nbtn7 = tk.Button(frm_buttons, text='Sponsor Level', command=sponsor_level, state='disabled')\r\nbtn8 = tk.Button(frm_buttons, text='Membership Cost', command=membership_cost, state='disabled')\r\nbtn9 = tk.Button(frm_buttons, text='Membership Renewal', command=membership_renewal, state='disabled')\r\nbtn10 = tk.Button(frm_buttons, text='Add New Sponsor', command=add_sponsor)\r\n# Place everything in the main window\r\nbtn1.grid(row=1, column=0, padx=5, pady=5)\r\nbtn2.grid(row=2, column=0, padx=5, pady=5)\r\nbtn3.grid(row=3, column=0, padx=5, pady=5)\r\nbtn4.grid(row=4, column=0, padx=5, pady=5)\r\nbtn5.grid(row=5, column=0, padx=5, pady=5)\r\nbtn6.grid(row=6, column=0, padx=5, pady=5)\r\nbtn7.grid(row=7, column=0, padx=5, pady=5)\r\nbtn8.grid(row=8, column=0, padx=5, pady=5)\r\nbtn9.grid(row=9, column=0, padx=5, pady=5)\r\nbtn10.grid(row=10, column=0, padx=5, pady=5)\r\n\r\nfrm_buttons.grid(row=1, column=0, sticky=\"ns\")\r\ntxt_edit.grid(row=1, column=1, sticky=\"nsew\")\r\n\r\nroot.mainloop()\r\n","repo_name":"AtlasIdol/SDEV220-Final-Project","sub_path":"sponsor_manager.py","file_name":"sponsor_manager.py","file_ext":"py","file_size_in_byte":9127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19543458702","text":"def solution(k, score):\n honors = []\n result = []\n \n for i in score:\n honors.append(i)\n if len(honors) > k:\n honors.remove(min(honors))\n result.append(min(honors))\n \n return result\n\nprint(f'test1 = {solution(3, [10,100,20,150,1,100,200])}')\nprint(f'test2 = {solution(4, [0,300,40,300,20,70,150,50,500,1000])}')","repo_name":"Ji-Hwan-Jung/coding-test","sub_path":"level1/명예의 전당(1).py","file_name":"명예의 전당(1).py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70052238167","text":"from django.shortcuts import render,redirect\nfrom compose.forms import MailForm\n\n\n\n# Create your views here.\n# def index(request):\n# return render(request,'load.html')\n\ndef index(request):\n if request.method == \"POST\":\n form = MailForm(request.POST)\n if form.is_valid():\n mail_item=form.save(commit=False)\n mail_item.save()\n return redirect('/')\n else:\n form=MailForm()\n return render(request,'load.html',{'form':form})\n\n\n\n","repo_name":"ArunBalajiR/Django-Email-Editor","sub_path":"compose/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"70728461848","text":"from infrastructure.dataset_repository import DatasetRepository\nfrom domain.dataset_factory import DatasetFactory\nfrom domain.service import PreprocessingOptions, PreprocessingService\nfrom infrastructure.metadata import MetadataRepository\nfrom pydantic import BaseModel\nfrom typing import List, Dict\nfrom domain.service import Metadata\n\nclass PreprocessingFitTransformArgs(BaseModel):\n input_dir_path: str\n output_dir_path: str\n fulfillment_mode: str\n 
columns_to_fulfill: List[str]\n run_name: str\n\nclass PreprocessingFitTransformFacade:\n def __init__(self,\n dataset_repository: DatasetRepository,\n preprocessing_service: PreprocessingService,\n metadata_repository: MetadataRepository,\n ):\n self._dataset_repository = dataset_repository\n self._preprocessing_service = preprocessing_service\n self._metadata_repository = metadata_repository\n\n def fit_transform(self, args: PreprocessingFitTransformArgs):\n dataset_factory = DatasetFactory(self._dataset_repository)\n dataset = dataset_factory.create_from_files(\n input_dir_path=args.input_dir_path)\n\n preprocessing_options = PreprocessingOptions(fulfillment_mode=args.fulfillment_mode,\n columns_to_fulfill=args.columns_to_fulfill,\n )\n\n metadata = self._preprocessing_service.preprocess(dataset=dataset,\n preprocessing_options=preprocessing_options,\n )\n self._dataset_repository.save_dataset(\n dataset, output_dir_path=args.output_dir_path)\n\n self._metadata_repository.save_metadata(\n metadata=metadata.to_dict(), run_name=args.run_name)\n\nclass PreprocessingTransformFacade:\n def __init__(self,\n metadata_repository: MetadataRepository,\n preprocessing_service: PreprocessingService,\n dataset_repository: DatasetRepository,\n ):\n self._metadata_repository = metadata_repository\n self._preprocessing_service = preprocessing_service\n self._dataset_repository = dataset_repository\n\n def transform(self,\n measurements: List[Dict],\n run_name: str):\n\n measurements_series = DatasetFactory.create_from_dict(measurements)\n\n metadata = Metadata.from_dict(self._metadata_repository.get_metadata(\n run_name=run_name))\n\n preprocessing_options = PreprocessingOptions(fulfillment_mode=metadata.filler_metadata['filler_type'], \n columns_to_fulfill=list(metadata.filler_metadata[\n 'filler_value'].keys()),\n )\n\n self._preprocessing_service.preprocess(dataset=measurements_series,\n preprocessing_options=preprocessing_options,\n metadata_do=metadata,\n ) ","repo_name":"swiatowiec/Forecast","sub_path":"preprocessing/domain/facade.py","file_name":"facade.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26109568295","text":"import os\nimport logging\n\nPARAM_FNAME = 'param.txt'\n\n\ndef read_params(user_dir):\n '''read last used source and dest params.\n\n '''\n params = dict(source='', dest1='', dest2='')\n\n param_file = os.path.join(user_dir, PARAM_FNAME)\n try:\n with open(param_file, 'r') as fout:\n content = fout.read().strip('\\n')\n params['source'], params['dest1'], params['dest2'] = content.split(\n \";\")\n\n # Do not load folders that no longer exist.\n for key, val in params.items():\n if val != '' and not os.path.exists(val):\n params[key] = ''\n\n except Exception as err:\n logging.getLogger(__name__).debug(\n 'Could not read parameters from %s. Error: %s', param_file,\n str(err))\n return params\n\n\ndef dump_params(user_dir, source, dest1, dest2):\n '''write last source and dest params\n\n '''\n delimiter = ';'\n param_file = os.path.join(user_dir, PARAM_FNAME)\n\n logging.getLogger(__name__).debug('Writing user params to %s', param_file)\n try:\n with open(param_file, 'w') as fout:\n fout.write(delimiter.join([source, dest1, dest2]))\n except Exception as err:\n logging.getLogger(__name__).error(\n 'Could not write parameters to %s. 
Error: %s', param_file,\n            str(err))\n","repo_name":"fmi-basel/faim-robocopy","sub_path":"faim_robocopy/params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"73586569048","text":"\nimport import_declare_test\n\nfrom splunktaucclib.rest_handler.endpoint import (\n    field,\n    validator,\n    RestModel,\n    SingleModel,\n)\nfrom splunktaucclib.rest_handler import admin_external, util\nfrom splunktaucclib.rest_handler.admin_external import AdminExternalHandler\nimport logging\n\nutil.remove_http_proxy_env_vars()\n\n\nfields = [\n    field.RestField(\n        'account_username',\n        required=True,\n        encrypted=False,\n        default=None,\n        validator=validator.String(\n            max_len=50, \n            min_len=1, \n        )\n    ), \n    field.RestField(\n        'account_password',\n        required=True,\n        encrypted=True,\n        default=None,\n        validator=validator.String(\n            max_len=8192, \n            min_len=1, \n        )\n    )\n]\nmodel = RestModel(fields, name=None)\n\n\nendpoint = SingleModel(\n    'graphee_accounts',\n    model,\n    config_name='accounts'\n)\n\n\nif __name__ == '__main__':\n    logging.getLogger().addHandler(logging.NullHandler())\n    admin_external.handle(\n        endpoint,\n        handler=AdminExternalHandler,\n    )\n","repo_name":"Bamfax/graphee","sub_path":"graphee/bin/graphee_rh_accounts.py","file_name":"graphee_rh_accounts.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"15162798906","text":"def lcs(x, y, m, n):\n    # build independent rows; [[None]*(m+1)]*(n+1) aliased a single row and had swapped dimensions\n    t = [[None]*(n+1) for _ in range(m+1)]\n    for i in range(0, m+1):\n        for j in range(0, n+1):\n            if i==0 or j==0:\n                t[i][j]=0\n            elif x[i-1] == y[j-1]:\n                t[i][j] = 1+t[i-1][j-1]\n            else:\n                t[i][j] = max(t[i-1][j], t[i][j-1])\n    print(t)\n    return t[m][n]\ndef buildPalindrome(st):\n    return len(st)-lcs(st, st[::-1], len(st), len(st))\nst = \"abaa\"\nprint(buildPalindrome(st))\n\n","repo_name":"aditya109/data-structures-and-algorithms","sub_path":"codesignal/buildPalindrome.py","file_name":"buildPalindrome.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5094024002","text":"import pandas as pandas\nimport os\nimport glob\nimport string as str\n\n\n# get the current working directory\npath = os.getcwd()\n\n# read all the files with extension .csv\nfilenames = glob.glob(os.path.join(path, \"*.csv\"))\n\nd_list = []\n\nfor file in filenames:\n    # reading csv files\n    d = pandas.read_csv(file, index_col=None, header=0)\n\n    #get sample name from file\n    file_name=file.split('_')[-2]\n    d[\"Sample\"] = file_name\n\n    # drop all but year from column names\n    #d.columns.str.replace('(\\-\\w+.*?)',\"\")\n    renameColumns = d.columns.str.replace(r'\\S*\\-\\S*','')\n    d.columns = renameColumns\n\n    mergeDuplicates = d.sum(axis=1, level=0)\n    mergeDuplicates = mergeDuplicates.reindex(sorted(mergeDuplicates.columns), axis=1)\n\n\n    d_list.append(mergeDuplicates)\n\n\nfinal_output = pandas.concat(d_list, axis=0, ignore_index=True) \n#re order columns 0->23\nfinal_output = final_output.reindex(sorted(final_output.columns), axis=1)\n#set Sample and Transition as index\nfinal_output = final_output.set_index(['Sample', 'Transition'])\nfinal_output.to_csv(\"SleepOutputConcatenate.csv\", 
index=True)","repo_name":"andtinsley/OHSU_Projects","sub_path":"SleepOutputsConcatenate/SleepOutputConcatenate.py","file_name":"SleepOutputConcatenate.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"340148573","text":"\"\"\"\nPlots the energy and seperation evolutions of the [sun, earth, jupiter] system.\n\"\"\"\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport initial_conditions as ic\n\nyear_in_s = ic.year_in_s\ndt = np.array([50, 25, 10, 1]) * ic.dt\nparticle_sets = [ic.create_particles(dt=i) for i in dt]\n\ndef modulus(vector):\n\t\ttotal = np.sqrt(sum(vector[i]**2 for i in range(3))) \n\t\treturn total\n\nmpl.rcParams[\"font.size\"] = 25\nfig, ax = plt.subplots(figsize=(10,10))\nax.set(xlabel='Time [yrs]', ylabel='Percentage change in Seperation')\nplt.gcf().set_tight_layout(True) # To prevent the xlabel being cut off\n\n\n\nfor index, particles in enumerate(particle_sets):\n time = 0\n sun = particles[0]\n earth = particles[1]\n seperation = [modulus(earth.pos - sun.pos)]\n time_tracker = [0]\n\n for i in range(int(year_in_s / dt[index]) * 10):\n time += dt[index] / year_in_s\n time_tracker.append(time)\n for p in particles:\n p.calc_next_v(particles)\n for p in particles:\n p.set_new_v()\n p.calc_next_pos()\n p.set_new_pos()\n seperation.append(modulus(earth.pos - sun.pos))\n \n ax.plot(time_tracker, ((seperation - seperation[0]) / seperation[0]) * 100, label = f\"{int(dt[index] / ic.dt)} day timestep\", linewidth=4)\n\nax.legend()\nplt.show()\n","repo_name":"zebsummerfield/Gravitational_Collapse","sub_path":"seperation_with_different_dt.py","file_name":"seperation_with_different_dt.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"38663618070","text":"import json\nVERSION = \"5.0\"\nPROFILE_HEADERS = {\n 'authority': 'twitter.com',\n 'accept': '*/*',\n 'accept-language': 'en-US,en;q=0.9',\n 'authorization': 'Bearer AAAAAAAAAAAAAAAAAAAAANRILgAAAAAAnNwIzUejRCOuH5E6I8xnZz4puTs%3D1Zv7ttfk8LF81IUq16cHjhLTvJu4FA33AGWWjCpTnA',\n 'origin': 'https://twitter.com',\n 'referer': 'https://twitter.com/settings/profile',\n 'sec-fetch-dest': 'empty',\n 'sec-fetch-mode': 'cors',\n 'sec-fetch-site': 'same-origin',\n 'sec-gpc': '1',\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',\n 'x-twitter-active-user': 'yes',\n 'x-twitter-auth-type': 'OAuth2Session',\n 'x-twitter-client-language': 'en'\n}\n\nwith open(\"config.json\", \"r\") as f:\n config = json.load(f)\n\nPROXY_URL = config.get(\"proxy\")\nNUM_THREADS = config.get(\"threads\")\nCT0_FIX = config.get(\"ct0_fix\")\nMAX_RETRIES = 10\n\nif PROXY_URL:\n PROXY = f\"http://{PROXY_URL}\"\nelse:\n PROXY = None\n","repo_name":"FatBeeBHW/Twitter-Account-Checker","sub_path":"util/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"31"} +{"seq_id":"706432420","text":"from Ship import Ship\r\nfrom exception import BoardOutException, ShipOutException\r\nclass Board(Ship):\r\n def __init__(self, board, ships, hid, live_ships, long, x, y, direction, quantity_of_life):\r\n super().__init__(long, x, y, direction, quantity_of_life)\r\n self.board = [[' ', '1', '2', '3', '4', '5', '6'],\r\n ['1', 'O', 'O', 'O', 'O', 'O', 
'O'],\r\n                      ['2', 'O', 'O', 'O', 'O', 'O', 'O'],\r\n                      ['3', 'O', 'O', 'O', 'O', 'O', 'O'],\r\n                      ['4', 'O', 'O', 'O', 'O', 'O', 'O'],\r\n                      ['5', 'O', 'O', 'O', 'O', 'O', 'O'],\r\n                      ['6', 'O', 'O', 'O', 'O', 'O', 'O']\r\n                      ]\r\n        self.ships=[]\r\n        self.hid=True\r\n        self.live_ships=6\r\n    def add_ship(self,board,ships,x,y,direction):\r\n        try:\r\n            self.x = int(input(\"Enter the ship's x coordinate: \"))\r\n            self.y = int(input(\"Enter the ship's y coordinate: \"))\r\n            self.long = int(input(\"Enter the ship's length, from 1 to 3: \"))\r\n            self.direction = str(input(\"Enter the ship's direction (upright or horizon): \"))\r\n            if (self.direction == 'upright' and self.x + self.long - 1 > 6) or (\r\n                    self.direction == 'horizon' and self.y + self.long - 1 > 6) or self.long > 3 or self.long < 0 or (\r\n                    self.direction != 'horizon' and self.direction != 'upright'):\r\n                raise ShipOutException\r\n        except ShipOutException:\r\n            print(\"Invalid coordinates for the ship: \")\r\n        else:\r\n            self.ships.append(self)\r\n\r\n\r\n    def contour(self,x,y,direction,long):\r\n        if self.direction=='horizon':\r\n            if x+long<7 :\r\n                self.board[x+long][y]='#'\r\n            if x-1>0:\r\n                self.board[x-1][y]='#'\r\n            if y+1<7:\r\n                self.board[x][y+1]='#'\r\n            if y-1>0:\r\n                self.board[x][y-1]='#'\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Singaevskii/sea-battle-singaevskaia","sub_path":"board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24070696332","text":"import numpy as np\n\nfrom gym_minigrid.minigrid import *\nfrom gym_minigrid.register import register\n\n\nclass Ice(WorldObj):\n    def __init__(self):\n        super().__init__('ice', 'blue')\n\n    def can_overlap(self):\n        return True\n\n    def render(self, img):\n        c = (119, 201, 240) # Pale blue\n\n        # Background color\n        fill_coords(img, point_in_rect(0, 1, 0, 1), c)\n\n\n# Add Ice to object index.\nOBJECT_TO_IDX['ice'] = max(OBJECT_TO_IDX.values()) + 1\n\n\nclass IceGridEnv(MiniGridEnv):\n    def __init__(self, size, mass=1, friction_norm=0.8):\n        super().__init__(\n            grid_size=size,\n            max_steps=4*size*size,\n            see_through_walls=False,\n            seed=None\n        )\n\n        self.mass = mass\n        self.friction_norm = friction_norm\n        self.agent_velocity = np.array([0, 0])\n\n    def _gen_grid(self, width, height):\n        assert width >= 5 and height >= 5\n\n        self.grid = Grid(width, height)\n\n        # Surrounding walls.\n        self.grid.wall_rect(0, 0, width, height)\n\n        # Fill with ice.\n        for i in range(1, width - 1):\n            for j in range(1, height - 1):\n                self.put_obj(Ice(), i, j)\n\n        # Agent top left.\n        self.agent_pos = [1, 1]\n        self.agent_dir = 0\n\n        # Place goal bottom right.\n        self.goal_pos = [\n            self._rand_int(5, width - 1),\n            self._rand_int(5, height - 1)\n        ]\n        self.put_obj(Goal(), *self.goal_pos)\n\n        self.mission = \"Get to the goal square\"\n\n    def take_discrete_step(self):\n        \"\"\"Take one time step wrt the velocity vector.\"\"\"\n        self.agent_pos = np.round(\n            self.agent_pos + self.agent_velocity\n        ).astype(int)\n\n        if self.agent_pos[0] < 1:\n            self.agent_pos[0] = 1\n            self.agent_velocity[0] = 0\n\n        elif self.agent_pos[0] > self.grid.width - 1:\n            self.agent_pos[0] = self.grid.width - 2\n            self.agent_velocity[0] = 0\n\n        if self.agent_pos[1] < 1:\n            self.agent_pos[1] = 1\n            self.agent_velocity[1] = 0\n\n        elif self.agent_pos[1] > self.grid.height - 1:\n            self.agent_pos[1] = self.grid.height - 2\n            self.agent_velocity[1] = 0\n\n    def step(self, action):\n        if action != self.actions.forward:\n            return super().step(action)\n\n        
# Move with inertia.\n        # Force applied by action.\n        # The constant here determines how strongly the agent gets pushed.\n        force = 2.0 * DIR_TO_VEC[self.agent_dir]\n\n        # Add friction (only when the agent is actually moving; summing the\n        # components would miss velocities like (2, -2) and allow negatives).\n        if np.linalg.norm(self.agent_velocity) > 0:\n            friction = (\n                -self.friction_norm *\n                self.agent_velocity / np.linalg.norm(self.agent_velocity)\n            )\n            force += friction\n\n        acceleration = force / self.mass\n        self.agent_velocity = self.agent_velocity + acceleration\n\n        # Discretize the updated square.\n        self.take_discrete_step()\n        cell = self.grid.get(*self.agent_pos)\n        reward = 0\n        done = False\n\n        if cell is not None and cell.type == 'goal':\n            done = True\n            reward = self._reward()\n\n        if self.step_count >= self.max_steps:\n            done = True\n\n        obs = self.gen_obs()\n\n        return obs, reward, done, {}\n\n\n\nclass IceGridS50Env(IceGridEnv):\n    def __init__(self):\n        super().__init__(size=50)\n\n\nregister(\n    id='MiniGrid-IceGridS50-v0',\n    entry_point='ice_slippery:IceGridS50Env'\n)","repo_name":"legaultmarc/gym-minigrid-ice","sub_path":"ice_slippery.py","file_name":"ice_slippery.py","file_ext":"py","file_size_in_byte":3484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72484760089","text":"#!/usr/bin/env python3\nfrom handle_vtt import *\nimport unittest\n\n\nclass TestHandle_Vtt(unittest.TestCase):\n    def test_parse_vtt_str(self):\n        time_string = \"00:00:08.690 --> 00:00:08.700 align:start position:0%\"\n        start = parse_vtt_str(time_string)\n        self.assertEqual(start, 8.0)\n        time_string = \"00:10:08.690 --> 00:00:08.700 align:start position:0%\"\n        start = parse_vtt_str(time_string)\n        self.assertEqual(start, 608.0)\n\n    def test_find_vtt_video(self):\n        fileVar = \"/Users/huangyingw/Dropbox/learning/AI/fast.ai/Intro_to_Machine_Learning_-_Lesson_1.en.vtt\"\n        videoFile = find_vtt_video(fileVar)\n        self.assertEqual(videoFile, '/Users/huangyingw/Dropbox/learning/AI/fast.ai/Intro_to_Machine_Learning_-_Lesson_1.mkv')\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"huangyingw/loadrc","sub_path":"pythonrc/handle_vtt_test.py","file_name":"handle_vtt_test.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20051244445","text":"x = int(input())\r\ncontador = 0\r\ntotal = 0\r\n\r\nwhile True:\r\n    y = int(input())\r\n    if x >= y:\r\n        continue\r\n    else:\r\n        break\r\nwhile True:\r\n    total += x\r\n    contador += 1\r\n    x += 1\r\n    if total > y:\r\n        break\r\nprint(contador)","repo_name":"gustavoLimar/beecrowd","sub_path":"beecrowd 1150.py","file_name":"beecrowd 1150.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"35324765860","text":"import customtkinter\n\ncustomtkinter.set_appearance_mode(\"System\")  # Modes: system (default), light, dark\ncustomtkinter.set_default_color_theme(\"blue\")  # Themes: blue (default), dark-blue, green\n\napp = customtkinter.CTk()  # create CTk window like you do with the Tk window\nscreen_width = app.winfo_screenwidth()\nscreen_height = app.winfo_screenheight()\n\n# Set window size to 1024x720 unless the screen is too small\nwindow_width = min(1024, screen_width)\nwindow_height = min(720, screen_height)\napp.geometry(f\"{window_width}x{window_height}\")\n\ndef button_function():\n    app.destroy()\n\n# CTkButton example \nbutton = customtkinter.CTkButton(master=app, text=\"End!\", command=button_function)\nbutton.place(relx=0.5, rely=0.5, 
anchor=customtkinter.CENTER)\n\napp.mainloop()","repo_name":"ProfessorRich/dnd-combat-simulator","sub_path":"tkinter_interface.py","file_name":"tkinter_interface.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"19105817052","text":"import matplotlib.pyplot as plot\nimport pandas as pandas\n\n\n\"\"\"\ndef pandas_plot_single(pandas_dataframe: pandas.DataFrame, y_axs: list, x_axs: list, title: str = \"\") -> plot:\n\nFunction to create a scatter plot for a single pair of columns.\n\nParameters:\n pandas_dataframe (pandas.DataFrame): input data in the form of a Pandas DataFrame.\n y_axs (list): list of column names to be used as the y-axis data.\n x_axs (list): list of column names to be used as the x-axis data.\n title (str): title of the plot.\n\nReturns: plot\n\"\"\"\n\ndef pandas_plot_single(pandas_dataframe: pandas.DataFrame, y_axs: list, x_axs: list, title: str = \"\") -> plot:\n # Build the scatter plot\n plot.scatter(y_axs, x_axs)\n plot.xlabel(y_axs)\n plot.ylabel(x_axs)\n plot.title(title)\n return plot\n\n\n\"\"\"\ndef pandas_plot_figure(pandas_dataframe: pandas.DataFrame, col_names: list[str], axs_col_name: str, tittle : str = \"\") -> plot:\n\nFunction to create a plot with multiple lines.\n\nParameters:\n pandas_dataframe (pandas.DataFrame): input data in the form of a Pandas DataFrame.\n col_names (list): list of column names to be plotted.\n axs_col_name (str): column name to be used as the x-axis data.\n tittle (str): title of the plot.\n\nReturns: plot\n\"\"\"\n\ndef pandas_plot_figure(pandas_dataframe: pandas.DataFrame, col_names: list[str], axs_col_name: str, tittle : str = \"\") -> plot:\n x_axs = pandas_dataframe[axs_col_name].tolist()\n \n fig, ax = plot.subplots() # Create a matplotlib figure\n for col_name in col_names:\n ax.plot(x_axs, pandas_dataframe[col_name].tolist(), label=col_name)# Set title and labels\n \n ax.set_xlabel(axs_col_name)\n ax.set_ylabel('score')# Add a legend\n ax.legend(loc='lower center', bbox_to_anchor=(1.25, 0.5), ncol=3)\n ax.set_title(tittle)\n return plot\n \n \n\"\"\"\ndef pandas_plot_scatter(pandas_dataframe: pandas.DataFrame, col_names: list[str], axs_col_name: str, tittle : str = \"\") -> plot:\n\nFunction to create a scatter plot for multiple pairs of columns.\n\nParameters:\n pandas_dataframe (pandas.DataFrame): input data in the form of a Pandas DataFrame.\n col_names (list): list of column names to be plotted.\n axs_col_name (str): column name to be used as the x-axis data.\n tittle (str): title of the plot.\n\nReturns: plot\n\"\"\"\n\ndef pandas_plot_scatter(pandas_dataframe: pandas.DataFrame, col_names: list[str], axs_col_name: str, tittle : str = \"\") -> plot:\n plot.subplots(figsize=(8, 6))\n plot.subplots_adjust(left=0.1)\n plot.title(tittle)\n plot.xlabel(axs_col_name)\n plot.ylabel(\"z-score\")\n \n x_axs = pandas_dataframe[axs_col_name].tolist()\n \n for column in col_names:\n plot.scatter(x_axs, pandas_dataframe[column].tolist(), label=column, s=9)\n \n plot.legend(loc='lower right', fontsize=8)\n\n return plot","repo_name":"mstrielnikov/score-standardization-spark","sub_path":"src/visualization/visualize_plot.py","file_name":"visualize_plot.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22312100675","text":"n = []\nwhile True:\n a = int(input('Digite um número: '))\n if a not in n:\n n.append(a)\n else:\n 
print('Número duplicado, não posso adicionar...')\n o = ' '\n while o not in 'SN':\n o = str(input('Deseja continuar? [S/N] ')).strip().upper()[0]\n if o == 'N':\n break\nn.sort()\nprint(f'Os valores digitados foram {n}')\n","repo_name":"VanessaCML/python","sub_path":"Desafios/desafio079.py","file_name":"desafio079.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70386659927","text":"# Advent of Code 2022\n#\n# From https://adventofcode.com/2022/day/7\n#\n\n\nclass Directory:\n def __init__(self, name, parent):\n self.name = name\n self.parent = parent\n self.contents = []\n\n def __repr__(self):\n return f\"Directory: {self.name}\"\n\n @property\n def size(self):\n return sum(item.size for item in self.contents)\n\n\nclass File:\n def __init__(self, name, size):\n self.name = name\n self.size = int(size)\n\n def __repr__(self):\n return f\"File: {self.name}\"\n\n\ndata = open(f'../inputs/day7.txt', 'r').read().split('\\n')\nroot_directory = Directory('root', None)\nwd = None\ndirectories = []\n\nfor cmd in data:\n if '$ cd' in cmd:\n dir_ = cmd.split(' ')[-1]\n if dir_ == '/':\n wd = root_directory\n continue\n elif dir_ == '..':\n wd = wd.parent\n continue\n else:\n for item_ in wd.contents:\n if isinstance(item_, Directory) and item_.name == dir_:\n wd = item_\n break\n if wd.name == dir_:\n continue\n print(f\"Error: In directory {wd.name}, unable to complete command {cmd}\")\n raise Exception\n elif '$ ls' in cmd:\n continue\n else:\n size_or_dir, name_ = cmd.split(' ')\n if size_or_dir.isnumeric():\n file = File(name_, size_or_dir)\n wd.contents.append(file)\n else:\n directory = Directory(name_, wd)\n wd.contents.append(directory)\n directories.append(directory)\n\nprint(f'Day 7, Part 1 {sum(directory.size for directory in directories if directory.size <= 100000)}')\n\ndisk_size = 70000000\nrequired = 30000000\nused = root_directory.size\nneeded_to_free = required - (disk_size - used)\nprint(f'Day 7, Part 2 {min(directory.size for directory in directories if directory.size >= needed_to_free)}')\n","repo_name":"davidxbuck/adventofcode","sub_path":"2022/src/Advent2022_07.py","file_name":"Advent2022_07.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"73090276889","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 14\n\n@author: lkivi\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport numpy as np\n\n\ndef plot_buildings(param, nodes):\n \n\n \n max_width = 20\n shift = 50\n for n in nodes:\n nodes[n][\"x\"] += shift\n nodes[n][\"y\"] += shift\n \n nodes[6][\"x\"] += 12\n nodes[6][\"y\"] += 12\n \n \n \n \n\n total_demands = np.zeros(len(nodes))\n for n in nodes: \n total_demands[n] = sum(sum((nodes[n][\"heat\"][d][t] + nodes[n][\"cool\"][d][t]) for t in range(24)) * param[\"day_weights\"][d] for d in range(param[\"n_clusters\"]))\n max_total_demand = np.max(total_demands)\n \n total = {}\n for demand in [\"heat\", \"cool\"]:\n total[demand] = np.zeros(len(nodes))\n for n in nodes:\n total[demand][n] = sum(sum(nodes[n][demand][d][t] for t in range(24)) * param[\"day_weights\"][d] for d in range(param[\"n_clusters\"]))\n\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n ax.set_xlabel(\"x [m]\", fontweight = \"bold\", fontsize = 12)\n ax.set_ylabel(\"y [m]\", fontweight = \"bold\", fontsize = 12)\n \n width = np.zeros(len(nodes))\n 
\n for n in nodes:\n width[n] = (total_demands[n]/max_total_demand)**0.25 * max_width\n theta = 360 * total[\"heat\"][n]/(total[\"heat\"][n] + total[\"cool\"][n])\n plt.scatter(nodes[n][\"x\"], nodes[n][\"y\"])\n wedge_heat = patches.Wedge((nodes[n][\"x\"], nodes[n][\"y\"]), width[n], 0, theta, fill = True, facecolor = \"red\", edgecolor = \"black\")\n ax.add_patch(wedge_heat)\n wedge_cool = patches.Wedge((nodes[n][\"x\"], nodes[n][\"y\"]), width[n], theta, 360, fill = True, facecolor = \"blue\", edgecolor = \"black\")\n ax.add_patch(wedge_cool)\n\n # Tag buildings\n for n in nodes:\n if nodes[n][\"name\"] in [\"15.1\", \"04.1\", \"16.4\", \"16.3\"]:\n ax.text(nodes[n][\"x\"], nodes[n][\"y\"]+1.1*width[n], str(n+1), fontsize = 12, horizontalalignment='center', fontweight = \"bold\")\n elif nodes[n][\"name\"] in [\"\"]:\n ax.text(nodes[n][\"x\"]+0.45*width[n], nodes[n][\"y\"]+0.6*width[n], str(n+1), fontsize = 12)\n# ax.plot([nodes[n][\"x\"]+0.2*width[n], nodes[n][\"x\"]+9], [nodes[n][\"y\"]+0.3*width[n],nodes[n][\"y\"]+19], \"black\") \n elif nodes[n][\"name\"] in [\"15.8\"]:\n ax.text(nodes[n][\"x\"], nodes[n][\"y\"]-1.2*width[n], str(n+1), fontsize = 12, horizontalalignment='center', verticalalignment = \"top\")\n# ax.plot([nodes[n][\"x\"]-0.2*width[n], nodes[n][\"x\"]-8], [nodes[n][\"y\"]-0.3*width[n],nodes[n][\"y\"]-18], \"black\")\n elif nodes[n][\"name\"] in [\"\"]:\n ax.text(nodes[n][\"x\"]-0.5*width[n]-20, nodes[n][\"y\"]+0.5*width[n], str(n+1), fontsize = 12)\n# ax.plot([nodes[n][\"x\"]-0.2*width[n], nodes[n][\"x\"]-8], [nodes[n][\"y\"]+0.3*width[n],nodes[n][\"y\"]+15], \"black\") \n else:\n ax.text(nodes[n][\"x\"], nodes[n][\"y\"]+1.2*width[n], str(n+1), fontsize = 12, horizontalalignment='center')\n# ax.plot([nodes[n][\"x\"]+0.2*width[n], nodes[n][\"x\"]+8], [nodes[n][\"y\"]+0.3*width[n],nodes[n][\"y\"]+15], \"black\")\n \n \n ax.set_axisbelow(True)\n plt.grid(color = \"grey\")\n \n# plt.axis('equal')\n ax.set_xlim(0,500)\n ax.set_ylim(0,400)\n xticks =np.arange(0,600,100)\n yticks =np.arange(0,500,100)\n plt.xticks(xticks)\n plt.yticks(yticks)\n xlabels = [\"{:2d}\".format(x) for x in xticks]\n ylabels = [\"{:2d}\".format(x) for x in yticks]\n ax.set_xticklabels(xlabels, fontsize = 12)\n ax.set_yticklabels(ylabels, fontsize = 12)\n \n plt.show()\n \n \n \n # Create legend\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n for n in nodes:\n plt.scatter(nodes[n][\"x\"], nodes[n][\"y\"], color = \"white\")\n wedges_legend = [patches.Wedge((70,325), 7.68, 0, 180, fill = True, facecolor = \"red\", edgecolor = \"black\"),\n patches.Wedge((70,325), 7.68, 180, 360, fill = True, facecolor = \"blue\", edgecolor = \"black\"),\n patches.Wedge((70,275), 9.65, 0, 180, fill = True, facecolor = \"red\", edgecolor = \"black\"),\n patches.Wedge((70,275), 9.65, 180, 360, fill = True, facecolor = \"blue\", edgecolor = \"black\"),\n patches.Wedge((70,225), 11.48, 0, 180, fill = True, facecolor = \"red\", edgecolor = \"black\"),\n patches.Wedge((70,225), 11.48, 180, 360, fill = True, facecolor = \"blue\", edgecolor = \"black\"),\n patches.Wedge((70,175), 13.65, 0, 180, fill = True, facecolor = \"red\", edgecolor = \"black\"),\n patches.Wedge((70,175), 13.65, 180, 360, fill = True, facecolor = \"blue\", edgecolor = \"black\"),\n patches.Wedge((70,125), 17.17, 0, 180, fill = True, facecolor = \"red\", edgecolor = \"black\"),\n patches.Wedge((70,125), 17.17, 180, 360, fill = True, facecolor = \"blue\", edgecolor = \"black\"),\n patches.Wedge((70,75), 20.4, 0, 180, fill = True, facecolor = \"red\", 
edgecolor = \"black\"),\n patches.Wedge((70,75), 20.4, 180, 360, fill = True, facecolor = \"blue\", edgecolor = \"black\"),\n ]\n for item in wedges_legend:\n ax.add_patch(item)\n \n ax.text(110, 325, \"100 MWh\", fontsize = 12, verticalalignment = \"center\")\n ax.text(110, 275, \"250 MWh\", fontsize = 12, verticalalignment = \"center\")\n ax.text(110, 225, \"500 MWh\", fontsize = 12, verticalalignment = \"center\")\n ax.text(110, 175, \"1000 MWh\", fontsize = 12, verticalalignment = \"center\")\n ax.text(110, 125, \"2500 MWh\", fontsize = 12, verticalalignment = \"center\")\n ax.text(110, 75, \"5000 MWh\", fontsize = 12, verticalalignment = \"center\")\n\n ax.set_xlim(0,500)\n ax.set_ylim(0,400)\n xticks =np.arange(0,600,100)\n yticks =np.arange(0,500,100)\n plt.xticks(xticks)\n plt.yticks(yticks)\n xlabels = [\"{:2d}\".format(x) for x in xticks]\n ylabels = [\"{:2d}\".format(x) for x in yticks]\n ax.set_xticklabels(xlabels, fontsize = 12)\n ax.set_yticklabels(ylabels, fontsize = 12)\n \n# ax.grid(False)\n# ax.set_xticks([])\n# ax.set_yticks([])\n\n plt.show() \n \n \n \n \n\n # # 15.1 (Labor)\n# ax.text(nodes[0][\"x\"]+8, nodes[0][\"y\"]+22, \"1\", fontsize = 12)\n# ax.plot([nodes[0][\"x\"]+3, nodes[0][\"x\"]+9], [nodes[0][\"y\"]+3,nodes[0][\"y\"]+19], \"black\")\n# # 04.01 (Restaurant)\n# ax.text(nodes[1][\"x\"]+8, nodes[1][\"y\"]+22, \"2\", fontsize = 12)\n# ax.plot([nodes[1][\"x\"]+3, nodes[1][\"x\"]+9], [nodes[1][\"y\"]+3,nodes[1][\"y\"]+19], \"black\")\n# # 16.4 (Rechenzentrum)\n# ax.text(nodes[2][\"x\"]+8, nodes[2][\"y\"]+22, \"3\", fontsize = 12)\n# ax.plot([nodes[2][\"x\"]+3, nodes[2][\"x\"]+9], [nodes[2][\"y\"]+3,nodes[2][\"y\"]+19], \"black\")\n# # 16.3 (Rechenzentrum)\n# ax.text(nodes[3][\"x\"]+8, nodes[3][\"y\"]+22, \"4\", fontsize = 12)\n# ax.plot([nodes[3][\"x\"]+3, nodes[3][\"x\"]+9], [nodes[3][\"y\"]+3,nodes[3][\"y\"]+19], \"black\")\n# # 15.13\n# ax.text(nodes[4][\"x\"]+8, nodes[4][\"y\"]+22, \"5\", fontsize = 12)\n# ax.plot([nodes[4][\"x\"]+3, nodes[4][\"x\"]+9], [nodes[4][\"y\"]+3,nodes[4][\"y\"]+19], \"black\") \n# # 15.8\n# ax.text(nodes[5][\"x\"]-18, nodes[5][\"y\"]-35, \"6\", fontsize = 12)\n# ax.plot([nodes[5][\"x\"]-4, nodes[5][\"x\"]-9], [nodes[5][\"y\"]-5,nodes[5][\"y\"]-19], \"black\") \n# # 15.7\n# ax.text(nodes[6][\"x\"]+8, nodes[6][\"y\"]+22, \"7\", fontsize = 12)\n# ax.plot([nodes[6][\"x\"]+3, nodes[6][\"x\"]+9], [nodes[6][\"y\"]+3,nodes[6][\"y\"]+19], \"black\") \n# # 15.14\n# ax.text(nodes[7][\"x\"]+8, nodes[7][\"y\"]+22, \"7\", fontsize = 12)\n# ax.plot([nodes[7][\"x\"]+3, nodes[7][\"x\"]+9], [nodes[7][\"y\"]+3,nodes[6][\"y\"]+19], \"black\") \n \n #ax.text(317705,5642825,\"04.01\", zorder = 1000, fontsize = size) \n","repo_name":"LKivi/Energy_System_Optimization","sub_path":"EctoPlanner/plot_buildings.py","file_name":"plot_buildings.py","file_ext":"py","file_size_in_byte":7738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25722859449","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bober_tasks', '0003_task_country'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='tasktranslation',\n name='template',\n field=models.CharField(default='default', max_length=255, choices=[(b'default', b'default.html')]),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='task',\n name='interaction_type',\n 
field=models.CharField(default=b'non-interactive', max_length=45),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='task',\n name='international_id',\n field=models.CharField(unique=True, max_length=16),\n preserve_default=True,\n ),\n ]\n","repo_name":"polz113/bober","sub_path":"django/bober/bober_tasks/migrations/0004_auto_20151109_1733.py","file_name":"0004_auto_20151109_1733.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"43092391196","text":"import sys\nimport os\nimport errno\nimport shutil\nimport re\nimport multiprocessing\nimport glob\n\nif sys.version_info < (3, 0):\n from Queue import Queue\nelse:\n from queue import Queue\nfrom threading import Thread\n\nfrom subprocess import Popen, PIPE, STDOUT\nfrom common_tasks import (\n process_glob_string,\n run_check_call,\n cleanup_folder,\n clean_coverage,\n log_file,\n read_file,\n is_error_code_5_allowed,\n create_code_coverage_params,\n find_whl,\n parse_setup\n)\n\nfrom pkg_resources import parse_requirements, RequirementParseError\nimport logging\n\nlogging.getLogger().setLevel(logging.INFO)\n\nroot_dir = os.path.abspath(os.path.join(os.path.abspath(__file__), \"..\", \"..\", \"..\"))\ncoverage_dir = os.path.join(root_dir, \"_coverage/\")\npool_size = multiprocessing.cpu_count() * 2\nDEFAULT_TOX_INI_LOCATION = os.path.join(root_dir, \"eng/tox/tox.ini\")\nIGNORED_TOX_INIS = [\"azure-cosmos\"]\ntest_tools_path = os.path.join(root_dir, \"eng\", \"test_tools.txt\")\ndependency_tools_path = os.path.join(root_dir, \"eng\", \"dependency_tools.txt\")\n\nclass ToxWorkItem:\n def __init__(self, target_package_path, tox_env, options_array):\n self.target_package_path = target_package_path\n self.tox_env = tox_env\n self.options_array = options_array\n\n\nclass Worker(Thread):\n def __init__(self, tasks):\n Thread.__init__(self)\n self.tasks = tasks\n self.daemon = True\n self.start()\n\n def run(self):\n while True:\n func, args, kargs = self.tasks.get()\n try:\n func(*args, **kargs)\n except Exception as e:\n logging.error(e)\n finally:\n self.tasks.task_done()\n\n\ndef in_ci():\n return os.getenv(\"TF_BUILD\", False)\n\n\nclass ThreadPool:\n def __init__(self, num_threads):\n self.tasks = Queue(num_threads)\n for _ in range(num_threads):\n Worker(self.tasks)\n\n def add_task(self, func, *args, **kargs):\n self.tasks.put((func, args, kargs))\n\n def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)\n\n def wait_completion(self):\n self.tasks.join()\n\n\ndef combine_coverage_files(targeted_packages):\n # find tox.ini file. 
tox.ini is used to combine coverage paths to generate a formatted report\n    tox_ini_file = os.path.join(root_dir, \"eng\", \"tox\", \"tox.ini\")\n    config_file_flag = \"--rcfile={}\".format(tox_ini_file)\n\n    if os.path.isfile(tox_ini_file):\n        # for every individual coverage file, run coverage combine to combine paths\n        for package_dir in [package for package in targeted_packages]:\n            coverage_file = os.path.join(package_dir, \".coverage\")\n            if os.path.isfile(coverage_file):\n                cov_cmd_array = [sys.executable, \"-m\", \"coverage\", \"combine\"]\n                # tox.ini file has coverage paths to combine\n                # Pass tox.ini as coverage config file\n                cov_cmd_array.extend([config_file_flag, coverage_file])\n                run_check_call(cov_cmd_array, package_dir)\n    else:\n        # not a hard error at this point\n        # this combine step is required only for modules whose report package name starts with .tox\n        logging.error(\"tox.ini is not found in path {}\".format(root_dir))\n\n\ndef collect_tox_coverage_files(targeted_packages):\n    root_coverage_dir = os.path.join(root_dir, \"_coverage/\")\n\n    clean_coverage(coverage_dir)\n\n    # coverage combine fixes this with the help of tox.ini[coverage:paths]\n    coverage_files = []\n    for package_dir in [package for package in targeted_packages]:\n        coverage_file = os.path.join(package_dir, \".coverage\")\n        if os.path.isfile(coverage_file):\n            destination_file = os.path.join(\n                root_coverage_dir, \".coverage_{}\".format(os.path.basename(package_dir))\n            )\n            shutil.copyfile(coverage_file, destination_file)\n            coverage_files.append(destination_file)\n\n    logging.info(\"Uploading .coverage files: {}\".format(coverage_files))\n\n\n\ndef individual_workload(tox_command_tuple, workload_results):\n    pkg = os.path.basename(tox_command_tuple[1])\n    stdout = os.path.join(tox_command_tuple[1], \"stdout.txt\")\n    stderr = os.path.join(tox_command_tuple[1], \"stderr.txt\")\n    tox_dir = os.path.join(tox_command_tuple[1], \"./.tox/\")\n\n    with open(stdout, \"w\") as f_stdout, open(stderr, \"w\") as f_stderr:\n        proc = Popen(\n            tox_command_tuple[0],\n            stdout=f_stdout,\n            stderr=f_stderr,\n            cwd=tox_command_tuple[1],\n            env=os.environ.copy(),\n        )\n\n        logging.info(\"Popen task started for {}\".format(pkg))\n        proc.wait()\n\n        return_code = proc.returncode\n\n        if proc.returncode != 0:\n            logging.error(\"{} returned with code {}\".format(pkg, proc.returncode))\n        else:\n            logging.info(\n                \"{} returned with code 0, output will be printed after the test run completes.\".format(\n                    pkg\n                )\n            )\n\n        if read_file(stderr):\n            logging.error(\"Package {} had stderr output. 
Logging.\".format(pkg))\n return_code = \"StdErr output detected\"\n\n workload_results[tox_command_tuple[1]] = (return_code, stdout, stderr)\n\n if in_ci():\n shutil.rmtree(tox_dir)\n\n\ndef execute_tox_parallel(tox_command_tuples):\n pool = ThreadPool(pool_size)\n workload_results = {}\n run_result = 0\n\n for index, cmd_tuple in enumerate(tox_command_tuples):\n pool.add_task(individual_workload, cmd_tuple, workload_results)\n\n pool.wait_completion()\n\n for key in workload_results.keys():\n log_file(workload_results[key][1])\n\n if workload_results[key][0] != 0:\n logging.error(\n \"{} tox invocation exited with returncode {}\".format(\n os.path.basename(key), workload_results[key][0]\n )\n )\n run_result = 1\n\n return run_result\n\n\ndef compare_req_to_injected_reqs(parsed_req, injected_packages):\n if parsed_req is None:\n return False\n\n return any(parsed_req.name in req for req in injected_packages)\n\n\ndef inject_custom_reqs(file, injected_packages, package_dir):\n req_lines = []\n injected_packages = [p for p in re.split(\"[\\s,]\", injected_packages) if p]\n\n if injected_packages:\n logging.info(\n \"Adding custom packages to requirements for {}\".format(package_dir)\n )\n with open(file, \"r\") as f:\n for line in f:\n try:\n parsed_req = [req for req in parse_requirements(line)]\n except RequirementParseError as e:\n parsed_req = [None]\n req_lines.append((line, parsed_req))\n\n if req_lines:\n all_adjustments = injected_packages + [\n line_tuple[0].strip()\n for line_tuple in req_lines\n if line_tuple[0].strip()\n and not compare_req_to_injected_reqs(\n line_tuple[1][0], injected_packages\n )\n ]\n else:\n all_adjustments = injected_packages\n\n with open(file, \"w\") as f:\n # note that we directly use '\\n' here instead of os.linesep due to how f.write() actually handles this stuff internally\n # If a file is opened in text mode (the default), during write python will accidentally double replace due to \"\\r\" being\n # replaced with \"\\r\\n\" on Windows. Result: \"\\r\\n\\n\". Extra line breaks!\n f.write(\"\\n\".join(all_adjustments))\n\n\ndef build_whl_for_req(req, package_path):\n if \"..\" in req:\n # Create temp path if it doesn't exist\n temp_dir = os.path.join(package_path, \".tmp_whl_dir\")\n if not os.path.exists(temp_dir):\n os.mkdir(temp_dir)\n\n req_pkg_path = os.path.abspath(os.path.join(package_path, req.replace(\"\\n\", \"\")))\n pkg_name, version, _, _ = parse_setup(req_pkg_path)\n logging.info(\"Building wheel for package {}\".format(pkg_name))\n run_check_call([sys.executable, \"setup.py\", \"bdist_wheel\", \"-d\", temp_dir], req_pkg_path)\n\n whl_path = os.path.join(temp_dir, find_whl(pkg_name, version, temp_dir))\n logging.info(\"Wheel for package {0} is {1}\".format(pkg_name, whl_path))\n logging.info(\"Replacing dev requirement. 
Old requirement:{0}, New requirement:{1}\".format(req, whl_path))\n return whl_path\n else:\n return req\n\ndef replace_dev_reqs(file, pkg_root):\n adjusted_req_lines = []\n\n with open(file, \"r\") as f:\n for line in f:\n args = [\n part.strip()\n for part in line.split()\n if part and not part.strip() == \"-e\"\n ]\n amended_line = \" \".join(args)\n adjusted_req_lines.append(amended_line)\n\n req_file_name = os.path.basename(file)\n logging.info(\"Old {0}:{1}\".format(req_file_name, adjusted_req_lines))\n\n adjusted_req_lines = list(map(lambda x: build_whl_for_req(x, pkg_root), adjusted_req_lines))\n logging.info(\"New {0}:{1}\".format(req_file_name, adjusted_req_lines))\n\n with open(file, \"w\") as f:\n # note that we directly use '\\n' here instead of os.linesep due to how f.write() actually handles this stuff internally\n # If a file is opened in text mode (the default), during write python will accidentally double replace due to \"\\r\" being\n # replaced with \"\\r\\n\" on Windows. Result: \"\\r\\n\\n\". Extra line breaks!\n f.write(\"\\n\".join(adjusted_req_lines))\n\n\ndef collect_log_files(working_dir):\n logging.info(\"Collecting log files from {}\".format(working_dir))\n package = working_dir.split('/')[-1]\n # collect all the log files into one place for publishing in case of tox failure\n\n log_directory = os.path.join(\n root_dir, \"_tox_logs\"\n )\n\n try:\n os.mkdir(log_directory)\n logging.info(\"Created log directory: {}\".format(log_directory))\n except OSError:\n logging.info(\"'{}' directory already exists\".format(log_directory))\n\n log_directory = os.path.join(\n log_directory, package\n )\n\n try:\n os.mkdir(log_directory)\n logging.info(\"Created log directory: {}\".format(log_directory))\n except OSError:\n logging.info(\"'{}' directory already exists\".format(log_directory))\n\n log_directory = os.path.join(\n log_directory, sys.version.split()[0]\n )\n\n try:\n os.mkdir(log_directory)\n logging.info(\"Created log directory: {}\".format(log_directory))\n except OSError:\n logging.info(\"'{}' directory already exists\".format(log_directory))\n\n for test_env in glob.glob(os.path.join(working_dir, \".tox\", \"*\")):\n env = os.path.split(test_env)[-1]\n logging.info(\"env: {}\".format(env))\n log_files = os.path.join(test_env, \"log\")\n\n if os.path.exists(log_files):\n logging.info(\"Copying log files from {} to {}\".format(log_files, log_directory))\n\n temp_dir = os.path.join(log_directory, env)\n logging.info(\"TEMP DIR: {}\".format(temp_dir))\n try:\n os.mkdir(temp_dir)\n logging.info(\"Created log directory: {}\".format(temp_dir))\n except OSError:\n logging.info(\"Could not create '{}' directory\".format(temp_dir))\n break\n\n for filename in os.listdir(log_files):\n if filename.endswith(\".log\"):\n logging.info(\"LOG FILE: {}\".format(filename))\n\n file_location = os.path.join(log_files, filename)\n shutil.move(\n file_location,\n os.path.join(temp_dir, filename)\n )\n logging.info(\"Moved file to {}\".format(os.path.join(temp_dir, filename)))\n else:\n logging.info(\"Could not find {} directory\".format(log_files))\n\n for f in glob.glob(os.path.join(root_dir, \"_tox_logs\", \"*\")):\n logging.info(\"Log file: {}\".format(f))\n\n\ndef execute_tox_serial(tox_command_tuples):\n return_code = 0\n\n for index, cmd_tuple in enumerate(tox_command_tuples):\n tox_dir = os.path.abspath(os.path.join(cmd_tuple[1], \"./.tox/\"))\n logging.info(\"tox_dir: {}\".format(tox_dir))\n\n logging.info(\n \"Running tox for {}. 
{} of {}.\".format(\n os.path.basename(cmd_tuple[1]), index + 1, len(tox_command_tuples)\n )\n )\n\n result = run_check_call(cmd_tuple[0], cmd_tuple[1], always_exit=False)\n\n if result is not None and result != 0:\n return_code = result\n\n if in_ci():\n collect_log_files(cmd_tuple[1])\n shutil.rmtree(tox_dir)\n\n return return_code\n\n\ndef prep_and_run_tox(targeted_packages, parsed_args, options_array=[]):\n if parsed_args.wheel_dir:\n os.environ[\"PREBUILT_WHEEL_DIR\"] = parsed_args.wheel_dir\n\n if parsed_args.mark_arg:\n options_array.extend([\"-m\", \"{}\".format(parsed_args.mark_arg)])\n\n tox_command_tuples = []\n\n for index, package_dir in enumerate(targeted_packages):\n destination_tox_ini = os.path.join(package_dir, \"tox.ini\")\n destination_dev_req = os.path.join(package_dir, \"dev_requirements.txt\")\n\n tox_execution_array = [sys.executable, \"-m\", \"tox\"]\n\n local_options_array = options_array[:]\n\n # Get code coverage params for current package\n package_name = os.path.basename(package_dir)\n coverage_commands = create_code_coverage_params(parsed_args, package_name)\n local_options_array.extend(coverage_commands)\n\n pkg_egg_info_name = \"{}.egg-info\".format(package_name.replace(\"-\", \"_\"))\n local_options_array.extend([\"--ignore\", pkg_egg_info_name])\n\n # if we are targeting only packages that are management plane, it is a possibility\n # that no tests running is an acceptable situation\n # we explicitly handle this here.\n if is_error_code_5_allowed(package_dir, package_name):\n local_options_array.append(\"--suppress-no-test-exit-code\")\n\n # if not present, re-use base\n if not os.path.exists(destination_tox_ini) or (\n os.path.exists(destination_tox_ini)\n and os.path.basename(package_dir) in IGNORED_TOX_INIS\n ):\n logging.info(\n \"No customized tox.ini present, using common eng/tox/tox.ini for {}\".format(\n os.path.basename(package_dir)\n )\n )\n tox_execution_array.extend([\"-c\", DEFAULT_TOX_INI_LOCATION])\n\n # handle empty file\n if not os.path.exists(destination_dev_req):\n logging.info(\"No dev_requirements present.\")\n with open(destination_dev_req, \"w+\") as file:\n file.write(\"\\n\")\n\n if in_ci():\n replace_dev_reqs(destination_dev_req, package_dir)\n replace_dev_reqs(test_tools_path, package_dir)\n replace_dev_reqs(dependency_tools_path, package_dir)\n os.environ[\"TOX_PARALLEL_NO_SPINNER\"] = \"1\"\n\n inject_custom_reqs(\n destination_dev_req, parsed_args.injected_packages, package_dir\n )\n\n if parsed_args.tox_env:\n tox_execution_array.extend([\"-e\", parsed_args.tox_env])\n\n if parsed_args.tenvparallel:\n tox_execution_array.extend([\"-p\", \"all\"])\n\n if local_options_array:\n tox_execution_array.extend([\"--\"] + local_options_array)\n\n tox_command_tuples.append((tox_execution_array, package_dir))\n\n if parsed_args.tparallel:\n return_code = execute_tox_parallel(tox_command_tuples)\n else:\n return_code = execute_tox_serial(tox_command_tuples)\n\n if not parsed_args.disablecov:\n collect_tox_coverage_files(targeted_packages)\n\n sys.exit(return_code)","repo_name":"mirespace/python-azure","sub_path":"scripts/devops_tasks/tox_harness.py","file_name":"tox_harness.py","file_ext":"py","file_size_in_byte":15912,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"28306106799","text":"import random\n\nimport hangman_words\n\nword = list(hangman_words.words())\nword_to_be_shown_at_end = \"\".join(word)\nrandom.shuffle(word)\n\n\ndef anagram():\n number_of_guesses = 3\n 
run_again = True\n    while run_again:\n        capture_guess = input(\"Guess the Word \\\"{}\\\" : \".format(\"\".join(word))).lower()\n        number_of_guesses -= 1\n        if capture_guess == word_to_be_shown_at_end:\n            print(\"Congratulations you have guessed the correct word : {}\".format(word_to_be_shown_at_end))\n            run_again = False\n        elif number_of_guesses == 0:\n            print(\"You are out of guesses!! Word was : {}\".format(word_to_be_shown_at_end))\n            run_again = False\n        else:\n            print(\"Wrong Guess!! You have {} more guesses\".format(number_of_guesses))\n\n\nif __name__ == '__main__':\n    print(\"This is an ANAGRAM Game!! You have 3 chances to guess the word\")\n    anagram()\n","repo_name":"SharmaVineet/PythonCode","sub_path":"Anagram_Game.py","file_name":"Anagram_Game.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42314868132","text":"# q*d + r = n; must be r,d,q or r,q,d\n# (d^3)/r + r = a^2\n\nLIMIT = 10**5\nsquares = set(i*i for i in range(1, int(LIMIT**0.5)+1))\ns = set()\n\nfor d in range(1, int(LIMIT**0.5)):\n    d3 = d**3\n    for r in range(d-1, 0, -1):\n        if d3/r + r > LIMIT: break\n        if d3 % r == 0:\n            if d3 // r + r in squares:\n                print(d, r, d/r)\n                s.add(d3 // r + r)\n\nprint(sum(s))","repo_name":"jxu/PyPE","sub_path":"src/141.py","file_name":"141.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14432528471","text":"\"\"\"Test script for multitracking\n\nThis script is not intended to be rigorous in software good practices\nThe script is used to learn about the OpenCV lib and in particular about its Tracker API\n\nSource: https://www.learnopencv.com/multitracker-multiple-object-tracking-using-opencv-c-python/\n\nUsage:\n\n    Open a terminal, go to the source file dir and type:\n    python 02_opencv-multi-tracker-test.py\n\n    Select a Tracker from the list \n    The video is displayed while the objects are tracked. 
The rendered video is generated in the same folder (out.mp4)\n\"\"\"\n\nimport sys\nimport cv2\nfrom random import randint\nimport json\n\ntrackerTypes = ['BOOSTING', 'MIL', 'KCF','TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']\n\ndef createTrackerByName(trackerType):\n  # Create a tracker based on tracker name\n  if trackerType == trackerTypes[0]: \n    tracker = cv2.TrackerBoosting_create()\n  elif trackerType == trackerTypes[1]: \n    tracker = cv2.TrackerMIL_create()\n  elif trackerType == trackerTypes[2]:\n    tracker = cv2.TrackerKCF_create()\n  elif trackerType == trackerTypes[3]:\n    tracker = cv2.TrackerTLD_create()\n  elif trackerType == trackerTypes[4]:\n    tracker = cv2.TrackerMedianFlow_create()\n  elif trackerType == trackerTypes[5]:\n    tracker = cv2.TrackerGOTURN_create()\n  elif trackerType == trackerTypes[6]:\n    tracker = cv2.TrackerMOSSE_create()\n  elif trackerType == trackerTypes[7]:\n    tracker = cv2.TrackerCSRT_create()\n  else:\n    tracker = None\n    print('Incorrect tracker name')\n    print('Available trackers are:')\n    for t in trackerTypes:\n      print(t)\n    \n  return tracker\n\n\n# Select tracker\nprint(\"Tracker list:\")\nfor i, tracker in enumerate(trackerTypes):\n    print( \" \" + str(i) + \" - \" + tracker)\nuser_input = input(\"Select tracker:\")\n\n# Specify the tracker type\ntrackerType = trackerTypes[int(user_input)] \n\n# Set video to load\nvideoPath = \"videos/run.mp4\"\n\n# Create a video capture object to read videos\ncap = cv2.VideoCapture(\"../../data/input.mkv\")\n\n# Define the codec and create VideoWriter object\nw = cap.get(cv2.CAP_PROP_FRAME_WIDTH)\nh = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\nfps = cap.get(cv2.CAP_PROP_FPS)\nprint(w)\nprint(h)\nprint(fps)\nout = cv2.VideoWriter( 'out.mp4',\n                        cv2.VideoWriter_fourcc(*'DIVX'), #cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), \n                        int(fps), \n                        (int(w), int(h))\n                    )\n\n# Read first frame\nsuccess, frame = cap.read()\n# quit if unable to read the video file\nif not success:\n  print('Failed to read video')\n  sys.exit()\n\n# Read file that contains the bounding boxes to track\njson_file = \"../../data/initial_conditions.json\"\nwith open(json_file) as f:\n    initial_conditions = json.load(f)\n\n## Select boxes\nbboxes = []\ncolors = [] \n\nfor obj in initial_conditions:\n    coordinates = tuple(obj[\"coordinates\"])\n    bboxes.append(coordinates)\n    \n    color = (randint(0, 255), randint(0, 255), randint(0, 255))\n    colors.append(color)\n\nprint('Selected bounding boxes {}'.format(bboxes)) \n\n# Create MultiTracker object\nmultiTracker = cv2.MultiTracker_create()\n\n# Initialize MultiTracker \nfor bbox in bboxes:\n  tracker = createTrackerByName(trackerType)\n  multiTracker.add(tracker, frame, bbox)\n\n# Process video and track objects\nwhile cap.isOpened():\n  success, frame = cap.read()\n  if not success:\n    break\n  \n  # get updated location of objects in subsequent frames\n  success, boxes = multiTracker.update(frame)\n\n  # draw tracked objects\n  for i, newbox in enumerate(boxes):\n    p1 = (int(newbox[0]), int(newbox[1]))\n    p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))\n    cv2.rectangle(frame, p1, p2, colors[i], 2, 1)\n\n  # show frame\n  cv2.imshow('MultiTracker', frame)\n  \n  # Save video\n  out.write(frame)\n\n  # quit on ESC button\n  if cv2.waitKey(1) & 0xFF == 27:  # Esc pressed\n    break\n\n\n\n  \n# Release the 
capture\ncap.release()\nout.release()\n","repo_name":"alejandroviegener/ObjectTracker","sub_path":"research/scripts/02_opencv-multi-tracker-test.py","file_name":"02_opencv-multi-tracker-test.py","file_ext":"py","file_size_in_byte":4032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18856120577","text":"from tkinter import *\nfrom random import *\n\nscreen_width = 600\nscreen_height = 400\nr_min = 20\nr_max = 50\ndt = 0.1 # physical time step between simulation frames\nV_max = 50\nfps = 20 # frames per second\nsleep_time = round(1000/fps)\ntarget_initial_number = 5\ntarget_max_number = 10\ndefault_target_born_time = 5 # seconds\n#ball_sprite_filename = \"ball_sprite.png\"\nscores_format = 'score: %d'\n\n\nclass MainWindow:\n    def __init__(self, root):\n        global canvas\n        canvas = Canvas(root)\n        canvas[\"width\"] = screen_width\n        canvas[\"height\"] = screen_height\n        canvas.pack()\n\n        self.gun = Gun()\n        self.shells = []\n        self.targets = [Target.generate_random() for i in range(target_initial_number)]\n        self.scores = 0\n        self.scores_text = canvas.create_text(screen_width - 50, 10,\n                                              text=scores_format%self.scores)\n        self.target_born_time = default_target_born_time\n        self.game_cycle() # start the game loop\n        canvas.bind(\"<Button-1>\", self.mouse_click)\n        canvas.bind(\"<Motion>\", self.mouse_motion)\n\n    def mouse_motion(self, event):\n        self.gun.aim(event.x, event.y)\n\n    def mouse_click(self, event):\n        \"\"\" Check whether a ball was hit; if so, \"pop\" it,\n        spawn a new ball, and award points for the old one.\n        \"\"\"\n        shell = self.gun.shoot(event.x, event.y)\n        self.shells.append(shell)\n\n    def game_cycle(self, *ignore):\n        canvas.after(sleep_time, self.game_cycle) # reschedule the game cycle\n        # spawn a new target if it is time to do so:\n        if len(self.targets) < target_max_number:\n            self.target_born_time -= sleep_time/1000\n            if self.target_born_time <= 0:\n                self.targets.append(Target.generate_random())\n                self.target_born_time = default_target_born_time\n\n        # move the targets and shells\n        for target in self.targets:\n            target.move()\n        for shell in self.shells:\n            shell.move()\n        # check every target for a collision with a shell\n        for shell in self.shells:\n            killed_target = None\n            for i, target in enumerate(self.targets):\n                if target.check_contact(shell.x, shell.y):\n                    self.scores += target.scores\n                    canvas.itemconfig(self.scores_text, text=scores_format%self.scores)\n                    target.destroy()\n                    killed_target = i\n                    break\n            if killed_target is not None:\n                self.targets.pop(killed_target)\n\n\nclass Ball:\n    def __init__(self, x, y, r, Vx, Vy, color):\n        self.x, self.y, self.r = x, y, r\n        self.Vx, self.Vy = Vx, Vy\n        self.avatar = canvas.create_oval(x-r, y-r, x+r, y+r, fill=color)\n\n    def check_contact(self, x, y):\n        l = ((self.x - x)**2 + (self.y - y)**2)**0.5\n        return l <= self.r\n\n    def destroy(self):\n        canvas.delete(self.avatar)\n\n    def move(self):\n        \"\"\" move the ball by its velocity \"\"\"\n        ax = 0\n        ay = 10\n        self.x += self.Vx*dt # add a correction term?!\n        self.y += self.Vy*dt\n        self.Vx += ax*dt\n        self.Vy += ay*dt\n        # reflections off the left, right and bottom edges\n        if self.x - self.r <= 0:\n            self.Vx = -self.Vx\n            self.x = self.r+1\n        if self.x + self.r >= screen_width:\n            self.Vx = -self.Vx\n            self.x = screen_width - self.r - 1\n        if self.y + self.r >= screen_height:\n            self.Vy = -self.Vy\n            self.y = screen_height - self.r - 1\n        canvas.coords(self.avatar, self.x-self.r, self.y-self.r,\n                      self.x+self.r, self.y+self.r)\n\n\nclass Shell(Ball):\n    def __init__(self, 
x, y, r, Vx, Vy):\n        super().__init__(x, y, r, Vx, Vy, \"red\")\n\n\nclass Target(Ball):\n    def __init__(self, x, y, r, Vx=0, Vy=0):\n        super().__init__(x, y, r, Vx, Vy, \"green\")\n        self.scores = 10 + r_max - r\n\n    @classmethod\n    def generate_random(cls):\n        r = randint(r_min, r_max)\n        x = randint(r, screen_width-r-1)\n        y = randint(r, screen_height-r-1)\n        # generate a random velocity\n        Vx = randint(-V_max, +V_max)\n        Vy = randint(-V_max, +V_max)\n        return Target(x, y, r, Vx, Vy)\n\n\nclass Gun:\n    max_cannon_length = 40\n    shell_radius = 5\n\n    def __init__(self):\n\n        self.x, self.y = [0, screen_height]\n        self.lx = 20\n        self.ly = 0\n        self.line = canvas.create_line(self.x, self.y, self.x+self.lx, self.y+self.ly,\n                                       width=5, fill='red')\n\n    def aim(self, x, y):\n        self.lx = (x - self.x)\n        self.ly = (y - self.y)\n        l = (self.lx**2 + self.ly**2)**0.5\n        self.lx = Gun.max_cannon_length*self.lx/l\n        self.ly = Gun.max_cannon_length*self.ly/l\n\n        canvas.coords(self.line, self.x, self.y, self.x+self.lx, self.y+self.ly,)\n\n    def shoot(self, x, y):\n        self.aim(x, y)\n        Vx = 1*self.lx\n        Vy = 1*self.ly\n        return Shell(self.x+self.lx, self.y+self.ly, self.shell_radius, Vx, Vy)\n\nroot_window = Tk()\n#ball_sprite = PhotoImage(file=ball_sprite_filename)\nwindow = MainWindow(root_window)\nroot_window.mainloop()\n","repo_name":"tkhirianov/fox_python_2016","sub_path":"lesson_21/the_gun.py","file_name":"the_gun.py","file_ext":"py","file_size_in_byte":5668,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"32232500648","text":"import math\nfrom multipledispatch import dispatch\n@dispatch(int)\ndef sum(a):\n    print(\"Square value: \")\n    print(a**2)\n@dispatch(int,int,int,int)\ndef sum(a,b,c,d):\n    print(\"Addition of 4 numbers: \")\n    print(a+b+c+d)\n@dispatch(int,int,int)\ndef sum(a,b,c):\n    print(\"Multiplication of 3 values: \")\n    print(a*b*c)\n@dispatch(int,int)\ndef sum(p,q):\n    t=p*q\n    print(\"Product value\")\n    print(t)\n    if t<0:\n        print(\"Not possible\")\n    else:\n        r=math.isqrt(t)\n        print(\"Square root\")\n        print(r)\n        if r**2==t:\n            print(\"This is a perfect square\")\n        else:\n            print(\"This is not a perfect square\")\nsum(7)\nsum(4,3,9,0)\nsum(2,6,3)\nsum(3,3)\n\n\n\n\n\n","repo_name":"Meghna131995/VSCode_RobotFramework_MeghnaSuresh","sub_path":"methodOverloading.py","file_name":"methodOverloading.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2354111046","text":"#Faça um programa que leia duas listas e que gere uma terceira comos elementos das duas primeiras\r\nl1 = []\r\nl2 = []\r\nl3 = []\r\nwhile True:\r\n    nomes = str(input(\"Digite o nome (SAIR para sair): \")).upper()\r\n    if nomes in 'SAIR':\r\n        break\r\n    else:\r\n        idades = int(input(\"Digite a idade(0 para sair): \"))\r\n        l1.append(nomes)\r\n        l2.append(idades)\r\nl3.extend(l1)\r\nl3.extend(l2)\r\nprint(l3)\r\n","repo_name":"Tkailaine/Python-exercicios","sub_path":"3.10 - gerador de lista.py","file_name":"3.10 - gerador de lista.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17544772473","text":"file_read = open('nasdaqlisted.txt', 'r')\nfile_write = open('sym.env', 'w')\n\nLines = file_read.readlines()\n\nsym = []\n\nfor num in range(len(Lines)):\n    if num == 0 or num == len(Lines)-1:\n        continue\n    sym.append(Lines[num].split('|')[0])\n\n    \n\nstring = \"symbols=[\" + (', '.join('\"' + item + '\"' for 
item in sym)) + \"]\"\n\nfile_write.write(string)\nfile_read.close()\nfile_write.close()\n\n","repo_name":"chanfe/random_stock","sub_path":"src/assests/parsefile.py","file_name":"parsefile.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"34353116510","text":"from __future__ import annotations\n\nimport pathlib\nimport os\n\nfrom header import SectionHeader\nfrom icecream import ic\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+\n# section_number     : 6\n# section_description: file_classes\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~-\n\nclass MonitoredFile:\n\n    def __init__(self, path : pathlib.Path) -> None:\n        self.path = path\n        self.filename = self.path.name.split(\".\")[0]\n        self.filetype = self.path.name.split(\".\")[-1]\n        self.prev_mod_time = self.get_mod_time()\n        self.pulse_file_changed : bool = False\n\n        # Attempt to read all lines on init to validate this is possible for this file.\n        try:\n            with open(self.path, \"r\") as f:\n                self.lines : list[str] = f.readlines()\n        except:\n            self.lines_readable = False\n        else:\n            self.lines_readable = True\n\n        # Save the RAM, we don't need the lines right now.\n        self.lines : list[str] = []\n\n    def get_mod_time(self) -> float:\n        try:\n            return os.stat(self.path).st_mtime\n        except:\n            return None\n\n    def detect_file_change(self) -> bool:\n        new_mod_time = self.get_mod_time()\n        if self.prev_mod_time != new_mod_time and new_mod_time is not None:\n            self.pulse_file_changed = True\n            self.prev_mod_time = new_mod_time\n        else:\n            self.pulse_file_changed = False\n\n        return self.pulse_file_changed\n\n    def __eq__(self, __value: object) -> bool:\n        return self.path == __value.path\n\n    def __hash__(self) -> int:\n        # hash() takes a single tuple; tuple(a, b, c) would raise TypeError\n        return hash((self.path, self.filename, self.filetype))\n\nclass SectionFile(MonitoredFile):\n    \"\"\"MonitoredFile that is one numbered and described part of a MasterFile\"\"\"\n\n    def __init__(\n            self,\n            path : pathlib.Path,\n            section_number: int,\n            section_description: str,\n            master_file : MasterFile\n        ) -> None:\n\n        super().__init__(path)\n        self.section_number = section_number\n        self.section_description = section_description\n        self.lines : list[str] = []\n        self.master_file = master_file\n\nclass MasterFile(MonitoredFile):\n    \"\"\"MonitoredFile with sections delimited by headers\"\"\"\n    def __init__(self, path : pathlib.Path) -> None:\n\n        super().__init__(path)\n        self.dir_master_sections = path.parent.resolve().joinpath(\"sections\").joinpath(self.filename)\n        self.sections : list[SectionFile] = []\n        self.section_header : SectionHeader = None\n\nclass HeaderSpecifier:\n\n    def __init__(\n            self,\n            header_start_line,\n            header_end_line,\n            section_number,\n            section_description\n        ) -> None:\n\n        self.header_start_line = header_start_line\n        self.header_end_line = header_end_line\n        self.code_start_line = None\n        self.code_end_line = None\n        self.section_number = section_number\n        self.section_description = section_description\n\ndef parse_master_file_headers(master_file : MasterFile) -> list[HeaderSpecifier]:\n\n    # The header sequence specifies the position\n    # in standard_header_sequence that is being\n    # parsed for\n\n    last_head_sq_index = len(master_file.section_header.key_sequence) - 1\n\n    head_sq_index = 0\n    parsing_header = False\n    header_lines : list[str] = []\n    return_header_specifiers : list[HeaderSpecifier] = []\n    new_header_specifier = None\n\n    section_number = None\n    section_description = None\n\n    # 
ic(\"Parsing master file\", master_file.filename)\n\n try:\n with open(master_file.path, \"r\") as f:\n\n for line_num, line in enumerate(f):\n line : str\n\n # ic(\"evaluating line\\n\", line)\n\n index_char = 0\n\n # Parse the entire line, which could contain\n # several header sequence elements\n while index_char < len(line):\n # ic(head_sq_index)\n current_header_sequence_string = master_file.section_header.key_sequence[head_sq_index]\n test_string = line[index_char : index_char + len(current_header_sequence_string)]\n header_sequence_match = test_string == current_header_sequence_string\n pulse_last_head_sq_index = last_head_sq_index == head_sq_index\n header_parse_success = False\n\n # ic(current_header_sequence_string, test_string, header_sequence_match, pulse_last_head_sq_index)\n\n # Header text match\n if header_sequence_match:\n index_char += len(master_file.section_header.key_sequence[head_sq_index])\n header_parse_success = True\n\n # Header sequence number\n if current_header_sequence_string == master_file.section_header.key_number:\n\n parse_key = False\n\n # Get the section number as string\n if pulse_last_head_sq_index:\n section_number_str = line[index_char:].strip()\n parse_key = True\n\n else:\n next_test_string = master_file.section_header.key_sequence[head_sq_index + 1]\n try:\n next_test_string_index = line[index_char:].index(next_test_string)\n except:\n pass\n else:\n section_number_str = line[index_char:][:next_test_string_index].strip()\n parse_key = True\n\n if parse_key:\n try:\n section_number = int(section_number_str)\n except:\n pass\n else:\n header_parse_success = True\n index_char += len(section_number_str)\n\n # Header sequence description\n if current_header_sequence_string == master_file.section_header.key_description:\n\n # Get the section number as string\n if pulse_last_head_sq_index:\n section_description = line[index_char:].strip()\n header_parse_success = True\n\n else:\n next_test_string = master_file.section_header.key_sequence[head_sq_index + 1]\n try:\n next_test_string_index = line[index_char:].index(next_test_string)\n except:\n pass\n else:\n section_description = line[index_char:][:next_test_string_index].strip()\n header_parse_success = True\n index_char += len(section_description)\n\n # Increment header sequence index\n # or reset if header parsing failure\n if header_parse_success:\n head_sq_index += 1\n if pulse_last_head_sq_index:\n\n head_sq_index = 0\n index_char = len(line)\n\n header_lines.append(line)\n\n # ic(\"header complete\", line_num, header_lines)\n new_header_specifier = HeaderSpecifier(\n header_start_line= line_num - len(header_lines) + 1,\n header_end_line= line_num,\n section_description= section_description,\n section_number= section_number\n )\n\n else:\n # ic(\"just a regular line\", line)\n head_sq_index = 0\n index_char = len(line)\n\n if new_header_specifier is not None:\n # ic(\"parsing when code starts and ends...\")\n\n if line.strip() and new_header_specifier.code_start_line is None:\n new_header_specifier.code_start_line = line_num\n # ic(new_header_specifier.code_start_line)\n\n if line.strip():\n new_header_specifier.code_end_line = line_num\n # ic(new_header_specifier.code_end_line)\n\n if not parsing_header:\n header_lines = []\n\n if header_parse_success and not pulse_last_head_sq_index:\n parsing_header = True\n header_lines.append(line.strip())\n # ic(\"Header parse success\",header_lines)\n\n elif parsing_header:\n\n parsing_header = False\n header_lines = []\n\n if new_header_specifier is not 
None:\n return_header_specifiers.append(new_header_specifier)\n\n except PermissionError:\n pass\n\n return return_header_specifiers\n\nif __name__ == \"__main__\":\n\n section_header = SectionHeader(\n key_sequence= [\n \"numnumnum\",\n \" \",\n \"numnumnum\",\n \" \",\n \"numnumnum\",\n \" \",\n \"numnumnum\",\n \" \",\n \"spank \",\n \"numnumnum\",\n \"\\n\",\n \" dobonk: \",\n \"descdescdesc\",\n \"\\n\",\n \"] browntown\"\n ],\n key_number= \"numnumnum\",\n key_description= \"descdescdesc\"\n )\n\n temporary_file_path = pathlib.Path(\"test.txt\").resolve()\n\n with open(temporary_file_path, \"w\") as f:\n\n f.write(section_header.generate_header(number= 1, description= \"section one\"))\n\n f.write(\"\\n\")\n\n f.write(\"code code\\ncode section 1\\n\")\n\n f.write(section_header.generate_header(number= 2, description= \"section two\"))\n\n f.write(\"\\n\")\n\n f.write(\"code section 2\\n\\n\\n\\n\\nend of section2\")\n\n f.write(\"\\n\")\n f.write(\"spank \")\n\n mfile = MasterFile(\n path= temporary_file_path,\n dir_master_sections= pathlib.Path(\"temp_sections\").resolve()\n )\n mfile.section_header = section_header\n\n if not mfile.dir_master_sections.is_dir():\n os.mkdir(mfile.dir_master_sections)\n\n mfile.parse()\n\n for section in mfile.sections:\n print(f\"Section {section.section_number}, {section.section_description}\")\n for line in section.lines:\n print(f\"\\t{line}\")\n\n with open(section.path, \"w\") as f:\n for line_number, line in enumerate(section.lines):\n if line_number < len(section.lines) - 1:\n f.write(line + \"\\n\")\n else:\n f.write(line)","repo_name":"UpAllNate/filesectioner","sub_path":"project_repo/file_class.py","file_name":"file_class.py","file_ext":"py","file_size_in_byte":11134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33306300148","text":"from django.shortcuts import get_object_or_404\nfrom rest_framework import viewsets\nfrom rest_framework.exceptions import (\n NotFound,\n PermissionDenied,\n ValidationError,\n)\n\nfrom environments.models import Environment\nfrom environments.permissions.constants import VIEW_ENVIRONMENT\nfrom projects.permissions import VIEW_PROJECT\n\n\nclass EnvironmentIntegrationCommonViewSet(viewsets.ModelViewSet):\n serializer_class = None\n pagination_class = None # set here to ensure documentation is correct\n model_class = None\n\n def get_queryset(self):\n if getattr(self, \"swagger_fake_view\", False):\n return self.model_class.objects.none()\n\n environment_api_key = self.kwargs[\"environment_api_key\"]\n\n try:\n environment = Environment.objects.get(api_key=environment_api_key)\n if not self.request.user.has_environment_permission(\n VIEW_ENVIRONMENT, environment\n ):\n raise PermissionDenied(\n \"User does not have permission to perform action in environment.\"\n )\n\n return self.model_class.objects.filter(environment=environment)\n except Environment.DoesNotExist:\n raise NotFound(\"Environment not found.\")\n\n def perform_create(self, serializer):\n environment = self.get_environment_from_request()\n\n if self.model_class.objects.filter(environment=environment).exists():\n raise ValidationError(\n f\"{self.model_class.__name__} for environment already exist.\"\n )\n\n serializer.save(environment=environment)\n\n def perform_update(self, serializer):\n environment = self.get_environment_from_request()\n serializer.save(environment=environment)\n\n def get_environment_from_request(self):\n \"\"\"\n Get environment object from URL parameters in 
request.\n \"\"\"\n return Environment.objects.get(api_key=self.kwargs[\"environment_api_key\"])\n\n\nclass ProjectIntegrationBaseViewSet(viewsets.ModelViewSet):\n serializer_class = None\n pagination_class = None\n model_class = None\n\n def get_queryset(self):\n if getattr(self, \"swagger_fake_view\", False):\n return self.model_class.objects.none()\n\n project = get_object_or_404(\n self.request.user.get_permitted_projects(VIEW_PROJECT),\n pk=self.kwargs[\"project_pk\"],\n )\n return self.model_class.objects.filter(project=project)\n\n def perform_create(self, serializer):\n project_id = self.kwargs[\"project_pk\"]\n if self.model_class.objects.filter(project_id=project_id).exists():\n raise ValidationError(\n f\"{self.model_class.__name__} for this project already exists.\"\n )\n serializer.save(project_id=project_id)\n\n def perform_update(self, serializer):\n project_id = self.kwargs[\"project_pk\"]\n serializer.save(project_id=project_id)\n","repo_name":"Flagsmith/flagsmith","sub_path":"api/integrations/common/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","stars":3272,"dataset":"github-code","pt":"31"} +{"seq_id":"543940191","text":"import re\n\nimport feedparser\nfrom django.conf import settings\nfrom django.core.cache import cache\n\n\ndef convert_http_to_https(url):\n url = url.replace(\"http://\", \"https://\") if url and url.startswith('http://') else url\n return url\n\n\ndef get_news_cached(base_url):\n cache_key = 'news_cache_key'\n news = cache.get(cache_key)\n if not news:\n news = get_news(base_url)\n cache.set(cache_key, news, settings.NEWS_FEED_CACHE_TIMEOUT)\n return news\n\n\ndef get_news(base_url):\n img_re = re.compile(r'')\n slug_re = re.compile(r'([\\w-]+$)')\n entries = get_news_feeds()\n for entity in entries:\n img_search = img_re.search(entity.description)\n try:\n entity.image = convert_http_to_https(img_search.group(1))\n entity.parsed_description = img_re.sub('', entity.description)\n except AttributeError:\n entity.parsed_description = entity.description\n\n try:\n entity.slug = slug_re.search(entity.link).group(1)\n entity.real_link = base_url + entity.slug if base_url and entity.slug else entity.link\n except AttributeError:\n entity.slug = None\n entity.real_link = entity.link\n\n if not hasattr(entity, 'image') or not entity.image:\n entity.is_default_image = True\n entity.image = settings.NEWS_FEED_DEFAULT_IMAGE\n\n return entries\n\n\ndef get_news_feeds():\n feed = feedparser.parse(settings.NEWS_FEED_URL)\n return feed.entries\n","repo_name":"City-of-Helsinki/digihel","sub_path":"news/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"31"} +{"seq_id":"5333826825","text":"\n\nimport pandas\n\ndf = pandas.read_csv('nato_phonetic_alphabet.csv')\nnato_dict = {row.letter: row.code for (index, row) in df.iterrows()}\n\ninput_word = input('Enter a word: ').upper()\ntry:\n code_list = [nato_dict[letter] for letter in input_word]\nexcept KeyError:\n raise KeyError(\"Sorry, only letters in the alphabet please\")\nelse:\n print(code_list)\n","repo_name":"loisbaker/100-days-of-code","sub_path":"Day26/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42807840312","text":"from tbtc.utils import initialize_contract\nfrom tbtc.bitcoin_helpers 
import point_to_p2wpkh_address\nimport logging\n\n# module-level logger; the retry branch below logged via an undefined name in the original\nlogger = logging.getLogger(__name__)\n\nclass Deposit():\n    \"\"\"\n    Deposit class to interact with the deposit contract\n    \"\"\"\n\n    def __init__(self, tbtc_system, deposit_contract, keep_address):\n        self.deposit_address = deposit_contract\n        self.keep_address = keep_address\n        self.tbtc_system = tbtc_system\n        self.deposit_contract = initialize_contract(tbtc_system.w3, deposit_contract, \"Deposit\")\n        self.keep_contract = initialize_contract(tbtc_system.w3, keep_address, \"BondedECDSAKeep\")\n    \n    def __repr__(self):\n        return f\"Deposit contract at: {self.deposit_contract}\"\n\n    def get_signer_public_key(self):\n        # finding an existing public key event\n        event = self.tbtc_system._get_existing_public_key_event(self.deposit_address)\n        # no registered public key found\n        if event == []:\n            # check if key is ready (get_all_entries returns an empty list when it is not)\n            transaction_filter = self.keep_contract.events.PublicKeyPublished.createFilter(\n                fromBlock='earliest',\n                toBlock='latest'\n            )\n            key_ready_event = transaction_filter.get_all_entries()\n            if key_ready_event == []:\n                logger.info(\"Retry again later; key has not been published yet\")\n                return None\n            else:\n                call = self.deposit_contract.functions.retrieveSignerPubkey()\n                receipt = self.tbtc_system._manage_transaction(\n                    call, \n                    gas_limit=160000\n                )\n                event = self.tbtc_system.system.events.RegisteredPubkey().processReceipt(receipt)\n        return point_to_p2wpkh_address(\n            event[0]['args']['_signingGroupPubkeyX'], \n            event[0]['args']['_signingGroupPubkeyY']\n        )\n\n    def get_lot_size(self):\n        return self.deposit_contract.functions.lotSizeSatoshis().call()","repo_name":"ankitchiplunkar/tbtc.py","sub_path":"tbtc/deposit.py","file_name":"deposit.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27553522719","text":"import logging\nimport os\nfrom pathlib import Path\nimport sys\nfrom dataclasses import dataclass, field\nfrom typing import List, Optional\nimport json\n\nimport numpy as np\nfrom datasets import load_dataset\nfrom sklearn.metrics import accuracy_score, precision_recall_fscore_support\n\nimport transformers\nfrom transformers import (\n    AutoConfig,\n    AutoModelForTokenClassification,\n    AutoTokenizer,\n    DataCollatorForTokenClassification,\n    HfArgumentParser,\n    PreTrainedTokenizerFast,\n    TrainingArguments,\n    set_seed,\n)\nfrom transformers.trainer_utils import is_main_process\n\nfrom .trainer import Trainer\nfrom .utils import LABEL_SETS\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass ModelArguments:\n    \"\"\"\n    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n    \"\"\"\n\n    model_name_or_path: str = field(\n        metadata={\n            \"help\":\n            \"Path to pretrained model or model identifier from huggingface.co/models\"\n        })\n    config_name: Optional[str] = field(\n        default=None,\n        metadata={\n            \"help\":\n            \"Pretrained config name or path if not the same as model_name\"\n        })\n    tokenizer_name: Optional[str] = field(\n        default=None,\n        metadata={\n            \"help\":\n            \"Pretrained tokenizer name or path if not the same as model_name\"\n        })\n    cache_dir: Optional[str] = field(\n        default=None,\n        metadata={\n            \"help\":\n            \"Where do you want to store the pretrained models downloaded from huggingface.co\"\n        },\n    )\n    freeze_layers: Optional[List[str]] = field(\n        default=None, metadata={\"help\": \"Which layer(s) to freeze\"})\n\n\n@dataclass\nclass DataTrainingArguments:\n    \"\"\"\n    Arguments pertaining to what data we are going to input our model for training and eval.\n    
\"\"\"\n\n task_name: Optional[str] = field(\n default=\"pos\",\n metadata={\"help\": \"The name of the task (ner, pos...).\"})\n dataset_name: Optional[str] = field(\n default=None,\n metadata={\n \"help\":\n \"The name of the dataset to use (via the datasets library).\"\n })\n dataset_config_name: Optional[str] = field(\n default=None,\n metadata={\n \"help\":\n \"The configuration name of the dataset to use (via the datasets library).\"\n })\n train_file: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The input training data file (a csv or JSON file).\"\n })\n validation_file: Optional[str] = field(\n default=None,\n metadata={\n \"help\":\n \"An optional input evaluation data file to evaluate on (a csv or JSON file).\"\n },\n )\n test_file: Optional[str] = field(\n default=None,\n metadata={\n \"help\":\n \"An optional input test data file to predict on (a csv or JSON file).\"\n },\n )\n overwrite_cache: bool = field(\n default=False,\n metadata={\"help\": \"Overwrite the cached training and evaluation sets\"})\n preprocessing_num_workers: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"The number of processes to use for the preprocessing.\"\n },\n )\n pad_to_max_length: bool = field(\n default=False,\n metadata={\n \"help\":\n \"Whether to pad all samples to model maximum sentence length. \"\n \"If False, will pad the samples dynamically when batching to the maximum length in the batch. More \"\n \"efficient on GPU but very bad for TPU.\"\n },\n )\n label_all_tokens: bool = field(\n default=False,\n metadata={\n \"help\":\n \"Whether to put the label for one word on all tokens of generated by that word or just on the \"\n \"one (in which case the other tokens will have a padding index).\"\n },\n )\n\n def __post_init__(self):\n if self.dataset_name is None and self.train_file is None and self.validation_file is None:\n raise ValueError(\n \"Need either a dataset name or a training/validation file.\")\n else:\n if self.train_file is not None:\n extension = self.train_file.split(\".\")[-1]\n assert extension in [\n \"csv\", \"json\"\n ], \"`train_file` should be a csv or a json file.\"\n if self.validation_file is not None:\n extension = self.validation_file.split(\".\")[-1]\n assert extension in [\n \"csv\", \"json\"\n ], \"`validation_file` should be a csv or a json file.\"\n self.task_name = self.task_name.lower()\n\n\ndef load_args():\n v_path = Path(sys.argv[1])\n d_path = Path(sys.argv[2])\n\n v_name = v_path.name.split('.')[0]\n d_name = d_path.name.split('.')[0]\n\n with open(v_path) as f:\n args = json.load(f)\n with open(d_path) as f:\n args.update(json.load(f))\n\n while (v_path.parent / 'config.json').exists():\n v_path = v_path.parent\n print(v_path)\n args_ = args\n with open(v_path / 'config.json') as f:\n args = json.load(f)\n args.update(args_)\n\n args['output_dir'] = os.path.join(args['output_dir'], v_name, d_name)\n if os.path.exists(args['output_dir']) and args['overwrite_output_dir']:\n ckpt = 0\n\n for ckpt_path in Path(args['output_dir']).glob('checkpoint-*'):\n this_ckpt = int(ckpt_path.name.split('-')[-1])\n if this_ckpt > ckpt:\n ckpt = this_ckpt\n\n if ckpt == 0:\n print('output dir exists, but does not contain a checkpoint')\n exit(1)\n ckpt_dir = os.path.join(args['output_dir'], f'checkpoint-{ckpt}')\n print('WARNING: continuing training', ckpt_dir)\n args['model_name_or_path'] = ckpt_dir\n return args\n\n\ndef main():\n # See all possible arguments in src/transformers/training_args.py\n # or by passing the --help flag 
to this script.\n # We now keep distinct sets of args, for a cleaner separation of concerns.\n\n parser = HfArgumentParser(\n (ModelArguments, DataTrainingArguments, TrainingArguments))\n\n args = load_args()\n model_args, data_args, training_args = parser.parse_dict(args)\n\n if (os.path.exists(training_args.output_dir)\n and os.listdir(training_args.output_dir) and training_args.do_train\n and not training_args.overwrite_output_dir):\n raise ValueError(\n f\"Output directory ({training_args.output_dir}) already exists and is not empty.\"\n \"Use --overwrite_output_dir to overcome.\")\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO\n if is_main_process(training_args.local_rank) else logging.WARN,\n )\n\n # Log on each process the small summary:\n logger.warning(\n f\"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}\"\n +\n f\"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}\"\n )\n # Set the verbosity to info of the Transformers logger (on main process only):\n if is_main_process(training_args.local_rank):\n transformers.utils.logging.set_verbosity_info()\n transformers.utils.logging.enable_default_handler()\n transformers.utils.logging.enable_explicit_format()\n logger.info(\"Training/evaluation parameters %s\", training_args)\n\n # Set seed before initializing model.\n set_seed(training_args.seed)\n\n # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)\n # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/\n # (the dataset will be downloaded automatically from the datasets Hub).\n #\n # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called\n # 'text' is found. 
You can easily tweak this behavior (see below).\n #\n # In distributed training, the load_dataset function guarantee that only one local process can concurrently\n # download the dataset.\n if data_args.dataset_name is not None:\n # Downloading and loading a dataset from the hub.\n datasets = load_dataset(data_args.dataset_name,\n data_args.dataset_config_name)\n else:\n data_files = {}\n if data_args.train_file is not None:\n data_files[\"train\"] = data_args.train_file\n if data_args.validation_file is not None:\n data_files[\"validation\"] = data_args.validation_file\n if data_args.test_file is not None:\n data_files[\"test\"] = data_args.test_file\n extension = data_args.train_file.split(\".\")[-1]\n datasets = load_dataset(extension, data_files=data_files)\n # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at\n # https://huggingface.co/docs/datasets/loading_datasets.html.\n\n if training_args.do_train:\n column_names = datasets[\"train\"].column_names\n else:\n column_names = datasets[\"validation\"].column_names\n text_column_name = \"tokens\" if \"tokens\" in column_names else column_names[0]\n label_column_name = (f\"{data_args.task_name}_tags\"\n if f\"{data_args.task_name}_tags\" in column_names else\n column_names[1])\n\n label_names = LABEL_SETS[data_args.task_name]\n id2label = dict(enumerate(label_names))\n label2id = {l: i for i, l in id2label.items()}\n\n # Load pretrained model and tokenizer\n #\n # Distributed training:\n # The .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n config = AutoConfig.from_pretrained(\n model_args.config_name\n if model_args.config_name else model_args.model_name_or_path,\n id2label=id2label,\n label2id=label2id,\n finetuning_task=data_args.task_name,\n cache_dir=model_args.cache_dir,\n )\n tokenizer = AutoTokenizer.from_pretrained(\n model_args.tokenizer_name\n if model_args.tokenizer_name else model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n use_fast=True,\n )\n model = AutoModelForTokenClassification.from_pretrained(\n model_args.model_name_or_path,\n from_tf=bool(\".ckpt\" in model_args.model_name_or_path),\n config=config,\n cache_dir=model_args.cache_dir,\n )\n\n # Freeze layers\n if model_args.freeze_layers is not None:\n for name, p in model.named_parameters():\n if name in model_args.freeze_layers:\n p.requires_grad = False\n continue\n for lay_name in model_args.freeze_layers:\n if name.startswith(f'{lay_name}.'):\n p.requires_grad = False\n break\n\n # for name, param in model.named_parameters():\n # print(name, param.requires_grad)\n # exit(0)\n\n # Tokenizer check: this script requires a fast tokenizer.\n if not isinstance(tokenizer, PreTrainedTokenizerFast):\n raise ValueError(\n \"This example script only works for models that have a fast tokenizer. 
Checkout the big table of models \"\n \"at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this \"\n \"requirement\")\n\n # Preprocessing the dataset\n # Padding strategy\n padding = \"max_length\" if data_args.pad_to_max_length else False\n\n # Tokenize all texts and align the labels with them.\n def tokenize_and_align_labels(examples):\n tokenized_inputs = tokenizer(\n examples[text_column_name],\n padding=padding,\n truncation=True,\n # We use this argument because the texts in our dataset are lists of words (with a label for each word).\n is_split_into_words=True,\n )\n labels = []\n for i, label in enumerate(examples[label_column_name]):\n word_ids = tokenized_inputs.word_ids(batch_index=i)\n previous_word_idx = None\n label_ids = []\n for word_idx in word_ids:\n # Special tokens have a word id that is None. We set the label to -100 so they are automatically\n # ignored in the loss function.\n if word_idx is None:\n label_ids.append(-100)\n # We set the label for the first token of each word.\n elif word_idx != previous_word_idx:\n label_ids.append(label2id[label[word_idx]])\n # For the other tokens in a word, we set the label to either the current label or -100, depending on\n # the label_all_tokens flag.\n else:\n label_ids.append(label2id[label[word_idx]] if data_args.\n label_all_tokens else -100)\n previous_word_idx = word_idx\n\n labels.append(label_ids)\n tokenized_inputs[\"labels\"] = labels\n return tokenized_inputs\n\n tokenized_datasets = datasets.map(\n tokenize_and_align_labels,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n load_from_cache_file=not data_args.overwrite_cache,\n )\n\n # Data collator\n data_collator = DataCollatorForTokenClassification(tokenizer)\n\n # Metrics\n def compute_metrics(p):\n predictions, labels = p\n predictions = np.argmax(predictions, axis=2)\n\n # Remove ignored index (special tokens)\n true_predictions = [\n id2label[p] for prediction, label in zip(predictions, labels)\n for (p, lab) in zip(prediction, label) if lab != -100\n ]\n true_labels = [\n id2label[lab] for prediction, label in zip(predictions, labels)\n for (_, lab) in zip(prediction, label) if lab != -100\n ]\n\n mip, mir, mif, _ = precision_recall_fscore_support(true_labels,\n true_predictions,\n labels=label_names,\n average='micro')\n map, mar, maf, _ = precision_recall_fscore_support(true_labels,\n true_predictions,\n labels=label_names,\n average='macro')\n p, r, f, _ = precision_recall_fscore_support(true_labels,\n true_predictions,\n labels=label_names,\n average=None)\n\n res = {\n \"accuracy_score\": accuracy_score(true_labels, true_predictions),\n \"precision_micro\": mip,\n \"recall_micro\": mir,\n \"f1_micro\": mif,\n \"precision_macro\": map,\n \"recall_macro\": mar,\n \"f1_macro\": maf\n }\n for i, tag in enumerate(label_names):\n res.update({\n f'precision_{tag}': p[i],\n f'recall_{tag}': r[i],\n f'f1_{tag}': f[i]\n })\n return res\n\n # Initialize our Trainer\n trainer: Trainer = Trainer(\n model=model,\n args=training_args,\n train_dataset=tokenized_datasets[\"train\"]\n if training_args.do_train else None,\n eval_dataset=tokenized_datasets[\"validation\"]\n if training_args.do_eval else None,\n tokenizer=tokenizer,\n data_collator=data_collator,\n compute_metrics=compute_metrics,\n )\n\n # Training\n if training_args.do_train:\n model_path = (model_args.model_name_or_path if\n (model_args.model_name_or_path is not None\n and os.path.isdir(model_args.model_name_or_path)) else\n None)\n 
trainer.train(model_path=model_path)\n        trainer.save_model()  # Saves the tokenizer too for easy upload\n\n    # Evaluation\n    results = {}\n    if training_args.do_eval:\n        logger.info(\"*** Evaluate ***\")\n\n        results = trainer.evaluate()\n\n        output_eval_file = os.path.join(\n            training_args.output_dir,\n            f\"eval_results_{data_args.task_name}.txt\")\n        if trainer.is_world_process_zero():\n            with open(output_eval_file, \"w\") as writer:\n                logger.info(\"***** Eval results *****\")\n                for key, value in results.items():\n                    logger.info(f\"  {key} = {value}\")\n                    writer.write(f\"{key} = {value}\\n\")\n\n    # Predict\n    if training_args.do_predict:\n        logger.info(\"*** Predict ***\")\n\n        test_dataset = tokenized_datasets[\"test\"]\n        predictions, labels, metrics = trainer.predict(test_dataset)\n        predictions = np.argmax(predictions, axis=2)\n\n        # Remove ignored index (special tokens)\n        true_predictions = [[\n            id2label[p] for (p, lab) in zip(prediction, label) if lab != -100\n        ] for prediction, label in zip(predictions, labels)]\n\n        output_test_results_file = os.path.join(training_args.output_dir,\n                                                \"test_results.txt\")\n        if trainer.is_world_process_zero():\n            with open(output_test_results_file, \"w\") as writer:\n                for key, value in metrics.items():\n                    logger.info(f\"  {key} = {value}\")\n                    writer.write(f\"{key} = {value}\\n\")\n\n        # Save predictions\n        output_test_predictions_file = os.path.join(training_args.output_dir,\n                                                    \"test_predictions.txt\")\n        if trainer.is_world_process_zero():\n            with open(output_test_predictions_file, \"w\") as writer:\n                for prediction in true_predictions:\n                    writer.write(\" \".join(prediction) + \"\\n\")\n\n    return results\n\n\ndef _mp_fn(index):\n    # For xla_spawn (TPUs)\n    main()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"wietsedv/low-resource-adapt","sub_path":"src/train/train_pos.py","file_name":"train_pos.py","file_ext":"py","file_size_in_byte":18388,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"34229855159","text":"from pyglet.gl import *\r\n\r\n\r\nclass Camera:\r\n    def __init__(self):\r\n        self.x = 0\r\n        self.y = 0\r\n        self.scale = 1\r\n        self.actor = None\r\n\r\n    def move(self, x, y):\r\n        self.x = self.x + x\r\n        self.y = self.y + y\r\n        glTranslatef(-x, -y, 0)\r\n\r\n    def zoom(self, z):\r\n        self.scale += z\r\n        glScalef(self.scale, self.scale, self.scale)\r\n\r\n    def focus_on(self, actor):\r\n        self.actor = actor\r\n        xo = actor.position[0] - 300\r\n        yo = actor.position[1] - 300\r\n        glTranslatef(-xo, -yo, 0)\r\n        self.x = 0\r\n        self.y = 0\r\n\r\n\r\n    def place(self, x, y):\r\n        self.x = x\r\n        self.y = y\r\n        glTranslatef(-x, -y, 0)\r\n","repo_name":"solidsmokesoftware/solconomy","sub_path":"source/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74444363928","text":"import os\nfrom typing import Dict\n\nfrom airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\nfrom helpers import (read_sql, AIRFLOW_CONNECTION_ID, CSV_Table, CSV_TABLES,\n                     QUERIES_PATH, SCHEMA_NAME)\n\n\nclass CSVToTableOperator(BaseOperator):\n    ui_color = '#ededed'\n\n    @apply_defaults\n    def __init__(self,\n                 schema_name: str = SCHEMA_NAME,\n                 csv_tables: Dict[str, CSV_Table] = CSV_TABLES,\n                 queries_path: str = QUERIES_PATH,\n                 query_file: str = 'copy_csv_data.sql',\n                 postgres_conn_id: str = AIRFLOW_CONNECTION_ID,\n                 should_run: 
bool = True,\n                 *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        task_id = str(kwargs['task_id'])\n        if task_id.startswith('copy_') and task_id.endswith('_table'):\n            # len('copy_') = 5; len('_table') = 6\n            self.table = task_id[5:-6]\n        else:\n            raise ValueError(f'Invalid task_id=\"{task_id}\"')\n        self.schema_name = schema_name\n        self.csv_tables = csv_tables\n        self.queries_path = queries_path\n        self.query_file = query_file\n        self.postgres_conn_id = postgres_conn_id\n        self.should_run = should_run\n\n    def execute(self, context):\n        query_file = os.path.join(self.queries_path, self.query_file)\n        self.log.info(f'Running query from file \"{query_file:s}\" into table '\n                      f'\"{self.schema_name:s}.{self.table}\"...')\n        postgres = PostgresHook(postgres_conn_id=self.postgres_conn_id)\n        csv_table = self.csv_tables[self.table]._asdict()\n        for csv in csv_table['file_names']:\n            sql = read_sql(query_file,\n                           schema_name=self.schema_name,\n                           **csv_table,\n                           file_name=csv)\n            if self.should_run:\n                postgres.run(sql=sql)\n                self.log.info('Done!')\n            else:\n                self.log.info(sql)\n                self.log.info('Skipping this task.')\n","repo_name":"gcbeltramini/etl-project","sub_path":"etl/airflow_home/plugins/operators/csv_to_table.py","file_name":"csv_to_table.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12494388937","text":"class Solution:\n    def getRow(self, rowI: int) -> List[int]:\n        # declaring a list representing rows of Pascal's triangle. \n\n        # 1 element stands for the first row. \n        res = [[1]]\n        \n        # iterate through rows.\n        for i in range(rowI):\n            temp = [0] + res[-1] + [0]\n            row = []\n\n            # iterate through elements of the row. \n            for j in range(len(res[-1])+1):\n                row.append(temp[j] + temp[j+1])\n            res.append(row)\n\n        # returns the last row -> rowI. \n        return res[-1]","repo_name":"Deven1902/GFG-LEETCCODE","sub_path":"119-pascals-triangle-ii/pascals-triangle-ii.py","file_name":"pascals-triangle-ii.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"7457420508","text":"import copy\n\nN = int(input())\nP = list(map(int, input().split()))\n\n# dp = [0] * 10001\ndp = [False] * (max(P) * N + 1)\ndp[0] = True\n\nmax_val = 0\nfor v in P:\n    tmp_dp = copy.deepcopy(dp)\n    # print(\"v: {}\".format(v))\n    for i in range(max_val+1):\n        if tmp_dp[i]:\n            dp[v+i] = True\n    dp[v] = True\n    max_val += v\n\n# print(dp)\nprint(dp.count(True))\n","repo_name":"naru380/AtCoder","sub_path":"others/Typical_DP_Contest/A/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20409054800","text":"from pymongo import MongoClient\n\n\ndef find_bid_in_range(host, port, udb, ucollection, gte, lte):\n    connection = MongoClient(host, port)\n    db = connection[udb]\n    collection = db[ucollection]\n\n    # cursor finds the bids within the given range. 
This is then converted to a Python list object\n    # so that it can be printed below.\n    cursor = collection.find({\"AuctionFeeTotal\": {\"$gte\": gte, \"$lte\" : lte}})\n    results = list(cursor)\n\n    # Validates that there are bids to be printed (list(cursor) is empty, not None, when nothing matches).\n    if not results:\n        print(\"No bids found...\")\n        return None\n\n    # Print column titles\n    field_list = [\"AuctionID\", \"AuctionTitle\", \"Fund\", \"AuctionFeeTotal\"]\n\n    print(\"{0} | {1} | {2} | {3}\".format(field_list[0], field_list[1], field_list[2], field_list[3]))\n\n    for i in results:\n        listed_values = []\n        for j in i:\n            listed_values.append(i[j])\n        print(\"{0} | {1} | {2} | {3}\".format(listed_values[2], listed_values[1], listed_values[3], listed_values[9]))\n\n    return None\n\n\n# check_values abstracts the code for finding bids in the given range. The functions and logic below\n# validate the user input.\ndef check_values():\n    loop_condition = True\n    print(\"Find bids between a lesser and greater value Auction Fee Total\")\n    while loop_condition:\n        try:\n            lesser = int(input(\"Enter the lesser value: \"))\n            greater = int(input(\"Enter the greater value: \"))\n        except:\n            print(\"Enter a valid input...\")\n            continue\n        if lesser > greater:\n            print(\"The lower value is greater than the higher value. Please enter valid inputs...\")\n        else:\n            find_bid_in_range('localhost', 27017, 'CityData', 'bids', lesser, greater)\n            loop_condition = False\n\n","repo_name":"JamesCourcelle/FinalProject","sub_path":"MongoFiles/search_functions.py","file_name":"search_functions.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15512630886","text":"import random\nimport time\nfrom gmssl import sm3, func\n\ndef padding(msg):\n    msg.append(0x80)\n    len0 = len(msg)\n    msg = msg+[0]*(56-len0)\n\n    bit_length = (len0 - 1) * 8\n    bit_length_str = [bit_length % 0x100]\n    for i in range(7):\n        bit_length = int(bit_length / 0x100)\n        bit_length_str.append(bit_length % 0x100)\n    for i in range(8):\n        msg.append(bit_length_str[7-i])\n    return msg\n\ndef get_iv(str):\n    print(str)\n    res = []\n    for i in range(8):\n        res.append(int(str[i*8:i*8+8],16))\n        # print(str[i*8:i*8+8])\n    return res\n\ndef sm3_hash_0(len0,msg):\n    # print(msg)\n    len1 = len(msg)\n    reserve1 = len1 % 64\n    msg.append(0x80)\n    reserve1 = reserve1 + 1\n    # 56-64, add 64 byte\n    range_end = 56\n    if reserve1 > range_end:\n        range_end = range_end + 64\n\n    for i in range(reserve1, range_end):\n        msg.append(0x00)\n\n    bit_length = (len1 + len0) * 8\n    bit_length_str = [bit_length % 0x100]\n    for i in range(7):\n        bit_length = int(bit_length / 0x100)\n        bit_length_str.append(bit_length % 0x100)\n    for i in range(8):\n        msg.append(bit_length_str[7-i])\n\n    group_count = round(len(msg) / 64)\n\n    B = []\n    for i in range(0, group_count):\n        B.append(msg[i*64:(i+1)*64])\n\n    V = []\n    V.append(IV)\n    for i in range(0, group_count):\n        V.append(sm3.sm3_cf(V[i], B[i]))\n\n    y = V[i+1]\n    result = \"\"\n    for i in y:\n        result = '%s%08x' % (result, i)\n    return result\n\nstr1 = 'secret'\nstr2 = 'padding'\nmsg1 = func.bytes_to_list(str1.encode())\nmsg2 = func.bytes_to_list(str2.encode())\n\nIV = get_iv(sm3.sm3_hash(msg1))\n\nmsg3 = 
msg1+msg2\n\nprint(sm3_hash_0(len(msg1),msg2))\nprint(sm3.sm3_hash(msg3))","repo_name":"Maxlsc/Projects-of-CSPIE","sub_path":"Project3/length_append_attack_sm3.py","file_name":"length_append_attack_sm3.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"35498830375","text":"#!/usr/bin/env python\n\nif __name__ == \"__main__\":\n import re\n import tablib\n import logging\n import datetime\n import collections\n from argparse import ArgumentParser, FileType\n\n from core import HEADERS\n from core.parsers.mention import Mention\n\n date_prefix = datetime.date.today().strftime(\"%Y%m%d\")\n\n parser = ArgumentParser()\n parser.add_argument('input', metavar='INPUT', type=open)\n parser.add_argument(\"-o\", \"--output\", default=\"%s-newsclips.xls\" % date_prefix)\n args = parser.parse_args()\n\n logging.basicConfig(level=logging.DEBUG)\n\n log = logging.getLogger('newsclips.main')\n\n data = tablib.Dataset(headers=HEADERS)\n\n for line in args.input:\n item = {}\n line = line.strip()\n\n if not line:\n continue\n\n mention = Mention(line)\n mention.append(data)\n\n book = tablib.Databook((data, data.filter([\"in-the-news\"])))\n with open(args.output, 'wb') as fp:\n fp.write(book.xls)\n","repo_name":"edavis/newsclips","sub_path":"newsclips.py","file_name":"newsclips.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"3932740237","text":"if __name__ == \"__main__\":\n d = {'name' : 'parham',\n 'age' : 20,\n 'number' : [6568568, 9456356]}\n\n print(d) \n print(d['number'][0])\n\n print(list(d.keys()))\n\n for key, value in d.items():\n print(key, value)\n\n names = ['javad', 'angha', 'seyed', 'seyed']\n height = [177, 178, 170, 190]\n o = dict(zip(names, height))\n print(o)\n\n a = 5\n b = 6\n print(a, b)\n a, b = b, a\n print(a, b)","repo_name":"cesa-class/Python","sub_path":"Week 3/5 Dictionaries.py","file_name":"5 Dictionaries.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"74997295767","text":"def sort(sorted_list):\n if len(sorted_list) <= 1:\n return sorted_list\n\n middle = len(sorted_list) // 2\n left = sorted_list[:middle]\n right = sorted_list[middle:]\n left = sort(left)\n right = sort(right)\n return merge(left, right) \n\ndef merge(a_list, b_list):\n combined_list = []\n index_a = 0\n index_b = 0\n length_a = len(a_list)\n length_b = len(b_list)\n\n while index_a < length_a or index_b < length_b:\n if index_a < length_a and index_b < length_b:\n if a_list[index_b] <= b_list[index_b]: #error!!! 
should be ==> if a_list[index_a] <= b_list[index_b]:\n                combined_list += [a_list[index_a]]\n                index_a = index_a + 1\n            else:\n                combined_list += [b_list[index_b]]\n                index_b = index_b + 1\n        elif index_a < length_a:\n            combined_list += [a_list[index_a]]\n            index_a = index_a +1\n        else:\n            combined_list += [b_list[index_b]]\n            index_b = index_b + 1\n    return combined_list\n\n\n# main part\ndef main():\n    n = int(input())\n    a = []\n    for i in range(n):\n        a.append(int(input()))\n    b = sort(a)\n    for e in b:\n        print(e)\n\nmain()","repo_name":"EririSawamura/Debugging-tools-for-cpp-python","sub_path":"Test/Sorting Algorithm/merge sort/merge_sort_1.py","file_name":"merge_sort_1.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"36169732518","text":"import numpy as np\nimport time\nimport scipy.io as sio\n\nfrom ismore import brainamp_channel_lists\nfrom riglib.brainamp.rda import *\n\n\n\nfs = 1000\nchannels = brainamp_channel_lists.emg_eog2_eeg\n\ntotal_time = 120  # how many secs of data to receive and save\n\nn_samples = 2 * fs * total_time  # allocate twice as much space as expected\nn_chan = len(channels)\n\n\nDATA = np.zeros((n_chan, n_samples))\nidxs = np.zeros(n_chan, int)\n\nchan_to_row = dict()\nfor row, chan in enumerate(channels):\n    chan_to_row[chan] = row\n\nemgdata_obj = EMGData()\nemgdata_obj.start()\n\nstart_time = time.time()\n\nwhile (time.time() - start_time) < total_time:\n    chan, data = emgdata_obj.get()\n\n    row = chan_to_row[chan]\n    idx = idxs[row]\n\n    DATA[row, idx] = data['data']\n    idxs[row] += 1\n\n\nsave_dict = {'data': DATA}\nsio.matlab.savemat('brainamp_data.mat', save_dict)\n","repo_name":"carmenalab/brain-python-interface","sub_path":"tests/ibmi/brainamp/basic_brainamp_test.py","file_name":"basic_brainamp_test.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"8157355733","text":"# Download required libraries\nimport docx2txt\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n\n# This function's argument is a .docx file type document path\ndef main(document):\n    # Load data\n    resume = docx2txt.process(document)\n\n    # in this expression we use another .docx file type document against the one we want to score\n    # this document has the required keywords\n    job_description = docx2txt.process('job_description.docx')\n\n    # creating a list of text\n    texts_list = [resume, job_description]\n\n    count_v = CountVectorizer()\n\n    count_matrix = count_v.fit_transform(texts_list)\n\n    # Print the similarity score\n    print('Curriculum Vitae score:')\n    matchPercentage = cosine_similarity(count_matrix)[0][1] * 100\n\n    print(str(round(matchPercentage, 2)) + ' %')\n    input()\n\n\nif __name__ == '__main__':\n    main('myCV.docx')\n","repo_name":"cathbert/ResumeScanner","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18679274677","text":"#####################\n# Zadatak 3 #\n#####################\n\n\"\"\"\n    Based on example 2.6, load the image 'tiger.png'. By manipulating the corresponding numpy matrix, try to:\n    a) brighten the image (increase brightness),\n    b) rotate the image 90 degrees clockwise,\n    c) mirror the image,\n    d) reduce the resolution of the image x times (e.g. 
10 times),\n    e) display only the second quarter of the image along its width, while showing the image in full along its height; the remaining parts of the image should be black.\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef show(img):\n    plt.figure()\n    plt.imshow(img, cmap=\"gray\")\n    plt.show()\n\n# read the image into a buffer\nimg = plt.imread(\"../assets/images/tiger.png\", \"png\")\n\n# copy the image into a gray-scale buffer\nimg = img[:, :, 0].copy()\n\n# get the width & the height of the image\nheight, width = img.shape\n\n# show the base image\nshow(img)\n\n# increasing the brightness\nfor i in range(0, len(img)):\n    for j in range(0, len(img[i])):\n        img[i][j] = img[i][j] * 1.75\n        if (img[i][j] > 1.0):\n            img[i][j] = 1.0\nshow(img)\n\n# rotate the image 90deg clockwise\nrotated_img = np.zeros((width, height))\nfor y in range(height):\n    rotated_img[:, height - 1 - y] = img[y, :]\nshow(rotated_img)\n\n# mirror the image\nrotated_img = np.zeros((height, width))\nfor y in range(height):\n    rotated_img[y] = img[height - 1 - y]\nshow(rotated_img)\n\n# scaled down the image\nshow(img[::10, ::10])\n\n# clip the image\nclipped_img = np.zeros((height, width))\nclip_size = width // 4\nclipped_img[:, clip_size : clip_size * 2] = img[:, clip_size : clip_size * 2]\nshow(clipped_img)","repo_name":"Mat1337/ferit","sub_path":"machine-lerning/lab_2/zad_3.py","file_name":"zad_3.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10250783584","text":"import os, sys\n\nfrom setuptools import setup\n\nlong_description = open(\"README.rst\").read()\ndef main():\n    setup(\n        name='fsredis',\n        description='fsredis: in-process redis api, persisting to file system.',\n        long_description = long_description,\n        version=\"0.4\",\n        url='http://github.com/hpk42/fsredis',\n        license='MIT license',\n        platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],\n        author='holger krekel',\n        author_email='holger at merlinux.eu',\n        classifiers=['Development Status :: 3 - Alpha',\n                     'Intended Audience :: Developers',\n                     'License :: OSI Approved :: MIT License',\n                     'Operating System :: POSIX',\n                     'Operating System :: Microsoft :: Windows',\n                     'Operating System :: MacOS :: MacOS X',\n                     'Topic :: Utilities',\n                     'Intended Audience :: Developers',\n                     'Programming Language :: Python'],\n        py_modules = ['fsredis', \"test_fsredis\"],\n    )\n\nif __name__ == '__main__':\n    main()\n\n","repo_name":"hpk42/fsredis","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"2076188404","text":"\nfrom xstruct import pack\nfrom objects import Object\n\nclass Galaxy(Object):\n\t\"\"\"\\\n\tThe Galaxy is a container for a large group of close star systems, like the Milky Way.\n\t\n\tThe Galaxy contains no extra data.\n\t\"\"\"\n\tsubtype = 1\n\tsubstruct = \"\"\n\n\tdef __init__(self, sequence, \\\n\t\t\tid, type, name, \\\n\t\t\tsize, \\\n\t\t\tposx, posy, posz, \\\n\t\t\tvelx, vely, velz, \\\n\t\t\tcontains, \\\n\t\t\torder_types, \\\n\t\t\torder_number, \\\n\t\t\tmodify_time):\n\t\tObject.__init__(self, sequence, \\\n\t\t\tid, type, name, \\\n\t\t\tsize, \\\n\t\t\tposx, posy, posz, \\\n\t\t\tvelx, vely, velz, \\\n\t\t\tcontains, \\\n\t\t\torder_types, \\\n\t\t\torder_number, \\\n\t\t\tmodify_time)\n\t\t\t\n\t\tself.length += 
0\n","repo_name":"thousandparsec/libtpproto-py","sub_path":"tp/netlib/objects/ObjectExtra/Galaxy.py","file_name":"Galaxy.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"39199592497","text":"from __future__ import print_function\nfrom __future__ import unicode_literals\nfrom functools import reduce\n\n\nclass computer:\n    def __init__(self,cpu,ram):\n        self.cpu = cpu\n        self.ram = ram\n\n    def config(self):\n        print('Hello what is the name of your cpu and how much ram',self.cpu,self.ram)\n\ncom1 = computer('E5',56) #Learning how to use constructor\ncom2 = computer('r5',24)\n\ncom1.config() # second way of calling a method in a class\ncom2.config()","repo_name":"hhabibullah/General-Python","sub_path":"Array.py","file_name":"Array.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25086120223","text":"#!/usr/bin/env python\n\"\"\"\nProvides a Class to find and install Mac OS X Applications\n\nThe install module provides an easy interface to find installable objects on\nMac OS X. An installable object is any of the following:\n    1. an Application Bundle ('.app')\n    2. an OS X Install-Package ('.pkg')\n    3. an Alfred Workflow ('.alfredworkflow')\n    4. a Disk Image containing any of the above ('.dmg')\n    5. a Zipfile containing any of 1-3 ('.zip')\n\"\"\"\n\nimport os.path\nimport logging\nimport logging.handlers\nimport zipfile\nimport errno\nimport subprocess\nimport tempfile\n\nimport send2trash\n\n__author__ = \"Franz Greiling\"\n__email__ = \"dev.installpy@lc3dyr.de\"\n__copyright__ = \"Copyright (c) 2014, Franz Greiling\"\n__licence__ = \"BSD 2-Clause License\"\n__version__ = \"v1.0\"\n\n\n# Setting up Logging\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\nformatter_ch = logging.Formatter(\n    '%(asctime)s - %(name)s <%(levelname)s>\\\\n%(message)s\\\\n'\n)\nformatter_sl = logging.Formatter(\n    '%(name)s: %(levelname)s %(message)s'\n)\n\nsyslog = logging.handlers.SysLogHandler(address=\"/var/run/syslog\")\nsyslog.setLevel(logging.WARNING)\nsyslog.setFormatter(formatter_sl)\nlogger.addHandler(syslog)\n#\n# console = logging.StreamHandler()\n# console.setLevel(logging.DEBUG)\n# console.setFormatter(formatter_ch)\n# logger.addHandler(console)\n\n\ndef mount_dmg(dmg, unmount=False):\n    \"\"\" (Un)Mounts given DMG at /Volumes/NAME \"\"\"\n\n    # Generate Mountpoint\n    mount_point = os.path.join('/Volumes/',\n                               os.path.splitext(os.path.basename(dmg))[0])\n\n    # Mount dmg\n    dnull = open('/dev/null', 'w')\n    if unmount:\n        logger.info(\"Unmounted %s\" % mount_point)\n        return_code = subprocess.call([\n            'hdiutil',\n            'detach',\n            mount_point\n        ], stdout=dnull)\n    else:\n        logger.info(\"Mounted %s at %s\" % (dmg, mount_point))\n        return_code = subprocess.call([\n            'hdiutil',\n            'attach',\n            '-mountpoint',\n            mount_point,\n            dmg\n        ], stdout=dnull)\n\n    # Minimal Error Handling\n    if return_code != 0:\n        logger.error(\"%d: %s\" %\n                     (return_code, errno.errorcode[return_code]))\n        raise OSError(return_code)\n\n    return mount_point\n\n\nclass NoApplicationException(Exception):\n    pass\n\n\nclass NotInstalledException(Exception):\n    pass\n\n\nclass Installable(object):\n    \"\"\"\n    Specifies an installable object.\n    \"\"\"\n\n    #: List of Acceptable Types\n    TYPES = [\n        '.dmg',\n        '.zip',\n        '.pkg',\n        '.app',\n        '.alfredworkflow',\n    ]\n\n    #: List of Search Paths\n    PATHS = [\n        '~/Downloads/',\n        # '~/Desktop/',\n    ]\n\n    def 
__init__(self, path, types=TYPES):\n \"\"\"\n Creates new Instance of Installable from path.\n\n During initialization, zipfiles are inspected to find any possible\n Installable inside them.\n\n Args:\n path: Object to reference in this Instance\n REQUIRED\n types: Types which to accept in path. Needs to be a subset of TYPES\n Defaults to TYPES\n\n Raises:\n NoApplicationException: is raised when the type of 'path' is not in\n TYPES, i.e. not recognized.\n This is also true if type is '.zip', but this '.zip' does not\n contain any valid Installables.\n \"\"\"\n path = path.rstrip('/')\n ext = os.path.splitext(path)[1]\n\n # Check if path is a valid File\n if ext not in types:\n logger.debug(\"%s is no valid Installable object.\" % path)\n raise NoApplicationException()\n\n # Special Zip Treatment\n # Only accept zips, if they include a valid type\n # Inside zips, ignore .zips and .dmgs\n inzip = []\n\n if ext == '.zip':\n _types = list(types)\n _types.remove('.zip')\n _types.remove('.dmg')\n\n zf = zipfile.ZipFile(path, 'r')\n\n for f in zf.namelist():\n if f.startswith(\"__MACOSX/\"):\n continue\n\n t = os.path.splitext(f.rstrip('/'))[1]\n if t in _types and f.count(t+'/') == 1:\n logger.info(\"Found Installable %s inside %s\" % (f, path))\n inzip.append(f.split('.app/', 1)[0]+'.app/')\n\n if not inzip:\n logger.debug(\"No Installables in %s\" % path)\n raise NoApplicationException()\n\n self.inzip = set(inzip)\n self.path = path\n self.ext = ext\n\n def _install_app(self, prefix, overrite=False, remove=False):\n dest = os.path.join(prefix, os.path.basename(self.path))\n if os.path.exists(dest):\n if overrite:\n logger.debug(\"Trying to remove %s\" % (dest))\n send2trash.send2trash(dest)\n logger.info(\"Moved %s to trash.\" % dest)\n else:\n logger.error(\"File exists: %s\" % dest)\n raise OSError(17, \"File exists\", dest)\n\n logger.debug(\n \"Installing: %s\" % os.path.basename(self.path))\n return_code = subprocess.call(\n ['/bin/cp', '-a', self.path, prefix])\n\n # Minimal Error handling\n if return_code != 0:\n logger.error(\"%d: %s\" %\n (return_code, errno.errorcode[return_code]))\n raise OSError(return_code)\n\n logger.info(\"Installed %s to %s\" % (self, prefix))\n\n def _install_zip(self, prefix, overrite=False, remove=False):\n tmp = tempfile.gettempdir()\n\n return_code = subprocess.call(\n ['unzip', '-u', '-o', self.path, '-d', tmp]\n )\n\n # Minimal Error handling\n if return_code != 0:\n logger.error(\"%d: %s\" %\n (return_code, errno.errorcode[return_code]))\n raise OSError(return_code)\n\n for f in self.inzip:\n a = Installable(os.path.join(tmp, f))\n a.install(prefix, overrite=overrite)\n\n def _install_dmg(self, prefix, overrite=False, remove=False):\n where = mount_dmg(self.path)\n\n apps = self.get_installables(path=where)\n for app in apps:\n app.install(prefix, overrite=overrite)\n\n mount_dmg(self.path, unmount=True)\n\n def _install_pkg(self, prefix=None, overrite=False, remove=False):\n return_code = subprocess.call(['open', '-W', self.path])\n\n # Minimal Error handling\n if return_code != 0:\n logger.error(\"%d: %s\" %\n (return_code, errno.errorcode[return_code]))\n raise OSError(return_code)\n\n def _install_alfredworkflow(self, prefix=\"/\",\n overrite=False, remove=False):\n if remove:\n tmp = tempfile.gettempdir()\n\n return_code = subprocess.call(\n ['/bin/cp', '-a', self.path, tmp])\n\n # Minimal Error handling\n if return_code != 0:\n logger.error(\"%d: %s\" %\n (return_code, errno.errorcode[return_code]))\n raise OSError(return_code)\n\n path = 
os.path.join(tmp, os.path.basename(self.path))\n else:\n path = self.path\n\n return_code = subprocess.call(\n ['open', path]\n )\n\n # Minimal Error handling\n if return_code != 0:\n logger.error(\"%d: %s\" %\n (return_code, errno.errorcode[return_code]))\n raise OSError(return_code)\n\n def install(self, prefix='/Applications/', remove=False, overrite=False):\n \"\"\"\n Installs the Applications referenced by this Instance.\n\n This method is mainly a wrapper around type-specific install functions.\n\n Args:\n prefix: Path to where Applications ('.app') shall be installed.\n Defaults to '/Applications/'.\n Note: This prefix will only be used for ('.app')-Files and\n ignored otherwise.\n remove: Boolean. If set to 'True', method will try to remove\n Object after successful installation.\n Defaults to 'False'\n overrite: Boolean. If set to 'True', will overrite existing Apps at\n path.\n Defaults to 'False'\n\n Returns:\n Original Path of the referenced object on success, None otherwise.\n\n Raises:\n OSError: is raised on several occasions, when installation failed.\n This can for example happen, when you dont have Permissions at\n path.\n \"\"\"\n\n logger.debug(\n \"Trying to install %s to %s with remove %s and overrite %s\" %\n (self.path, prefix, remove, overrite))\n try:\n if self.removed:\n logger.warning(\"%s has been removed!\" % self)\n return None\n except AttributeError:\n pass\n\n getattr(self, \"_install\" + self.ext.replace('.', \"_\"))(\n prefix=prefix,\n overrite=overrite,\n remove=remove,\n )\n logger.info(\"Installed %s to %s\" % (self, prefix))\n\n self.installed = True\n\n if remove:\n self.remove()\n\n return self.path\n\n def remove(self, force=False):\n \"\"\"\n Removes the Container of Applications (dmgs, zips, pkgs).\n\n This method can only be called after install() has run succesfully.\n\n Args:\n force: If set, Installable will be removed even if it has not been\n installed. Defaults to 'False'\n\n Raises:\n NotInstalledException: If Installable().install has not been called\n successfully and force is 'False'\n \"\"\"\n\n if not self.installed and not force:\n logger.debug(\"Cant remove %s!\" % self)\n raise NotInstalledException()\n\n try:\n send2trash.send2trash(self.path)\n self.removed = True\n logger.info(\"Moved %s to trash.\" % self)\n except OSError as ose:\n logger.exception(ose)\n\n def __len__(self):\n \"\"\"returns number of installable objects\"\"\"\n return 1 if len(self.inzip) == 0 else len(self.inzip)\n\n def __repr__(self):\n \"\"\"gives a representation of the instance\"\"\"\n return \"<\" + self.__class__.__name__ + \": \" + str(self) + \">\"\n\n def __str__(self):\n \"\"\"returns __unicode__\"\"\"\n return unicode(self).encode('utf-8')\n\n def __unicode__(self):\n \"\"\"gives the basename of the referenced installable object\"\"\"\n return os.path.basename(self.path)\n\n # Static Methods\n @staticmethod\n def get_installables(paths=PATHS, types=TYPES):\n \"\"\"\n Finds installable objects\n\n Args:\n paths: List of Path in which to look for installable objects.\n Defaults to Installable.PATHS\n types: List of Types to recognize as installable objects. Must be\n a subset of Installable.TYPES. 
Defaults to Installable.TYPES\n\n Returns:\n a List of Installable() objects.\n \"\"\"\n\n inst = []\n\n for p in paths:\n p = os.path.expanduser(p)\n for f in os.listdir(p):\n try:\n i = Installable(os.path.join(p, f), types=types)\n logger.info(\"Found Installable at '%s'\" % i.path)\n inst.append(i)\n except NoApplicationException:\n logger.log(logging.NOTSET, \"No valid Installable at %s\")\n\n return inst\n","repo_name":"fgr0/dmginstall","sub_path":"install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":11609,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"31"} +{"seq_id":"25240160302","text":"def dfs(n):\n global num_str\n temp = num_str\n if n == 11:\n return\n for i in range(9, -1, -1):\n if num_str == '' or i < int(num_str[-1]):\n num_str += str(i)\n decrease_num.append(int(num_str))\n dfs(n+1)\n num_str = temp\n\ninput = __import__('sys').stdin.readline\nif __name__ == \"__main__\":\n n = int(input())\n decrease_num = []\n num_str = ''\n dfs(0)\n decrease_num.sort()\n if len(decrease_num) <= n:\n print(-1)\n else:\n print(decrease_num[n])","repo_name":"juntae6942/ANA-Daily-Algorithm","sub_path":"송영운/1038.py","file_name":"1038.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"34621511168","text":"\"\"\"Where we build the urls that we'll search in scrapy.\"\"\"\nimport subprocess\n\nimport isort # noqa: F401\nimport snoop\nfrom loguru import logger\nfrom systemd import journal\n\nfmt = \"{time} - {name} - {level} - {message}\"\nlogger.add(\"../logs/info.log\", level=\"INFO\", format=fmt, backtrace=True, diagnose=True) # noqa: E501\nlogger.add(\"../logs/error.log\", level=\"ERROR\", format=fmt, backtrace=True, diagnose=True) # noqa: E501\n\nsubprocess.run([\"isort\", __file__])\n\n\ndef type_watch(source, value):\n return \"type({})\".format(source), type(value)\n\n\nsnoop.install(watch_extras=[type_watch])\n\n\n@logger.catch\n@snoop\ndef build_url_list():\n \"\"\"\n We get the usual structure of pypi site,\n and insert the names in the name list\n where the names usually go in the url.\n \"\"\"\n\n with open(\"/home/mic/python/cli_apps/cli_apps/lists/pypi/only_names.txt\", \"r\") as f:\n names = f.readlines()\n journal.sendv(\"MESSAGE=only_names_list\", \"CODE_FILE=build_url_list.py\", \"CODE_FUNC=build_url_list\")\n for name in names:\n journal.sendv(\"MESSAGE=the name is {}\".format(name), \"CODE_FILE=build_url_list.py\", \"CODE_FUNC=build_url_list\", \"CODE_LINE=34\")\n\n urls = []\n sname = [i for i in names if not (\".\") in i]\n journal.sendv(\"MESSAGE=List sname\", \"CODE_FILE=build_url_list.py\", \"CODE_FUNC=build_url_list\", \"CODE_LINE=39\")\n for name in sname:\n lname = name.lower()\n gname = lname.replace(\"-\", \"_\")\n rname = gname.strip()\n journal.sendv(\"MESSAGE=rname is {}\".format(rname), \"CODE_FILE=build_url_list.py\", \"CODE_FUNC=build_url_list\", \"CODE_LINE=43\")\n url = f\"https://pypi.org/project/{rname}\"\n urls.append(url)\n\n for url in urls:\n journal.sendv(\"MESSAGE=url is {}\".format(url), \"CODE_FILE=build_url_list.py\", \"CODE_FUNC=build_url_list\", \"CODE_LINE=48\")\n with open(\"/home/mic/python/cli_apps/cli_apps/lists/pypi/urls.txt\", \"a\") as f:\n f.write(url)\n f.write(\"\\n\")\n\n\nif __name__ == \"__main__\":\n 
build_url_list()\n","repo_name":"miccaldas/old_alternative_projects","sub_path":"old_cli_apps/build_url_list.py","file_name":"build_url_list.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23238450685","text":"\"\"\"\n* https://leetcode.com/problems/best-time-to-buy-and-sell-stock-iii/\n\nSay you have an array for which the ith element is the price of a given stock on day i.\n\nDesign an algorithm to find the maximum profit. You may complete at most two transactions.\n\nNote: You may not engage in multiple transactions at the same time (i.e., you must sell the stock before you buy again).\n\"\"\"\n\ndef solution1(prices):\n # Time: O(N); Space: O(N)\n l = len(prices)\n if l < 2:\n return 0\n\n max_profit_from_left_until_today = [0]\n lowest_from_left = prices[0]\n max_profit_from_left = 0\n\n for i in range(1, l):\n price = prices[i]\n profit = price - lowest_from_left\n max_profit_from_left = max(max_profit_from_left, profit)\n lowest_from_left = min(lowest_from_left, price)\n\n max_profit_from_left_until_today.append(max_profit_from_left)\n\n max_profit_from_right_start_toady = [0]\n highest_from_right = prices[-1]\n max_profit_from_right = 0\n for i in range(l-2, -1, -1):\n price = prices[i]\n profit = highest_from_right - price\n\n max_profit_from_right = max(max_profit_from_right, profit)\n highest_from_right = max(highest_from_right, price)\n\n max_profit_from_right_start_toady.insert(0, max_profit_from_right)\n\n max_profit = 0\n for i in range(1, l):\n max_from_left = max_profit_from_left_until_today[i-1]\n max_from_right = max_profit_from_right_start_toady[i]\n\n max_profit = max(max_profit, max_from_left + max_from_right)\n\n return max(max_profit, max_profit_from_left_until_today[-1], max_profit_from_right_start_toady[0])\n\ndef solution2(prices):\n # Time: O(N^2); Space: O(1)\n\n def max_profit(prices):\n l = len(prices)\n if l < 2:\n return 0\n lowest = prices[0]\n max_profit = 0\n for i in range(1, l):\n max_profit = max(max_profit, prices[i] - lowest)\n lowest = min(lowest, prices[i])\n return max_profit\n\n max_p = 0\n for i in range(len(prices)):\n max_p = max(max_p, max_profit(prices[:i]) + max_profit(prices[i:]))\n\n return max_p\n\ndef solution3(prices):\n # Improved solution2\n # Time: O(N); Space: O(N)\n\n max_profits_left = {} # i: (max_profit, lowest_price)\n def get_max_profit_left(i):\n if i < 0 or i >= len(prices):\n return (0, float('-inf'))\n if i in max_profits_left:\n return max_profits_left[i]\n if i == 0:\n max_profits_left[i] = (0, prices[i])\n else:\n sub_max, lowest = get_max_profit_left(i - 1)\n price = prices[i]\n max_profits_left[i] = (max(sub_max, price - lowest), min(lowest, price))\n return max_profits_left[i]\n\n max_profits_right = {} # i: (max_profit, highest_price)\n def get_max_profit_right(i):\n if i < 0 or i >= len(prices):\n return (0, float('inf'))\n if i in max_profits_right:\n return max_profits_right[i]\n if i == len(prices) - 1:\n max_profits_right[i] = (0, prices[i])\n else:\n sub_max, highest = get_max_profit_right(i + 1)\n price = prices[i]\n max_profits_right[i] = (max(sub_max, highest - price), max(highest, price))\n return max_profits_right[i]\n\n l = len(prices)\n if l < 2:\n return 0\n\n max_p = 0\n for i in range(l):\n max_p = max(max_p, get_max_profit_left(i)[0] + get_max_profit_right(i+1)[0])\n\n return max_p\n\n\n\nimport unittest\nfrom unittest_data_provider import data_provider\n\ndef data():\n return [\n (6, 
[3,3,5,0,0,3,1,4,1]),\n (6, [3,3,5,0,0,3,1,4]),\n (4, [1,2,3,4,5]),\n (0, [7,6,4,3,1]),\n (0, [1]),\n (13, [1,2,4,2,5,7,2,4,9,0]),\n ]\n\nclass Tests(unittest.TestCase):\n @data_provider(data)\n def test_all_solutions(self, expected, *argv):\n for n in range(1, 10):\n fn_name = 'solution' + str(n)\n if fn_name in globals():\n fn = globals()[fn_name]\n # print('Testing %s with input %s' % (fn_name, str(argv)))\n self.assertEqual(expected, fn(*argv))\n\nif __name__ == '__main__':\n suite = unittest.TestLoader().loadTestsFromTestCase(Tests)\n unittest.TextTestRunner(verbosity=2).run(suite)\n","repo_name":"hanqingzdev/Everything101","sub_path":"Algorithm/LeetCode/123_best_time_to_buy_and_sell_stock_iii.py","file_name":"123_best_time_to_buy_and_sell_stock_iii.py","file_ext":"py","file_size_in_byte":4301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34844813451","text":"#LIBRARIES USED\nimport qrcode\nfrom tkinter import *\nfrom tkinter import messagebox\n\n#CREATING THE WINDOW\nwn = Tk()\nwn.title('QR CODE GENERATOR')\nwn.geometry('700x700')\nwn.config(bg = 'SteelBlue3')\n\n#FUNCTIONS\ndef generateCode():\n #creating QR\n qr = qrcode.QRCode(version = size.get(),\n box_size=10,\n border=5)\n qr.add_data(text.get())\n qr.make(fit=True)\n img = qr.make_image()\n fileDirec = loc.get()+'\\\\'+name.get()\n img.save(f'{fileDirec}.png')\n messagebox.showinfo(\"QR CODE GENERATED,\",\"QR CODE SAVED SUCCESSFULLY\")\n\n#Labels\nheadingFrame = Frame(wn,bg=\"azure\",bd=5)\nheadingFrame.place(relx=0.15,rely=0.05,relwidth=0.7,relheight=0.1)\nheadingLabel = Label(headingFrame,text=\"GENERATE QR CODE WITH THIS\",bg='azure',font=('Times',20,'bold'))\nheadingLabel.place(relx=0,rely=0,relwidth=1,relheight=1)\n\n#TAKE INPUT IN FORM\nFrame1 = Frame(wn,bg=\"SteelBlue3\")\nFrame1.place(relx=0.1,rely=0.15,relwidth=0.7,relheight=0.3)\n\nlabel1= Label(Frame1,text=\"ENTER TEXT OR URL: \",bg=\"SteelBlue3\",fg='azure',font=('Courier',13,'bold'))\nlabel1.place(relx=0.05,rely=0.2,relheight=0.08)\n\ntext = Entry(Frame1,font=('Century 12'))\ntext.place(relx=0.05,rely=0.4,relwidth=1,relheight=0.2)\n\n#GETTING INPUT OF QR SAVE LOCATION\nFrame2 = Frame(wn,bg=\"SteelBlue3\")\nFrame2.place(relx=0.1,rely=0.35,relwidth=0.7,relheight=0.3)\n\nlabel2 = Label(Frame2,text=\"Enter The Location To Save The QR\",bg=\"SteelBlue3\",fg='azure',font=('Courier',13,'bold'))\nlabel2.place(relx=0.05,rely=0.2,relheight=0.08)\n\nloc = Entry(Frame2,font=('Century 12'))\nloc.place(relx=0.05,rely=0.4,relwidth=1,relheight=0.2)\n\n#GETTING INPUT QR CODE IMAGE NAME\nFrame3 = Frame(wn,bg=\"SteelBlue3\")\nFrame3.place(relx=0.1,rely=0.55,relwidth=0.7,relheight=0.3)\n\nlabel3 = Label(Frame3,text=\"Enter The Name Of The QR CODE\",bg=\"SteelBlue3\",fg='azure',font=('Courier',13,'bold'))\nlabel3.place(relx=0.05,rely=0.2,relheight=0.08)\n\nname = Entry(Frame3,font=('Century 12'))\nname.place(relx=0.05,rely=0.4,relwidth=1,relheight=0.2)\n\n#Getting The Input FOR QR CODE SIZE\nFrame4 = Frame(wn,bg=\"SteelBlue3\")\nFrame4.place(relx=0.1,rely=0.75,relwidth=0.7,relheight=0.2)\n\nlabel4 = Label(Frame4,text=\"Enter The Size From 1 to 40, With 1 being 21x21: \",bg=\"SteelBlue3\",fg='azure',font=('Courier',13,'bold'))\nlabel4.place(relx=0.05,rely=0.2,relheight=0.2)\n\nsize = Entry(Frame4,font=('Century 12'))\nsize.place(relx=0.05,rely=0.4,relwidth=0.5,relheight=0.2)\n\n#Buttons TO GENERATE AND SAVE\nbutton = Button(wn,text=\"Generate 
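The best-time-to-buy-and-sell-stock record above solves the two-transaction case with prefix/suffix passes. A compact single-pass alternative, sketched here for comparison (variable names are illustrative, not the author's):

def max_profit_two_transactions(prices):
    buy1 = buy2 = float('-inf')  # best cash held right after the 1st/2nd buy
    sell1 = sell2 = 0            # best cash held right after the 1st/2nd sell
    for p in prices:
        buy1 = max(buy1, -p)
        sell1 = max(sell1, buy1 + p)
        buy2 = max(buy2, sell1 - p)
        sell2 = max(sell2, buy2 + p)
    return sell2

On the record's test input [3,3,5,0,0,3,1,4] this returns 6, matching the expected value.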
CODE\",font=('Courier',15,'normal'),command=generateCode)\nbutton.place(relx=0.35,rely=0.9,relwidth=0.25,relheight=0.05)\n\n#RUNS UNTIL CLOSED MANUALLY\nwn.mainloop()\n\n","repo_name":"Novelzalsastian/QRCODE_Generator","sub_path":"QRCODE_READER.py","file_name":"QRCODE_READER.py","file_ext":"py","file_size_in_byte":2660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13201506545","text":"import sys\nfrom collections import deque\n\nN = int(sys.stdin.readline())\npath = []\nvisited = []\nfor i in range(N):\n temp = list(map(int, sys.stdin.readline().split()))\n path.append(temp[:])\n visited.append(temp[:])\n\ndeq = deque()\ndx = [-1, 0, 0, 1]\ndy = [0, -1, 1, 0]\n\ncur_size = 2\neaten = 0\nans = 0\n\nfor i in range(N):\n for j in range(N):\n if path[i][j] == 9:\n path[i][j] = 0\n deq.append((i, j, 1))\nwhile True:\n food = []\n while len(deq) != 0:\n temp = deq.popleft()\n x, y, cnt = temp[0], temp[1], temp[2]\n if visited[x][y] < 0:\n continue\n visited[x][y] = -cnt\n\n # handle when found\n if 0 < path[x][y] < cur_size:\n food.append((x, y))\n\n for i in range(4):\n nx, ny = x + dx[i], y + dy[i]\n\n if 0 <= nx < N and 0 <= ny < N and path[nx][ny] <= cur_size:\n deq.append((nx, ny, cnt + 1))\n\n ################ end of while ###############\n\n if len(food) == 0:\n break\n shortest = [food[0][0], food[0][1]]\n cur = -visited[food[0][0]][food[0][1]]\n for i in food[1:]:\n x, y = i[0], i[1]\n if -visited[x][y] < cur:\n cur = -visited[x][y]\n elif -visited[x][y] == cur:\n if x < shortest[0]:\n shortest[0] = x\n shortest[1] = y\n elif x == shortest[0] and y < shortest[1]:\n shortest[1] = y\n\n x, y = shortest[0], shortest[1]\n path[x][y] = 0\n deq.clear()\n deq.append((x, y, 1))\n ans += -visited[x][y] - 1\n\n for i in range(N):\n for j in range(N):\n visited[i][j] = 0\n\n eaten += 1\n if cur_size == eaten:\n cur_size += 1\n eaten = 0\n\nprint(ans)","repo_name":"mushroom1324/Algorithm","sub_path":"BOJ_16236.py","file_name":"BOJ_16236.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"23812329903","text":"from copy import deepcopy\n\nimport numpy as np\n\nfrom gym import spaces\nfrom pycolab.examples.research.box_world import box_world\nfrom pycolab.examples.classics import cliff_walk\nfrom pycolab import ascii_art\n\nfrom helpers import pycolab_gymify\n\n\n# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Core.\n\ndef rgbify_dict(color_dict):\n \"\"\"Rescale scalar from [0, 999] interval to [0, 255] \"\"\"\n return {k: tuple([int(c / 999 * 255) for c in list(v)])\n for k, v in color_dict.items()}\n\n\n# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Create Pycolab environments.\n\nclass BoxWorldEnv(pycolab_gymify.PyColabEnv):\n \"\"\"Box-world environment.\n The environment was first introduced in https://arxiv.org/pdf/1806.01830.pdf,\n for Relational Reinforcement Learning.\n \"\"\"\n\n def __init__(self, default_reward=0.): # task too hard for -1 as default (reward hacking)\n \"\"\"The agent 'only actually move[s] if action is one of the 4 directions of movement'\n (comment found at: pycolab/examples/research/box_world/box_world.py#L166). We could\n ass a fifth action to enable the agent to perform a no-op action, but since only the\n agent move in this environment, there is no use for it. 
Thus, in accordance to\n the pycolab environment, the action space is defined as `range(4)`.\n (Note, any other action would act as a no-op.)\n\n `max_iterations` is not needed here (but signature is preserved nonetheless) since the\n pycolab environment already set the episode termination horizon with `max_num_steps`.\n\n For grid_size=12 (as pycolab's default), resize_scale=6 gives a render of size 84x84.\n For grid_size=12 (as pycolab's default), resize_scale=16 gives a render of size 224x224.\n \"\"\"\n super(BoxWorldEnv, self).__init__(max_iterations=np.infty,\n default_reward=default_reward,\n action_space=spaces.Discrete(4),\n delay=30,\n resize_scale=16)\n\n def make_game(self):\n \"\"\"Note, those are the settings from the paper.\"\"\"\n return box_world.make_game(grid_size=12,\n solution_length=(1, 2, 3, 4),\n num_forward=(0, 1, 2, 3, 4),\n num_backward=(0,),\n branch_length=1,\n random_state=None,\n max_num_steps=120)\n\n def make_colors(self):\n \"\"\"Return the color dictionary defined in the pycolab environment.\n Note, need to transform it to RGB format for proper rendering.\n \"\"\"\n color_dict = deepcopy(box_world.OBJECT_COLORS)\n return rgbify_dict(color_dict)\n\n\nclass CliffWalkEnv(pycolab_gymify.PyColabEnv):\n \"\"\"Classic cliff-walk game.\"\"\"\n\n def __init__(self, max_iterations, default_reward=-1.):\n super(CliffWalkEnv, self).__init__(max_iterations=max_iterations,\n default_reward=default_reward,\n action_space=spaces.Discrete(4),\n delay=30,\n resize_scale=24)\n\n def make_game(self):\n \"\"\"Reimplemention of the game map.\"\"\"\n # We modify the game art to make the cliff section visual discernible.\n BOOTLEG_GAME_ART = ['......',\n '......',\n 'Pxxxx.']\n return ascii_art.ascii_art_to_game(BOOTLEG_GAME_ART,\n what_lies_beneath='.',\n sprites={'P': cliff_walk.PlayerSprite})\n\n def make_colors(self):\n return {'.': (192, 192, 192),\n 'P': (127, 0, 255),\n 'x': (0, 0, 0)}\n\n\n# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Create environment maker.\n\ndef make_pycolab(env_id):\n if env_id == 'BoxWorld-v0':\n return BoxWorldEnv()\n elif env_id == 'CliffWalk-v0':\n return CliffWalkEnv(max_iterations=150)\n else:\n pass\n","repo_name":"lionelblonde/ppo-gail-pytorch","sub_path":"helpers/pycolab_envs.py","file_name":"pycolab_envs.py","file_ext":"py","file_size_in_byte":4199,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"8564140142","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('front', '0005_auto_20150602_2356'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='event',\n name='category',\n field=models.ForeignKey(blank=True, to='front.Category', null=True),\n ),\n ]\n","repo_name":"bikeanjo/bikeanjo","sub_path":"front/migrations/0006_event_category.py","file_name":"0006_event_category.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"31"} +{"seq_id":"1319711896","text":"import pickle \nimport numpy as np\n\n# tfidf=pickle.load(open(\"tfidf_test.pickle\", \"rb\" ))\n# X=pickle.load(open(\"X_test.pickle\", \"rb\" ))\ntfidf=pickle.load(open(\"min_tfidf.pickle\", \"rb\" ))\nX=pickle.load(open(\"min_X.pickle\", \"rb\" ))\nX_T=X.T\n\nvocab=tfidf.vocabulary_\nfeature_names=tfidf.get_feature_names()\n\n\ndef input_w():\n while True:\n word=input('請輸入:')\n # print(X.sahpe)\n if 
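A quick sanity check of the rgbify_dict rescaling defined in the pycolab record above (the input colour tuple is made up for illustration):

# assuming rgbify_dict as defined in the record above
# rgbify_dict({'P': (999, 0, 500)}) evaluates to {'P': (255, 0, 127)}
# since int(999 / 999 * 255) == 255 and int(500 / 999 * 255) == 127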
word not in vocab:\n print('dic沒有這個單字')\n else:\n i=vocab[word]\n doc_arr=X.T[i].toarray()[0]\n doc_id_arr=(-doc_arr).argsort()\n \n # data=X_T.data[X_T.indptr[i]:X_T.indptr[i+1]]\n # index=X_T.indices[X_T.indptr[i]:X_T.indptr[i+1]]\n # zip_list=[z for z in zip(data,index) if z[0]<1]\n # list1=sorted(zip_list,key = lambda z: z[0],reverse=True)\n # doc_id_arr = [x[1] for x in list1]\n \n top_k=30\n n=0\n bag=[word]\n\n for doc_id in doc_id_arr:\n if n>=top_k:\n break\n # print(doc_arr[doc_id])\n if doc_arr[doc_id] < 1 and doc_arr[doc_id] >0:\n # arrs=X[doc_id].toarray()[0]\n # w_id = (-arrs).argsort()[:80]\n data=X.data[X.indptr[doc_id]:X.indptr[doc_id+1]]\n index=X.indices[X.indptr[doc_id]:X.indptr[doc_id+1]]\n zip_list=[z for z in zip(data,index) if z[0]<1]\n list1=sorted(zip_list,key = lambda z: z[0],reverse=True)\n w_id = [x[1] for x in list1]\n for _id in w_id :\n # if arrs[_id] == 0:\n # break\n if feature_names[_id] not in bag:\n print(feature_names[_id])\n n+=1\n # print(n)\n if n>=top_k:\n break\n \n print()\n\ninput_w()","repo_name":"pupumeme/EDM","sub_path":"kcm_demo.py","file_name":"kcm_demo.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26367644375","text":"# !/usr/bin/env python\n# -*- coding: utf8 -*-\n\nfrom sqlalchemy import Column, BIGINT, BOOLEAN, VARCHAR, ForeignKey\nfrom sqlalchemy.orm import relationship\n\nfrom .base import MyMixin, Base, BaseOrm\n\nfrom utils.orm_format import model_to_list, session_auto_commit\n\n\nclass UserModel(MyMixin, Base):\n __tablename__ = \"user\"\n\n id = Column(BIGINT, primary_key=True, autoincrement=True)\n email = Column(VARCHAR(255), unique=True)\n mobileNumber = Column(VARCHAR(16))\n isActive = Column(BOOLEAN)\n isAdmin = Column(BOOLEAN)\n password = Column(VARCHAR(255))\n username = Column(VARCHAR(255), unique=True)\n receiveNoticeEmail = Column(BOOLEAN)\n passwordRecoveryCode = Column(VARCHAR(255))\n orderCount = Column(BIGINT)\n city_id = Column(BIGINT, ForeignKey(\"city.id\"))\n successOrderCount = Column(BIGINT)\n realName = Column(VARCHAR(255))\n removed = Column(BOOLEAN)\n orders = relationship(\"OrderModel\", back_populates=\"user\")\n addresses = relationship(\"AddressesModel\", back_populates=\"user\")\n oauth2_session = relationship(\"Oauth2SessionModel\", back_populates=\"user\")\n\n\nclass UserOrm(BaseOrm):\n def __init__(self, db):\n super().__init__(db)\n\n @model_to_list\n def get_all_users(self):\n return self.session.query(UserModel).order_by(UserModel.id).all()\n\n def get_all_mobile_number(self):\n return self.session.query(UserModel.mobileNumber).all()\n\n @session_auto_commit\n def add_users(self, param):\n new_user = UserModel(**param)\n self.session.add(new_user)\n\n @session_auto_commit\n def del_users(self, param):\n del_user = self.session.query(UserModel).filter_by(**param).first()\n self.session.delete(del_user)\n\n @model_to_list\n def search_users(self, param):\n users = self.session.query(UserModel).filter_by(**param).first()\n return users\n\n @session_auto_commit\n def update_users(self, query_param, update_param):\n self.session.query(UserModel).filter_by(**query_param).update(update_param)\n","repo_name":"Zacard274/library","sub_path":"orm/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25442633989","text":"import sys\r\nimport heapq\r\ninput = 
sys.stdin.readline\r\n\r\nn = int(input())\r\nclasstime = [] # 튜플로 저장\r\nclassroom = [] # 강의실 배열\r\nfor _ in range(n):\r\n s, t = map(int, input().split())\r\n classtime.append((s, t))\r\n\r\n# 시작 시간이 빠른 순으로 정렬한다.\r\n# 만약 시작 시간이 같으면, 종료가 빠른 순으로 정렬\r\nclasstime.sort(key=lambda x: (x[0], x[1]))\r\n# print(classtime)\r\nfor i in classtime:\r\n (s, t) = i\r\n # 강의실이 비어 있지 않고, 현 강의의 시작 시간이 강의실의 최소값보다 작거나 같으면,\r\n # 강의실의 최소값을 빼고나서, 현 강의의 종료 시간을 푸쉬한다.\r\n # 힙이 비어있으면 인덱스 에러가 나므로, 강의실이 비어있으면 안됨\r\n if classroom and classroom[0] <= s:\r\n heapq.heappop(classroom) # classroom의 최소값 빼기\r\n heapq.heappush(classroom, t) # 강의실의 현재 강의의 종료 시간 넣기\r\n\r\n# print(classroom)\r\nprint(len(classroom))","repo_name":"hany0147/algorithm","sub_path":"백준/Gold/11000. 강의실 배정/강의실 배정.py","file_name":"강의실 배정.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11632746880","text":"#!/usr/bin/env python3\n\nimport sys\n\ndef prime(n):\n\tc = 0\n\tfor a in range(2,n):\n\t\tfor x in range(2,a):\n\t\t\tif a % x == 0:\n\t\t\t\tbreak\n\t\telse:\n\t\t\tif c < 10:\n\t\t\t\tprint(a, end='\\t')\n\t\t\t\tc += 1\n\t\t\telse:\n\t\t\t\tc = 1\n\t\t\t\tprint()\n\t\t\t\tprint(a, end='\\t')\n\n\nprint(\"Prime numbers to \", sys.argv[1])\nprime(int(sys.argv[1]))\nprint()\n\n","repo_name":"jkrynski9/python_stuff","sub_path":"prime/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"6177776579","text":"from email import encoders\r\nfrom email.header import Header\r\nfrom email.mime.text import MIMEText\r\nfrom email.utils import parseaddr, formataddr\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.mime.base import MIMEBase\r\nimport smtplib\r\nfrom openpyxl import load_workbook,Workbook\r\nimport subprocess as sb\r\nfrom openpyxl.styles import PatternFill, Alignment, Side, Border\r\nimport csv\r\nimport zipfile\r\nimport time\r\nimport os\r\nimport pandas as pd\r\nfrom pandas import DataFrame,Series\r\n#分组平均聚合\r\n####数据可视化\r\n##设置中文字体\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.pyplot import plot,savefig\r\nimport smtplib\r\nfrom email.mime.text import MIMEText\r\nfrom email.mime.multipart import MIMEMultipart\r\n#matplotlib.use('Agg')\r\n##仿宋字体设置\r\nplt.rcParams['font.family'] = ['FangSong']\r\npaths ='./成绩分析清洗.csv'\r\ntarget ='./学生个人成绩/'\r\n\r\ndef mkdir(path):\r\n folder = os.path.exists(path)\r\n if not folder: \r\n os.makedirs(path) \r\n else:\r\n print(\"--- There is this folder! 
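The BOJ 11000 record above assigns lecture rooms greedily with a min-heap of end times. The same idea, condensed into a standalone sketch (min_rooms is my name for it, not the record's):

import heapq

def min_rooms(intervals):
    ends = []  # min-heap of room end times
    for s, t in sorted(intervals):
        if ends and ends[0] <= s:
            heapq.heapreplace(ends, t)  # reuse the room that frees up earliest
        else:
            heapq.heappush(ends, t)     # otherwise open a new room
    return len(ends)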
---\")\r\n##参数预置\r\ntar_list =[]\r\ntest_list =[]\r\nsums =0\r\n##压缩文件模块\r\nclass zip:\r\n def get_zip(self,files,zip_name):\r\n zp=zipfile.ZipFile(zip_name,'w', zipfile.ZIP_DEFLATED)\r\n for file in files:\r\n zp.write(file)\r\n zp.close()\r\n time.sleep(1)\r\n\r\nlest =os.listdir('./')\r\nprint(lest)\r\npath1 =input(\"选择要打开的表格名称(输入序号): \")\r\npath1 =lest[int(path1)-1]\r\npath ='./'+path1\r\ndef open_f():\r\n lest =os.listdir('./')\r\n print(lest)\r\n path1 =input(\"选择要打开的表格名称(输入序号): \")\r\n path1 =lest[int(path1)-1]\r\n path ='./'+path1\r\n return path\r\n\r\ndef menu_all():\r\n print('''\r\n ********菜单********\r\n 1.进入学生信息系统\r\n 2.进入教师成绩系统\r\n 0.退出\r\n ''')\r\ndef menu2():\r\n print('''\r\n ********成绩系统********\r\n 1.学生成绩访问(查询,增加,删除,修改)\r\n 2.学生成绩分析\r\n ''')\r\ndef menu2_add():\r\n print('''\r\n 1.学生成绩查询\r\n 2.学生成绩增加\r\n 3.学生成绩删除\r\n 4.学生成绩修改\r\n ''') \r\n\r\ndef api_c():\r\n #调用c程序用以生成 xxx.txt\r\n sb.run([\"info.exe\"])\r\n\r\ndef visit():\r\n menu2_add()\r\n choice =input(\"请输入您的选择:\")\r\n\r\n if(choice =='1'):#学生成绩查询\r\n wb =load_workbook(path)\r\n ws = wb.active\r\n name =input(\"请输入要查询的学生姓名: \")\r\n k =search(name)\r\n for cell in ws[2]:\r\n print(\"%s \"%cell.value,end =\"\")\r\n print(\"\\n\")\r\n for row in ws.iter_rows(min_row =k,max_row =k,min_col=1,max_col=6,values_only= True):\r\n print(row)\r\n \r\n elif(choice =='2'):#学生成绩增加\r\n print('''\r\n 1.横向增加学生成绩\r\n 2.纵向增加科目成绩\r\n ''')\r\n choices =input(\"请输入您的选择: \")\r\n if(choices =='1'):\r\n scores_x()\r\n elif(choices =='2'):\r\n scores_y()\r\n\r\n elif(choice =='3'):#学生成绩删除\r\n print('''\r\n 1.横向删除学生的所有成绩信息\r\n 2.纵向删除课程的所有成绩信息\r\n ''')\r\n choices =input(\"请输入您的选择: \")\r\n if(choices =='1'):\r\n del_X()\r\n elif(choices =='2'):\r\n del_y()\r\n \r\n elif(choice =='4'):#学生成绩修改\r\n name =input(\"请输入待修改学生姓名: \")\r\n lesson =input(\"请输入待修改科目: \")\r\n score =input(\"请输入待修改成绩数值: \")\r\n vary(name,lesson,score)\r\n\r\ndef search(name):\r\n k =1\r\n wb = load_workbook(path)\r\n ws = wb.active\r\n for cell in ws['C']:\r\n if(cell.value !=name):\r\n k =k+1\r\n else:\r\n break\r\n return k\r\n\r\ndef scores_x():\r\n wb =load_workbook(path)\r\n ws = wb.active\r\n adds =input(\"请依次输入以下信息以逗号间隔 例如 班级,学号,姓名,语文,数学,英语 :\")\r\n lists =adds.split(\",\")\r\n ws.append(lists)\r\n rows =ws.max_row\r\n for cell in ws[rows]:\r\n cell.alignment = align\r\n wb.save(path)\r\n\r\ndef scores_y():\r\n wb =load_workbook(path)\r\n ws = wb.active\r\n cols =ws.max_column\r\n cols =['A','B','C','D','E','F','G','H'][cols-1]\r\n rows =ws.max_row\r\n lesson =input(\"输入要增加的科目:\")\r\n ws[cols+'2'] =lesson\r\n for i in range(3,rows+1):\r\n print(ws['C'+str(i)].value)\r\n score =input(\"输入该同学\"+lesson+'成绩: ' )\r\n ws[cols+str(i)] =score\r\n ws[cols+str(i)].alignment = align\r\n print(\"\\n\")\r\n wb.save(path)\r\n\r\ndef del_X():\r\n wb =load_workbook(path)\r\n ws = wb.active\r\n adds =input(\"请输入要删除的学生姓名: \")\r\n k =search(adds)\r\n ws.delete_rows(k) #删除从第一行开始算的2行内容\r\n wb.save(path)\r\n\r\ndef del_y():\r\n wb =load_workbook(path)\r\n ws = wb.active\r\n adds =input(\"请输入要删除的课程名称: \")\r\n k =4\r\n wb =load_workbook(path)\r\n ws = wb.active\r\n for i in ['D','E','F','G','H']:\r\n if(ws[i+str(2)].value !=adds):\r\n k =k+1\r\n else:\r\n break\r\n ws.delete_cols(k) #删除从第一列开始算的2列内容\r\n wb.save(path)\r\n\r\ndef vary(name,lesson,score):\r\n rows =search(name)\r\n wb =load_workbook(path)\r\n ws = wb.active\r\n k =4\r\n for i in ['D','E','F','G','H']:\r\n if(ws[i+str(2)].value !=lesson):\r\n k =k+1\r\n else:\r\n break\r\n cols =['A','B','C','D','E','F','G','H'][k-1]\r\n 
ws[cols+str(rows)] =score\r\n ws[cols+str(rows)].alignment = align\r\n wb.save(path)\r\n\r\n\r\n\r\ndef analyze_fun(ws1,ws2):#成绩分析\r\n fo =open('期中期末成绩汇总表.csv','w',encoding= 'utf-8')\r\n lists =[]\r\n lists.append(['班级','姓名','期中语文','期中数学','期中英语','期末语文','期末数学','期末英语','\\n'])\r\n lists =\",\".join(lists[0])\r\n fo.write(lists)\r\n str1 =''\r\n lists =[]\r\n for row in ws1.iter_rows(min_row =3,max_row =782,min_col =2,max_col =6,values_only =True):\r\n for i in row:\r\n if i ==row[-1]:\r\n str1 =str1+str(i)\r\n else:\r\n str1 =str1+str(i)+','\r\n lists.append(str1)\r\n str1 =''\r\n x =0\r\n for row in ws2.iter_rows(min_row =3,max_row =782,min_col =4,max_col =6,values_only =True):\r\n for i in row:\r\n if i ==row[-1]:\r\n lists[x] =lists[x]+','+str(i)+'\\n'\r\n else:\r\n lists[x] =lists[x]+','+str(i)\r\n x =x+1\r\n for i in lists:\r\n fo.write(i)\r\n fo.close()\r\n\r\ndef analyze_clean():\r\n read_csv =pd.read_csv('./期中期末成绩汇总表.csv',encoding ='utf-8')\r\n read_csv =read_csv.drop('Unnamed: 8',axis=1)\r\n #查找缺失值\r\n read_csv.isna()\r\n #删除缺失值\r\n read_csv =read_csv.dropna()\r\n #can't del repetitive value\r\n analy =read_csv.describe()\r\n analy.to_csv('./成绩描述性统计数据.csv',encoding ='utf-8')\r\n #描述性统计分析后没有异常值存在,为简化不必清洗异常值\r\n #清洗后数据存储\r\n read_csv.to_csv('./成绩分析清洗.csv',encoding ='utf-8')\r\n\r\ndef analyze_class():\r\n grade_df =pd.read_csv('./成绩分析清洗.csv',encoding ='utf-8')\r\n grade_df\r\n #平均成绩聚合\r\n grade_mid_chinese =grade_df.groupby('班级')['期中语文'].mean()\r\n grade_last_chinese =grade_df.groupby('班级')['期末语文'].mean()\r\n grade_mid_math =grade_df.groupby('班级')['期中数学'].mean()\r\n grade_last_math =grade_df.groupby('班级')['期末数学'].mean()\r\n grade_mid_eng =grade_df.groupby('班级')['期中英语'].mean()\r\n grade_last_eng =grade_df.groupby('班级')['期末英语'].mean()\r\n grade_df2 =DataFrame({'期中语文':grade_mid_chinese,'期末语文':grade_last_chinese,'期中数学':grade_mid_math,'期末数学':grade_last_math,'期中英语':grade_mid_eng,'期末英语':grade_last_eng})\r\n grade_df2.to_csv('./班级分组平均成绩聚合.csv',encoding ='utf-8')\r\n #成绩标差聚合\r\n grades_mid_chinese =grade_df.groupby('班级')['期中语文'].std()\r\n grades_last_chinese =grade_df.groupby('班级')['期末语文'].std()\r\n grades_mid_math =grade_df.groupby('班级')['期中数学'].std()\r\n grades_last_math =grade_df.groupby('班级')['期末数学'].std()\r\n grades_mid_eng =grade_df.groupby('班级')['期中英语'].std()\r\n grades_last_eng =grade_df.groupby('班级')['期末英语'].std()\r\n grades_df2 =DataFrame({'期中语文':grades_mid_chinese,'期末语文':grades_last_chinese,'期中数学':grades_mid_math,'期末数学':grades_last_math,'期中英语':grades_mid_eng,'期末英语':grades_last_eng})\r\n grades_df2.to_csv('./班级分组标差成绩聚合.csv',encoding ='utf-8')\r\n \r\n #画布生成\r\n plt.figure(figsize =(15,15))\r\n fun_read =pd.read_csv('./班级分组平均成绩聚合.csv',encoding ='utf-8')\r\n xclass =fun_read['班级']\r\n ychinese =fun_read[['期中语文','期末语文']]\r\n plt.plot(xclass,ychinese,linewidth =3,marker ='o',markersize =10,markerfacecolor ='w')\r\n #图表标题及字体大小\r\n plt.title('各班语文平均成绩折线图',fontsize =20)\r\n #坐标轴刻度字体\r\n plt.xticks(fontsize =15,rotation =90)\r\n plt.yticks(fontsize =15)\r\n plt.xlabel('班级',fontsize =15)\r\n plt.ylabel('成绩(分)',fontsize =15)\r\n plt.legend(['期中语文','期末语文'])\r\n savefig('./各班语文平均成绩折线图.png')\r\n plt.close()\r\n\r\n \r\n funs_read =pd.read_csv('./班级分组标差成绩聚合.csv',encoding ='utf-8')\r\n plt.figure(figsize =(15,15))\r\n ####柱状图基本设计\r\n # 设置 x/y 坐标值\r\n x =funs_read['班级']\r\n y =funs_read['期中语文']\r\n plt.plot(x, y, color='dodgerblue')\r\n plt.title('年级班级语文成绩标差分布',fontdict ={\r\n 'family': 'FangSong', 'color': 'black', 'weight': 'bold', 'size': 25})\r\n plt.xticks(fontsize=18,rotation =90)\r\n 
plt.yticks(fontsize=12)\r\n plt.xlabel('班级', fontsize=15)\r\n plt.ylabel('标差稳定性', fontsize=17)\r\n plt.bar(x, height=y, color='darkorange',width=0.6,alpha=0.6)\r\n plt.legend(['稳定性变化','稳定性分布'])\r\n savefig('./各班语文标差成绩图.png')\r\n plt.close()\r\n #for a,b in zip(x,y):\r\n #plt.text(a, b, b, ha='center', va='bottom', fontsize=12)\r\ndef analy_stu(paths):\r\n sums =0\r\n with open(paths,'r',encoding ='utf-8',newline =\"\") as csv_file:\r\n csvs_reader =csv.DictReader(csv_file)\r\n headers =csvs_reader.fieldnames\r\n for row in csvs_reader:\r\n target_file =row['姓名']+'.csv'\r\n fil =target+row['姓名']+'/'\r\n mkdir(fil)\r\n test_list.append(fil)\r\n target_file =fil+target_file\r\n tar_list.append(target_file)\r\n sums =sums+1\r\n with open(target_file,'w',encoding ='utf-8',newline =\"\") as csv_f:\r\n csv_w =csv.DictWriter(csv_f,headers)\r\n csv_w.writeheader()\r\n csv_w.writerow(row)\r\n i=0\r\n for tar_file in tar_list[0:1]:###########\r\n f =pd.read_csv(tar_file,encoding ='utf-8')\r\n plt.figure(figsize =(15,15))\r\n xclass =f['姓名']\r\n ychinese =f[['期中语文','期末语文']]\r\n plt.plot(xclass,ychinese,linewidth =3,marker ='o',markersize =10,markerfacecolor ='w')\r\n #图表标题及字体大小\r\n plt.title('语文成绩折线图',fontsize =20)\r\n #坐标轴刻度字体\r\n plt.xticks(fontsize =15)\r\n plt.yticks(fontsize =15)\r\n plt.xlabel('姓名',fontsize =15)\r\n plt.ylabel('成绩(分)',fontsize =15)\r\n plt.legend(['期中语文','期末语文'])\r\n savefig(test_list[i]+'语文成绩折线图.png')\r\n i =i+1\r\n plt.close()\r\n ##压缩模块\r\n i =0\r\n name =[]\r\n\r\n lists =[]\r\n lest =[]\r\n for fo in test_list[0:1]:#########\r\n lists =os.listdir(fo)\r\n for x in lists:\r\n path1 =fo+lists[0]\r\n lest.append(path1)\r\n path2 =fo+lists[1]\r\n lest.append(path2)\r\n z =zip()\r\n zip_file =fo+'成绩.zip'\r\n name.append(zip_file)\r\n z.get_zip(lest,zip_file)\r\n time.sleep(2)\r\n i =i+1\r\n sums =sums-1\r\n lest =[]\r\n lists =[]\r\n print(\"{}个文件已完成,剩余{}个预计需要{}分钟\".format(i,sums,sums*3/60))\r\n\r\ndef maile(aim_account):\r\n account = input('请输入邮箱账户:')\r\n token = input('请输入邮箱授权码:')\r\n # 设置邮箱服务器,端口\r\n smtp = smtplib.SMTP_SSL('smtp.qq.com', 465)\r\n # 登录qq邮箱\r\n smtp.login(account, token)\r\n content ='本学期成绩已整理完成,现在对你的成绩单独发送'\r\n content =content+'详情见附件内容'\r\n email_content = MIMEText(content, 'plain', 'utf-8')\r\n#for tar in name:\r\n #passhttp://localhost:8888/notebooks/Untitled3.ipynb#\r\n tar ='./学生个人成绩/高健玮/成绩.zip'#########\r\n f =open(tar,'rb')\r\n # 设置附件的MIME和文件名,这里是rar类型:\r\n fil = MIMEBase('zip', 'zip', filename='成绩单.zip')\r\n # 加上必要的头信息:\r\n fil.add_header('Content-Disposition', 'attachment', filename='成绩单')\r\n fil.add_header('Content-ID', '<0>')\r\n fil.add_header('X-Attachment-Id', '0')\r\n # 把附件的内容读进来:\r\n fil.set_payload(f.read())\r\n # 用Base64编码\r\n encoders.encode_base64(fil)\r\n #添加到MIMEMultipart\r\n msg = MIMEMultipart()\r\n msg.attach(fil)\r\n f.close()\r\n msg.attach(email_content)\r\n # 设置发送者信息\r\n msg['From'] = '贾'\r\n msg['To'] = '各位同事们' \r\n msg['Subject'] = '测试'\r\n # 发送邮件\r\n smtp.sendmail(account, aim_account, msg.as_string()) \r\n # 关闭邮箱服务\r\n smtp.quit() \r\n\r\nchoice =5###学生成绩管理后期用openpyxl改进\r\nalign = Alignment(horizontal='right')\r\nwhile(choice !='0'):\r\n menu_all()\r\n choice =input(\"请输入您的选择:\")\r\n \r\n if(choice =='2'):#教师端操作\r\n menu2()\r\n choice =input('���输入您的选择:')\r\n\r\n if(choice =='1'):#学生成绩访问\r\n visit()\r\n \r\n elif(choice =='2'):#学生成绩分析\r\n print('''\r\n 1.班级总体分析\r\n 2.单位学生分析\r\n ''')\r\n choice =input(\"请输入你的选择: \")\r\n if(choice =='1'):#总体分析\r\n print(\"请打开两个xlsx表格\")\r\n #打开 期中、期末两个xlsx表格\r\n path1 =open_f()\r\n wb1 
=load_workbook(path1)\r\n ws1 =wb1.active\r\n path2 =open_f()\r\n wb2 =load_workbook(path2)\r\n ws2 =wb2.active\r\n #用函数封装具体分析\r\n analyze_fun(ws1,ws2)\r\n analyze_clean()\r\n analyze_class()\r\n \r\n \r\n elif(choice =='2'):#单位学生分析\r\n analy_stu(paths)\r\n aim_account =input('请输入要发送的目标邮箱: ')\r\n maile(aim_account)\r\n \r\n elif(choice =='1'):\r\n api_c()\r\n\r\n\r\n\r\n menu_all()\r\n choice =input(\"请输入您的选择:\") \r\n","repo_name":"JhonDavies/grade-two-design","sub_path":"课程设计项目技术部分.py","file_name":"课程设计项目技术部分.py","file_ext":"py","file_size_in_byte":15490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"364705495","text":"\"\"\"\n207. Course Schedule\nThere are a total of n courses you have to take, labeled from 0 to n-1.\n\nSome courses may have prerequisites, for example to take course 0 you have to first take course 1, which is expressed as a pair: [0,1]\n\nGiven the total number of courses and a list of prerequisite pairs, is it possible for you to finish all courses?\n\nExample 1:\n\nInput: 2, [[1,0]] \nOutput: true\nExplanation: There are a total of 2 courses to take. \n To take course 1 you should have finished course 0. So it is possible.\nExample 2:\n\nInput: 2, [[1,0],[0,1]]\nOutput: false\nExplanation: There are a total of 2 courses to take. \n To take course 1 you should have finished course 0, and to take course 0 you should\n also have finished course 1. So it is impossible.\nNote:\n\nThe input prerequisites is a graph represented by a list of edges, not adjacency matrices. Read more about how a graph is represented.\nYou may assume that there are no duplicate edges in the input prerequisites.\n\"\"\"\n# Time complexity: O(V+E)\n# Space complexity: O(V+E)\nclass Solution:\n def canFinish(self, n: int, prerequisites: List[List[int]]) -> bool:\n graph = [[] for _ in range(n)]\n visits = [0 for _ in range(n)]\n \n for course, prereq in prerequisites:\n graph[course].append(prereq)\n\n for i in range(n):\n if not self.dfs(i, graph, visits):\n return False\n return True\n \n def dfs(self, i, graph, visits):\n if visits[i] == -1:\n return False\n if visits[i] == 1:\n return True\n visits[i] = -1\n for j in graph[i]:\n if not self.dfs(j, graph, visits):\n return False\n visits[i] = 1\n return True\n \n \n# ============================================================================\n\nclass Solution:\n def canFinish(self, n: int, prerequisites: List[List[int]]) -> bool:\n graph = {i:[] for i in range(n)}\n indeg = {i:0 for i in range(n)}\n \n total_deps = 0\n for course, req in prerequisites:\n graph[req].append(course)\n indeg[course] += 1\n total_deps += 1\n \n non_dep = [node for node in graph if indeg[node] == 0]\n visited = 0\n \n while non_dep:\n req = non_dep.pop()\n for course in graph[req]:\n visited += 1\n indeg[course] -= 1\n if indeg[course] == 0:\n non_dep.append(course)\n return visited == total_deps\n","repo_name":"victorplusc/Algorithms","sub_path":"Leetcode/207. Course Schedule.py","file_name":"207. 
Course Schedule.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"31593372875","text":"grocery = [\"harpic\", \"vin bar\", \"Bhindi\", \"lollypop\", 89]\n#print(grocery[0])\n# numbers = [2, 5, 6, 70, 9, 22]\n#numbers.sort()\n#numbers.reverse()\n#print(numbers[::2])\n#numbers.append(43)\n#numbers.insert(2, 88)\n# numbers.remove(9)\n# numbers.pop()\n# numbers[4] = 56\n# print(numbers)\n# #tp = (67, 78, 99)\n#print(tp)\n#a = 1\n#b = 99\n#a, b = b, a\n#print(a, b)\nq = input()\ngrocery.append(q)\nprint(grocery)\ngrocery.remove(q)\nprint(grocery)","repo_name":"Singhal861/Python_Learning_stage_codes","sub_path":"1.2.py","file_name":"1.2.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"73340712727","text":"import pandas as pd\nimport time\n\ncolumn_names = ['year', 'pistol', 'riflshot', 'asltweap', 'machgun', \n 'knifcuti', 'othrweap', 'pct', 'trhsloc', \n 'ac_assoc', 'ac_cgdir', 'ac_rept', 'ac_evasv', 'ac_incid', \n 'ac_inves', 'ac_proxm', 'ac_time', 'ac_stsnd', 'ac_other',\n 'cs_objcs', 'cs_descr', 'cs_casng', 'cs_lkout', 'cs_cloth', \n 'cs_drgtr', 'cs_furtv', 'cs_vcrim', 'cs_bulge', 'cs_other', \n 'age', 'build', 'sex', 'ht_feet', 'ht_inch', 'weight', \n 'inout', 'radio', 'perobs', 'datestop', 'timestop']\n \ncoln = [\"cs_objcs\", \"cs_descr\", \"cs_casng\",\"cs_lkout\", \"cs_cloth\", \n \"cs_drgtr\", \"cs_furtv\", \"cs_vcrim\", \"cs_bulge\", \"cs_other\",\n \"ac_rept\", \"ac_inves\", \"ac_proxm\", \"ac_evasv\", \"ac_assoc\", \n \"ac_cgdir\", \"ac_incid\", \"ac_time\", \"ac_stsnd\", \"ac_other\", \n \"pistol\", \"riflshot\", \"asltweap\", \"knifcuti\", \"machgun\", \n \"othrweap\"]\n\nlocation_housing_recode_dict = {'P': 'neither', \n 'H': 'housing', \n 'T': 'transit'}\n\nbuild_recode_dict = {'H': 'heavy', 'M': 'medium', 'T': 'thin', \n 'U': 'muscular', 'Z': 'unknown'}\n\nsex_recode_dict = {'M': 'male', 'F': 'female'}\n\ndrop_column_names = ['pistol', 'riflshot', 'asltweap', 'machgun', \n 'knifcuti', 'othrweap', 'pct', 'trhsloc', \n 'ac_assoc', 'ac_cgdir', 'ac_rept', 'ac_evasv', 'ac_incid', \n 'ac_inves', 'ac_proxm', 'ac_time', 'ac_stsnd', 'ac_other',\n 'cs_objcs', 'cs_descr', 'cs_casng', 'cs_lkout', 'cs_cloth', \n 'cs_drgtr', 'cs_furtv', 'cs_vcrim', 'cs_bulge', 'cs_other', \n 'age', 'build', 'sex', 'ht_feet', 'ht_inch', 'weight', \n 'inout', 'radio', 'perobs', 'datestop', 'timestop', \n 'found_pistol', 'found_rifle', 'found_assault', \n 'found_machinegun', 'found_knife', 'found_other']\n\ndef load_data(years: list):\n dataframes = []\n for year in years:\n filename = f'./data/{year}.csv'\n\n try: \n this_data = pd.read_csv(filename, na_values=' ')\n except:\n this_data = pd.read_csv(filename, encoding= 'unicode_escape', na_values=' ')\n \n if year in range(2011, 2017):\n this_data = this_data.drop(columns=['forceuse'])\n if year in range(2013, 2017):\n this_data = this_data.rename(columns={'dettypCM': 'dettypcm', \n 'lineCM': 'linecm', \n 'detailCM': 'detailcm'})\n \n dataframes.append(this_data)\n sqf_data_full = pd.concat(dataframes, ignore_index=True)\n sqf_data = sqf_data_full.copy()\n\n return sqf_data\n\ndef recode_yn(f):\n f_new = f.replace({'N': 0, 'Y': 1})\n f_new = f_new.astype(bool)\n return f_new\n\ndef recode_io(f):\n f_new = f.replace({'O': 0, 'I': 1})\n f_new = f_new.astype(bool)\n return f_new\n\ndef main(years: list):\n start_time = time.time()\n\n sqf_data = 
load_data(years)\n\n sqf_data = sqf_data[column_names]\n sqf_data = sqf_data.dropna(subset=['timestop'])\n\n sqf_data['datestop'] = sqf_data['datestop'].apply(lambda x: '{0:0>8}'.format(x))\n sqf_data['timestop'] = sqf_data['timestop'].apply(lambda x: '{0:0>4}'.format(x))\n\n sqf_data['month'] = sqf_data['datestop'].str[:2].astype(int)\n sqf_data['day'] = sqf_data['datestop'].str[2:4].astype(int)\n sqf_data['year'] = sqf_data['year'].astype(int)\n\n sqf_data['time_period'] = sqf_data['timestop'].str[:2].astype(int)\n \n if 2014 in years:\n for i in coln:\n sqf_data.loc[(sqf_data['year'] == 2014) & (sqf_data[i] == 1), i] = 'Y'\n sqf_data.loc[(sqf_data['year'] == 2014) & (sqf_data[i].isna()), i] = 'N'\n \n sqf_data = sqf_data.assign(\n found_pistol = recode_yn(sqf_data['pistol']),\n found_rifle = recode_yn(sqf_data['riflshot']),\n found_assault = recode_yn(sqf_data['asltweap']),\n found_machinegun = recode_yn(sqf_data['machgun']),\n found_knife = recode_yn(sqf_data['knifcuti']),\n found_other = recode_yn(sqf_data['othrweap']),\n precinct=pd.factorize(sqf_data['pct'])[0]+1,\n additional_associating = recode_yn(sqf_data['ac_assoc']),\n additional_direction = recode_yn(sqf_data['ac_cgdir']),\n additional_report = recode_yn(sqf_data['ac_rept']),\n additional_evasive = recode_yn(sqf_data['ac_evasv']),\n additional_highcrime = recode_yn(sqf_data['ac_incid']),\n additional_investigation = recode_yn(sqf_data['ac_inves']),\n additional_proximity = recode_yn(sqf_data['ac_proxm']),\n additional_time = recode_yn(sqf_data['ac_time']),\n additional_sights = recode_yn(sqf_data['ac_stsnd']),\n additional_other = recode_yn(sqf_data['ac_other']),\n stopped_bulge = recode_yn(sqf_data['cs_objcs']),\n stopped_object = recode_yn(sqf_data['cs_descr']),\n stopped_casing = recode_yn(sqf_data['cs_casng']),\n stopped_clothing = recode_yn(sqf_data['cs_lkout']),\n stopped_desc = recode_yn(sqf_data['cs_cloth']),\n stopped_drugs = recode_yn(sqf_data['cs_drgtr']),\n stopped_furtive = recode_yn(sqf_data['cs_furtv']),\n stopped_lookout = recode_yn(sqf_data['cs_vcrim']),\n stopped_violent = recode_yn(sqf_data['cs_bulge']),\n stopped_other = recode_yn(sqf_data['cs_other']),\n inside = recode_io(sqf_data['inout']),\n observation_period = sqf_data['perobs'],\n radio_run = recode_yn(sqf_data['radio']),\n location_housing = sqf_data['trhsloc'].replace(location_housing_recode_dict).fillna('neither'),\n suspect_build = sqf_data['build'].replace(build_recode_dict),\n suspect_sex = sqf_data['sex'].replace(sex_recode_dict)\n )\n \n sqf_data['found_weapon'] = (sqf_data['found_pistol'] | \n sqf_data['found_rifle'] |\n sqf_data['found_assault'] | \n sqf_data['found_machinegun'] |\n sqf_data['found_knife'] | \n sqf_data['found_other'])\n\n sqf_data = sqf_data.drop(sqf_data[sqf_data['age'] == '**'].index)\n sqf_data = sqf_data.dropna(subset=['age'])\n sqf_data['suspect_age'] = sqf_data['age'].astype(int)\n sqf_data = sqf_data.loc[(sqf_data['suspect_age'] >= 5) & \n (sqf_data['suspect_age'] <= 100)]\n sqf_data = sqf_data.dropna(subset=['suspect_age'])\n\n sqf_data['suspect_height'] = sqf_data['ht_feet'] + (\n sqf_data['ht_inch'] / 12)\n sqf_data['suspect_weight'] = sqf_data['weight']\n\n # replace suspect.weight >= 700 with NA\n sqf_data = sqf_data.loc[sqf_data['suspect_weight'] < 700]\n\n sqf_data = sqf_data.drop(columns=drop_column_names)\n print(\"--- Took: %s seconds ---\\n\" % (time.time()-start_time))\n\n sqf_data.to_csv('./data/sqf_data.csv', index=False)\n\n\nif __name__ == \"__main__\":\n years = [2008, 2009, 2010, 2011, 2012, 2013, 
2014, 2015, 2016]\n main(years)","repo_name":"JinqianPan/Advanced-Python-Final-Project","sub_path":"data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":7147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14544381446","text":"import json\nimport sys\nimport pandas as pd\nfrom handlers.dataHandler import dataHandler\nfrom datetime import datetime, timedelta\n\n# load config file\ntry:\n with open('config.json') as configFile:\n config = json.load(configFile)\n print(\"Config file loaded\")\nexcept:\n print(\"Failed to load config file\")\n sys.exit(\"Terminating program\")\n\n# Initialize the debug object if debugging is enabled\nif config['debugMode']:\n debugDict = {}\n debugDict['config'] = config\n\n# Initialize and load the data\ndata = dataHandler(config)\ndata.loadData()\n\ndef main(): \n if config['serverMode'] == 'flask':\n count = 0\n import interfaces.flaskInterface as fI \n fI.api.add_resource(fI.BaseTime, '/', \n resource_class_kwargs={'mainConfig': config, 'data': data,\n 'debugDict': debugDict, 'count': count})\n fI.app.run(host=config['flaskHostName'], port=config['flaskPort'])\n if config['serverMode'] == 'bacnet': \n import interfaces.bacnetInterface as bI \n bI.main(data, config)\n \nif __name__ == \"__main__\":\n main()","repo_name":"jkimmerling/Simulacra","sub_path":"runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18132819923","text":"#!/usr/bin/python\r\nimport sys\r\nimport itertools \r\n\r\ndef resetdata():\r\n ans = []\r\n prog = open('advent07.txt').readlines()\r\n for a in prog[0].split(\",\"):\r\n ans.append(int(a))\r\n return ans\r\n\r\ndef combos(mydata):\r\n listA = [0,1,2,3,4]\r\n perm = itertools.permutations(listA) \r\n maxthrust = 0\r\n maxthrustcombo = []\r\n\r\n for i in list(perm): \r\n # pass mydata, then an array of inputs from right to left (pop)\r\n # print(\"processed 0,{} ao={}\".format(i[0],ao)) \r\n ao = processa(mydata,[0,i[0]]) # A\r\n bo = processa(mydata,[ao,i[1]]) # B\r\n co = processa(mydata,[bo,i[2]]) # C\r\n do = processa(mydata,[co,i[3]]) # D\r\n eo = processa(mydata,[do,i[4]]) # E\r\n if eo > maxthrust:\r\n maxthrust = eo\r\n maxthrustcombo = i\r\n print (\"Max with {} is {}\".format(maxthrust,maxthrustcombo))\r\n\r\n\r\ndef processa(mydata,ins):\r\n\r\n lenmydata = len(mydata)\r\n i = 0\r\n while (i < lenmydata and mydata[i] != 99 ):\r\n # print(mydata)\r\n oc = mydata[i]%10\r\n if mydata[i] > 100 and int(mydata[i]/100)%10 == 1:\r\n p1 = 1 # immediate\r\n else:\r\n p1 = 0 # positional\r\n if mydata[i] > 1000:\r\n p2 = 1\r\n else:\r\n p2 = 0\r\n if (4 == oc):\r\n if p1:\r\n dout = mydata[i+1]\r\n else:\r\n dout = mydata[mydata[i+1]]\r\n #print(\"output: {}\".format(dout))\r\n return dout\r\n i += 2\r\n elif (3 == oc):\r\n mydata[mydata[i+1]] = ins.pop()\r\n i += 2\r\n else:\r\n a = mydata[i+1]\r\n b = mydata[i+2]\r\n p = mydata[i+3] # always positional\r\n #print (\"setof4 ocfull:{},a:{},b:{},p:{},|,p1:{},p2:{}\".format(mydata[i],a,b,p,p1,p2))\r\n # 000oo\r\n if p1:\r\n av = a\r\n else:\r\n av = mydata[a]\r\n if p2:\r\n bv = b\r\n else:\r\n bv = mydata[b]\r\n #print (\"av,bv={},{}\".format(av,bv))\r\n if (1 == oc):\r\n mydata[p] = av + bv\r\n i += 4\r\n elif (2 == oc):\r\n mydata[p] = av * bv\r\n i += 4\r\n elif (5 == oc):\r\n # Opcode 5 is jump-if-true: if the first parameter is non-zero, it 
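The data_preprocessing.py record above converts 'Y'/'N' flags to booleans via recode_yn. A minimal toy check of that pattern (the sample values below are invented):

import pandas as pd

flags = pd.Series(['Y', 'N', 'Y'])
print(flags.replace({'N': 0, 'Y': 1}).astype(bool).tolist())  # [True, False, True]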
sets the instruction pointer to the value from the second parameter.\r\n # Otherwise, it does nothing.\r\n if 0 != av:\r\n i = bv\r\n else:\r\n i += 3\r\n elif (6 == oc):\r\n # Opcode 6 is jump-if-false: if the first parameter is zero, it sets the instruction pointer to the value from the second parameter.\r\n # Otherwise, it does nothing.\r\n if 0 == av:\r\n i = bv\r\n else:\r\n i += 3\r\n elif (7 == oc):\r\n # Opcode 7 is less than: if the first parameter is less than the second parameter, it stores 1 in the position given by the third parameter.\r\n # Otherwise, it stores 0.\r\n if av < bv:\r\n mydata[p] = 1\r\n i += 4\r\n else:\r\n mydata[p] = 0\r\n i += 4\r\n elif (8 == oc):\r\n # Opcode 8 is equals: if the first parameter is equal to the second parameter, it stores 1 in the position given by the third parameter.\r\n # Otherwise, it stores 0.\r\n if av == bv:\r\n mydata[p] = 1\r\n i += 4\r\n else:\r\n mydata[p] = 0\r\n i += 4\r\n else:\r\n i += 1 \r\nif __name__ == \"__main__\":\r\n mydata = resetdata()\r\n #print (mydata)\r\n combos(mydata)\r\n","repo_name":"allanpaschall/Advent2019","sub_path":"2019/Bryan/day07a.py","file_name":"day07a.py","file_ext":"py","file_size_in_byte":3896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19999066690","text":"from Soup import Soup\nfrom datetime import datetime, time, date\n\nclass MatchHeadingSoup(Soup):\n\n def __init__(self,soup):\n Soup.__init__(self, soup)\n\n def get_date_of_match(self):\n month_year = self.get_soup().find('span', class_='month').text.strip().split('/')\n hour_min = self.get_soup().find('span', class_='hour').text.strip().split(':')\n\n # Find date of the game\n day = int(self.get_soup().find('span', class_='day').text.strip())\n month = int(month_year[0])\n year = int(month_year[1])\n\n # Find hour of the game\n hour = int(hour_min[0])\n minute = int(hour_min[1])\n\n # Combine date and time together\n d = date(year, month, day)\n t = time(hour, minute)\n\n return datetime.combine(d, t)\n\n def get_match_detail_url(self):\n return self.get_soup().find('div', class_='season__game-action grid-16 grid-mt-12 grid-msw-48').a['href']\n","repo_name":"StanislawAbyszkin/Soccer-Web-Scraper","sub_path":"Scraper/SoupModels/SoupMatchHeading.py","file_name":"SoupMatchHeading.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2500931251","text":"import pandas as pd\nimport numpy as np\nimport pickle\nimport os\nfrom joblib import dump,load\nfrom scripts.disease_to_symptoms import D2S\nfrom sklearn.ensemble import RandomForestClassifier\n\nclass S2D:\n def __init__(self,ROOT_DIR,mode=\"train\"):\n self.ROOT_DIR=ROOT_DIR\n if mode==\"train\":\n self.load_data()\n self.model=RandomForestClassifier(300,n_jobs=-1)\n else:\n self.model=load(os.path.join(self.ROOT_DIR,'models/custom/S2D.joblib'))\n self.diseases,self.symptoms_list=pickle.load(open(os.path.join(self.ROOT_DIR,'models/custom/S2D.b'),'rb'))\n def load_data(self):\n self.data=pd.read_csv(os.path.join(self.ROOT_DIR,'datasets/preprocessed/df_pivoted.csv'))\n def train(self):\n self.symptoms_list=self.data.columns[2:]\n self.diseases=self.data['Source']\n symptoms=self.data.iloc[:,2:]\n self.model.fit(symptoms,self.diseases)\n self.store_params()\n def predict(self,symptoms):\n self.symptoms=symptoms\n symptom_vector=np.zeros(self.symptoms_list.size)\n for s in symptoms:\n 
symptom_vector[self.symptoms_list.get_loc(s)]=1\n probabilities=self.model.predict_proba([symptom_vector])\n predictions=sorted(zip(probabilities[0],self.diseases),reverse=True)\n predicted_diseases=[]\n i=0\n while len(predicted_diseases)!=3 and i 0:\n popped_element = self.first_stack.pop()\n self.second_stack.append(popped_element)\n popped_element = self.second_stack.pop()\n return popped_element\n\n\na = Stack_Queue()\na.push(1)\na.push(2)\na.push(3)\nprint (a.pop())\nprint (a.pop())\nprint (a.pop())\na.push(100)\nprint (a.pop())\n","repo_name":"yask123/interview_prep_python","sub_path":"queue_with_stack.py","file_name":"queue_with_stack.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"31"} +{"seq_id":"19561293313","text":"__version__ = \"1.0.3\"\n__author__ = 'JoshuaMK'\n__credits__ = 'Treeki'\n\nimport re\nfrom argparse import ArgumentParser\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import List, Union\n\nfrom dolreader.dol import DolFile\n\nfrom pykamek import __version__\nfrom pykamek.addressmapper import AddressMapper\nfrom pykamek.exceptions import InvalidDataException\nfrom pykamek.kamek import KamekBinary\nfrom pykamek.linker import Linker\nfrom pykamek.versionmap import VersionMapper\n\n\ndef sorted_alphanumeric(l):\n \"\"\" Sort the given iterable in the way that humans expect.\"\"\"\n def convert(text): return int(text) if text.isdigit() else text\n def alphanum_key(key): return [convert(c)\n for c in re.split('([0-9]+)', str(key))]\n return sorted(l, key=alphanum_key)\n\n\nclass ElfHandler(Linker):\n def __init__(self, base: AddressMapper, files: Union[Path, List[Path]]):\n super().__init__(base)\n\n self.outputPath = None\n self.versionMap = None\n self.externals = {}\n\n if isinstance(files, Path):\n self.add_module(files)\n elif isinstance(files, str):\n self.add_module(Path(files))\n else:\n for obj in sorted_alphanumeric(files):\n obj = Path(obj)\n if obj.is_file():\n self.add_module(obj)\n else:\n for f in sorted_alphanumeric(obj.iterdir()):\n if f.is_file:\n self.add_module(f)\n\n def __repr__(self):\n return f\"repr={vars(self)}\"\n\n def __str__(self):\n return f\"ELF module converter; {self.__repr__()}\"\n\n @staticmethod\n def read_externals(file: str) -> dict:\n symbolDict = {}\n assignmentRegex = re.compile(\n r\"^\\s*([a-zA-Z0-9_<>,\\-\\$]+)\\s*=\\s*0x([a-fA-F0-9]+)\\s*(#.*)?$\")\n\n with open(file, \"r\") as f:\n for i, line in enumerate(f.readlines()):\n if line.strip() == \"\" or line.strip().startswith(\"#\") or line.strip().startswith(\"//\"):\n continue\n\n try:\n match = re.findall(assignmentRegex, line.strip())\n _symbol = match[0][0]\n _address = match[0][1]\n except IndexError:\n raise InvalidDataException(\n f\"Symbol definition {line.strip()} at line {i} is an invalid entry\")\n\n try:\n symbolDict[_symbol] = int(_address, 16)\n except ValueError:\n raise InvalidDataException(\n f\"Address {_address} at line {i} is not a hexadecimal number\")\n\n return symbolDict\n\n\ndef main(args: list):\n parser = ArgumentParser(\n f\"pykamek {__version__}\", description=\"ELF to Kuribo module converter\")\n\n parser.add_argument(\n \"elf\", help=\"ELF object file(s) and or folders of ELF object files\", nargs=\"+\")\n parser.add_argument(\n \"--dynamic\", help=\"The module is dynamically relocated\", action=\"store_true\")\n parser.add_argument(\n \"--static\", help=\"The module is statically located at ADDR\", metavar=\"ADDR\")\n 
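The queue_with_stack.py record above arrives partially garbled in this copy; for reference, a sketch of the classic two-stack queue it appears to implement (class and attribute names here are mine), which gives amortized O(1) per operation:

class TwoStackQueue:
    def __init__(self):
        self._in, self._out = [], []

    def push(self, x):
        self._in.append(x)

    def pop(self):
        if not self._out:
            while self._in:  # refill only when the out-stack is empty
                self._out.append(self._in.pop())
        return self._out.pop()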
parser.add_argument(\n \"--output-kamek\", help=\"File to output Kamek Binary\", metavar=\"FILE\")\n parser.add_argument(\n \"--output-riiv\", help=\"File to output riivolution XML\", metavar=\"FILE\")\n parser.add_argument(\n \"--output-gecko\", help=\"File to output gecko code\", metavar=\"FILE\")\n parser.add_argument(\n \"--output-code\", help=\"File to output raw code\", metavar=\"FILE\")\n parser.add_argument(\"--input-dol\", help=\"Input DOL file\", metavar=\"FILE\")\n parser.add_argument(\n \"--output-dol\", help=\"File to output patched DOL\", metavar=\"FILE\")\n parser.add_argument(\"--extern\", help=\"External linker map\", metavar=\"FILE\")\n parser.add_argument(\n \"--versionmap\", help=\"Version map for address translations\", metavar=\"FILE\")\n\n args = parser.parse_args(args)\n\n if args.dynamic and args.static:\n parser.error(\"Args `--dynamic' and `--static' cannot be used together\")\n elif not args.dynamic and not args.static:\n parser.error(\"Must provide either `--dynamic' or `--static' arguments\")\n\n _externals = None\n _versionMap = None\n\n if args.dynamic:\n _baseAddr = None\n elif args.static:\n _baseAddr = int(args.static, 16)\n\n _externals = {}\n if args.extern:\n _externals = ElfHandler.read_externals(Path(args.extern).resolve())\n\n if args.versionmap:\n _versionMap = VersionMapper(Path(args.versionmap).resolve())\n else:\n _versionMap = VersionMapper()\n\n _outputKamekPath = None\n _outputRiivPath = None\n _outputGeckoPath = None\n _outputCodePath = None\n _inputDolPath = None\n _outputDolPath = None\n\n if args.output_kamek:\n _outputKamekPath = Path(args.output_kamek).resolve()\n if args.output_riiv:\n _outputRiivPath = Path(args.output_riiv).resolve()\n if args.output_gecko:\n _outputGeckoPath = Path(args.output_gecko).resolve()\n if args.output_code:\n _outputCodePath = Path(args.output_code).resolve()\n if args.input_dol:\n _inputDolPath = Path(args.input_dol).resolve()\n if args.output_dol:\n _outputDolPath = Path(args.output_dol).resolve()\n\n if (_outputKamekPath is None and\n _outputRiivPath is None and\n _outputGeckoPath is None and\n _outputCodePath is None and\n _outputDolPath is None\n ):\n parser.error(\"No output path(s) specified\")\n\n if _inputDolPath is None and _outputDolPath:\n parser.error(\"Input DOL path not specified\")\n\n for versionKey in _versionMap.mappers:\n print(f\"Linking version {versionKey}\")\n\n elfConverter = ElfHandler(_versionMap.mappers[versionKey], args.elf)\n\n if _baseAddr:\n elfConverter.link_static(_externals, _baseAddr)\n else:\n elfConverter.link_dynamic(_externals)\n\n kb = KamekBinary()\n kb.load_from_linker(elfConverter)\n if _outputKamekPath:\n with open(str(_outputKamekPath).replace(\"$KV$\", versionKey), \"wb\") as kBinary:\n kBinary.write(kb.pack().getvalue())\n if _outputRiivPath:\n with open(str(_outputRiivPath).replace(\"$KV$\", versionKey), \"w\") as kBinary:\n kBinary.write(kb.pack_riivo())\n if _outputGeckoPath:\n with open(str(_outputGeckoPath).replace(\"$KV$\", versionKey), \"w\") as kBinary:\n kBinary.write(kb.pack_gecko_codes())\n if _outputCodePath:\n with open(str(_outputCodePath).replace(\"$KV$\", versionKey), \"wb\") as kBinary:\n kBinary.write(kb.rawCode.getvalue())\n\n if _outputDolPath:\n dol = DolFile(BytesIO(_inputDolPath.read_bytes()))\n kb.apply_to_dol(dol)\n\n outPath = str(_outputDolPath).replace(\"$KV$\", versionKey)\n\n with open(outPath, \"wb\") as outDol:\n dol.save(outDol)\n\n print(\"Finished 
execution\")\n","repo_name":"JoshuaMKW/pykamek","sub_path":"pykamek/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7045,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"17983804601","text":"import sys\nsys.stdin = open('input_1743.txt', 'r')\n\nN, M, K = map(int, input().split())\nfield = [[0 for _ in range(M)] for _ in range(N)]\n\ndy = [-1, 1, 0, 0]\ndx = [0, 0, -1, 1]\n\nfor k in range(K):\n r, c = map(int, input().split())\n field[r-1][c-1] = 1\n\nmax_size = 0\n\nfor n in range(N):\n for m in range(M):\n size = 1\n if field[n][m]:\n field[n][m] = 0\n Q = [(n, m)]\n\n while Q:\n y, x = Q.pop(0)\n\n for d in range(4):\n n_y = y + dy[d]\n n_x = x + dx[d]\n\n if -1 < n_y < N and -1 < n_x < M and field[n_y][n_x]:\n field[n_y][n_x] = 0\n size += 1\n Q.append((n_y, n_x))\n\n if size > max_size:\n max_size = size\n\nprint(max_size)","repo_name":"weekyear/CodingTest","sub_path":"정준현/02_BFSDFS/boj_1743_음식물피하기.py","file_name":"boj_1743_음식물피하기.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21161030849","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gradient_checker\nfrom tensorflow.python.platform import test\n\n\nclass PadOpTest(test.TestCase):\n\n def _npPad(self, inp, paddings, mode, constant_values=0):\n mode = mode.lower()\n if mode == \"constant\":\n return np.pad(inp, paddings, mode=mode, constant_values=constant_values)\n else:\n return np.pad(inp, paddings, mode=mode)\n\n def testNpPad(self):\n self.assertAllEqual(\n np.array([[0, 0, 0, 0, 0, 0],\n [0, 3, 3, 0, 0, 0],\n [0, 4, 4, 0, 0, 0],\n [0, 5, 5, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0]]),\n self._npPad(\n np.array([[3, 3], [4, 4], [5, 5]]),\n [[1, 2], [1, 3]],\n mode=\"constant\"))\n\n self.assertAllEqual(\n np.array([[1, 1, 1, 1, 1, 1],\n [1, 3, 3, 1, 1, 1],\n [1, 4, 4, 1, 1, 1],\n [1, 5, 5, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1]]),\n self._npPad(\n np.array([[3, 3], [4, 4], [5, 5]]),\n [[1, 2], [1, 3]],\n mode=\"constant\", constant_values=1))\n\n self.assertAllEqual(\n np.array([[4, 3, 4, 9, 4, 3],\n [1, 0, 1, 2, 1, 0],\n [4, 3, 4, 9, 4, 3],\n [1, 0, 1, 2, 1, 0]]),\n self._npPad(\n np.array([[0, 1, 2], [3, 4, 9]]),\n [[1, 1], [1, 2]],\n mode=\"reflect\"))\n\n self.assertAllEqual(\n np.array([[0, 0, 1, 2, 2, 1],\n [0, 0, 1, 2, 2, 1],\n [3, 3, 4, 9, 9, 4],\n [3, 3, 4, 9, 9, 4]]),\n self._npPad(\n np.array([[0, 1, 2], [3, 4, 9]]),\n [[1, 1], [1, 2]],\n mode=\"symmetric\"))\n\n def _testPad(self, np_inputs, paddings, mode, constant_values):\n np_val = self._npPad(np_inputs, paddings, mode=mode,\n constant_values=constant_values)\n with self.test_session(use_gpu=True):\n tf_val = array_ops.pad(np_inputs, paddings, mode=mode,\n constant_values=constant_values)\n out = tf_val.eval()\n self.assertAllEqual(np_val, out)\n self.assertShapeEqual(np_val, tf_val)\n\n def _testGradient(self, x, a, mode, constant_values):\n with self.test_session(use_gpu=True):\n inx = ops.convert_to_tensor(x)\n xs = list(x.shape)\n ina = ops.convert_to_tensor(a)\n y = array_ops.pad(inx, ina, mode=mode, 
constant_values=constant_values)\n # Expected y's shape to be:\n ys = list(np.array(x.shape) + np.sum(np.array(a), axis=1))\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n inx, xs, y, ys, x_init_value=x)\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def _testAll(self, np_inputs, paddings, constant_values):\n for mode in (\"CONSTANT\", \"REFLECT\", \"SYMMETRIC\", \"reflect\", \"symmetric\",\n \"constant\"):\n # Zero-sized input is not allowed for REFLECT mode, but we still want\n # zero-sized input test cases for the other modes.\n if np_inputs.size or mode.upper() != \"REFLECT\":\n self._testPad(np_inputs, paddings, mode=mode,\n constant_values=constant_values)\n if np_inputs.dtype == np.float32:\n self._testGradient(np_inputs, paddings, mode=mode,\n constant_values=constant_values)\n\n def testInputDims(self):\n with self.test_session(use_gpu=True):\n with self.assertRaises(ValueError):\n array_ops.pad(array_ops.reshape(\n [1, 2], shape=[1, 2, 1, 1, 1, 1]),\n array_ops.reshape(\n [1, 2], shape=[1, 2]))\n\n def testPaddingsDim(self):\n with self.test_session(use_gpu=True):\n with self.assertRaises(ValueError):\n array_ops.pad(array_ops.reshape(\n [1, 2], shape=[1, 2]),\n array_ops.reshape(\n [1, 2], shape=[2]))\n\n def testPaddingsDim2(self):\n with self.test_session(use_gpu=True):\n with self.assertRaises(ValueError):\n array_ops.pad(array_ops.reshape(\n [1, 2], shape=[1, 2]),\n array_ops.reshape(\n [1, 2], shape=[2, 1]))\n\n def testPaddingsDim3(self):\n with self.test_session(use_gpu=True):\n with self.assertRaises(ValueError):\n array_ops.pad(array_ops.reshape(\n [1, 2], shape=[1, 2]),\n array_ops.reshape(\n [1, 2], shape=[1, 2]))\n\n def testPaddingsDim4(self):\n with self.test_session(use_gpu=True):\n with self.assertRaises(ValueError):\n array_ops.pad(array_ops.reshape(\n [1, 2], shape=[1, 2]),\n array_ops.reshape(\n [1, 2, 3, 4, 5, 6], shape=[3, 2]))\n\n def testPaddingsNonNegative(self):\n with self.test_session(use_gpu=True):\n with self.assertRaisesRegexp(ValueError, \"must be non-negative\"):\n array_ops.pad(constant_op.constant(\n [1], shape=[1]),\n constant_op.constant(\n [-1, 0], shape=[1, 2]))\n\n def testPaddingsNonNegative2(self):\n with self.test_session(use_gpu=True):\n with self.assertRaisesRegexp(ValueError, \"must be non-negative\"):\n array_ops.pad(constant_op.constant(\n [1], shape=[1]),\n constant_op.constant(\n [-1, 0], shape=[1, 2]))\n\n def testPaddingsMaximum(self):\n with self.test_session(use_gpu=True):\n with self.assertRaises(Exception):\n array_ops.pad(constant_op.constant(\n [1], shape=[2]),\n constant_op.constant(\n [2, 0], shape=[1, 2]),\n mode=\"REFLECT\").eval()\n with self.assertRaises(Exception):\n array_ops.pad(constant_op.constant(\n [1], shape=[2]),\n constant_op.constant(\n [0, 3], shape=[1, 2]),\n mode=\"SYMMETRIC\").eval()\n\n def testInvalid(self):\n with self.test_session():\n x = [[1, 2, 3], [4, 5, 6]]\n with self.assertRaisesRegexp(ValueError, \"Unknown padding mode\"):\n array_ops.pad(x, [[1, 0], [2, 1]], mode=\"weird\").eval()\n\n def testPaddingTypes(self):\n paddings = [[1, 0], [2, 3], [0, 2]]\n inputs = np.random.randint(-100, 100, (4, 4, 3)).astype(np.float32)\n for mode in (\"CONSTANT\", \"REFLECT\", \"SYMMETRIC\", \"reflect\", \"symmetric\",\n \"constant\"):\n for padding_dtype in [dtypes.int32, dtypes.int64]:\n np_val = self._npPad(inputs,\n paddings,\n mode=mode,\n constant_values=0)\n with self.test_session(use_gpu=True):\n tf_val = array_ops.pad(inputs,\n constant_op.constant(paddings, 
padding_dtype),\n mode=mode,\n constant_values=0)\n out = tf_val.eval()\n self.assertAllEqual(np_val, out)\n self.assertShapeEqual(np_val, tf_val)\n\n def testIntTypes(self):\n # TODO(touts): Figure out why the padding tests do not work on GPU\n # for int types and rank > 2.\n for t in [np.int32, np.int64]:\n self._testAll(\n np.random.randint(-100, 100, (4, 4, 3)).astype(t),\n [[1, 0], [2, 3], [0, 2]], 0)\n self._testAll(\n np.random.randint(-100, 100, (4, 2, 1, 3)).astype(t),\n [[0, 0], [0, 0], [0, 0], [0, 0]], -1234)\n\n def testFloatTypes(self):\n for t in [np.float32, np.float64]:\n self._testAll(np.random.rand(2, 5).astype(t), [[1, 0], [2, 0]], 0.0)\n self._testAll(np.random.rand(2, 3, 4).astype(t),\n [[0, 0], [0, 0], [0, 0]], -1234.0)\n self._testAll(np.random.rand(0, 3, 4).astype(t),\n [[0, 0], [2, 1], [2, 3]], 0.0)\n\n def testComplexTypes(self):\n for t in [np.complex64, np.complex128]:\n x = np.random.rand(2, 5).astype(t)\n self._testAll(x + 1j * x, [[1, 0], [2, 0]], 1234.0 - 1234.0j)\n x = np.random.rand(3, 2, 1, 1).astype(t)\n self._testAll(x + 1j * x, [[0, 0], [0, 0], [0, 0], [0, 0]], 0 + 0j)\n\n def testShapeFunctionEdgeCases(self):\n # Unknown paddings shape.\n inp = constant_op.constant(0.0, shape=[4, 4, 4, 4])\n padded = array_ops.pad(inp, array_ops.placeholder(dtypes.int32))\n self.assertEqual([None, None, None, None], padded.get_shape().as_list())\n\n # Unknown input shape.\n inp = array_ops.placeholder(dtypes.float32)\n padded = array_ops.pad(inp, [[2, 2], [2, 2]])\n self.assertEqual([None, None], padded.get_shape().as_list())\n\n # Unknown input and paddings shape.\n inp = array_ops.placeholder(dtypes.float32)\n padded = array_ops.pad(inp, array_ops.placeholder(dtypes.int32))\n self.assertAllEqual(None, padded.get_shape().ndims)\n\n def testPartialShapeInformation(self):\n unknown = array_ops.placeholder(dtypes.int32)\n\n # Known input shape, partial unknown padding (one dimension).\n inp = constant_op.constant(0.0, shape=[4, 4])\n padded = array_ops.pad(inp, [[1, 2], unknown])\n self.assertEqual([7, None], padded.get_shape().as_list())\n\n # Known input shape, partial unknown padding (begin).\n inp = constant_op.constant(0.0, shape=[4, 4])\n padded = array_ops.pad(inp, [[unknown, 0], [1, 2]])\n self.assertEqual([None, 7], padded.get_shape().as_list())\n\n # Known input shape, partial unknown padding (end).\n inp = constant_op.constant(0.0, shape=[4, 4])\n padded = array_ops.pad(inp, [[1, 2], [0, unknown]])\n self.assertEqual([7, None], padded.get_shape().as_list())\n\n # Unknown input shape, partial unknown padding (one dimension).\n padded = array_ops.pad(unknown, [[1, 2], unknown])\n self.assertEqual([None, None], padded.get_shape().as_list())\n\n # Unknown input shape (rank known), partial unknown padding (one dimension).\n rank_known = array_ops.placeholder(dtypes.int32)\n rank_known.set_shape([None, None])\n padded = array_ops.pad(rank_known, [[1, 2], unknown])\n self.assertEqual([None, None], padded.get_shape().as_list())\n\n # Known input shape, partial unknown padding (begin), with constant begin.\n inp = constant_op.constant(0.0, shape=[4, 4])\n padded = array_ops.pad(inp, [[constant_op.constant(1, shape=[]), 2],\n [0, unknown]])\n self.assertEqual([7, None], padded.get_shape().as_list())\n\n # Known input shape, partial unknown padding (begin), with constant dim.\n inp = constant_op.constant(0.0, shape=[4, 4])\n padded = array_ops.pad(inp,\n [constant_op.constant(1, shape=[2]), [0, unknown]])\n self.assertEqual([6, None], padded.get_shape().as_list())\n\n 
def testScalars(self):\n paddings = np.zeros((0, 2), dtype=np.int32)\n inp = np.asarray(7)\n with self.test_session(use_gpu=True):\n tf_val = array_ops.pad(inp, paddings)\n out = tf_val.eval()\n self.assertAllEqual(inp, out)\n self.assertShapeEqual(inp, tf_val)\n\n def testPadTypes(self):\n for dtype in [dtypes.int32, dtypes.int64]:\n paddings = np.zeros((0, 2))\n inp = np.asarray(7)\n with self.test_session(use_gpu=True):\n tf_val = array_ops.pad(inp, constant_op.constant(paddings, dtype=dtype))\n out = tf_val.eval()\n self.assertAllEqual(inp, out)\n self.assertShapeEqual(inp, tf_val)\n\nif __name__ == \"__main__\":\n test.main()\n","repo_name":"benoitsteiner/tensorflow-opencl","sub_path":"tensorflow/python/kernel_tests/pad_op_test.py","file_name":"pad_op_test.py","file_ext":"py","file_size_in_byte":11789,"program_lang":"python","lang":"en","doc_type":"code","stars":468,"dataset":"github-code","pt":"31"} +{"seq_id":"13095308032","text":"import gdb\n\n#class mmap_break (gdb.Breakpoint):\n# def stop (self):\n# inf_val = gdb.parse_and_eval(\"foo\")\n# if inf_val == 3:\n# return True\n# return False\n\ndef handle_do_mmap(event):\n gdb.execute(\"finish\")\n global hit_wait\n hit_wait = True\n #print(\"hit set \", hit_wait)\n\nhandled_breaks = {'do_mmap' : handle_do_mmap}\n\n\n\ndef stop_handler (event):\n global hit_wait\n global handled_breaks\n if isinstance(event, gdb.BreakpointEvent):\n if event.breakpoint.location in handled_breaks:\n #print('we know how handle it')\n handled_breaks[event.breakpoint.location](event)\n return\n if isinstance(event, gdb.StopEvent):\n #print(\"Got stop...\")\n #if not hit_wait:\n # print(\"No hit, you need handle it\")\n # return\n hit_wait = False\n rax = int(gdb.parse_and_eval(\"$rax\")) & 0xffffffffffffffff\n #print(\"got value %x\"%rax)\n if rax&0x8000000000000000:\n print(\"wut?\")\n else:\n # print(\"continue...\")\n gdb.execute(\"c\")\n return\n return\n\n#gdb.execute(\"finish\")\n#rax = int(gdb.parse_and_eval(\"$rax\")) & 0xffffffffffffffff\n\ngdb.events.stop.connect(stop_handler)\n\n\n","repo_name":"blackzert/aslur","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"31"} +{"seq_id":"13128693534","text":"#https://www.acmicpc.net/problem/3190\n\nimport sys\n\ndef eat_apple(head,apple):\n for i in range(len(apple)):\n if head==apple[i]:\n apple[i],apple[-1]=apple[-1],apple[i]\n apple.pop()\n return True\n return False\n\ndef game_over(snake,face,N,t_face,t_speed):\n head=snake[0]\n body=snake[1]\n tail=snake[2]\n if head[face]==0 or head[face]==N+1:\n## print('==wall==')\n return True\n if len(body)>2:\n for i in range(1,len(body)-2):\n face=body[i-1][1]\n speed=body[i-1][2]\n x=-1*((speed+1)//2)\n y=(speed-1)//2\n if head[face-1]==body[i][0][face-1]:\n if body[i+x][0][face]<=head[face] and head[face]<=body[i+y][0][face]:\n## print('==body==')\n return True\n \n if head[t_face-1]==body[0][0][t_face-1]:\n if t_speed>0:\n if tail[t_face]<=head[t_face] and head[t_face]<=body[0][0][t_face]:\n return True\n else:\n if tail[t_face]>=head[t_face] and head[t_face]>=body[0][0][t_face]:\n return True\n## print('==tail==')\n return False \n\n\n\ndef turn(t,move,face,speed):\n i=0\n if move[0]0:\n body.append([head.copy(),h_face,h_speed])\n head[h_face]+=h_speed\n if game_over(snake,h_face,N,t_face,t_speed):\n## print(snake)\n break\n\n if eat_apple(head,apple):\n continue\n else:\n if len(body)>0:\n if tail==body[0][0]:\n 
t_face=body[0][1]\n t_speed=body[0][2]\n body.pop(0)\n tail[t_face]+=t_speed\n## print(body)\n\nprint(time)\n","repo_name":"yehoon17/beakjoon","sub_path":"3190.py","file_name":"3190.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14551109132","text":"import socket, sys\nimport struct\nfrom struct import *\nfrom dnslib import DNSRecord\nimport binascii\nimport time\nimport pprint\n# import FilterAgent.network\nclass compiledPacket:\n \n def __init__(self):\n self.ips = []\n self.tcps = []\n self.dns = []\n \n def add(self,packettype,packet):\n if packettype == 'IP':\n self.ips.append(packet)\n elif packettype == 'TCP':\n self.tcps.append(packet)\n elif packettype == 'DNS' or packettype == 'UDP':\n self.dns.append(packet)\n else:\n raise error\n \nclass dataPacket:\n \n '''\n Data class that can be sent to the central\n '''\n def __init__(self,ipVersion,ipHLength,ttl,protocol,sourceAddr,destAddr,sourcePort,destPort,seqNum,Ack):\n self.ipVersion = ipVersion\n self.ipHLength = ipHLength\n self.ttl = ttl\n self.protocol = protocol\n self.sourceAddr= sourceAddr\n self.destAddr = destAddr\n self.sourcePort= sourcePort\n self.destPort = destPort\n self.seqNum = seqNum\n self.Ack = Ack\n\nclass udpPacket:\n '''\n UDP stored data\n '''\n def __init__(self,sourcePort,destPort,length,data):\n self.sourcePort = sourcePort\n self.destPort = destPort\n self.length = length\n self.data = data\n \ndef eth_addr (a) :\n\tb = \"%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\" % (ord(a[0]) , ord(a[1]) , ord(a[2]), ord(a[3]), ord(a[4]) , ord(a[5]))\n\treturn b\n\ndef decode_label(message, offset):\n\tlabels = []\n\t\t\t\n\twhile True:\n\t\tlength, = struct.unpack_from(\"!B\", message, offset)\n\t\t\n\t\tif(length & 0xC0) == 0xC0:\n\t\t\tpointer, = struct.unpack_from(\"!H\", message, offset)\n\t\t\toffset += 2\n\t\t\treturn labels + decode_label(message, pointer & 0x3FFF),offset\n\t\t\t\n\t\tif(length & 0xC0) != 0x00:\n\t\t\traise StandardError(\"Unknown labelencoding\")\n\t\t\t\n\t\toffset += 1\n\t\t\n\t\tif length == 0:\n\t\t\treturn labels, offset\n\t\t\n\t\tlabels.append(*struct.unpack_from(\"!%ds\" % length, offset))\n\t\toffset += length\n\nDNS_QUERY_SECTION_FORMAT = struct.Struct(\"!2H\")\n\ndef decode_question_section(message, offset, qdcount):\n\tquestions = []\n\tfor _ in range(qdcount):\n\t\tqname, offset = decode_label(message, offset)\n\t\t\n\t\tqtype, qclass = DNS_QUERY_SECTION_FORMAT.unpack_from(message,offset)\n\t\toffset += DNS_QUERY_SECTION_FORMAT.size\n\t\tprint(message)\n\t\tquestion = {\"domain_name\":qname,\n\t\t\t\t\t\"query_type\":qtype,\n\t\t\t\t\t\"query_class\":qclass}\n\t\t\t\t\t\n\t\tquestions.append(question)\n\treturn questions, offset\n\n\nDNS_QUERY_MESSAGE_HEADER = struct.Struct(\"!6H\")\n\t\ndef decode_dns_message(message):\n\t\n\tid, misc,qdcount,ancount,nscount,arcount = DNS_QUERY_MESSAGE_HEADER.unpack_from(message)\n\t\n\tqr = (misc & 0x8000) != 0\n\topcode = (misc & 0x7800) >> 11\n\taa = (misc & 0x0400) != 0\n\ttc = (misc & 0x200) != 0\n\trd = (misc & 0x100) != 0\n\tra = (misc & 0x80) != 0\n\tz = (misc & 0x70) != 0\n\trcode = misc & 0xF\n\t\n\toffset = DNS_QUERY_MESSAGE_HEADER.size\n\tquestions, offset = decode_question_section(message, offset, qdcount)\n\t\n\tresult = { \"id\": id,\n\t\t\t \"is_response\": qr,\n\t\t\t \"opcode\":opcode,\n\t\t\t \"is_authoritative\": aa,\n\t\t\t \"is_truncated\":tc,\n\t\t\t \"recursion_desired\":rd,\n\t\t\t \"recursion_available\":ra,\n\t\t\t 
\"reserved\":z,\n\t\t\t \"response_code\":rcode,\n\t\t\t \"question_count\":qdcount,\n\t\t\t \"answer_count\":ancount,\n\t\t\t \"authority_count\":nscount,\n\t\t\t \"additional_count\":arcount,\n\t\t\t \"questions\": questions}\n\treturn result\n\t\t\t\ndef sniff(duration=5): \n#create an INET, STREAMing socket\n try:\n s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(0x0003))\n except socket.error as msg:\n print ('Socket could not be created. Error Code : ' + str(msg))\n sys.exit()\n \n cPacket = compiledPacket()\n # print(cPacket)\n start = time.time()\n print('start time = '+str(start))\n # packet = s.recvfrom(65565)\n # print(binascii.unhexlify(packet[0]))\n # receive a packet\n while True:\n 'started!'\n packet = s.recvfrom(65565)\n #packet string from tuple\n fullSocketReturn = packet\n packet = packet[0]\n try:\n d = DNSRecord.parse(packet)\n print(d)\n except struct.error as err:\n continue\n except RuntimeError as re:\n continue\n #parse ethernet header\n eth_length = 14\n eth_header = packet[:eth_length]\n eth = unpack('!6s6sH', eth_header)\n eth_protocol = socket.ntohs(eth[2])\n #print('Destination MAC' +eth_addr(packet[0:6])+' Source MAC '+eth_addr(packet[6:12])+' Protocol : '+str(eth_protocol))\n \n if eth_protocol == 8:\n \n ip_header = packet[eth_length:20+eth_length]\n \n #now unpack them :)\n iph = unpack('!BBHHHBBH4s4s' , ip_header)\n \n version_ihl = iph[0]\n version = version_ihl >> 4\n ihl = version_ihl & 0xF\n \n iph_length = ihl * 4\n \n ttl = iph[5]\n protocol = iph[6]\n s_addr = socket.inet_ntoa(iph[8]);\n d_addr = socket.inet_ntoa(iph[9]);\n \n #print ('IP Version : ' + str(version) + ' IP Header Length : ' + str(ihl) + ' TTL : ' + str(ttl) + ' Protocol : ' + str(protocol) + ' Source Address : ' + str(s_addr) + ' Destination Address : ' + str(d_addr))\n #print('\\n')\n if protocol == 6: \n tcp_header = packet[iph_length:iph_length+20]\n \n #now unpack them :)\n tcph = unpack('!HHLLBBHHH' , tcp_header)\n \n source_port = tcph[0]\n dest_port = tcph[1]\n sequence = tcph[2]\n acknowledgement = tcph[3]\n doff_reserved = tcph[4]\n tcph_length = doff_reserved >> 4\n \n #print ('TCP PACKET ::: Source Port : ' + str(source_port) + ' Dest Port : ' + str(dest_port) + ' Sequence Number : ' + str(sequence) + ' Acknowledgement : ' + str(acknowledgement) + ' TCP header length : ' + str(tcph_length))\n \n h_size = iph_length + tcph_length * 4\n data_size = len(packet) - h_size\n \n #get data from the packet\n data = str(packet[h_size:])\n packet = dataPacket(str(version),str(ihl),str(ttl),str(protocol),str(s_addr),\n str(d_addr),str(source_port),str(dest_port),str(sequence),str(acknowledgement));\n cPacket.add('TCP', packet)\n if protocol == 17:\n u = iph_length + eth_length\n udph_length = 8\n udph_header = packet[u:u+8]\n udph = unpack('!HHHH',udph_header)\t\t\t\t\n sourcePort = udph[0]\n destPort = udph[1]\n length = udph[2]\n checksum = udph[3]\n # print(udph)\n h_size = eth_length + iph_length + udph_length\n data_size = len(packet) - h_size\n print ('UDP PACKET ::: Source Port : ' + str(sourcePort) + ' Dest Port : ' + str(destPort) + ' Length : ' \n + str(length) + ' Checksum : ' + str(checksum)+' Data size :'+str(data_size))\n print()\n # print(packet[h_size:])\n #get data from the packet\n data = packet[h_size:]\n # print ('UDP DATA ENCODED\\n')\n # print(type(data))\n # print(repr(data))\n \n try:\n # print(data.decode('ascii'))\n # d = DNSRecord.parse(packet)\n # print(d)\n pprint.pprint(decode_dns_message(packet))\n except UnicodeDecodeError as err:\n 
# print(err)\n continue\n except binascii.Error as baerr:\n # print(baerr)\n continue\n except struct.error as strerr:\n # print(strerr)\n continue\n except TypeError as te:\n # print(te)\n continue\n except RuntimeError as re:\n continue\n packet = udpPacket(sourcePort, destPort, length, data)\n cPacket.add('UDP', packet)\n #return packet \n #print ('Data : ' + data)\n return cPacket\n # printw\n\ntry:\n\tpacket = sniff(0)\nexcept KeyboardInterrupt as ki:\n print('Exiting sniffer! Collected data :: \\n')\n print(packet)\n sys.exit()","repo_name":"hugokuijzer/Uxx","sub_path":"FilterAgent/ltcp.py","file_name":"ltcp.py","file_ext":"py","file_size_in_byte":8652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1778120262","text":"from django.shortcuts import render\n# from django.http import HttpResponse\nfrom .forms import ContactForm\nfrom .models import Contact\nfrom django.http import HttpResponseRedirect\n\n# Create your views here.\ndef contact(request):\n form = ContactForm()\n return render(request,\"contato.html\",{\"form\":form})\n\ndef saving_contact(request):\n form = ContactForm(request.POST)\n if form.is_valid():\n contato_enviado = Contact(name=form.cleaned_data['name'],\n email=form.cleaned_data['email'],\n phone=form.cleaned_data['phone'],\n subject=form.cleaned_data['subject'],\n message=form.cleaned_data['message'])\n contato_enviado.save()\n return HttpResponseRedirect('/')\n","repo_name":"alexzwir/zwodonto_old","sub_path":"website/contact/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29561432875","text":"import logging\nimport os\nimport re\nimport subprocess\nimport sys\nimport mimetypes\nimport shutil\nimport zipfile\nimport smtplib\nimport datetime\nfrom os.path import dirname, abspath\nfrom pathlib import Path\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email import encoders\n\nfrom xmlrpc.client import ServerProxy\nimport git\nimport yaml\nimport xlsxwriter\n\nfrom googleapiclient import discovery, errors\nfrom googleapiclient.http import MediaFileUpload\nfrom httplib2 import Http\nfrom oauth2client import file, client, tools\n\nSCOPES = 'https://www.googleapis.com/auth/drive'\nCLIENT_SECRET_FILE = 'client_secret.json'\nREPORT_XLSX = \"report.xlsx\"\nREPORT_TXT = \"report.txt\"\nCOMMASPACE = ', '\n\ndevices_in_use = []\nPROJECT_DIR = dirname(dirname(abspath(__file__)))\n\n# ****************************************************************************\n# Mail\n# ****************************************************************************\n\n\ndef status_dict2summary_html(status_dict):\n \"\"\"Creates HTML formatted summary from status dictionary\n :param status_dict: status dictionary, where key is status and value is\n status count\n :return: HTML formatted summary\n \"\"\"\n summary = \"\"\"

<h3>Summary</h3><table>
\n \"\"\"\n total_count = 0\n\n summary += \"\"\"\n \n \n \"\"\"\n\n for status in sorted(status_dict.keys()):\n count = status_dict[status]\n summary += \"\"\"\n \n \n \"\"\".format(status, count)\n total_count += count\n\n summary += \"\"\"\n \n \n \"\"\".format(total_count)\n summary += \"
StatusCount
{}{}
Total{}
\"\n\n if \"PASS\" in status_dict:\n pass_rate = \\\n '{0:.2f}%'.format((status_dict[\"PASS\"] / float(total_count) * 100))\n else:\n pass_rate = '{0:.2f}%'.format(0)\n summary += \"

<p><b>PassRate = {}</b></p>
\".format(pass_rate)\n\n return summary\n\n\ndef url2html(url, msg):\n \"\"\"Creates HTML formatted URL with results\n :param url: URL\n :param msg: URL description\n :return: HTML formatted URL\n \"\"\"\n return \"{}\".format(url, msg)\n\n\ndef regressions2html(regressions, descriptions):\n \"\"\"Creates HTML formatted message with regressions\n :param regressions_list: list of regressions found\n :return: HTML formatted message\n \"\"\"\n msg = \"

<h3>Regressions</h3>
\"\n\n regressions_list = []\n for name in regressions:\n regressions_list.append(\n name + \" - \" + descriptions.get(name, \"no description\"))\n\n if regressions_list:\n for name in regressions_list:\n msg += \"

<p>{}</p>
\".format(name)\n else:\n msg += \"

<p>No regressions found</p>
\"\n\n return msg\n\n\ndef send_mail(cfg, subject, body, attachments=None):\n \"\"\"\n :param cfg: Mailbox configuration\n :param subject: Mail subject\n :param body: Mail boyd\n :return: None\n \"\"\"\n\n msg = MIMEMultipart()\n msg['From'] = cfg['sender']\n msg['To'] = COMMASPACE.join(cfg['recipients'])\n msg['Subject'] = subject\n\n msg.attach(MIMEText(body, 'html'))\n\n # Attach the files if there is any\n if attachments:\n for filename in attachments:\n file_type = mimetypes.guess_type(filename)\n if file_type[0] is None:\n ext = os.path.splitext(filename)[1]\n print('MIME Error: File extension %s is unknown. '\n 'Try to associate it with app.' % ext)\n continue\n mimetype = file_type[0].split('/', 1)\n attachment = MIMEBase(mimetype[0], mimetype[1])\n attachment.set_payload(open(filename, 'rb').read())\n encoders.encode_base64(attachment)\n attachment.add_header('Content-Disposition', 'attachment',\n filename=os.path.basename(filename))\n msg.attach(attachment)\n\n server = smtplib.SMTP(cfg['smtp_host'], cfg['smtp_port'])\n if 'start_tls' in cfg and cfg['start_tls']:\n server.starttls()\n if 'passwd' in cfg:\n server.login(cfg['sender'], cfg['passwd'])\n server.sendmail(cfg['sender'], cfg['recipients'], msg.as_string())\n server.quit()\n\n\n# ****************************************************************************\n# Google Drive\n# ****************************************************************************\nclass GDrive:\n def __init__(self, cfg):\n self.basedir_id = cfg['root_directory_id']\n self.cwd_id = self.basedir_id\n credentials = cfg['credentials_file']\n\n store = file.Storage(credentials)\n creds = store.get()\n if not creds or creds.invalid:\n path_abs = os.path.abspath(credentials)\n path = os.path.dirname(path_abs)\n\n flow = client.flow_from_clientsecrets(\n os.path.join(path, CLIENT_SECRET_FILE), SCOPES)\n creds = tools.run_flow(flow, store)\n self.service = discovery.build('drive', 'v3',\n http=creds.authorize(Http()))\n\n def pwd(self):\n return self.cwd_id\n\n def mkdir(self, name):\n file_metadata = {\n 'name': name,\n 'mimeType': 'application/vnd.google-apps.folder',\n 'parents': [self.pwd()]\n }\n\n try:\n f = self.service.files().create(\n body=file_metadata,\n fields='id, name, webViewLink').execute()\n except errors.HttpError:\n sys.exit(1)\n\n return f\n\n def ls(self):\n results = {}\n\n page_token = None\n while True:\n try:\n response = self.service.files().list(\n q=\"'{}' in parents\".format(self.pwd()),\n spaces='drive',\n fields='nextPageToken, files(id, name)',\n pageToken=page_token).execute()\n except errors.HttpError:\n sys.exit(1)\n\n for f in response.get('files', []):\n results[f.get('name')] = f\n page_token = response.get('nextPageToken', None)\n if page_token is None:\n break\n\n return results\n\n def cp(self, name):\n if not os.path.exists(name):\n print(\"File not found\")\n sys.exit(1)\n\n basename = os.path.basename(name)\n mime_type, encoding = mimetypes.guess_type(basename)\n\n file_metadata = {\n 'name': basename,\n 'parents': [self.pwd()]\n }\n\n media = MediaFileUpload(\n name,\n mimetype=mime_type)\n\n try:\n f = self.service.files().create(\n body=file_metadata,\n media_body=media,\n fields='id, name').execute()\n except errors.HttpError as err:\n print(err)\n sys.exit(1)\n\n return f\n\n def cd(self, dir_=None):\n \"\"\"\n :param dir_: file object or id of the folder\n \"\"\"\n if not dir_:\n self.cwd_id = self.basedir_id\n elif isinstance(dir_, str):\n self.cwd_id = dir_\n else:\n self.cwd_id = dir_.get('id')\n\n\nclass 
Drive(GDrive):\n def __init__(self, cfg):\n GDrive.__init__(self, cfg)\n self.url = None\n\n def new_workdir(self, iut):\n files = self.ls()\n if iut in list(files.keys()):\n dir_ = files[iut]\n else:\n dir_ = self.mkdir(iut)\n self.cd(dir_)\n dir_ = self.mkdir(datetime.datetime.now().strftime(\"%Y_%m_%d_%H_%M\"))\n self.cd(dir_)\n return \"{}\".format(dir_.get('webViewLink'))\n\n def upload(self, f):\n print(\"Uploading {} ...\".format(f))\n self.cp(f)\n print(\"Done\")\n\n def upload_folder(self, folder, excluded=None):\n def recursive(directory):\n with os.scandir(directory) as it:\n for f in it:\n if excluded and (f.name in excluded or\n os.path.splitext(f.name)[1] in excluded):\n continue\n\n if f.is_dir():\n parent = self.pwd()\n dir_ = self.mkdir(f.name)\n self.cd(dir_)\n recursive(os.path.join(directory, f.name))\n self.cd(parent)\n else:\n filepath = os.path.relpath(os.path.join(directory, f.name))\n self.upload(filepath)\n\n recursive(folder)\n\n\n# ****************************************************************************\n# .xlsx spreadsheet file\n# ****************************************************************************\n# FIXME don't use statuses from status_dict, count it from results dict instead\ndef make_report_xlsx(results_dict, status_dict, regressions_list,\n descriptions):\n \"\"\"Creates excel file containing test cases results and summary pie chart\n :param results_dict: dictionary with test cases results\n :param status_dict: status dictionary, where key is status and value is\n status count\n :param regressions_list: list of regressions found\n :return:\n \"\"\"\n\n errata = {}\n\n try:\n with open('errata.yaml', 'r') as stream:\n errata = yaml.safe_load(stream)\n except Exception as exc:\n print(exc)\n\n if errata is None:\n errata = {}\n\n header = \"AutoPTS Report: \" \\\n \"{}\".format(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\"))\n workbook = xlsxwriter.Workbook(REPORT_XLSX)\n worksheet = workbook.add_worksheet()\n chart = workbook.add_chart({'type': 'pie',\n 'subtype': 'percent_stacked'})\n\n # Add a bold format to use to highlight cells.\n bold = workbook.add_format({'bold': True})\n\n # Write data headers.\n worksheet.write('A1', header)\n worksheet.write_row('A3', ['Test Case', 'Result'])\n\n row = 3\n col = 0\n\n for k, v in list(results_dict.items()):\n worksheet.write(row, col, k)\n if k in errata:\n v += ' - ERRATA ' + errata[k]\n worksheet.write(row, col + 1, v)\n if k in list(descriptions.keys()):\n worksheet.write(row, col + 2, descriptions[k])\n if k in regressions_list:\n worksheet.write(row, col + 3, \"REGRESSION\")\n row += 1\n\n summary_row = 2\n summary_col = 5\n\n worksheet.write(summary_row, summary_col, 'Summary')\n end_row = summary_row\n for status in sorted(status_dict.keys()):\n count = status_dict[status]\n end_row += 1\n worksheet.write_row(end_row, summary_col, [status, count])\n\n # Total TCS\n row = end_row + 2\n col = summary_col\n total_count = len(results_dict)\n worksheet.write(row, col, \"Total\")\n worksheet.write(row, col + 1, \"{}\".format(total_count))\n worksheet.write(row + 1, col, \"PassRate\", bold)\n if \"PASS\" in status_dict:\n pass_rate = \\\n '{0:.2f}%'.format((status_dict[\"PASS\"] / float(total_count) * 100))\n else:\n pass_rate = '{0:.2f}%'.format(0)\n worksheet.write(row + 1, col + 1, pass_rate, bold)\n\n chart.set_title({'name': 'AutoPTS test results'})\n chart.add_series({\n 'categories': ['Sheet1', summary_row + 1, summary_col,\n end_row, summary_col],\n 'values': ['Sheet1', summary_row + 1, 
summary_col + 1,\n end_row, summary_col + 1],\n })\n\n worksheet.insert_chart('H2', chart)\n workbook.close()\n\n return os.path.join(os.getcwd(), REPORT_XLSX)\n\n\n# ****************************************************************************\n# .txt result file\n# ****************************************************************************\ndef make_report_txt(results_dict, zephyr_hash):\n \"\"\"Creates txt file containing test cases results\n :param results_dict: dictionary with test cases results\n :return: txt file path\n \"\"\"\n\n filename = os.path.join(os.getcwd(), REPORT_TXT)\n f = open(filename, \"w\")\n\n errata = {}\n\n try:\n with open('errata.yaml', 'r') as stream:\n errata = yaml.safe_load(stream)\n except Exception as exc:\n print(exc)\n\n if errata is None:\n errata = {}\n\n f.write(\"%s\\n\" % zephyr_hash)\n for tc, result in list(results_dict.items()):\n if tc in errata:\n result += ' - ERRATA ' + errata[tc]\n\n # The frist id in the test case is test group\n tg = tc.split('/')[0]\n f.write(\"%s%s%s\\n\" % (tg.ljust(8, ' '), tc.ljust(32, ' '), result))\n\n f.close()\n\n return filename\n\n\n# ****************************************************************************\n# Miscellaneous\n# ****************************************************************************\ndef archive_recursive(dir_path):\n \"\"\"Archive directory recursively\n :return: newly created zip file path\n \"\"\"\n zip_file_path = os.path.join(os.path.dirname(dir_path),\n os.path.basename(dir_path) + '.zip')\n with zipfile.ZipFile(zip_file_path, 'w', allowZip64=True) as zf:\n for root, dirs, files in os.walk(dir_path):\n for file_or_dir in files + dirs:\n zf.write(\n os.path.join(root, file_or_dir),\n os.path.relpath(os.path.join(root, file_or_dir),\n os.path.join(dir_path, os.path.pardir)))\n\n return zip_file_path\n\n\ndef archive_testcases(dir_path, depth=3):\n def recursive(directory, depth):\n depth -= 1\n with os.scandir(directory) as it:\n for f in it:\n if f.is_dir():\n if depth > 0:\n recursive(os.path.join(directory, f.name), depth)\n else:\n filepath = os.path.relpath(os.path.join(directory, f.name))\n archive_recursive(filepath)\n shutil.rmtree(filepath)\n\n recursive(dir_path, depth)\n return dir_path\n\n\ndef upload_bpv_logs(gdrive, args):\n \"\"\"Copy Bluetooth Protocol Viewer logs from auto-pts servers.\n :param gdrive: to upload the logs\n :param server_addr: list of servers addresses\n :param server_port: list of servers ports\n \"\"\"\n excluded = ['SIGDatabase', 'logfiles', '.pqw6', '.xml', '.txt']\n logs_folder = 'tmp/' + args.workspace\n\n shutil.rmtree(logs_folder, ignore_errors=True)\n\n if sys.platform == 'win32':\n workspace_path = get_workspace(args.workspace)\n shutil.copytree(workspace_path, logs_folder)\n archive_testcases(logs_folder, depth=3)\n gdrive.upload_folder(logs_folder, excluded=excluded)\n delete_bpv_logs(workspace_path)\n return\n\n server_addr = args.ip_addr\n server_port = args.srv_port\n\n for i in range(len(server_addr)):\n if i != 0 and server_addr[i] in server_addr[0:i]:\n continue\n\n with ServerProxy(\"http://{}:{}/\".format(server_addr[i], server_port[i]),\n allow_none=True,) as proxy:\n file_list = proxy.list_workspace_tree(args.workspace)\n if len(file_list) == 0:\n continue\n\n workspace_root = file_list.pop()\n while len(file_list) > 0:\n file_path = file_list.pop(0)\n try:\n file_bin = proxy.copy_file(file_path)\n\n if not os.path.splitext(file_path)[1] in ['.pts', '.pqw6']:\n proxy.delete_file(file_path)\n\n if file_bin is None:\n continue\n\n 
file_path = '/'.join([logs_folder,\n file_path[len(workspace_root) + 1:]\n .replace('\\\\', '/')])\n Path(os.path.dirname(file_path)).mkdir(parents=True,\n exist_ok=True)\n\n with open(file_path, 'wb') as handle:\n handle.write(file_bin.data)\n except BaseException as e:\n logging.exception(e)\n\n if os.path.exists(logs_folder):\n archive_testcases(logs_folder, depth=3)\n gdrive.upload_folder(logs_folder, excluded=excluded)\n\n\ndef get_workspace(workspace):\n for root, dirs, files in os.walk(os.path.join(PROJECT_DIR, 'workspaces'),\n topdown=True):\n for name in dirs:\n if name == workspace:\n return os.path.join(root, name)\n return None\n\n\ndef delete_bpv_logs(workspace_path):\n with os.scandir(workspace_path) as it:\n for f in it:\n if f.is_dir():\n shutil.rmtree(f.path, ignore_errors=True)\n\n\ndef update_sources(repo_path, remote, branch, stash_changes=False, update_repo=True):\n \"\"\"GIT Update sources\n :param repo: git repository path\n :param remote: git repository remote name\n :param branch: git repository branch name\n :param stash_changes: stash non-committed changes\n :param update_repo: update repo\n :return: Commit SHA at HEAD\n \"\"\"\n repo = git.Repo(repo_path)\n\n if update_repo:\n print('Updating ' + repo_path)\n\n dirty = repo.is_dirty()\n if dirty and (not stash_changes):\n print('Repo is dirty. Not updating')\n return repo.git.describe('--always'), \\\n repo.git.show('-s', '--format=%H') + '-dirty'\n\n if dirty and stash_changes:\n print('Repo is dirty. Stashing changes')\n repo.git.stash('--include-untracked')\n\n repo.git.fetch(remote)\n repo.git.checkout('{}/{}'.format(remote, branch))\n\n return repo.git.describe('--always'), \\\n repo.git.show('-s', '--format=%H')\n\n\ndef update_repos(project_path, git_config):\n \"\"\"GIT Update sources\n :param project_path: path to project root\n :param git_config: dictionary with configuration of repositories\n :return: repos_dict with {key=repo name, {commit, desc}}\n \"\"\"\n project_path = os.path.abspath(project_path)\n repos_dict = {}\n\n for repo, conf in list(git_config.items()):\n repo_dict = {}\n if not os.path.isabs(conf[\"path\"]):\n repo_path = os.path.join(project_path, conf[\"path\"])\n else:\n repo_path = os.path.abspath(conf[\"path\"])\n\n project_path.join(repo_path)\n\n if 'update_repo' in conf:\n update_repo = conf[\"update_repo\"]\n else:\n update_repo = True\n\n desc, commit = update_sources(repo_path, conf[\"remote\"],\n conf[\"branch\"], conf[\"stash_changes\"],\n update_repo)\n repo_dict[\"commit\"] = commit\n repo_dict[\"desc\"] = desc\n repos_dict[repo] = repo_dict\n\n return repos_dict\n\n\ndef get_free_device(board=None):\n tty = None\n jlink = None\n\n snr_initials_for_debugger = {\n \"nrf52\": '68',\n \"nrf53\": '96'\n }\n\n com_index_for_debugger = {\n \"nrf52\": '00',\n \"nrf53\": '04'\n }\n\n debugger_snrs = subprocess.Popen('nrfjprog -i',\n shell=True,\n stdout=subprocess.PIPE\n ).stdout.read().decode()\n\n debugger_snrs = debugger_snrs.split()\n\n for d_snr in debugger_snrs:\n if d_snr[:2] != snr_initials_for_debugger[board]:\n continue\n\n d_tty = subprocess.Popen('ls -l /dev/serial/by-id' +\n '/usb-SEGGER_J-Link_000' + d_snr +\n '-if' + com_index_for_debugger[board],\n shell=True,\n stdout=subprocess.PIPE\n ).stdout.read().decode()\n reg = \"(?=tty).+$\"\n d_tty = re.findall(reg, d_tty)\n\n if d_snr not in devices_in_use:\n devices_in_use.append(d_snr)\n jlink = d_snr\n tty = '/dev/' + d_tty[0]\n break\n\n if not tty:\n sys.exit('No free device found!')\n\n if 
tty.startswith(\"COM\"):\n tty = \"/dev/ttyS\" + str(int(tty[\"COM\".__len__():]) - 1)\n\n return tty, jlink\n\n\ndef release_device(jlink_srn):\n if jlink_srn:\n devices_in_use.remove(jlink_srn)\n\n\ndef pre_cleanup():\n \"\"\"Perform cleanup before test run\n :return: None\n \"\"\"\n try:\n shutil.copytree(\"logs\", \"oldlogs\", dirs_exist_ok=True)\n shutil.rmtree(\"logs\")\n except OSError:\n pass\n\n\ndef cleanup():\n \"\"\"Perform cleanup\n :return: None\n \"\"\"\n try:\n pass\n except OSError:\n pass\n","repo_name":"hermabe/auto-pts","sub_path":"bot/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":20994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"37591316991","text":"\"\"\"\nKeep increasing the count of double steps, calculate the number of ways and add \nthem to the total ways count. Use factorial to calculate the number of ways.\nVery naive solution.\n\"\"\"\nclass Solution(object):\n def climbStairs(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n ones = n\n twos = 0\n count = 1\n for i in range(n // 2):\n twos += 1\n ones -= 2\n count += self.fact(twos + ones) // (self.fact(twos) * self.fact(ones))\n \n return count\n \n def fact(self, n):\n prod = 1\n while (n > 0):\n prod *= n\n n -= 1\n \n return prod\n","repo_name":"goelhardik/programming","sub_path":"leetcode/climbing_stairs/naive_sol.py","file_name":"naive_sol.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2732123385","text":"# %%\ns1 = 'Lorem ipsum dolor sit amet'\ns2 = \"Lorem ipsum dolor sit amet\"\n# %%\ns3 = \"\"\"Lorem ipsum dolor sit amet, \nconsectetur adipisicing elit, sed do eiusmod\ntempor incididunt ut labore et dolore magna aliqua. 
\n\nUt enim ad minim veniam,\nquis nostrud exercitation\"\"\"\n# %%\ndef foo():\n \"\"\"Function foo()\nPrints \"Hello Python\"\n\ncreated: 09.04.2021\nmodified: 09.04.2021\nauthor: me\n\"\"\"\n print('Hello Python')\n# %%\nfoo()\n# %%\nprint(foo.__doc__)\nprint(sorted.__doc__)\n# %%\nx = 12\n\nprint(f'value of {x} ** 2 is {x ** 2}')\n# %%\nprint('x' * 10)\n\n\n# %%\ns2[4]\n# %%\ns2[0:5]\n# %%\ns2[0:20:2]\n# %%\ns2[::2]\n# %%\ns2[::-1]\n# %%\n","repo_name":"DikranHachikyan/CPYT210409-PLDA","sub_path":"ex04_strings.py","file_name":"ex04_strings.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37329692972","text":"from collections import defaultdict\nfrom copy import deepcopy\nimport sys\n\ndo_print = True if '--print' in sys.argv else False\nfile_arg = [arg for arg in sys.argv[1:] if arg != '--print']\ninput = open(file_arg[0] if len(file_arg) >= 1 else 'input').read()\nlines = input.split('\\n')\n\ngrid = defaultdict(lambda: '.')\n\nfor line in lines:\n path = [(int(x), int(y)) for x, y in [pair.split(',') for pair in line.split(' -> ')]]\n\n for (sx, sy), (ex, ey) in list(zip(path, path[1:])):\n sx, ex = sorted([sx, ex])\n sy, ey = sorted([sy, ey])\n for y in range(sy, ey + 1):\n for x in range(sx, ex + 1):\n grid[(x,y)] = '#'\n\ndef get_bounds(grid: defaultdict[tuple[int, int], str]):\n min_x, max_x, min_y, max_y = 999999999, 0, 999999999, 0\n for (x,y), _ in grid.items():\n if x < min_x:\n min_x = x\n if x > max_x:\n max_x = x\n if y < min_y:\n min_y = y\n if y > max_y:\n max_y = y\n return min_x, max_x, min_y, max_y\n\ndef print_state(grid):\n minx, maxx, miny, maxy = get_bounds(grid)\n G = [[grid[(x,y)] for x in range(minx, maxx + 1)] for y in range(miny, maxy + 1)]\n for row in G:\n print(\"\".join(row))\n\ndef find_move(grid: defaultdict[tuple[int, int], str], sand: tuple[int, int], maxy: int):\n down = lambda p: (p[0], p[1]+1)\n diag_left = lambda p: (p[0]-1, p[1]+1)\n diag_right = lambda p: (p[0]+1, p[1]+1)\n\n for op in [down, diag_left, diag_right]:\n nx, ny = op(sand)\n\n if maxy != None and ny == maxy:\n return None\n # If we can move the sand in the current direction let's do it\n if grid[(nx, ny)] == '.':\n return (nx, ny)\n return None\n\n\n# Simulation time\ndef run_simulation(input_grid: defaultdict[tuple[int, int], str], part1: bool):\n grid = deepcopy(input_grid)\n _, _, _, maxy = get_bounds(grid)\n sand_origin = (500, 0)\n continue_simulation = True\n while continue_simulation:\n # Spawn 1 below the origin\n sand = (sand_origin[0], sand_origin[1])\n while True:\n move = find_move(grid, sand, None if part1 else maxy + 2)\n\n if move == None:\n grid[sand] = 'o'\n if part1 == False and sand == sand_origin:\n continue_simulation = False\n break\n else:\n if part1 and sand[1] > maxy:\n continue_simulation = False\n break\n sand = move\n \n return grid\n\n# Print initial grid state\ndo_print and print_state(grid)\n\n# Print part 1\np1_grid = run_simulation(grid, True)\ndo_print and print_state(p1_grid)\nprint(sum(1 for x in p1_grid.values() if x == 'o'))\n\n# Print part 2\np2_grid = run_simulation(grid, False)\ndo_print and print_state(p2_grid)\nprint(sum(1 for x in p2_grid.values() if x == 'o'))","repo_name":"timfennis/advent-of-code-2022","sub_path":"python/day14/day14.py","file_name":"day14.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} 
+{"seq_id":"27291739909","text":"from collections import deque\ndef findRedundantBrackets(s:str):\n stack = deque()\n i = 0\n while i < len(s):\n if s[i] == '(' or s[i] == '{' or s[i] == '[':\n stack.append(s[i])\n if (s[i] == '+' or s[i] == '-' or s[i] == '*' or s[i] == '/') and len(stack) != 0:\n while i != len(s):\n if s[i] == ')' or s[i] == '}' or s[i] == ']':\n stack.pop()\n break\n i += 1\n i += 1 \n if len(stack) == 0:\n return 'No'\n else:\n return 'Yes'\n\nprint(findRedundantBrackets('(a+b)'))","repo_name":"yugsharma1711/DS-ALGO","sub_path":"Stacks/redundantRemoval.py","file_name":"redundantRemoval.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40174158058","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nfrom turtle import *\n\n'''\n绘制皮卡丘头部\n\nRef:\n https://blog.csdn.net/weixin_44517301/article/details/94051615\n'''\n\ndef face(x, y):\n \"\"\"画脸\"\"\"\n begin_fill()\n penup()\n # 将海龟移动到指定的坐标\n goto(x, y)\n pendown()\n # 设置海龟的方向\n setheading(40)\n\n circle(-150, 69)\n fillcolor(\"#FBD624\")\n # 将海龟移动到指定的坐标\n\n penup()\n goto(53.14, 113.29)\n pendown()\n\n setheading(300)\n circle(-150, 30)\n setheading(295)\n circle(-140, 20)\n print(position())\n forward(5)\n setheading(260)\n circle(-80, 70)\n print(position())\n penup()\n goto(-74.43, -79.09)\n pendown()\n\n penup()\n # 将海龟移动到指定的坐标\n goto(-144, 103)\n pendown()\n setheading(242)\n circle(110, 35)\n right(10)\n forward(10)\n setheading(250)\n circle(80, 115)\n print(position())\n\n penup()\n goto(-74.43, -79.09)\n pendown()\n setheading(10)\n penup()\n goto(-144, 103)\n\n pendown()\n penup()\n goto(x, y)\n pendown()\n\n end_fill()\n\n # 下巴\n penup()\n goto(-50, -82.09)\n pendown()\n pencolor(\"#DDA120\")\n fillcolor(\"#DDA120\")\n begin_fill()\n setheading(-12)\n circle(120, 25)\n setheading(-145)\n forward(30)\n setheading(180)\n circle(-20, 20)\n setheading(143)\n forward(30)\n end_fill()\n # penup()\n # # 将海龟移动到指定的坐标\n # goto(0, 0)\n # pendown()\n\n\ndef eye():\n \"\"\"画眼睛\"\"\"\n # 左眼\n color(\"black\", \"black\")\n penup()\n goto(-110, 27)\n pendown()\n begin_fill()\n setheading(0)\n circle(24)\n end_fill()\n # 左眼仁\n color(\"white\", \"white\")\n penup()\n goto(-105, 51)\n pendown()\n begin_fill()\n setheading(0)\n circle(10)\n end_fill()\n # 右眼\n color(\"black\", \"black\")\n penup()\n goto(25, 40)\n pendown()\n begin_fill()\n setheading(0)\n circle(24)\n end_fill()\n # 右眼仁\n color(\"white\", \"white\")\n penup()\n goto(17, 62)\n pendown()\n begin_fill()\n setheading(0)\n circle(10)\n end_fill()\n\n\ndef cheek():\n \"\"\"画脸颊\"\"\"\n # 右边\n color(\"#9E4406\", \"#FE2C21\")\n penup()\n goto(-130, -50)\n pendown()\n begin_fill()\n setheading(0)\n circle(27)\n end_fill()\n\n # 左边\n color(\"#9E4406\", \"#FE2C21\")\n penup()\n goto(53, -20)\n pendown()\n begin_fill()\n setheading(0)\n circle(27)\n end_fill()\n\n\ndef nose():\n \"\"\"画鼻子\"\"\"\n color(\"black\", \"black\")\n penup()\n goto(-40, 38)\n pendown()\n begin_fill()\n circle(7, steps=3)\n end_fill()\n\n\ndef mouth():\n \"\"\"画嘴\"\"\"\n color(\"black\", \"#F35590\")\n # 嘴唇\n penup()\n goto(-10, 22)\n pendown()\n begin_fill()\n setheading(260)\n forward(60)\n circle(-11, 150)\n forward(55)\n print(position())\n penup()\n goto(-38.46, 21.97)\n pendown()\n end_fill()\n\n # 舌头\n color(\"#6A070D\", \"#6A070D\")\n begin_fill()\n penup()\n goto(-10.00, 22.00)\n pendown()\n penup()\n goto(-14.29, -1.7)\n pendown()\n penup()\n goto(-52, -5)\n pendown()\n penup()\n 
goto(-60.40, 12.74)\n pendown()\n penup()\n goto(-38.46, 21.97)\n pendown()\n penup()\n goto(-10.00, 22.00)\n pendown()\n\n end_fill()\n\n color(\"black\", \"#FFD624\")\n\n penup()\n goto(-78, 15)\n pendown()\n begin_fill()\n setheading(-25)\n for i in range(2):\n setheading(-25)\n circle(35, 70)\n\n end_fill()\n color(\"#AB1945\", \"#AB1945\")\n penup()\n goto(-52, -5)\n pendown()\n begin_fill()\n setheading(40)\n circle(-33, 70)\n goto(-16, -1.7)\n penup()\n goto(-18, -17)\n pendown()\n setheading(155)\n circle(25, 70)\n end_fill()\n\n\ndef ear():\n \"\"\"画耳朵\"\"\"\n # 左耳\n color(\"black\", \"#FFD624\")\n penup()\n goto(-145, 93)\n pendown()\n begin_fill()\n setheading(165)\n circle(-248, 50)\n right(120)\n circle(-248, 50)\n end_fill()\n color(\"black\", \"black\")\n penup()\n goto(-240, 143)\n pendown()\n begin_fill()\n setheading(107)\n circle(-170, 25)\n left(80)\n circle(229, 15)\n left(120)\n circle(300, 15)\n end_fill()\n\n # 右耳\n color(\"black\", \"#FFD624\")\n penup()\n goto(30, 136)\n pendown()\n begin_fill()\n setheading(64)\n circle(-248, 50)\n\n right(120)\n circle(-248, 50)\n end_fill()\n color(\"black\", \"black\")\n penup()\n goto(160, 200)\n pendown()\n begin_fill()\n setheading(52)\n circle(170, 25)\n left(116)\n circle(229, 15)\n left(71)\n circle(-300, 15)\n end_fill()\n\ndef setting():\n \"\"\"设置参数\"\"\"\n\n pensize(2)\n # 隐藏海龟\n hideturtle()\n speed(10)\n\n\ndef draw():\n \"\"\"主函数\"\"\"\n setting()\n face(-132, 115)\n eye()\n cheek()\n nose()\n mouth()\n ear()\n\n\nif __name__ == '__main__':\n draw()\n mainloop()\n","repo_name":"bushuhui/python_turtle","sub_path":"codes/5_pikachu2.py","file_name":"5_pikachu2.py","file_ext":"py","file_size_in_byte":4899,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"23095428678","text":"## Categorical Cross Entropy, Dynamically Calculated\n# Graham Williams\n# grw400@gmail.com\n\nimport numpy as np\n\n# 3 samples, 3 classes\nsoftmax_outputs = np.array([[0.7, 0.1, 0.2],\n [0.1, 0.5, 0.4],\n [0.02, 0.9, 0.08]])\n\n# class 0 = dog | class 1 = cat | class 2 = human\n# target = [dog, cat, cat]\nclass_targets = [0, 1, 1]\n\n# for the 0th row, return the 0th idx \n# for the 1st row, return the 1st idx\n# for the 2nd row, return the 1st idx\nprint(softmax_outputs[[0, 1, 2], class_targets])\n\n# len(softmax_outputs) = 3\n# range(3) = 0,1,2\nprint(softmax_outputs[\n range(len(softmax_outputs)), class_targets\n])\n\n# print a list of the confidences at the target indices for each sample\nprint(-np.log(softmax_outputs[\n range(len(softmax_outputs)), class_targets\n]))\n\n# map indices to retrieve values from softmax distributions\n# zip() lets us iterate over multiple iterables at the same time\nfor targ_idx, distribution in zip(class_targets, softmax_outputs):\n print(distribution[targ_idx])\n\n# apply negative log to confidences at target indices\nneg_log = -np.log(softmax_outputs[\n range(len(softmax_outputs)), class_targets\n])\n\n# avg loss per batch, using arithmetic mean\naverage_loss = np.mean(neg_log)\nprint(average_loss)","repo_name":"Graham-CO/ai_ml_python","sub_path":"chapter_5/dyn_cat_cross_ent.py","file_name":"dyn_cat_cross_ent.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37496612676","text":"class Solution:\n def oddCells(self, m: int, n: int, indices: list[list[int]]) -> int:\n rows = [0 for _ in range(m)]\n cols = [0 for _ in range(n)]\n for indice in 
indices:\n rows[indice[0]] += 1\n cols[indice[1]] += 1\n ans = 0\n len = 0\n for i in range(m):\n if rows[i] & 1:\n len += 1\n ans += n\n for i in range(n):\n if cols[i] & 1:\n ans += m - len-len\n return ans\n\n\nm, n = 48, 37\nindices = [[40, 5]]\na = Solution()\nans = a.oddCells(m, n, indices)\nprint(ans)\n","repo_name":"qbnmmm/leetcode","sub_path":"每日一题/220712_1252. 奇数值单元格的数目.py","file_name":"220712_1252. 奇数值单元格的数目.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30766283960","text":"##This function replaces the chromosomes based on rank and crowding distance. Initially until the population size is reached each \n##front is added one by one until addition of a complete front which results in exceeding the population size. At this point the \n##chromosomes in that front is added subsequently to the population based on crowding distance.\n\ndef nsga_ii_para_imple_replace_chromosome(intermediate_chromosome, M, V, pop):\n \n ##intermediate_chromosome --- original population and the child/mutated chromosomes \n ##M --- the number of objective functions \n ##V --- the number of variables \n ##pop --- the original populaation size \n \n import os \n current_path = os.path.dirname(os.path.abspath(__file__)) + '\\\\' ##Incase relative directories are needed\n import sys\n sys.path.append(current_path + 'auxillary_functions\\\\')\n from auxillary_functions_nsga_ii_para_imple import index_sort_by_column \n from auxillary_functions_nsga_ii_para_imple import find_max_index \n import numpy as np\n\n ##Finding the dimensions of the intermediate_chromosome \n \n dim_intermediate_chromosome = intermediate_chromosome.shape \n N = dim_intermediate_chromosome[0]\n\n ##Get the index for the population sort based on rank \n index = index_sort_by_column(intermediate_chromosome, M+V)\n \n ##Now sort the individuals based on the index\n sorted_chromosome = np.zeros((N, dim_intermediate_chromosome[1]))\n for i in range (0, N):\n for j in range (0, dim_intermediate_chromosome[1]):\n sorted_chromosome[i,j] = intermediate_chromosome[int(round(index[i])),j]\n \n ##Find the maximum rank of the current population \n rank = []\n for i in range (0, N):\n rank.append(sorted_chromosome[i, M+V])\n max_rank = max(rank)\n\n ##Start adding each front based on rank and crowding distance until the whole population is filled \n f = np.zeros((pop, dim_intermediate_chromosome[1]))\n previous_index = 0\n for i in range (1, int(round(max_rank))):\n ##Get the index for current rank i.e. 
the last element in the sorted_chromosome with rank i\n current_index = int(round(find_max_index(sorted_chromosome, M+V, i)))\n \n ##Check to see if the population is fulled if all the individuals with rank i in added to the population\n if current_index > pop-1:\n ##If so, then find the number of individuals with current rank i \n remaining = pop - previous_index\n ##get information about the individuals in the current rank i\n temp_pop = np.zeros((current_index+1-previous_index, dim_intermediate_chromosome[1]))\n \n for j in range (0, current_index - previous_index + 1):\n for k in range (0, dim_intermediate_chromosome[1]):\n temp_pop[j,k] = sorted_chromosome[previous_index + j, k]\n ##Sort the individuals with rank i in the descending order based on the crowding distance \n index = index_sort_by_column(temp_pop, M+V+1)\n index_rev = []\n for j in range (0, len(index)):\n index_rev.append(index[len(index)-1-j])\n \n ##Start filling individuals into the population in descending order until population is filled\n for j in range (0, remaining):\n for k in range (0, dim_intermediate_chromosome[1]):\n f[previous_index+j,k] = temp_pop[int(round(index_rev[j])),k]\n\n return f\n \n elif current_index < pop-1:\n ##Add all the individuals with rank i into the population\n for j in range (0, current_index - previous_index + 1):\n for k in range (0, dim_intermediate_chromosome[1]):\n f[previous_index + j,k] = sorted_chromosome[previous_index+j,k]\n ##Get the index for the lat added individual \n previous_index = current_index+1 \n \n else:\n ##Add all the individuals with rank i into the population.\n for j in range (0, dim_intermediate_chromosome[1]):\n f[current_index, j] = sorted_chromosome[current_index, j]\n \n return f\n \n##Copyright (c) 2009, Aravind Seshadri\n##All rights reserved.\n\n##Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following \n##conditions are met:\n\n## * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.\n## * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer \n## in the documentation and/or other materials provided with the distribution\n## \n##THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT \n##NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL \n##THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES \n##(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n##HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) \n##ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n ","repo_name":"zchiam002/vecmc_codes_zhonglin","sub_path":"nsga_ii/nsga_ii_para_imple_replace_chromosome.py","file_name":"nsga_ii_para_imple_replace_chromosome.py","file_ext":"py","file_size_in_byte":5684,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"33608091415","text":"import sqlite3\nfrom sqlite3 import Error\nimport numpy as np \nimport pandas as pd\nfrom datetime import datetime, time\n\n\ndef create_connection(db_file):\n\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n return conn\n except Error as e:\n print(e)\n\n return conn\n\n\ndef fetch_slots(conn):\n cur = conn.cursor()\n cur.execute(f'SELECT * FROM slots;')\n\n rows = cur.fetchall()\n\n slots = []\n for row in rows:\n slots.append(row)\n\n return slots\n\ndef create_calender(conn, calender):\n sql = ''' INSERT INTO calender(user_id,slot_id)\n VALUES(?,?) '''\n cur = conn.cursor()\n cur.execute(sql, calender)\n conn.commit()\n return cur.lastrowid\n\ndef main():\n \n # Enter Path where you want your database to be:\n database = r\"database.db\"\n\n\n # create a database connection => Db will be created if there does not exists one.\n conn = create_connection(database)\n\n with conn:\n\n slots = fetch_slots(conn)\n\n for slot in slots:\n\n # calender = (7, slot[0])\n # create_calender(conn, calender)\n # calender = (8, slot[0])\n # create_calender(conn, calender)\n calender = (10, slot[0])\n create_calender(conn, calender)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Poornartha/RasaChatbot","sub_path":"actions/load_calender.py","file_name":"load_calender.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"20698843289","text":"# Omid Ershad\n# Student ID: 011123774\n\n# Import required packages\nimport csv\nimport datetime\n\nimport truck\nfrom create_hash_map import ChainingHashTable\nfrom package import Package\n\n# Initialize hash table to store packages\nhashtable = ChainingHashTable()\n\n# Read package data from CSV\npackage_file = \"package_file.csv\"\naddress_file = \"address_table.csv\"\ndistance_file = \"distance_table.csv\"\n\nwith open(package_file) as package_data:\n package_reader = csv.reader(package_data)\n for package in package_reader:\n # Create package object\n package_id = int(package[0])\n address = package[1]\n city = package[2]\n state = package[3]\n zip = package[4]\n deadline = package[5]\n weight = package[6]\n status = \"At Hub\"\n\n # Create package object and insert into hash table\n package_object = Package(package_id, address, city, state, zip, deadline, weight)\n hashtable.insert(package_id, package_object)\n\n# Read address data from CSV\naddress_list = []\nwith open(address_file) as address_data:\n address_reader = csv.reader(address_data)\n for address in address_reader:\n address_list.append(address)\n\n# Read distance data from CSV\ndistance_list = []\nwith open(distance_file) as 
distance_data:\n distance_reader = csv.reader(distance_data)\n for distance in distance_reader:\n distance_list.append(distance)\n\n\n# Function to get address index from address list\ndef get_index_for_address(address):\n for address_data in address_list:\n if address == address_data[2]:\n return int(address_data[0])\n return -1\n\n\n# Function to calculate distance between addresses\ndef distance_between_two_addresses(address1, address2):\n address_index1 = get_index_for_address(address1)\n address_index2 = get_index_for_address(address2)\n distance = distance_list[address_index1][address_index2]\n if distance == '':\n distance = distance_list[address_index2][address_index1]\n distance_result = float(distance)\n return distance_result\n\n\n# Create method to deliver packages for given truck\ndef deliver_truck(truck):\n print(truck.package_list)\n current_location = truck.current_address\n for package_id in truck.package_list:\n package = hashtable.search(package_id)\n distance = distance_between_two_addresses(current_location, package.address)\n\n\n# Manually load in packages to trucks\ntruck1 = truck.Truck(truck_id=1, package_list=[1, 13, 14, 15, 16, 20, 29, 30, 31, 34, 37, 40, 19], status=\"Hub\",\n current_address=\"4001 South 700 East\",\n departure_time=datetime.timedelta(hours=8))\ntruck2 = truck.Truck(truck_id=2, package_list=[3, 6, 18, 25, 28, 32, 36, 38, 24, 26, 27, 33, 35], status=\"Hub\",\n current_address=\"4001 South 700 East\",\n departure_time=datetime.timedelta(hours=9, minutes=5))\ntruck3 = truck.Truck(truck_id=3, package_list=[9, 2, 4, 5, 7, 8, 10, 11, 12, 17, 21, 22, 23, 39], status=\"Hub\",\n current_address=\"4001 South 700 East\",\n departure_time=datetime.timedelta(hours=10, minutes=30))\n\nimport copy\n\n# Function to deliver packages for a truck\ndef deliver_packages(truck):\n # Make a deep copy of the truck's package list, so we don't modify the original\n packages = copy.deepcopy(truck.package_list)\n current_time = truck.departure_time\n current_location = truck.current_address\n\n while packages:\n # Find the nearest package\n nearest_package_id = find_nearest_package(current_location, packages)\n package = hashtable.search(nearest_package_id)\n # Update current location\n current_time += datetime.timedelta(hours=distance_between_two_addresses(current_location, package.address) / 18)\n truck.mileage += distance_between_two_addresses(current_location, package.address)\n current_location = package.address\n # Deliver it\n package.delivery_time = current_time\n package.departure_time = truck.departure_time\n # Remove delivered package from list\n packages.remove(nearest_package_id)\n\n return current_time\n\n# Define a function to find nearest package\ndef find_nearest_package(current_location, package_ids):\n nearest_distance = float(\"inf\")\n nearest_package_id = None\n\n for package_id in package_ids:\n package = hashtable.search(package_id)\n distance = distance_between_two_addresses(current_location, package.address)\n if distance < nearest_distance:\n nearest_distance = distance\n nearest_package_id = package_id\n\n return nearest_package_id\n\n# Deliver packages for each truck and determine their return times\ntruck1_return = deliver_packages(truck1)\ntruck2_return = deliver_packages(truck2)\nif truck1_return < truck2_return:\n truck3.departure_time = truck1_return\nelse:\n truck3.departure_time = truck2_return\n\n# Fix deliver for package number #9\ndeliver_packages(truck3)\n\n\n# Print total mileage of all trucks combined\ndef 
print_total_mileage(truck1, truck2, truck3):\n    total_mileage = truck1.mileage + truck2.mileage + truck3.mileage\n    print(\"Total Mileage:\", total_mileage)\n\n\n# Main loop for user interface\nwhile True:\n    print(\"-\" * 40)\n    # print the combined mileage of all trucks at the top of the menu\n    print_total_mileage(truck1, truck2, truck3)\n    print(\"-\" * 40)\n    print(\"1. List all packages with statuses\")\n    print(\"2. Get status for specific package\")\n    print(\"3. List status for specific time\")\n    print(\"0. Exit\")\n\n    option = input(\"Enter your option: \")\n    if option == \"1\":\n        for i in range(1, 41):\n            print(hashtable.search(i))\n    elif option == \"2\":\n        package_number = input(\"Which Package (1-40): \")\n        print(hashtable.search(int(package_number)))\n    elif option == \"3\":\n        test = input(\"Enter a time (HH:MM): \")\n        h, m = test.split(\":\")\n        user_time = datetime.timedelta(hours=int(h), minutes=int(m))\n        for i in range(1, 41):\n            print(hashtable.search(i).calculate_status(user_time))\n    elif option == \"0\":\n        break\n","repo_name":"omiershad/C950_WGU_Omid_Ershad","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"36603419699","text":"import sys\nimport os\n# from typing import string;\n\nimport logging\n\n\nmainLogger:logging.Logger = logging.getLogger()\n# Run the Maven test suite, then build the Docker image, logging each phase\n\nmainLogger.info(\"-------- Begin test logging -------\")\n\nmavenCmd:str = './mvnw'\nif sys.platform == \"win32\":\n    mavenCmd = '.\\mvnw'\n\nretVal = os.system(f'{mavenCmd} test') # run the tests through the shell\nif retVal != 0: # a non-zero exit code means the tests failed\n    mainLogger.error(\"Maven Test failed, exiting...\")\n    sys.exit(1)\n\nmainLogger.info(\"-------- End test logging -------\")\n\n# Build docker image\nmainLogger.info(\"-------- Begin docker build logging -------\")\nretVal = os.system(f'{mavenCmd} spring-boot:build-image -Dspring-boot.build-image.imageName=demo/payroll')\nif retVal != 0:\n    mainLogger.error(\"Docker Build failed, exiting...\")\n    sys.exit(1)\n\nmainLogger.info(\"-------- End docker build logging -------\")","repo_name":"adrianAnyansi/SpringBootExercise","sub_path":"buildScript.py","file_name":"buildScript.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"86434805528","text":"import PIL.Image\n\n# array of characters that will replace the pixels of the image\n# change them to get different results\n\nAsciiChars=[\"@\", \"#\", \"S\", \"%\", \"?\", \"*\", \"+\", \";\", \":\", \",\", \".\"]\n\n# function that resizes the image\n\ndef resizeImage(image, newWidth=100):\n    width, height = image.size\n    ratio=height/width\n    newHeight=int((newWidth*ratio)/2)\n    resizedImage=image.resize((newWidth, newHeight))\n    return (resizedImage)\n\n# function that converts the pixel colors to grayscale\n\ndef grayfy(image):\n    grayScaleImage=image.convert(\"L\")\n    return (grayScaleImage)\n\n\n# function that replaces pixels with characters from the array declared above\ndef pixelsToAscii(image):\n    pixels=image.getdata()\n    characters = \"\".join([AsciiChars[pixel//25] for pixel in pixels])\n    return (characters)\n\n\ndef imageToAscii(path, newWidth=100):\n    try:\n        image=PIL.Image.open(path)\n    except:\n        print(\"invalid path\")\n        return\n    newImageData=pixelsToAscii(grayfy(resizeImage(image)))\n    pixel_count=len(newImageData)\n    asciiImage=\"\\n\".join(newImageData[i:(i+newWidth)] for i in range(0, 
pixel_count, newWidth ))\n\n return(asciiImage)\n\n","repo_name":"SenpaiSuchil/arte-generativo","sub_path":"imageToAscii.py","file_name":"imageToAscii.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"7059039490","text":"import os\nimport unittest\n\nfrom lxml import etree\n\nfrom src.preprocess.tokenizer import get_root, read_aquaint, read_aquaint2, read_tac, get_date, tokenizer, write_output, \\\n read_by_corpus_type\n\nparser = etree.XMLParser(resolve_entities=False, no_network=True, recover=True)\n\nmaster_headline = \"\\\"One-in-100-year flood event\\\" devastates Western Australia\"\nmaster_body = [\n \"Test, Test (WIKINEWS) _ Aerial evacuations took place and food was airlifted in yesterday after a \"\n \"devastating flood Western Australia emergency services minister Stephen Dawson called the 'worst in a \"\n \"century' isolated communities in the Kimberley.\",\n 'Flooding began last week after heavy rain from Tropical Cyclone Ellie swelled local rivers, bolstered by '\n 'La Niña. Notably, the Fitzroy River broke a 2002 record of 13.95 meters (45.8 ft), reaching a water '\n 'level of 15.81 meters (51.9 ft) on Wednesday, according to a Bureau of Meteorology spokesperson.',\n 'Authorities estimate it could take months']\ntemp = \"temp.txt\"\n\n\nclass TestTokenizer(unittest.TestCase):\n def test_get_root(self):\n root = get_root(\"../snip/tac.sgm\")\n headline = root.find(\"DOC\").find(\"BODY\").find(\"HEADLINE\").text.strip().replace('\\n', ' ')\n self.assertEqual(headline, master_headline)\n\n def test_read_aquaint(self):\n root = get_root(\"../snip/aquaint.xml\")\n headline, body = read_aquaint(root, \"APW19980602.0004\")\n self.assertEqual(headline, master_headline)\n self.assertEqual(body, master_body)\n\n def test_read_aquaint2(self):\n root = get_root(\"../snip/aquaint2.xml\")\n headline, body = read_aquaint2(root, \"APW_ENG_19980602.0002\")\n self.assertEqual(headline, master_headline)\n self.assertEqual(body, master_body)\n\n def test_read_tac(self):\n root = get_root(\"../snip/tac.sgm\")\n headline, body = read_tac(root)\n self.assertEqual(headline, master_headline)\n self.assertEqual(body, master_body)\n\n def test_get_data(self):\n self.assertEqual(get_date(\"APW19980602.1383\"), \"19980602\")\n self.assertEqual(get_date(\"APW_ENG_20041007.0256\"), \"20041007\")\n self.assertEqual(get_date(\"AFP_ENG_20061002.0523\"), \"20061002\")\n\n def test_tokenizer(self):\n result = tokenizer(\"Authorities estimate it could take months\")\n self.assertEqual(len(result), 6)\n self.assertEqual(result[2], \"it\")\n\n def check_two_txt(self):\n with open(temp) as test, open('../snip/gold.txt') as gold:\n for line1, line2 in zip(test, gold):\n self.assertEqual(line1, line2)\n os.remove(temp)\n\n def test_write_output(self):\n output = open(temp, \"w+\")\n date = \"19980602\"\n write_output(output, 1, date, master_headline, master_body)\n self.check_two_txt()\n\n def test_read_by_corpus_type(self):\n read_by_corpus_type(\"../snip/aquaint.xml\", \"APW19980602.0004\", 1, 1, temp)\n self.check_two_txt()\n read_by_corpus_type(\"../snip/aquaint2.xml\", \"APW_ENG_19980602.0002\", 1, 2, temp)\n self.check_two_txt()\n read_by_corpus_type(\"../snip/tac.sgm\", \"AFP_ENG_19980602.0149\", 1, 3, temp)\n self.check_two_txt()\n\n\nif __name__ == '__main__':\n 
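# Run the whole test suite when this file is executed directly.\n    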
unittest.main()","repo_name":"LING-575-Summarization/Summarization","sub_path":"src/tests/tokenizer_test.py","file_name":"tokenizer_test.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
{"seq_id":"29831904298","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch.nn as nn\nimport torch\nimport numpy as np\n\n\nclass Accuracy(nn.Module):\n    def __init__(self,pca_w,pca_b,cube,jointNum,batch_size,device):\n        super(Accuracy, self).__init__()\n        #self.criterion = nn.MSELoss()\n        #self.criterion = nn.MSELoss(reduction='elementwise_mean')\n        \n        self.pca_w=pca_w\n        self.pca_b=pca_b\n        self.cube=cube\n        \n        self.com3d=np.zeros((batch_size,3))\n        self.joints3d_gt=np.zeros((batch_size,jointNum*3))\n        \n        self.batch_size=batch_size\n        \n        self.device=device\n        \n        \n\n    def _forward(self, output_embed, target,com3d):\n        \n        output_recon=torch.mm(output_embed,self.pca_w)+self.pca_b\n        \n        com3d_tile=np.tile(com3d,(1,21))\n        com3d_tile=torch.FloatTensor(com3d_tile) \n        com3d_tile= com3d_tile.to(self.device)  \n        output_recon=output_recon*(self.cube[2]/2.)+com3d_tile\n        \n        error_bag=[]\n        for k in range(self.batch_size):\n            error_torch1=(target[k]-output_recon[k])**2\n            error_torch2=torch.sqrt(torch.sum(error_torch1.view(21*1,3),dim=1))    \n            error=torch.sum(error_torch2)/(21*1)\n            error_bag.append(error.item())\n        \n        #loss=self.criterion(output,target)\n        #error=self.calculate_error()\n        return error_bag\n        #return error\n\n    \n    ","repo_name":"baeckgoo/ir-hand","sub_path":"loss/accuracy.py","file_name":"accuracy.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"31"}
{"seq_id":"31692136142","text":"def snail_it(coords):\n    grid = {\n        0: {\n            0: 1\n        }\n    }\n    time = 0\n    x = 0\n    y = 0\n\n    for x_d, y_d in coords:\n        x_step = -1 if x_d < x else 1\n        y_step = -1 if y_d < y else 1\n\n        while x != x_d:\n            x += x_step\n            slime(grid, x, y)\n            time += grid[y][x]\n\n        while y != y_d:\n            y += y_step\n            slime(grid, x, y)\n            time += grid[y][x]\n\n    return time\n\n\ndef slime(grid, x, y):\n    if y not in grid:\n        grid[y] = {}\n\n    if x not in grid[y]:\n        grid[y][x] = 0\n\n    grid[y][x] += 1\n\n\ndef snail_it_file(fname, header=True):\n    lines = open(fname).readlines()\n\n    if header:\n        lines.pop(0)\n\n    coords = [tuple(map(int, line.strip().split(','))) for line in lines]\n    return snail_it(coords)\n\n\ndef test_snail_it():\n    assert 14 == snail_it_file('input/04.test', header=False)\n\n\nif __name__ == '__main__':\n    print(snail_it_file('input/coords.csv', header=True))","repo_name":"matslindh/codingchallenges","sub_path":"knowit2019/04.py","file_name":"04.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"}
{"seq_id":"27964344820","text":"'''\nGiven a string S, we want to reverse only the words in the string.\nFirst, the string S obeys the following rules.\nIt consists only of lowercase letters ('a'-'z'), digits ('0'-'9'), spaces (' '), and the special characters '<' and '>'.\nThe string does not start or end with a space.\nIf '<' and '>' appear in the string, they alternate with '<' appearing first, and the two characters occur the same number of times.\nA tag is a substring of length at least 3 that starts with '<' and ends with '>', and between '<' and '>' there are only lowercase letters and spaces. A word is a substring of lowercase letters and digits, and two consecutive words are separated by a single space. A tag is not a word, and there is no space between a tag and a word.\n\nInput\nThe first line contains the string S. The length of S is at most 100,000.\nbaekjoon online judge\ntag\nOutput\nOn the first line, print the string S with its words reversed.\nnoojkeab enilno egduj\ngat\n1. The string contains lowercase letters, digits, spaces, and special characters.\n2. Read everything with input(),\n3. when we meet '<' and '>', leave the substring between them as-is, and reverse only what lies between '>' and '<'\n4. We need to split on spaces (look this up) - is split() enough?\n5. Iterating backwards with for over (N-1) and appending items one by one to a list should work. (no)\n6. Using pop makes it simple.\n\nTake one of the listed words, reverse it, then take the next word and reverse it - would that work?\n\n'''\n# read the input string: lowercase letters, digits, spaces, special characters\nS = input()\n\ntemp = ''\nans = ''\n\nfor i in S:\n    if i == ' ':\n        if '<' not in temp:\n            ans += temp[::-1] + i\n            temp = ''\n        else:\n            temp += i\n\n    elif i == '<':\n        ans += temp[::-1]\n        temp = ''\n        temp += i\n\n    elif i == '>':\n        ans += temp + i\n        temp = ''\n    else:\n        temp += i\n\nans += temp[::-1]\n\nprint(ans)\n\n\n\n\n","repo_name":"hhyeona/Algorithm_Study","sub_path":"2월/3주차/17413_단어뒤집기2.py","file_name":"17413_단어뒤집기2.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"74157575768","text":"import networkx as nx\nimport typing\nimport numpy as np\nfrom general_utils import Attribute\nfrom pprint import pprint as pp\n\n\ndef find_parents(graph: nx.Graph, level: int, label: str) -> list:\n    parents = []\n    for node in graph.nodes:\n        if (\n            graph.nodes[node][Attribute.LEVEL] == level - 1\n            and graph.nodes[node][Attribute.LABEL] == label\n        ):\n            parents.append(node)\n    return parents\n\n\ndef node_comparator(found_node, searched_node) -> bool:\n    return found_node[Attribute.LABEL] == searched_node[Attribute.LABEL]\n\n\ndef node_comparator_factory(level: int) -> typing.Callable:\n    def node_comparator(found_node, searched_node) -> bool:\n        desired_level = level + searched_node.get(Attribute.LEVEL, 0)\n        return (\n            found_node[Attribute.LEVEL] == desired_level\n            and found_node[Attribute.LABEL] == searched_node[Attribute.LABEL]\n        )\n\n    return node_comparator\n\n\ndef find_isomporphic(graph: nx.Graph, left_side_graph: nx.Graph, level: int) -> dict:\n    isomorphic_graphs = []\n    graph_matcher = nx.algorithms.isomorphism.GraphMatcher(\n        graph, left_side_graph, node_match=node_comparator_factory(level)\n    )\n    for isomorphic_graph in graph_matcher.subgraph_isomorphisms_iter():\n        # mapping should be directed from template to real graph:\n        inversed_isomorphism = {v: k for k, v in isomorphic_graph.items()}\n        isomorphic_graphs.append(inversed_isomorphism)\n    return isomorphic_graphs\n\n\ndef find_isomorphic_wrapper(\n    graph: nx.Graph, left_side_graph: nx.Graph, level: int, constraints: list = None\n) -> dict:\n    \"\"\"\n    constraints refer to node in left_side_graph.\n    Example constraints:\n    [{\n        'first_node': 1,\n        'second_node': 2,\n        'constrained_middle_node': 3\n    },\n    {\n        'node': 4,\n        'constrained_equal_node': 5\n    }]\n\n    x of 'constrained_middle_node' == (x of 'first_node' + x of 'second_node') / 2\n    y of 'constrained_middle_node' == (y of 'first_node' + y of 'second_node') / 2\n    x of 'node' == x of 'constrained_equal_node'\n    y of 'node' == y of 'constrained_equal_node'\n    \"\"\"\n\n    def predicate(mapping):\n        eps = 1e-4\n        checked_constraints = [False for _ in range(len(constraints))]\n\n        def check_middle_node_constraint(constraint, i):\n            first_node = graph.nodes[mapping[constraint[\"first_node\"]]]\n            second_node = graph.nodes[mapping[constraint[\"second_node\"]]]\n            expected_node = graph.nodes[mapping[constraint[\"constrained_middle_node\"]]]\n            x1, y1 = first_node[Attribute.X], first_node[Attribute.Y]\n            x2, y2 = second_node[Attribute.X], second_node[Attribute.Y]\n            x3, y3 = expected_node[Attribute.X], expected_node[Attribute.Y]\n            if np.abs((x1 + x2) / 2 - x3) < eps and np.abs((y1 + y2) / 2 - y3) < eps:\n                checked_constraints[i] = True\n\n        def check_equal_nodes_constraint(constraint, i):\n            
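# 'node' and 'constrained_equal_node' must map to graph nodes whose (x, y) coordinates are equal within eps\n            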
first_node = graph.nodes[mapping[constraint[\"node\"]]]\n second_node = graph.nodes[mapping[constraint[\"constrained_equal_node\"]]]\n x1, y1 = first_node[Attribute.X], first_node[Attribute.Y]\n x2, y2 = second_node[Attribute.X], second_node[Attribute.Y]\n if np.abs(x1 - x2) < eps and np.abs(y1 - y2) < eps:\n checked_constraints[i] = True\n\n for i, constraint in enumerate(constraints):\n constraint_keys = list(constraint.keys())\n if constraint_keys == [\n \"first_node\",\n \"second_node\",\n \"constrained_middle_node\",\n ]:\n check_middle_node_constraint(constraint, i)\n elif constraint_keys == [\"node\", \"constrained_equal_node\"]:\n check_equal_nodes_constraint(constraint, i)\n else:\n raise Exception(\"Invalid constraint\")\n\n return all(checked_constraints)\n\n initially_found = find_isomporphic(graph, left_side_graph, level)\n\n # modify later here to add ability to choose which mapping to use\n try:\n return (\n initially_found[0]\n if constraints is None\n else list(filter(predicate, initially_found))[0]\n )\n except IndexError as e:\n return None\n\n\ndef add_to_graph(\n graph: nx.Graph,\n isomorphic_mapping: dict,\n right_side_parent_node: tuple,\n right_side_nodes_new: list,\n right_side_edges: list,\n):\n parent_tmp_node_number = right_side_parent_node[0]\n\n n = len(graph.nodes)\n # find max node number in graph\n for node in graph.nodes:\n n = max(n, node)\n\n right_side_nodes_mapping = {\n node[0]: node[0] + n for node in right_side_nodes_new\n } # define a dictionay mapping old node number (based on right_side_nodes ) => graph node.\n right_side_nodes_mapping[parent_tmp_node_number] = isomorphic_mapping[\n parent_tmp_node_number\n ]\n\n right_side_edges_mapped = list(\n map(\n lambda edge: (\n right_side_nodes_mapping[edge[0]],\n right_side_nodes_mapping[edge[1]],\n ),\n right_side_edges,\n )\n )\n\n right_size_edges_to_parent = [\n (isomorphic_mapping[parent_tmp_node_number], right_side_nodes_mapping[node[0]])\n for node in list(\n filter(lambda node: node[1][Attribute.LABEL] == \"I\", right_side_nodes_new)\n )\n ]\n\n right_side_edges_mapped = right_side_edges_mapped + right_size_edges_to_parent\n\n right_side_nodes_mapped = list(\n map(\n lambda node: (right_side_nodes_mapping[node[0]], node[1]),\n right_side_nodes_new,\n )\n )\n\n existing_node_parent = graph.nodes[isomorphic_mapping[parent_tmp_node_number]]\n\n for node in right_side_nodes_mapped:\n node[1][Attribute.LEVEL] = existing_node_parent[Attribute.LEVEL] + 1\n\n graph.nodes[isomorphic_mapping[parent_tmp_node_number]][\n Attribute.LABEL\n ] = right_side_parent_node[1][Attribute.LABEL]\n\n graph.add_nodes_from(right_side_nodes_mapped)\n graph.add_edges_from(right_side_edges_mapped)\n\n\ndef merge_nodes(graph: nx.Graph, nodes: list, new_node: tuple):\n graph_edges = graph.copy().edges()\n for n in nodes:\n graph.remove_node(n)\n\n graph.add_nodes_from([new_node])\n\n for n1, n2 in graph_edges:\n if n1 in nodes:\n graph.add_edge(new_node[0], n2)\n elif n2 in nodes:\n graph.add_edge(n1, new_node[0])\n","repo_name":"maciejsikora2302/GramatykiGrafoweGrupa2Sroda1630","sub_path":"graph_functions.py","file_name":"graph_functions.py","file_ext":"py","file_size_in_byte":6526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11131115676","text":"from django.db import models\n\n# Create your models here.\n\nfrom django import forms\nfrom django.db import models\n\n# Create your models here.\n\nfrom modelcluster.fields import ParentalKey, 
ParentalManyToManyField\nfrom modelcluster.contrib.taggit import ClusterTaggableManager\nfrom taggit.models import TaggedItemBase\n\nfrom wagtail.core.models import Page, Orderable\nfrom wagtail.core.fields import RichTextField\nfrom wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel\nfrom wagtail.images.edit_handlers import ImageChooserPanel\nfrom wagtail.search import index\nfrom wagtail.search.backends import get_search_backend\n\nfrom wagtail.snippets.models import register_snippet\n\nfrom wagtail.core.fields import StreamField\nfrom wagtail.core import blocks\nfrom wagtail.admin.edit_handlers import StreamFieldPanel\nfrom wagtail.images.blocks import ImageChooserBlock\nfrom wagtail.embeds.blocks import EmbedBlock\nfrom wagtail.core import blocks\n\nfrom src.tools import PageTree, readFile\nfrom src.blocks import TwoColumnBlock, ThreeColumnBlock, VideoBlock, DjangoBlock\n\nimport os\nfrom django.shortcuts import render\n\n\n#setting side content\ndef side(context):\n Posts = Site1Index.objects.all()[0]\n blogpages = Posts.get_children().live().order_by('-first_published_at')\n context['last_posts'] = blogpages[:4]\n\n\nclass Site1Home(Page):\n body = RichTextField(blank=True)\n\n content = StreamField([\n ('paragraph', blocks.RichTextBlock()),\n ('exe_htmljs', blocks.TextBlock()),\n ],null=True,blank=True)\n\n content_panels = Page.content_panels + [\n FieldPanel('body'),\n StreamFieldPanel('content'),\n ]\n\n\n\n def get_context(self, request):\n # Update context to include only published posts, ordered by reverse-chron\n context = super().get_context(request)\n side(context)\n return context\n\nclass Site1Index(Page):\n intro = RichTextField(blank=True)\n\n content_panels = Page.content_panels + [\n FieldPanel('intro', classname=\"full\")\n ]\n\n def get_context(self, request):\n # Update context to include only published posts, ordered by reverse-chron\n context = super().get_context(request)\n blogpages = self.get_children().live().order_by('-first_published_at')\n context['blogpages'] = blogpages\n side(context)\n return context\n\n\nclass Site1Tag(TaggedItemBase):\n content_object = ParentalKey(\n 'Site1Post',\n related_name='tagged_items',\n on_delete=models.CASCADE\n )\n\n\n@register_snippet\nclass Site1Category(models.Model):\n name = models.CharField(max_length=255)\n icon = models.ForeignKey(\n 'wagtailimages.Image', null=True, blank=True,\n on_delete=models.SET_NULL, related_name='+'\n )\n\n panels = [\n FieldPanel('name'),\n ImageChooserPanel('icon'),\n ]\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name_plural = 'site1(tech) categories'\n\nclass Site1Post(Page):\n date = models.DateField(\"Post date\")\n intro = models.CharField(max_length=250, blank=True)\n# body = RichTextField(blank=True)\n tags = ClusterTaggableManager(through=Site1Tag, blank=True)\n categories = ParentalManyToManyField('site1.Site1Category', blank=True)\n\n body = StreamField([\n ('heading', blocks.CharBlock(classname=\"full title\")),\n ('paragraph', blocks.RichTextBlock()),\n ('two_columns', TwoColumnBlock()),\n ('three_columns', ThreeColumnBlock()),\n ('image', ImageChooserBlock()),\n ('exe_htmljs', blocks.TextBlock()),\n ('code_bash', blocks.TextBlock()),\n ('code_py', blocks.TextBlock()),\n ('code_htmljs', blocks.TextBlock()),\n ('code_django', DjangoBlock()),\n #('video', VideoBlock()),\n ],null=True,blank=True)\n\n\n search_fields = Page.search_fields + [\n index.SearchField('intro'),\n index.SearchField('body'),\n ]\n\n# content_panels = 
Page.content_panels + [\n# FieldPanel('date'),\n# FieldPanel('intro'),\n# FieldPanel('body', classname=\"full\"),\n# InlinePanel('gallery_images', label=\"Gallery images\"),\n# ]\n\n content_panels = Page.content_panels + [\n MultiFieldPanel([\n FieldPanel('date'),\n FieldPanel('tags'),\n FieldPanel('categories', widget=forms.CheckboxSelectMultiple),\n ], heading=\"Blog information\"),\n FieldPanel('intro'),\n #FieldPanel('body'),\n StreamFieldPanel('body'),\n InlinePanel('gallery_images', label=\"Gallery images\"),\n ]\n\n def get_context(self, request):\n # Update context to include only published posts, ordered by reverse-chron\n context = super().get_context(request)\n side(context)\n return context\n \n\nclass Site1PageGalleryImage(Orderable):\n page = ParentalKey(Site1Post, on_delete=models.CASCADE, related_name='gallery_images')\n image = models.ForeignKey(\n 'wagtailimages.Image', on_delete=models.CASCADE, related_name='+'\n )\n caption = models.CharField(blank=True, max_length=250)\n\n panels = [\n ImageChooserPanel('image'),\n FieldPanel('caption'),\n ]\n\n\nclass Site1Tree(Page):\n def get_context(self, request):\n context = super().get_context(request)\n\n index = Site1Index.objects.filter(title='Posts')[0]\n #posts = index.get_children().live()\n #print(posts)\n #context['posts'] = posts\n\n html_menu = PageTree(index).html_menu\n context['menu'] = html_menu\n side(context)\n return context\n\nclass Site1Search(Page):\n def get_context(self, request):\n word = request.GET.get('key')\n context = super().get_context(request)\n s = get_search_backend()\n posts = s.search(word, Site1Post)\n context['posts'] = posts\n side(context)\n return context\n\nclass Site1QueryCategory(Page):\n def get_context(self, request):\n categoryName = request.GET.get('name')\n \n # Filter posts by category name\n rez = Site1Category.objects.filter(name=categoryName)\n \n if (len(rez) == 0):\n return\n else:\n # Update template context\n context = super().get_context(request)\n\n blogpages = Site1Post.objects.filter(categories=rez[0])\n context['blogpages'] = blogpages\n side(context)\n return context\n\n template = 'site1_index.html'\n\nclass Site1CategoryIndex(Page):\n def get_context(self, request):\n categories = Site1Category.objects.all()\n context = super().get_context(request)\n context['categories'] = categories \n side(context)\n return context\n\nclass Site1TagIndex(Page):\n def get_context(self, request):\n context = super().get_context(request)\n tagList = []\n tags = Site1Tag.objects.all()\n #tags = Site1Tag.objects.order_by(\"tag\")\n for tag in tags:\n if tag.tag.name not in tagList:\n tagList.append(tag.tag.name)\n tagList.sort()\n context['tags'] = tagList\n side(context)\n return context\n\nclass Site1QueryTag(Page):\n\n def get_context(self, request):\n # Filter by tag\n tag = request.GET.get('name')\n blogpages = Site1Post.objects.filter(tags__name=tag)\n\n # Update template context\n context = super().get_context(request)\n context['blogpages'] = blogpages\n side(context)\n return context\n\n#this page will display raw html files as body\nclass Site1RawHtml(Page):\n file_name = models.CharField(max_length=255)\n\n content_panels = Page.content_panels + [\n FieldPanel('file_name', classname=\"full\")\n ]\n\n# search_fields = Page.search_fields + [\n# index.SearchField(''),\n# ]\n\n def serve(self, request):\n context = super().get_context(request)\n\n #name = \"codeberry.html\"\n name = self.file_name\n\n base = os.getcwd()\n path = \"site1/\" + \"static/site1/pages/\" + name\n\n 
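# note: 'base' above is computed but never used; open() resolves the relative 'path' from the current working directory\n        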
f = open(path, \"r\")\n body = f.read();\n f.close()\n\n side(context)\n\n return render(request, 'site1/site1_page.html', {'body':body})\n","repo_name":"mihai2014/wagtail-multi-blog-sites-with-bootstrap-4","sub_path":"site1/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36425540927","text":"database = { \"Steve\": [78, 87 ,98, 88, 79], \"Martin\":[45, 99, 100, 89, 88], \"Trish\" :[66, 68 ,70 ,71, 68]}\r\ndatabase[\"Alex\"] =[50 ,98, 69, 78, 89]\r\nnew = {}\r\nMath = 0\r\nPhysics = 1\r\nChemistry = 2\r\nBiology = 3\r\nSocial_Science = 4\r\nx=0\r\n# while True:\r\n# \tname = input(\"enter the name \\n\")\r\n# \tsubject = input(\"enter the subject with spaces\\n\")\r\n# \tif name == \"\":\r\n# \t\tbreak\r\n\r\n# \tmarks = list(subject.split())\r\n# \tdatabase[name] = marks\r\n\r\nssub = input(\"enter sub\\n \")\r\nif ssub == \"Math\":\r\n\tx = 0\r\n\t\t\r\nelif (ssub == \"Physics\"):\r\n\tx = 1\r\n\t\t\t\r\nelif (ssub == \"Chemistry\"):\r\n\tx = 2\r\n\r\nelif (ssub == \"Biology\"):\r\n\tx = 3\r\n\r\nelif (ssub == \"Social_Science\"):\r\n\tx = 4\r\n\r\n# else:\r\n# \tprint(\"enter valid input\")\r\nfor i in database:\r\n\tnew[i] = database[i][x]\r\n\r\n\r\nfinal = list(sorted(new.items(), key = lambda kv:(kv[1], kv[0]),reverse = True))\r\nprint(final)\r\n\r\n\r\n","repo_name":"PavanKumar-98/launchpad","sub_path":"assignment-7.py","file_name":"assignment-7.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8589520458","text":"import numpy as np\r\nimport csv \r\nimport plotly.express as px\r\n\r\ndef plot(path):\r\n with open(path) as f:\r\n read_graph = csv.DictReader(f)\r\n fig = px.scatter(read_graph, x = \"Roll No\", y=\"Days Present\")\r\n fig.show()\r\n\r\n\r\ndef open_data(path):\r\n roll = []\r\n days = []\r\n with open(path) as r:\r\n read = csv.DictReader(r)\r\n\r\n for i in read:\r\n roll.append(float(i[\"Roll No\"]))\r\n days.append(float(i[\"Days Present\"]))\r\n\r\n return {\"x\" : roll, \"y\": days} \r\n\r\ndef calc(value):\r\n corr = np.corrcoef(value[\"x\"], value[\"y\"])\r\n print(\"The corelation co-efficient of the above values is: \" ,corr[0,1])\r\n\r\n\r\n\r\ndef main():\r\n path = \"F:\\Python works\\Python Program 2\\WhiteHatJt\\C106\\Student Marks vs Days Present.csv\"\r\n value = open_data(path)\r\n calc(value)\r\n plot(path)\r\n\r\nmain()\r\n\r\n","repo_name":"Circuit-Overtime/Correlation","sub_path":"C106d.py","file_name":"C106d.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22054151201","text":"import sys\nimport time\n\nfrom telemetrix import telemetrix\n\n\"\"\"\nAttach a pin to a servo and move it about.\n\"\"\"\n\n# some globals\nSERVO_1_PIN = 3 # bottom left\nSERVO_2_PIN = 5 # top left\nSERVO_3_PIN = 6 # bottom right\nSERVO_4_PIN = 9 # top right\n\nWH_BEAT_DEG = 125\n\n# Create a Telemetrix instance.\n# board = telemetrix.Telemetrix(arduino_instance_id=1)\nboard2 = telemetrix.Telemetrix(arduino_instance_id=2)\n# board.set_pin_mode_servo(SERVO_1_PIN, 544, 2400)\n# board.set_pin_mode_servo(SERVO_2_PIN, 544, 2400)\n# board.set_pin_mode_servo(SERVO_3_PIN, 544, 2400)\n# board.set_pin_mode_servo(SERVO_4_PIN, 544, 2400)\n\nboard2.set_pin_mode_servo(SERVO_1_PIN, 544, 2400)\nboard2.set_pin_mode_servo(SERVO_2_PIN, 544, 
2400)\nboard2.set_pin_mode_servo(SERVO_3_PIN, 544, 2400)\nboard2.set_pin_mode_servo(SERVO_4_PIN, 544, 2400)\ntime.sleep(5)\n\n# # board.servo_write(SERVO_PIN, 80)\n# time.sleep(.2)\n# # board.servo_write(SERVO_PIN, -80)\n\ndef sweep_servo(servo_pin, min_deg, max_deg, sweep_speed):\n for pos in range(min_deg, max_deg + 1):\n print(pos)\n # board.servo_write(servo_pin, pos)\n time.sleep(sweep_speed)\n for pos in range(max_deg, min_deg-1, -1):\n print(pos)\n # board.servo_write(servo_pin, pos)\n time.sleep(sweep_speed)\n\ndef sweep_2_servo(servo1, servo2, min, max, dif, sweep_speed, _board):\n for pos in range(min, max + 1):\n _board.servo_write(servo1, pos)\n _board.servo_write(servo2, pos - dif)\n time.sleep(sweep_speed)\n for pos in range(max, min - 1, -1):\n _board.servo_write(servo1, pos)\n _board.servo_write(servo2, pos - dif)\n time.sleep(sweep_speed)\n\ndef move_servo(servo_pin, dest, move_speed: float):\n for pos in range(dest+1):\n print(pos)\n # board.servo_write(servo_pin, pos)\n time.sleep(move_speed)\n\ndef beat_servo(servo_pin, servo_beat_pin, max_deg):\n for pos in range(max_deg + 1):\n # board.servo_write(servo_beat_pin, 90)\n print(pos)\n # board.servo_write(servo_pin, pos)\n # board.servo_write(servo_beat_pin, WH_BEAT_DEG)\n time.sleep(0.015)\n\n for pos in range(max_deg, -1, -1):\n # board.servo_write(servo_beat_pin, 90)\n print(pos)\n # board.servo_write(servo_pin, pos)\n # board.servo_write(servo_beat_pin, WH_BEAT_DEG)\n time.sleep(0.015)\n\n\n\n# board.servo_write(SERVO_2_PIN, 124)\n\n# def beat(servo_pin, servo_pos, max_deg, speed):\n# if servo_pos < max_deg:\n# servo_pos += speed\n# if servo_pos > max_deg:\n# servo_pos -= speed\n# if (abs(servo_pos - max_deg) < speed:\n# break\n# board.servo_write(servo_pin, servo_pos)\n# time.sleep(0.2)\n#\n# # for pos in range(max_deg, 90 - speed, -speed):\n# # board.servo_write(servo_pin, pos)\n# # board.servo_write(servo_pin, 90)\n#\n#\n# beat(SERVO_2_PIN, 120, 50)\n\n# servo_pos = 115\n# max_deg = 120\n# speed = 20\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nleft_start_pos = 111\nleft_beat_pos = 116\nright_start_pos = 106\nright_beat_pos = 111\nspeed = 0.0005\n\nleft_start_pos2 = 95\nleft_beat_pos2 = 100\nright_start_pos2 = 100\nright_beat_pos2 = 105\n\nstart_pos = max(left_start_pos, right_start_pos)\nbeat_pos = max(left_beat_pos, right_beat_pos)\n\nstart_pos2 = max(left_start_pos2, right_start_pos2)\nbeat_pos2 = max(left_beat_pos2, right_beat_pos2)\n\n\nmove_speed = 0.0015\n\n# board.servo_write(SERVO_2_PIN, left_start_pos)\n# board.servo_write(SERVO_4_PIN, right_start_pos)\nboard2.servo_write(SERVO_2_PIN, left_start_pos2)\nboard2.servo_write(SERVO_3_PIN, 80)\nboard2.servo_write(SERVO_4_PIN, right_start_pos2)\n\ntime.sleep(5)\n\nwhile True:\n try:\n # board.servo_write(SERVO_2_PIN, 115)\n # time.sleep(0.5)\n # board.servo_write(SERVO_2_PIN, 1)\n # time.sleep(0.1)\n\n # for pos in range(40, 181):\n # move_servo(SERVO_1_PIN, pos, move_speed)\n # time.sleep(0.2)\n # sweep_servo(SERVO_2_PIN, start_pos, beat_pos, speed)\n # time.sleep(0.2)\n #\n # sweep_servo(SERVO_2_PIN, left_start_pos, left_beat_pos, speed)\n # sweep_servo(SERVO_4_PIN, right_start_pos, right_beat_pos, speed)\n\n # sweep_2_servo(SERVO_2_PIN, SERVO_4_PIN, start_pos, beat_pos, 5, speed, board)\n sweep_2_servo(SERVO_2_PIN, SERVO_4_PIN, start_pos2, beat_pos2, 5, speed, board2)\n\n # time.sleep(0.2)\n\n # sweep_servo(SERVO_1_PIN, 0, 180, 0.1)\n\n except KeyboardInterrupt:\n break\n\n# 
board.shutdown()\nboard2.shutdown()\nsys.exit()\n","repo_name":"AnaZ083c/Diplomska-naloga-Avtomatiziran-ksilofon","sub_path":"Python attempt/servos_beat_test.py","file_name":"servos_beat_test.py","file_ext":"py","file_size_in_byte":4471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73893277529","text":"import os\nimport shutil\n\nfrom test import CONFIG\nfrom test import StageTest\nfrom test import skip_without_matlab\n\nfrom pymotifs.loops.extractor import Loader\n\n\nclass DeterminingComputationTest(StageTest):\n loader_class = Loader\n\n def test_knows_if_has_pdb_and_type(self):\n self.assertTrue(self.loader.has_data('1S72'))\n\n def test_knows_if_missing_pdb(self):\n self.assertFalse(self.loader.has_data('0S72'))\n\n\nclass MappingTests(StageTest):\n loader_class = Loader\n\n def test_can_get_a_correct_mapping_for_HL(self):\n normalizer = self.loader.normalizer('4V4Q')\n mapping = self.loader._mapping('4V4Q', 'HL', normalizer)\n self.assertEqual(206, len(mapping))\n\n def test_can_get_a_correct_mapping_for_IL(self):\n normalizer = self.loader.normalizer('4V4Q')\n mapping = self.loader._mapping('4V4Q', 'IL', normalizer)\n self.assertEqual(354, len(mapping))\n\n def test_can_get_a_correct_mapping_for_J3(self):\n normalizer = self.loader.normalizer('4V4Q')\n mapping = self.loader._mapping('4V4Q', 'J3', normalizer)\n self.assertEqual(52, len(mapping))\n\n def test_gets_empty_mapping_for_missing_structure(self):\n normalizer = self.loader.normalizer('4V4Q')\n val = self.loader._mapping('0000', 'IL', normalizer)\n self.assertEquals({}, val)\n\n def test_has_no_old_style_ids(self):\n normalizer = self.loader.normalizer('1GID')\n mapping = self.loader._mapping('1GID', 'HL', normalizer)\n seperators = [key[4] for key in mapping.keys()]\n self.assertTrue('_' not in seperators)\n\n\nclass NormalizingUnitsTest(StageTest):\n loader_class = Loader\n\n def setUp(self):\n super(NormalizingUnitsTest, self).setUp()\n self.normalize = self.loader.normalizer('1DUH')\n\n def test_it_will_normalize_units(self):\n units = (\n '1DUH|1|A|A|39,'\n '1DUH|1|A|A|42,'\n '1DUH|1|A|U|50||||4_555,'\n '1DUH|2|A|A|42||||4_555,'\n )\n\n assert self.normalize(units) == tuple(sorted([\n '1DUH|1|A|A|39',\n '1DUH|1|A|A|42',\n '1DUH|1|A|U|50||||4_555',\n '1DUH|1|A|A|42||||4_555',\n ]))\n\nclass GettingLoopIdsTest(StageTest):\n loader_class = Loader\n\n def setUp(self):\n super(GettingLoopIdsTest, self).setUp()\n self.normalizer = self.loader.normalizer('4V4Q')\n self.mapping = self.loader._mapping('4V4Q', 'IL', self.normalizer)\n\n def test_generates_a_new_id_for_an_unknown_loop(self):\n val = self.loader._get_loop_id('bob', '4V4Q', 'IL', self.mapping)\n self.assertEquals('IL_4V4Q_355', val)\n\n def test_adds_units_to_mapping(self):\n val = self.loader._get_loop_id('bob', '4V4Q', 'IL', self.mapping)\n self.assertEquals('IL_4V4Q_355', val)\n self.assertEquals(self.mapping['bob'], 'IL_4V4Q_355')\n\n def test_uses_old_id_for_known_loop(self):\n nts = tuple(sorted([\n '4V4Q|1|AA|A|607',\n '4V4Q|1|AA|A|608',\n '4V4Q|1|AA|A|609',\n '4V4Q|1|AA|A|629',\n '4V4Q|1|AA|A|630',\n '4V4Q|1|AA|C|611',\n '4V4Q|1|AA|C|612',\n '4V4Q|1|AA|C|631',\n '4V4Q|1|AA|G|606',\n '4V4Q|1|AA|G|628',\n '4V4Q|1|AA|G|633',\n '4V4Q|1|AA|U|605',\n '4V4Q|1|AA|U|610',\n '4V4Q|1|AA|U|632',\n ]))\n val = self.loader._get_loop_id(nts, '4V4Q', 'IL', self.mapping)\n self.assertEquals('IL_4V4Q_033', val)\n\n\nclass NextNumberTest(StageTest):\n loader_class = Loader\n\n def 
test_pads_to_three_for_small_numbers(self):\n val = self.loader._next_loop_number_string(10)\n self.assertEquals('011', val)\n\n def test_pads_to_six_for_large_numbers(self):\n val = self.loader._next_loop_number_string(1239)\n self.assertEquals('001240', val)\n\n def test_pading_jumps_at_999(self):\n val = self.loader._next_loop_number_string(999)\n self.assertEquals('001000', val)\n\n\nclass ExtractLoopsTest(StageTest):\n loader_class = Loader\n\n def setUp(self):\n super(ExtractLoopsTest, self).setUp()\n self.loader.save_loops = False\n self.data = self.loader.data('1GID')\n\n @skip_without_matlab\n def test_can_extract_correct_number_of_loops(self):\n ids = set(d.loop_id for d in self.data)\n names = set(d.loop_name for d in self.data)\n units = set(d.unit_ids for d in self.data)\n self.assertEquals(len(self.data), 22)\n self.assertEqual(len(names), 22)\n self.assertEqual(len(ids), 22)\n self.assertEqual(len(units), 22)\n\n\nclass CreatingFilesTest(StageTest):\n loader_class = Loader\n base = os.path.join(CONFIG['locations']['loops_mat_files'], '1GID')\n\n def setUp(self):\n super(CreatingFilesTest, self).setUp()\n if os.path.exists(self.base):\n shutil.rmtree(self.base)\n self.data = self.loader.data('1GID')\n\n @skip_without_matlab\n def test_creates_the_required_files(self):\n def loop(loop_id):\n return str(os.path.join(self.base, loop_id + '.mat'))\n\n assert os.path.isdir(self.base)\n assert os.path.exists(loop('IL_1GID_001'))\n assert os.path.exists(loop('HL_1GID_001'))\n\n # 22 loops\n assert len(os.listdir(self.base)) == 22\n","repo_name":"BGSU-RNA/RNA-3D-Hub-core","sub_path":"test/loops/extractor_test.py","file_name":"extractor_test.py","file_ext":"py","file_size_in_byte":5331,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"12733317339","text":"from keras.preprocessing import text\nfrom keras.utils import np_utils\nfrom keras.preprocessing import sequence\nimport keras.backend as K\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Embedding, Lambda\nfrom keras.models import load_model\nfrom ...lib.utils import word2id, id2word, model, model_path, PreProcess\nimport numpy as np\nimport distance\nimport pickle\n\npre_process = PreProcess()\n\ndef generate_context_word_pairs(wids, vocab_size):\n context_data = []\n label_data = []\n for data in wids:\n context_data.append(data[1:])\n label_data.append(data[0])\n x = sequence.pad_sequences(context_data, maxlen=15)\n y = np_utils.to_categorical(label_data, vocab_size)\n return x, y\n\n\ndef word_train(text_data):\n \"\"\"\n Input : text_data :- List of Setences\n \"\"\"\n try:\n text_data = [pre_process(sent) for sent in text_data]\n tokenizer = text.Tokenizer()\n tokenizer.fit_on_texts(text_data)\n word2id = tokenizer.word_index\n word2id['PAD'] = 0\n id2word = {v:k for k, v in word2id.items()}\n wids = [[word2id[w] for w in text.text_to_word_sequence(doc)] for doc in text_data]\n vocab_size = len(word2id)\n embed_size = 100\n \n context_data, label_data = generate_context_word_pairs(wids, vocab_size)\n \n cbow = Sequential()\n cbow.add(Embedding(input_dim=vocab_size, output_dim=embed_size, input_length=15))\n cbow.add(Lambda(lambda x: K.mean(x, axis=1), output_shape=(embed_size,)))\n cbow.add(Dense(vocab_size, activation='softmax'))\n cbow.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n \n cbow.fit(context_data, label_data, epochs=30, batch_size=10, verbose=2)\n with open('model_path/word_id.pkl', 'wb') as f:\n 
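# persist the word-to-id vocabulary so the inference code can rebuild the same mapping\n            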
pickle.dump(word2id, f)\n        with open('model_path/id_word.pkl', 'wb') as f:\n            pickle.dump(id2word, f)\n        cbow.save(\"model_path/cbow_model.h5\")\n        return True\n    except Exception as e:\n        raise Exception(e)    \n\n\ndef get_candidate_words(doc, masked_word):\n    \"\"\"\n    Input doc: definition text for the word\n    masked_word: word with masked characters\n    Output: list of candidate words predicted by the model that\n    have the same length as the masked word\n    \"\"\"\n    candidate_words = []\n    wids = [word2id[w] for w in text.text_to_word_sequence(doc)]\n    d = sequence.pad_sequences([wids], maxlen=15)\n    predicted_prob = model.predict(d)\n    id_index = np.argsort(predicted_prob[0])[::-1][0:10]\n    for ids in id_index:\n        word = id2word[ids]\n        if len(word) == len(masked_word):\n            candidate_words.append(word)\n    return candidate_words\n\ndef get_correct_word(masked_word, candidate_words):\n    \"\"\"\n    Input masked_word: masked character word\n    candidate_words: list of candidate words\n    Output: corrected word based on Hamming distance\n    \"\"\"\n    distances = []\n    for word in candidate_words:\n        distances.append(distance.hamming(masked_word, word))\n    return candidate_words[distances.index(min(distances))]\n\n","repo_name":"MAYUR192/nlp_chardes","sub_path":"app/api/models/word_model.py","file_name":"word_model.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"30124326128","text":"# Note: we are going to use defaultdict (no KeyError, because missing keys get a default value) for this problem. \n# One example of defaultdict with set:\n# from collections import defaultdict\n\n# defaultdict_demo = defaultdict(set)\n\n# defaultdict_demo['one'].add(1)\n# defaultdict_demo['two'].add(2)\n# defaultdict_demo['one'].add('1')\n# defaultdict_demo['three']\n\n# print(dict(defaultdict_demo.items()))\n    \n# -> returns:\n    \n# {'three': set(), 'two': {2}, 'one': {1, '1'}}\n\nclass WordFilter:\n\n    def __init__(self, words: List[str]):\n        from collections import defaultdict\n        self.prefixes = defaultdict(set)\n        self.suffixes = defaultdict(set)\n        \n        # weights dictionary contains each word and its index; e.g. {\"apple\": 0}\n        self.weights = {}\n        \n        for index, word in enumerate(words):\n            prefix, suffix = '', ''\n            \n            for char in list(word):\n                prefix += char\n                \n                self.prefixes[prefix].add(word)\n            # print(self.prefixes)\n            for char in list(word[::-1]):\n                suffix += char\n                self.suffixes[suffix[::-1]].add(word)\n            # print(self.suffixes)\n            self.weights[word] = index\n            # print(self.weights)\n        \n\n    def f(self, prefix: str, suffix: str) -> int:\n        weight = -1\n        \n        # Note: 'and' is a logical AND that is truthy only when both operands are truthy, whereas '&' is a bitwise operator in Python that combines the operands bit by bit. 
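For example, 2 and 3 evaluates to 3, while 2 & 3 evaluates to 2. 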
Note: when an integer value is 0, it is treated as False in a boolean context; any other integer is treated as True.\n\n        \n        # print(\"****a\", self.prefixes['a'] )  \n        # # -> ****  \n        # print(\"****e\", self.suffixes['e'] )  \n        \n        # print(\"****\", self.prefixes['a'] & self.suffixes['e'])  \n        # -> {'appe', 'apple'}   # Note: find the common words\n        # print(\"****\", \"apple\" in self.prefixes[prefix] & self.suffixes[suffix] )\n        # -> True\n        \n        # Note: self.prefixes[prefix] & self.suffixes[suffix] returns a set with the words common to both self.prefixes[prefix] and self.suffixes[suffix]\n        # -> then we check which word(s) appear in both sets (i.e., match both the prefix and the suffix)\n        \n        # -> We traverse all matching words and find the one with the highest weight!\n        \n        for word in self.prefixes[prefix] & self.suffixes[suffix]:\n            if self.weights[word] > weight:\n                weight = self.weights[word]\n        return weight\n\n\n# Your WordFilter object will be instantiated and called as such:\n# obj = WordFilter(words)\n# param_1 = obj.f(prefix,suffix)\n","repo_name":"xulinxi/Leetcode","sub_path":"DailyChallenge/LC_745.py","file_name":"LC_745.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"35586605251","text":"from pmapper.pharmacophore import Pharmacophore as P\nfrom rdkit import Chem\nfrom rdkit import RDConfig\nimport os\nimport pandas as pd\nfrom Bio.SeqUtils import seq3\n\n# Initialize the pharmacophores that will be used in RDKit; the BaseFeatures.fdef file can probably be edited so that it contains the pharmacophores we need, but I did not manage to do that\n\nfdefName = os.path.join(RDConfig.RDDataDir,'BaseFeatures.fdef') \nfactory = Chem.ChemicalFeatures.BuildFeatureFactory(fdefName)\n\n# SMILES representations of the amino acids\naa_smiles = {'ALA': 'O=C(O)[C@H](C)N',\n             'CYS': 'O=C(O)[C@H](CS)N', \n             'ASP': 'O=C(O)[C@H](CC(O)=O)N', \n             'GLU': 'O=C(O)[C@H](CCC(O)=O)N', \n             'PHE': 'O=C(O)[C@H](CC1=CC=CC=C1)N', \n             'GLY': 'O=C(O)CN', \n             'HIS': 'O=C(O)[C@H](CC1=CNC=N1)N', \n             'ILE': 'O=C(O)[C@H]([C@@H](C)CC)N', \n             'LYS': 'O=C(O)[C@H](CCCCN)N', \n             'LEU': 'O=C(O)[C@H](CC(C)C)N', \n             'MET': 'O=C(O)[C@H](CCSC)N', \n             'ASN': 'O=C(O)[C@H](CC(N)=O)N', \n             'PRO': 'O=C(O)[C@H]1NCCC1', \n             'GLN': 'O=C(O)[C@H](CCC(N)=O)N', \n             'ARG': 'O=C(O)[C@H](CCCNC(N)=N)N', \n             'SER': 'O=C(O)[C@H](CO)N', \n             'THR': 'O=C(O)[C@H]([C@H](O)C)N', \n             'VAL': 'O=C(O)[C@H](C(C)C)N', \n             'TRP': 'O=C(O)[C@H](CC1=CNC2=CC=CC=C12)N',\n             'TYR': 'O=C(O)[C@H](CC1=CC=C(C=C1)O)N'}\n\n# Mapping between pmapper pharmacophore labels and rdkit pharmacophore labels\ncompare = {\"A\": \"Acceptor\",\n           \"D\": \"Donor\",\n           \"P\": \"PosIonizable\",\n           \"N\": \"NegIonizable\",\n           \"H\": \"Hydrophobe\",\n           \"a\": \"Aromatic\"}\n\n# Compute the pharmacophores for all amino acids with the two tools. Store the results in 2 dataframes\n\npharmacophore_RDkit = pd.DataFrame(columns = [aa for aa in aa_smiles], index = [\"Hydrophobe\", \"PosIonizable\", \"NegIonizable\", \"Acceptor\", \"Donor\", \"Aromatic\", \"Sulphur\"])\npharmacophore_RDkit = pharmacophore_RDkit.fillna(0)\npharmacophore_Pmapper = pd.DataFrame(columns = [aa for aa in aa_smiles], index = [\"Hydrophobe\", \"PosIonizable\", \"NegIonizable\", \"Acceptor\", \"Donor\", \"Aromatic\", \"Sulphur\"])\npharmacophore_Pmapper = pharmacophore_Pmapper.fillna(0)\nfor aa in aa_smiles:\n    #RDkit\n    mol = Chem.MolFromSmiles(aa_smiles[aa])\n    mol = Chem.AddHs(mol) \n    Chem.AllChem.EmbedMolecule(mol, randomSeed=42)\n    feats = factory.GetFeaturesForMol(mol)\n    for i in range(len(feats)): \n        ph = feats[i].GetFamily()\n        if feats[i].GetFamily() == \"LumpedHydrophobe\":\n            ph = \"Hydrophobe\"\n        pharmacophore_RDkit[aa][ph] += 1\n    #Pmapper\n    p = P()\n    p.load_from_mol(mol)\n    pharmacophore_dict = p.get_features_count()\n    for i in pharmacophore_dict:\n        pharmacophore_Pmapper[aa][compare[i]] += pharmacophore_dict[i]\n    if \"S\" in aa_smiles[aa]:\n        pharmacophore_RDkit[aa][\"Sulphur\"] += 1\n        pharmacophore_Pmapper[aa][\"Sulphur\"] += 1\n    \n# Functions that compute the pharmacophore difference between wild type and mutant \ndef get_pharm_diff_RDkit(row):\n    wild = seq3(row[\"WILD_TYPE\"]).upper()\n    mutant = seq3(row[\"MUTANT\"]).upper()\n    return list(pharmacophore_RDkit[mutant] - pharmacophore_RDkit[wild])\ndef get_pharm_diff_Pmapper(row):\n    wild = seq3(row['WILD_TYPE']).upper()\n    mutant = seq3(row['MUTANT']).upper()\n    return list(pharmacophore_Pmapper[mutant] - pharmacophore_Pmapper[wild])\n\n# Load our dataset with the CSM values computed by create_CSM.py\nPDB_dataset = pd.read_csv(\"/path/to/dataset_with_CSM.csv\")\n\n# Compute the pharmacophore differences in both ways\n\nPDB_dataset[\"pharmacophore_RDkit\"] = PDB_dataset.apply(get_pharm_diff_RDkit, axis=1)\nPDB_dataset[\"pharmacophore_Pmapper\"] = PDB_dataset.apply(get_pharm_diff_Pmapper, axis=1)\n\n# Save the dataset with CSM and pharmacophores\n\nPDB_dataset.to_csv(\"/path/to/folder/dataset_with_CSM_and_PH.csv\")\n","repo_name":"biocad/CSM","sub_path":"get_pharmacophore.py","file_name":"get_pharmacophore.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"42352869681","text":"from discord.ext import commands\nimport discord\nimport logging\nimport SecretManager\nfrom main import Attributes\nimport main as db\nfrom typing import Optional\nimport HelpfileReader\nimport bot_admin\nimport Attributes\n\ndebug_mode = True\n\nresearch_cache = {}\nPREFIX = \"?\"\nbot = commands.Bot(\n    command_prefix=commands.when_mentioned_or(PREFIX)\n)\n\nbot.help_command = None\n\n\n@bot.command(aliases=['agpl', 'gpl', 'legal'])\nasync def license(ctx):\n    await ctx.send(\"This bot is available under the AGPL license, and the source code can be found at \")\n    return\n\n@bot.command()\nasync def join(ctx, name: str, *, must_be_none: Optional[str]):\n    if name.casefold() in {'me', 'my'}:\n        await ctx.send(\"Sorry, player name cannot be a special word\")\n        return\n    if must_be_none is not None:\n        await ctx.send(\"Sorry, player name must be a single word\")\n        return\n    result = db.create_player(name, ctx.author.id)\n    await ctx.send(result[1])\n    return\n\n\n@bot.command()\nasync def send(ctx, name:str, amount:int):\n    sender_id = db.get_player_id_from_context(ctx)\n    receiver_id = db.get_player_by_name(name, db.get_game_id_from_context(ctx))\n    results = db.send_power(sender_id, receiver_id, amount)\n    await 
ctx.send(results[1])\n return\n\n\n@bot.command()\nasync def pantheon(ctx, first:str, second:str):\n def check_author(author):\n def inner_check(message):\n if message.author.id != author.id:\n return False\n return True\n return inner_check\n player_id = db.get_player_id_from_context(ctx)\n if first == \"create\":\n if db.player_get_pantheon(player_id) != -1:\n await ctx.send(\"You must leave your current pantheon before you can create a new one.\")\n return\n \n name = second\n await ctx.send(\"Please enter the description.\")\n description = await bot.wait_for('message', timeout=30.0, check=check_author(ctx.author))\n description = description.content\n results = db.create_pantheon(name, description)\n db.join_pantheon(ctx.author.id, db.get_pantheon_by_name(name))\n await ctx.send(results[1])\n return\n elif first == \"leave\":\n from user_interaction import user_react_on_message\n output = \"> Are you sure you want to leave your pantheon?\\n> \" \\\n \":thumbsup: Yes\\n> \" \\\n \":thumbsdown: No\"\n do_leave = await user_react_on_message(bot, ctx, output, ctx.author, {\n '\\N{THUMBS UP SIGN}': True,\n '\\N{THUMBS DOWN SIGN}': False,\n })\n\n if do_leave:\n results = db.leave_pantheon(ctx.author.id)\n await ctx.send(results[1])\n return\n else:\n await ctx.send(\"Canceled.\")\n return\n\n\n@bot.command()\nasync def admin(ctx, *args):\n discord_id = ctx.author.id\n if len(args) == 0:\n await ctx.send(HelpfileReader.read(PREFIX, ('admin',)))\n return\n if db.context_grants_admin(ctx):\n if args[0] == 'tech':\n await bot_admin.tech(bot, ctx, *(args[1:]))\n elif args[0] == 'user':\n await bot_admin.user(bot, ctx, *(args[1:]))\n elif args[0] == \"newturn\":\n await bot_admin.newturn(bot, ctx)\n elif args[0] == 'kill':\n await bot_admin.kill(bot, ctx)\n elif args[0] == 'help':\n await bot_admin.help(bot, ctx, *(args[1:]))\n elif args[0] == 'pantheon':\n await bot_admin.pantheon(bot, ctx, *(args[1:]))\n elif args[0] == 'update':\n await bot_admin.update()\n elif args[0] == 'join':\n await bot_admin.join(bot, ctx, *(args[1:]))\n else:\n await ctx.send('Admin command does not exist')\n else:\n await ctx.send(\"You're not an admin. You cannot beat the system. 
Big bird is watching you.\")\n return\n\n\n@bot.command()\nasync def info(ctx, name:str = None, info_type:str = None):\n import formatting\n game_id = db.get_game_id_from_context(ctx)\n if name is None:\n output = \"> **Current game:**\\n> \\n> \"\n for base_name in db.get_player_names(game_id):\n player_id = db.get_player_by_name(base_name, game_id)\n display_name = db.get_display_name(player_id)\n output += \"**{name}**:\\n> \" \\\n \"DP: {power:.0f}\\n> \" \\\n \"Functionaries: {funcs:.0f}\\n> \" \\\n \"Personal Soldiers: {soldiers:.0f}\\n> \" \\\n \"Total Soldiers: {total_soldiers:.0f}\\n\" \\\n \"Priests: {priests:.0f}\\n> \\n> \".format(name=display_name,power=db.get_attribute(player_id, Attributes.POWER),\n funcs=db.get_attribute(player_id, Attributes.FUNCTIONARIES),\n soldiers=db.get_attribute(player_id, Attributes.SOLDIERS),\n total_soldiers=db.get_army(player_id),\n priests=db.get_attribute(player_id, Attributes.PRIESTS))\n output += \"Current turn: {turn:.0f}\".format(turn=db.current_turn(db.get_game_id_from_context(ctx)))\n await ctx.send(output)\n return\n\n player_id = None\n if name.casefold() == \"me\":\n player_id = db.get_player_id_from_context(ctx)\n else:\n player_id = db.get_player_by_name(name, db.get_game_id_from_context(ctx))\n \n if player_id is None:\n await ctx.send('Player {name} does not exist'.format(name=name))\n return\n\n info = db.get_player(player_id)\n if info_type is None:\n output_text = formatting.default_info(info, player_id)\n await ctx.send(output_text)\n return\n elif info_type == \"income\":\n output_text = formatting.income_info(info, player_id)\n await ctx.send(output_text)\n return\n elif info_type == \"war\":\n output_text = formatting.war_info(info, player_id)\n await ctx.send(output_text)\n return\n elif info_type == \"conversion\":\n output_text = formatting.conversion_info(info, player_id)\n await ctx.send(output_text)\n return\n elif info_type == \"research\":\n output_text = formatting.research_info(info, player_id)\n await ctx.send(output_text)\n return\n elif info_type == \"tech\":\n output_text = \"**{name}'s tech:**\".format(name=info[\"display_name\"])\n for tech_id in db.get_player_techs(player_id):\n output_text += \"\\n> \\n> {name}:\\n> \"\\\n \"*{description}*\".format(name=db.get_tech_name(tech_id),description=db.get_tech_description(tech_id))\n await ctx.send(output_text)\n return\n elif info_type == \"all\":\n await ctx.send(\"**{name}'s attributes:**\".format(name=info[\"display_name\"]))\n attributes_per_print = 20 # avoid 2000 character limit\n attributes = db.get_player_attributes(player_id)\n sliced = [attributes[i * attributes_per_print:(i + 1) * attributes_per_print] for i in range((len(attributes) + attributes_per_print - 1) // attributes_per_print)]\n for sublist in sliced:\n output_text = \"\"\n for attribute in sublist:\n output_text += \"\\n{name}: {value}\".format(name=attribute[0],value=attribute[1])\n await ctx.send(output_text)\n\n return\n\n\n@bot.command()\nasync def buff(ctx,name:str, attribute:str, amount:int = 1):\n from user_interaction import user_react_on_message\n source_id = db.get_player_id_from_context(ctx)\n target_id = None\n if name == \"me\":\n target_id = source_id\n else:\n target_id = db.get_player_by_name(name, db.get_game_id_from_context(ctx))\n try:\n attribute_id = db.get_attribute_id(attribute)\n except:\n await ctx.send(\"Incorrect attribute.\")\n return\n\n if db.get_army(target_id) > 0:\n cost = db.get_buff_cost(target_id, amount)\n output = f\"> You are attempting to buff 
{attribute} by {amount}. This will cost you {cost} DP.\\n> \" \\\n                 f\"Do you wish to continue?\\n> \" \\\n                 f\":thumbsup: Yes\\n> \" \\\n                 f\":thumbsdown: No\"\n        do_buff = await user_react_on_message(bot, ctx, output, ctx.author, {\n            '\\\N{THUMBS UP SIGN}': True,\n            '\\\N{THUMBS DOWN SIGN}': False,\n        })\n\n        if do_buff:\n            results = db.cast_buff(source_id, attribute_id, amount, target_id)\n            await ctx.send(results[1])\n            return\n\n        else:\n            await ctx.send(\"Canceled.\")\n            return\n    else:\n        await ctx.send(\"The target has no soldiers to buff.\")\n        return\n\n\n@bot.command()\nasync def create(ctx,amount:int, type:str):\n    if amount > 0:\n        player_id = db.get_player_id_from_context(ctx)\n        from user_interaction import user_react_on_message\n        if type in [\"priests\", \"priest\"]:\n            output = \"> You are creating {num:.0f} priests at a cost of {cost:.0f} per priest, for \" \\\n                     \"a total of {total:.0f} DP. \" \\\n                     \"Do you wish to continue?\\\n> \\\n> \" \\\n                     \":thumbsup: Yes\\\n> \" \\\n                     \":thumbsdown: No\".format(num=amount,cost=db.get_attribute(player_id,Attributes.PRIEST_COST),\n                     total=amount*db.get_attribute(player_id,Attributes.PRIEST_COST))\n            do_create = await user_react_on_message(bot, ctx, output, ctx.author, {\n                '\\\N{THUMBS UP SIGN}': True,\n                '\\\N{THUMBS DOWN SIGN}': False,\n            })\n\n            if do_create:\n                results = db.recruit_priests(player_id,amount)\n                await ctx.send(results[1])\n                return\n\n            else:\n                await ctx.send(\"Canceled.\")\n                return\n\n        elif type in [\"soldiers\", \"soldier\", \"troops\"]:\n            output = \"> You are creating {num:.0f} soldiers at a cost of {cost:.0f} per soldier, for \" \\\n                     \"a total of {total:.0f} DP. \" \\\n                     \"Do you wish to continue?\\\n> \\\n> \" \\\n                     \":thumbsup: Yes\\\n> \" \\\n                     \":thumbsdown: No\".format(num=amount, cost=db.get_attribute(player_id, Attributes.SOLDIER_COST),\n                     total=amount * db.get_attribute(player_id, Attributes.SOLDIER_COST))\n\n            do_create = await user_react_on_message(bot, ctx, output, ctx.author, {\n                '\\\N{THUMBS UP SIGN}': True,\n                '\\\N{THUMBS DOWN SIGN}': False,\n            })\n            if do_create:\n                results = db.recruit_soldiers(player_id, amount)\n                await ctx.send(results[1])\n                return\n            else:\n                await ctx.send(\"Canceled.\")\n                return\n        else:\n            await ctx.send(\"Incorrect type.\")\n            return\n    else:\n        await ctx.send(\"> Nice try.\")\n        return\n\n@bot.command()\nasync def disband(ctx,amount:int):\n    if amount > 0:\n        player_id = db.get_player_id_from_context(ctx)\n        from user_interaction import user_react_on_message\n        output = \"> You are disbanding {num:.0f} soldiers at a disband cost of {cost:.0f} per soldier, for \" \\\n                 \"a total of {total:.0f} DP. \" \\\n                 \"Do you wish to continue?\\\n> \\\n> \" \\\n                 \":thumbsup: Yes\\\n> \" \\\n                 \":thumbsdown: No\".format(num=amount, cost=db.get_attribute(player_id, Attributes.SOLDIER_DISBAND_COST),\n                 total=amount * db.get_attribute(player_id, Attributes.SOLDIER_DISBAND_COST))\n        do_disband = await user_react_on_message(bot, ctx, output, ctx.author, {\n            '\\\N{THUMBS UP SIGN}': True,\n            '\\\N{THUMBS DOWN SIGN}': False,\n        })\n        if do_disband:\n            results = db.disband_soldiers(player_id,amount)\n            await ctx.send(results[1])\n            return\n        else:\n            await ctx.send(\"> Canceled\")\n            return\n    else:\n        await ctx.send(\"> Nice try.\")\n        return\n\n\n@bot.command()\nasync def research(ctx, *, tech_name):\n    import formatting\n    from user_interaction import user_react_on_message\n    tech_id = db.get_tech_id(tech_name)\n    player_id = db.get_player_id_from_context(ctx)\n    if tech_id is None:\n        await ctx.send('> Technology \"{}\" does not exist.'.format(tech_name))\n        return\n    if player_id is 
None:\n await ctx.send('> You have not joined this game yet.')\n return\n success_cost = db.calculate_tech_cost(player_id, tech_id)\n multiplier = db.get_attribute(player_id, Attributes.RESEARCH_COST_MULTIPLIER)\n attempt_costs = tuple(map(lambda x: db.get_attribute(player_id, x) * multiplier, (\n Attributes.DIVINE_INSPIRATION_COST,\n Attributes.AWAKE_REVELATION_COST,\n Attributes.ASLEEP_REVELATION_COST,\n Attributes.DIVINE_AVATAR_COST\n )))\n\n success_probs = tuple(map(lambda x: db.get_attribute(player_id, x) * multiplier, (\n Attributes.DIVINE_INSPIRATION_RATE,\n Attributes.AWAKE_REVELATION_RATE,\n Attributes.ASLEEP_REVELATION_RATE,\n Attributes.DIVINE_AVATAR_RATE\n )))\n output_text = formatting.request_research_method(tech_name, success_probs, success_cost, attempt_costs)\n research_method = await user_react_on_message(bot, ctx, output_text, ctx.author, {\n '\\N{REGIONAL INDICATOR SYMBOL LETTER A}': 'divine_inspiration',\n '\\N{REGIONAL INDICATOR SYMBOL LETTER B}': 'awake_revelation',\n '\\N{REGIONAL INDICATOR SYMBOL LETTER C}': 'asleep_revelation',\n '\\N{REGIONAL INDICATOR SYMBOL LETTER D}': 'divine_avatar'\n })\n if research_method is None:\n await ctx.send(\"Timed out\")\n return\n \n priest_text = '> Do you wish to use priests for this research? \\n'\\\n '> :regional_indicator_y: Yes\\n'\\\n '> :regional_indicator_n: No'\n use_priests = await user_react_on_message(bot, ctx, priest_text, ctx.author, {\n '\\N{REGIONAL INDICATOR SYMBOL LETTER Y}': True,\n '\\N{REGIONAL INDICATOR SYMBOL LETTER N}': False\n })\n if use_priests is None:\n await ctx.send(\"> Timed out\")\n return\n \n result_text = db.attempt_research(player_id, tech_id, research_method, use_priests)[1]\n await ctx.send(result_text)\n\n # ctx.author.id\n\n\n@bot.command()\nasync def battle(ctx, player_name: str, quantity: int):\n from user_interaction import user_react_on_message\n import formatting\n\n attacker_id = db.get_player_id_from_context(ctx)\n target_id = db.get_player_by_name(player_name, db.get_game_id_from_context(ctx))\n if attacker_id is None:\n await ctx.send('> You have not joined this game yet.')\n return\n if target_id is None:\n await ctx.send('> Player \"{}\" does not exist.'.format(player_name))\n return\n \n # expected_outcome = db.expected_damage(attacker_id, target_id, quantity)\n\n # # Phase 1: output text\n # output_text = formatting.battle_ask_continue(\n # player_name,\n # quantity,\n # expected_outcome[2],\n # expected_outcome[0][0],\n # expected_outcome[0][1],\n # expected_outcome[0][2],\n # expected_outcome[1]\n # )\n\n # do_battle = await user_react_on_message(bot, ctx, output_text, ctx.author, {\n # '\\N{REGIONAL INDICATOR SYMBOL LETTER A}': True,\n # '\\N{REGIONAL INDICATOR SYMBOL LETTER B}': False,\n # })\n # if do_battle is None:\n # await ctx.send(\"> Timed out\")\n # return\n\n do_battle = True\n if do_battle:\n results = db.attack(attacker_id, target_id, quantity)\n if results[0]:\n remaining_attackers = db.get_attribute(attacker_id, Attributes.ATTACK_ELIGIBLE_SOLDIERS)\n remaining_soldiers = db.get_army(attacker_id)\n remaining_enemy_soldiers = db.get_army(target_id)\n result_text = formatting.battle_report(\n results[1][0][0],\n results[1][0][1],\n results[1][0][2],\n remaining_enemy_soldiers,\n results[1][1],\n remaining_soldiers,\n remaining_attackers\n )\n \n await ctx.send(\"> \" + result_text)\n return\n else:\n await ctx.send(\"> \" + str(results[1]))\n return\n else:\n await ctx.send(\"> Battle canceled.\")\n return\n\n\n@bot.command()\nasync def convert(ctx, quantity: 
int):\n    from user_interaction import user_react_on_message\n    import formatting\n    player_discord = ctx.author.id\n\n    if player_discord is None:\n        await ctx.send('> You have not joined this game yet.')\n        return\n\n    output_text = formatting.conversion_target_type(\n        db.get_attribute(player_discord, Attributes.NEUTRAL_CONVERSION_RATE),\n        db.get_attribute(player_discord, Attributes.NEUTRAL_CONVERSION_COST),\n        db.get_attribute(player_discord, Attributes.ENEMY_CONVERSION_RATE),\n        db.get_attribute(player_discord, Attributes.ENEMY_CONVERSION_COST),\n        db.get_attribute(player_discord, Attributes.ENEMY_PRIEST_CONVERSION_RATE),\n        db.get_attribute(player_discord, Attributes.ENEMY_PRIEST_CONVERSION_COST)\n    )\n\n    conversion_target = await user_react_on_message(bot, ctx, output_text, ctx.author, {\n        '\\\N{REGIONAL INDICATOR SYMBOL LETTER A}': \"neutral\",\n        '\\\N{REGIONAL INDICATOR SYMBOL LETTER B}': \"enemy\",\n        '\\\N{REGIONAL INDICATOR SYMBOL LETTER C}': \"enemy_priest\",\n    })\n\n    if conversion_target == \"neutral\":\n        results = db.attempt_conversion(converter_player_id=player_discord,\n                                        quantity=quantity,\n                                        person_type=conversion_target,\n                                        target_player_id=None)\n        if results[0]:\n            result_text = \"> Successfully converted {converts}.\".format(converts=results[1])\n        else:\n            result_text = results[1]\n        await ctx.send(result_text)\n        return\n    elif conversion_target in [\"enemy\", \"enemy_priest\"]:\n        def check(message):\n            return message.author.id == ctx.author.id and db.user_name_exists(message.content)\n\n        await ctx.send(\"> Please specify the player to attempt to convert away from. \\\n\"\n                       \"Avoid unnecessary whitespaces or characters.\")\n        other_player_name = (await bot.wait_for('message', timeout=30.0, check=check)).content\n        other_player_id = db.get_player_by_name(other_player_name, db.get_game_id_from_context(ctx))\n\n        success, results = db.attempt_conversion(converter_player_id=player_discord,\n                                                 quantity=quantity,\n                                                 person_type=conversion_target,\n                                                 target_player_id=other_player_id)\n\n        if success:\n            result_text = \"> Successfully converted {converts}, spending {cost} DP and priest channeling power.\".format(\n                converts=results[0], cost=results[1])\n        else:\n            result_text = results\n\n        await ctx.send(result_text)\n\n\n@bot.command()\nasync def help(ctx, *command_context):\n    await ctx.send(HelpfileReader.read(PREFIX, command_context))\n\n@bot.command()\nasync def whois(ctx, member: discord.Member):\n    game_id = db.get_game_id_from_context(ctx)\n    name = member.name\n    if member.nick:\n        name = member.nick\n    if db.user_discord_id_exists(member.id, game_id):\n        from formatting import pretty_list\n\n        player_ids = db.get_players_by_discord_id(member.id, game_id)\n        display_names = map(db.get_display_name, player_ids)\n        await ctx.send(\"{name} plays as {display_names}\".format(name=name, display_names=pretty_list(display_names)))\n    else:\n        await ctx.send(\"{name} has not joined the game\".format(name=name))\n\n@bot.command()\nasync def proxy(ctx, *, text):\n    if db.context_grants_admin(ctx):\n        await ctx.send(text)\n        await ctx.message.delete()\n\n\ndef start_bot():\n    token = SecretManager.secrets['discord']['clientToken']\n\n    if token is not None and len(token) > 0:\n        logging.info(\"Starting client\")\n        bot.run(token)\n    else:\n        logging.error(\"Could not start: invalid token\")\n\n\nif __name__ == '__main__':\n    start_bot()\n","repo_name":"casithepython/handofgods","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":20358,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"}
+{"seq_id":"25197693348","text":"from unittest import TestCase\n\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\nfrom graphdg.standardize import ArrayStandardizer\n\n\nclass TestNormalizers(TestCase):\n def setUp(self):\n self.matrix = np.array([\n [0, 1, 2],\n [1, 2, 4],\n ])\n\n def test_standardize(self):\n standardizer = ArrayStandardizer.from_array(self.matrix)\n\n assert_allclose(standardizer.mean, np.array([0.5, 1.5, 3]))\n self.assertEqual(standardizer.mean.shape, (3, ))\n\n assert_allclose(standardizer.std, np.array([0.5, 0.5, 1]))\n self.assertEqual(standardizer.std.shape, (3, ))\n\n assert_allclose(self.matrix, standardizer.destandardize(standardizer.standardize(self.matrix)))\n\n def test_standardizer_fail(self):\n standardizer = ArrayStandardizer.from_array(self.matrix)\n\n t = np.array([[0, 1]])\n\n with self.assertRaises(ValueError):\n standardizer.standardize(t)\n\n with self.assertRaises(ValueError):\n standardizer.destandardize(t)\n","repo_name":"gncs/graphdg","sub_path":"graphdg/tests/test_standardizer.py","file_name":"test_standardizer.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"31"} +{"seq_id":"4739694510","text":"from app.mixins.voice_audio_mixin import VoiceAudioMixin\nfrom app.handlers.slide.remove_mixin import RemoveMixin\nfrom app.handlers.slide.voice_mixin import VoiceMixin\nfrom app.handlers.slide.change_data import ChangeData\nfrom app.handlers.slide.image_mixin import ImageMixin\nfrom app.handlers.slide.text_mixin import TextMixin\nfrom app.services.api.entities.conference import Conference\nfrom app.services.api.conference_admin import ConferenceAdminApiService\nfrom app.services.api.slide_admin import SlideAdminApiService\nfrom app.services.api.entities.slide import Slide\nfrom app.helpers import get_command_and_parameters\nfrom app.core.context import Context\nfrom app.core.handler import Handler\n\n\nclass SideHandler(\n TextMixin, ImageMixin, VoiceMixin, RemoveMixin, ChangeData, VoiceAudioMixin, Handler\n):\n def get_start_commands(self):\n return [\"/slide\", \"/init_slide\"]\n\n def start(\n self,\n context: Context,\n slide_service: SlideAdminApiService,\n conference_service: ConferenceAdminApiService,\n ):\n command, parameters = get_command_and_parameters(context.text)\n if command == \"/slide\":\n context.delete_last_message()\n self._update_user_handler_data(context.user, {\"slide_messages\": []})\n context.user.current_handler = self.get_code()\n conference_id = context.user.handler_data[\"conference_id\"]\n slide_id = parameters[0]\n self._update_user_handler_data(context.user, {\"slide_id\": slide_id})\n slide: Slide = slide_service.get_by_id(\n slide_id,\n conference_id=conference_id,\n user=context.user,\n )\n self._save_message_id(\n message=context.send_message(\n self._get_header(\n context=context,\n conference_service=conference_service,\n slide_service=slide_service,\n )\n ),\n context=context,\n )\n if slide.image_id:\n photo_message = slide_service.send_image(\n user=context.user, chat_id=context.chat_id, slide=slide\n )\n self._save_message_id(message=photo_message.json(), context=context)\n if slide.voice_id:\n audio_message = slide_service.send_audio(\n user=context.user, chat_id=context.chat_id, slide=slide\n )\n self._save_message_id(message=audio_message.json(), context=context)\n buttons = [\n [\n context.create_button(\n self._(\"slide_command_change_text\"),\n callback_data=\"/change_text\",\n ),\n 
context.create_button(\n                        self._(\"slide_command_change_image\"),\n                        callback_data=\"/change_image\",\n                    ),\n                    context.create_button(\n                        self._(\"slide_command_change_voice\"),\n                        callback_data=\"/change_voice\",\n                    ),\n                ],\n                [\n                    context.create_button(\n                        self._(\"slide_remove\"),\n                        callback_data=\"/slide_remove\",\n                    ),\n                ],\n                [\n                    context.create_button(\n                        self._(\"back_in_history_btn\"),\n                        callback_data=\"/back\",\n                    )\n                ],\n            ]\n            text = f\"{slide.position}: {slide.text}\"\n            self._save_message_id(\n                message=context.send_buttons(text, buttons), context=context\n            )\n\n    def _save_message_id(self, message, context: Context):\n        slide_messages = context.user.handler_data.get(\"slide_messages\", [])\n        slide_messages += [message[\"message_id\"]]\n        self._update_user_handler_data(context.user, {\"slide_messages\": slide_messages})\n\n    def _get_header(\n        self,\n        context: Context,\n        conference_service: ConferenceAdminApiService,\n        slide_service: SlideAdminApiService,\n    ):\n        conference_id = context.user.handler_data[\"conference_id\"]\n        slide_id = context.user.handler_data[\"slide_id\"]\n        slide: Slide = slide_service.get_by_id(\n            slide_id,\n            conference_id=conference_id,\n            user=context.user,\n        )\n        conference: Conference = conference_service.get_by_id(\n            conference_id, user=context.user\n        )\n        return self._(\"slide_header\") % (slide.position, conference.name)\n\n    def _clear(self, context: Context):\n        slide_messages = context.user.handler_data[\"slide_messages\"]\n        for message_id in slide_messages:\n            context.delete_message(message_id=message_id)\n        self._update_user_handler_data(context.user, {\"slide_messages\": []})\n\n    def command_back(self, context: Context):\n        self._clear(context=context)\n        conference_id = context.user.handler_data[\"conference_id\"]\n        return context.forward(f\"/slide_list {conference_id}\")\n","repo_name":"confbot-telegram-conferences/bot","sub_path":"app/handlers/slide/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"42859792276","text":"import time\nimport random\nimport json\nimport asyncio\nfrom aiohttp import ClientSession\nfrom sastoken import get_auth_token\nfrom config import queuename, tablename, failqueue\nfrom azure.storage.table import TableService\nfrom azure.servicebus import ServiceBusService\n\n# make Table if it doesn't exist\ntable_service = TableService(account_name='gregseon4e059a98c11c',\\\n    account_key='yE7Kuy0xVxUDR+wHGoWPjSpOhFO9WLd9b+t3+RI9C8tuBNbuLwEtWSQGERiO7LJRE1cFTGB0/TT4+CYGhtMfww==')\nif not table_service.exists(tablename):\n    table_service.create_table(tablename)\n\n# make queues if they don't exist\nbus_service = ServiceBusService(service_namespace='gregseon4e059a98c11c',\\\n    shared_access_key_name='RootManageSharedAccessKey',\\\n    shared_access_key_value='d8PrqA7to95t0wUFywAfhNDcbUwvh2sIpiHqvUdbPSQ=')\nbus_service.create_queue(queuename)\nbus_service.create_queue(failqueue)\n\n#generate token for https comms\nsas = get_auth_token(\"gregseon4e059a98c11c\",queuename,\"RootManageSharedAccessKey\",\"d8PrqA7to95t0wUFywAfhNDcbUwvh2sIpiHqvUdbPSQ=\")\nsas2 = get_auth_token(\"gregseon4e059a98c11c\",failqueue,\"RootManageSharedAccessKey\",\"d8PrqA7to95t0wUFywAfhNDcbUwvh2sIpiHqvUdbPSQ=\")\n\nasync def tblwrite(msg,session):\n    ''' Write message to table '''\n    uri = \"https://gregseon4e059a98c11c.table.core.windows.net/\" + tablename + 
\"?sv=2017-04-17&ss=bfqt&srt=sco&sp=rwdlacup&se=2017-11-30T08:49:46Z&st=2017-11-18T00:49:46Z&spr=https&sig=XL0n1GIAFRslWdTOZY8ivSqK7hQqW7SZXpLHCWrUSmw%3D\"\n tid = str(msg[\"TransactionID\"])\n suid = str(msg[\"UserId\"]) + str(msg[\"SellerID\"]) \n data = json.dumps({\"PartitionKey\":tid,\"RowKey\": suid, \"message\":str(msg)})\n headers = {'Content-Type':'application/json;odata=nometadata','Content-Length':str(len(data)),'Prefer':'return-no-content'}\n async with session.post(uri,headers=headers,data=data) as response:\n if response.status == 409 or response.status == 204:\n pass # this means it was either duplicate key pairs or inserted fine\n else:\n asyncio.sleep(5) # circuit breaker like approach that accepts that the message failed to be sent immediately and waits before resending same message.\n await tblwrite(msg,session)\n return response.status\n\nasync def sendfailure(data,session):\n ''' Write failure to second queue '''\n try:\n global sas2\n headers = {'Authorization':sas2[\"token\"],'Content-Type':'Content-Type: application/vnd.microsoft.servicebus.json'}\n URL = \"https://gregseon4e059a98c11c.servicebus.windows.net/\"+failqueue+\"/messages\"\n async with session.post(URL, data=data, headers=headers) as response:\n if response.status != 201:\n asyncio.sleep(5) # sort of like a circuit breaker pattern. Wait 5 seconds and retry\n await sendfailure(data, session)\n return await response.read()\n except asyncio.TimeoutError:\n pass \n\nasync def getmsg(session):\n ''' send message async '''\n global sas\n headers = {'Authorization':sas[\"token\"], 'Content-Type': \\\n 'application/atom+xml;type=entry;charset=utf-8'}\n URL = \"https://gregseon4e059a98c11c.servicebus.windows.net/\"+queuename+\"/messages/head\"\n async with session.delete(URL, headers=headers) as response:\n if response.status not in (200,204):\n # add another read if https breaks downs for this read.\n # Message should still be in queue and unlocked for another competing consumer.\n await getmsg(session)\n elif response.status == 204: # means queue empty and nothing to write to table. 
return now\n return None\n else: # means message recieved\n msg = json.loads([x async for x in response.content][0].decode())\n if msg['failure'] == \"yes\":\n await sendfailure(msg,session) #write to failure queue is yes\n else:\n await tblwrite(msg, session)\n return await response.read()\n\nasync def boundgetmsg(sem, session):\n ''' async semaphore '''\n async with sem:\n await getmsg(session)\n\nasync def run(r):\n ''' kicks off the asynchronous generation of the post requests '''\n sem = asyncio.Semaphore(1000)\n tasks = []\n async with ClientSession() as session:\n for _ in range(r):\n task = asyncio.ensure_future(boundgetmsg(sem, session))\n tasks.append(task)\n responses = asyncio.gather(*tasks)\n await responses \n\ndef readqueue(num_messages):\n N = num_messages\n LOOP = asyncio.get_event_loop()\n FUTURE = asyncio.ensure_future(run(N))\n LOOP.run_until_complete(FUTURE)\n return\n\n\n","repo_name":"gregory1506/Assignment3","sub_path":"read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":4634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35200288799","text":"\"\"\"Align target text to reference translation.\n\"\"\"\nimport argparse\n\nWINDOW_SIZE = 30\nMAX_THRESHOLD = 0.9\nMIN_THRESHOLD = 0.4\nVOCAB = 'glove.840B.300d'\nPROGRESS = False\nDEVICE = 'cpu'\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument(\n '--target', '-t', required=True,\n help='The target text file to align.')\n parser.add_argument(\n '--reference', '-r', required=True,\n help='The reference translation to align to.')\n parser.add_argument(\n '--output', '-o', required=True,\n help='The output file to write the aligned target text.')\n\n parser.add_argument(\n '--window_size', '-w', type=int, default=WINDOW_SIZE,\n help='The number of reference sentences to compare per target.')\n parser.add_argument(\n '--max_threshold', type=float, default=MAX_THRESHOLD,\n help='The ABLEU threshold to assume best matching sentences.')\n parser.add_argument(\n '--min_threshold', type=float, default=MIN_THRESHOLD,\n help='The minimum ABLEU score for valid alignment.')\n parser.add_argument(\n '--vocab', '-v', default=VOCAB,\n help='The pretrained alias from `torchtext.vocab` to use.')\n parser.add_argument(\n '--cache_dir',\n help='The directory to save vocabulary cache.')\n parser.add_argument(\n '--progress', '-p', action='store_true', default=PROGRESS,\n help='Show progress bar.')\n parser.add_argument(\n '--device', '-d', default=DEVICE,\n help='The `torch.device` value to use in calculations.')\n\n return parser\n","repo_name":"juneoh/ABLEUAlign","sub_path":"ableualign/args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"38506799219","text":"import numpy as np\nimport openmdao.api as om\nimport dymos as dm\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\nfrom dymos.examples.racecar.combinedODE import CombinedODE\nfrom dymos.examples.racecar.spline import get_spline, get_track_points\nfrom dymos.examples.racecar.tracks import ovaltrack\n\nfrom paropt.paropt_sparse_driver import ParOptSparseDriver\n\n# change track here and in curvature.py. 
Tracks are defined in tracks.py\ntrack = ovaltrack\n\n# generate nodes along the centerline for curvature calculation (different\n# than collocation nodes)\npoints = get_track_points(track)\n\n# fit the centerline spline.\nfinespline, gates, gatesd, curv, slope = get_spline(points, s=0.0)\n\n# by default 10000 points\ns_final = track.get_total_length()\n\n# Define the OpenMDAO problem\np = om.Problem(model=om.Group())\n\n# Define a Trajectory object\ntraj = dm.Trajectory()\np.model.add_subsystem(\"traj\", subsys=traj)\n\n# Define a Dymos Phase object with GaussLobatto Transcription\nphase = dm.Phase(\n ode_class=CombinedODE,\n transcription=dm.GaussLobatto(num_segments=80, order=3, compressed=True),\n)\n\ntraj.add_phase(name=\"phase0\", phase=phase)\n\n# Set the time options, in this problem we perform a change of variables. So 'time' is\n# actually 's' (distance along the track centerline)\n# This is done to fix the collocation nodes in space, which saves us the calculation of\n# the rate of change of curvature.\n# The state equations are written with respect to time, the variable change occurs in\n# timeODE.py\nphase.set_time_options(\n fix_initial=True,\n fix_duration=True,\n duration_val=s_final,\n name=\"s\",\n targets=[\"curv.s\"],\n units=\"m\",\n duration_ref=s_final,\n duration_ref0=10,\n)\n\n# Set the reference values\nt_ref = 100.0\nn_ref = 4.0\nV_ref = 40.0\nlambda_ref = 0.01\nalpha_ref = 0.15\nomega_ref = 0.3\nax_ref = 8.0\nay_ref = 8.0\ndelta_ref = 0.04\nthrust_ref = 10.0\n\n# Define states\nphase.add_state(\n \"t\",\n ref=t_ref,\n units=\"s\",\n fix_initial=True,\n fix_final=False,\n lower=0.0,\n upper=10000.0,\n rate_source=\"dt_ds\",\n)\n\n# Normal distance to centerline. The bounds on n define the width of the track\nphase.add_state(\n \"n\",\n ref=n_ref,\n units=\"m\",\n fix_initial=False,\n fix_final=False,\n upper=4.0,\n lower=-4.0,\n rate_source=\"dn_ds\",\n targets=[\"n\"],\n)\n\n# velocity\nphase.add_state(\n \"V\",\n ref=V_ref,\n ref0=5,\n units=\"m/s\",\n fix_initial=False,\n fix_final=False,\n lower=-500.0,\n upper=500.0,\n rate_source=\"dV_ds\",\n targets=[\"V\"],\n)\n\n# vehicle heading angle with respect to centerline\nphase.add_state(\n \"alpha\",\n ref=alpha_ref,\n units=\"rad\",\n fix_initial=False,\n fix_final=False,\n lower=-0.5 * np.pi,\n upper=0.5 * np.pi,\n rate_source=\"dalpha_ds\",\n targets=[\"alpha\"],\n)\n\n# vehicle slip angle, or angle between the axis of the vehicle\n# and velocity vector (all cars drift a little)\nphase.add_state(\n \"lambda\",\n ref=lambda_ref,\n units=\"rad\",\n fix_initial=False,\n fix_final=False,\n lower=-0.5 * np.pi,\n upper=0.5 * np.pi,\n rate_source=\"dlambda_ds\",\n targets=[\"lambda\"],\n)\n\n# yaw rate\nphase.add_state(\n \"omega\",\n ref=omega_ref,\n units=\"rad/s\",\n fix_initial=False,\n fix_final=False,\n lower=-30.0,\n upper=30.0,\n rate_source=\"domega_ds\",\n targets=[\"omega\"],\n)\n\n# longitudinal acceleration\nphase.add_state(\n \"ax\",\n ref=ax_ref,\n units=\"m/s**2\",\n fix_initial=False,\n fix_final=False,\n lower=-100.0,\n upper=100.0,\n rate_source=\"dax_ds\",\n targets=[\"ax\"],\n)\n\n# Lateral acceleration\nphase.add_state(\n \"ay\",\n ref=ay_ref,\n units=\"m/s**2\",\n fix_initial=False,\n fix_final=False,\n lower=-100.0,\n upper=100.0,\n rate_source=\"day_ds\",\n targets=[\"ay\"],\n)\n\n# Define Controls\n\n# steering angle\nphase.add_control(\n name=\"delta\",\n ref=delta_ref,\n units=\"rad\",\n fix_initial=False,\n fix_final=False,\n lower=-0.5 * np.pi,\n upper=0.5 * np.pi,\n 
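# rate_continuity=True asks Dymos to enforce continuity of the control's rate across segment boundaries
    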
rate_continuity=True,\n)\n\n# the thrust controls the longitudinal force of the rear tires and is positive\n# while accelerating, negative while braking\nphase.add_control(\n    name=\"thrust\",\n    ref=thrust_ref,\n    units=None,\n    lower=-1000.0,\n    upper=1000.0,\n    fix_initial=False,\n    fix_final=False,\n    rate_continuity=True,\n)\n\n# Performance Constraints\npmax = 960000.0  # W\nphase.add_path_constraint(\"power\", upper=pmax, ref=100000.0)  # engine power limit\n\n# The following four constraints are the tire friction limits, with 'rr' designating the\n# rear right wheel etc. This limit is computed in tireConstraintODE.py\nphase.add_path_constraint(\"c_rr\", upper=1.0)\nphase.add_path_constraint(\"c_rl\", upper=1.0)\nphase.add_path_constraint(\"c_fr\", upper=1.0)\nphase.add_path_constraint(\"c_fl\", upper=1.0)\n\n# Some of the vehicle design parameters are available to set here. Other parameters can\n# be found in their respective ODE files.\n# vehicle mass\nphase.add_parameter(\n    \"M\",\n    val=800.0,\n    units=\"kg\",\n    opt=False,\n    targets=[\"car.M\", \"tire.M\", \"tireconstraint.M\", \"normal.M\"],\n    static_target=True,\n)\n\n# brake bias\nphase.add_parameter(\n    \"beta\", val=0.62, units=None, opt=False, targets=[\"tire.beta\"], static_target=True\n)\n\n# center of pressure location\nphase.add_parameter(\n    \"CoP\", val=1.6, units=\"m\", opt=False, targets=[\"normal.CoP\"], static_target=True\n)\n\n# center of gravity height\nphase.add_parameter(\n    \"h\", val=0.3, units=\"m\", opt=False, targets=[\"normal.h\"], static_target=True\n)\n\n# roll stiffness\nphase.add_parameter(\n    \"chi\", val=0.5, units=None, opt=False, targets=[\"normal.chi\"], static_target=True\n)\n\n# downforce coefficient*area\nphase.add_parameter(\n    \"ClA\", val=4.0, units=\"m**2\", opt=False, targets=[\"normal.ClA\"], static_target=True\n)\n\n# drag coefficient*area\nphase.add_parameter(\n    \"CdA\", val=2.0, units=\"m**2\", opt=False, targets=[\"car.CdA\"], static_target=True\n)\n\n# Minimize final time.\n# note that we use the 'state' time instead of Dymos 'time'\nphase.add_objective(\"t\", loc=\"final\")\n\n# Add output timeseries\nphase.add_timeseries_output(\"*\")\nphase.add_timeseries_output(\"t\", output_name=\"time\")\n\n# Link the states at the start and end of the phase in order to ensure a continuous lap\ntraj.link_phases(\n    phases=[\"phase0\", \"phase0\"],\n    vars=[\"V\", \"n\", \"alpha\", \"omega\", \"lambda\", \"ax\", \"ay\"],\n    locs=[\"final\", \"initial\"],\n    connected=True,\n)\n\n# Set up the optimization driver\np.driver = ParOptSparseDriver()\n\noptions = {\n    \"algorithm\": \"ip\",\n    \"norm_type\": \"infinity\",\n    \"qn_type\": \"bfgs\",\n    \"qn_subspace_size\": 10,\n    \"starting_point_strategy\": \"least_squares_multipliers\",\n    \"qn_update_type\": \"damped_update\",\n    \"abs_res_tol\": 1e-6,\n    \"barrier_strategy\": \"monotone\",\n    \"armijo_constant\": 1e-5,\n    \"penalty_gamma\": 100.0,\n    \"max_major_iters\": 500,\n}\n\nfor key in options:\n    p.driver.options[key] = options[key]\n\n# Allow OpenMDAO to automatically determine our sparsity pattern.\n# Doing so can significantly speed up the execution of Dymos.\np.driver.declare_coloring(show_summary=True, show_sparsity=False)\n\n# Setup the problem\np.setup(check=True)\n\n# States\n# Nonzero velocity to avoid division by zero errors\np.set_val(\"traj.phase0.states:V\", phase.interp(\"V\", [20, 20]), units=\"m/s\")\n\n# All other states start at 0\np.set_val(\n    \"traj.phase0.states:lambda\", phase.interp(\"lambda\", [0.01, 0.01]), 
units=\"rad\"\n)\np.set_val(\"traj.phase0.states:omega\", phase.interp(\"omega\", [0.0, 0.0]), units=\"rad/s\")\np.set_val(\"traj.phase0.states:alpha\", phase.interp(\"alpha\", [0.0, 0.0]), units=\"rad\")\np.set_val(\"traj.phase0.states:ax\", phase.interp(\"ax\", [0.0, 0.0]), units=\"m/s**2\")\np.set_val(\"traj.phase0.states:ay\", phase.interp(\"ay\", [0.0, 0.0]), units=\"m/s**2\")\np.set_val(\"traj.phase0.states:n\", phase.interp(\"n\", [0.0, 0.0]), units=\"m\")\n\n# initial guess for what the final time should be\np.set_val(\"traj.phase0.states:t\", phase.interp(\"t\", [0.0, 100.0]), units=\"s\")\n\n# Controls\n# A small amount of thrust can speed up convergence\np.set_val(\"traj.phase0.controls:delta\", phase.interp(\"delta\", [0.0, 0.0]), units=\"rad\")\np.set_val(\"traj.phase0.controls:thrust\", phase.interp(\"thrust\", [0.1, 0.1]), units=None)\n\np.run_driver()\nprint(\"Optimization finished\")\n\n# Get optimized time series\nn = p.get_val(\"traj.phase0.timeseries.states:n\")\ns = p.get_val(\"traj.phase0.timeseries.s\")\nV = p.get_val(\"traj.phase0.timeseries.states:V\")\nthrust = p.get_val(\"traj.phase0.timeseries.controls:thrust\")\ndelta = p.get_val(\"traj.phase0.timeseries.controls:delta\")\npower = p.get_val(\"traj.phase0.timeseries.power\", units=\"W\")\n\nprint(\"Plotting\")\n\n# Plot the main vehicle telemetry\nfig, axes = plt.subplots(nrows=4, ncols=1, figsize=(15, 8))\n\n# Velocity vs s\naxes[0].plot(s, p.get_val(\"traj.phase0.timeseries.states:V\"), label=\"solution\")\n\naxes[0].set_xlabel(\"s (m)\")\naxes[0].set_ylabel(\"V (m/s)\")\naxes[0].grid()\naxes[0].set_xlim(0, s_final)\n\n# n vs s\naxes[1].plot(\n s, p.get_val(\"traj.phase0.timeseries.states:n\", units=\"m\"), label=\"solution\"\n)\n\naxes[1].set_xlabel(\"s (m)\")\naxes[1].set_ylabel(\"n (m)\")\naxes[1].grid()\naxes[1].set_xlim(0, s_final)\n\n# throttle vs s\naxes[2].plot(s, thrust)\n\naxes[2].set_xlabel(\"s (m)\")\naxes[2].set_ylabel(\"thrust\")\naxes[2].grid()\naxes[2].set_xlim(0, s_final)\n\n# delta vs s\naxes[3].plot(\n s, p.get_val(\"traj.phase0.timeseries.controls:delta\", units=None), label=\"solution\"\n)\n\naxes[3].set_xlabel(\"s (m)\")\naxes[3].set_ylabel(\"delta\")\naxes[3].grid()\naxes[3].set_xlim(0, s_final)\n\nplt.tight_layout()\n\n# Performance constraint plot. 
Tire friction and power constraints\nfig, axes = plt.subplots(nrows=1, ncols=1, figsize=(15, 4))\nplt.subplots_adjust(right=0.82, bottom=0.14, top=0.97, left=0.07)\n\naxes.plot(s, p.get_val(\"traj.phase0.timeseries.c_fl\", units=None), label=\"c_fl\")\naxes.plot(s, p.get_val(\"traj.phase0.timeseries.c_fr\", units=None), label=\"c_fr\")\naxes.plot(s, p.get_val(\"traj.phase0.timeseries.c_rl\", units=None), label=\"c_rl\")\naxes.plot(s, p.get_val(\"traj.phase0.timeseries.c_rr\", units=None), label=\"c_rr\")\n\naxes.plot(s, power / pmax, label=\"Power\")\n\naxes.legend(bbox_to_anchor=(1.04, 0.5), loc=\"center left\")\naxes.set_xlabel(\"s (m)\")\naxes.set_ylabel(\"Performance constraints\")\naxes.grid()\naxes.set_xlim(0, s_final)\n\nplt.show()\n","repo_name":"smdogroup/paropt","sub_path":"examples/dymos/racecar/racecar.py","file_name":"racecar.py","file_ext":"py","file_size_in_byte":10178,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"31"}
+{"seq_id":"39060022021","text":"from kafka import KafkaProducer\nfrom OpenWeatherApi import OpenWeatherApi\nimport json\nimport time\nfrom multiprocessing import Manager\n\nclass Producer:\n    \"\"\"A kafka producer.\n    calls is an auto-incremented variable that tracks the number of api calls.\n    timeout is the timeout in between calls of the same city\"\"\"\n    def __init__(self, apikey, cityidlist, bootstrap_server='0.0.0.0:9092', timeout=2) -> None:\n        self.calls = 0\n        self.timeout = timeout\n        self.apikey = apikey\n        self.cityidlist = [] if cityidlist is None else cityidlist\n        self.producer = KafkaProducer(bootstrap_servers=bootstrap_server)\n\n\n    def city_exists(self, cityid) -> bool:\n        \"\"\"checks whether the provided city topic is being produced to\"\"\"\n        return cityid in self.cityidlist\n\n    def produce(self):\n        \"\"\"production loop\"\"\"\n        try:\n            # main producing loop\n            while True:\n                for cityid in self.cityidlist:\n                    api = OpenWeatherApi(params = {\n                        'id': cityid,\n                        'units': 'metric',\n                        'appid': self.apikey\n                    })\n                    jsonpaylode = api.get()\n                    self.calls += 1\n\n                    # jsonpaylode = json.dumps(jsonpaylode, indent=2).encode('utf-8')\n                    # make it async\n                    self.producer.send(str(cityid), jsonpaylode.content)\n\n                    print('producing...')\n                    time.sleep(self.timeout)\n        except KeyboardInterrupt:\n            print('bye')\n\nif __name__ == '__main__':\n    prod = Producer(\"db1ac472d1dd9cf2d4cd31a077113ee9\", [2467959])\n    prod.produce()","repo_name":"Capital2/Weather-kafka","sub_path":"Backend/app/modules/producers/Producer.py","file_name":"Producer.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
+{"seq_id":"13815738539","text":"from unittest import TestCase\nfrom book import Book\n\n\nclass TestBook(TestCase):\n\n    def setUp(self):\n        self.book_one = Book(\"123\", \"April C\", \"Hacking with April\", \"Chris\", 9, \"science\", \"computer\")\n        self.book_two = Book(\"456\", \"Lipra\", \"Coding with April\", \"Johnson\", 10, \"math\", \"matrix\")\n\n    def test__str__book_one_correct_string(self):\n        actual = self.book_one.__str__()\n        expected = f'\\u001b[32;1mAuthor\\u001b[0m: April C, \\u001b[32;1mTitle\\u001b[0m: Hacking with April, \\u001b[' \\\n                   f'32;1mPublisher\\u001b[0m: Chris, \\u001b[32;1mShelf\\u001b[0m: 9, ' \\\n                   f'\\u001b[32;1mCategory\\u001b[0m: science, \\u001b[32;1mSubject\\u001b[0m: computer, ' \\\n                   f'\\u001b[32;1mid\\u001b[0m: 123 '\n        self.assertEqual(actual, expected)\n\n    def test_to_dict_return_dictionary(self):\n        actual = 
self.book_two.to_dict()\n        expected = {\n            \"Author\": \"Lipra\",\n            \"Title\": \"Coding with April\",\n            \"Publisher\": \"Johnson\",\n            \"Shelf\": 10,\n            \"Category\": \"math\",\n            \"Subject\": \"matrix\"\n        }\n        self.assertEqual(actual, expected)\n","repo_name":"nickfurk/book-manager","sub_path":"test_book.py","file_name":"test_book.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"16615576616","text":"from django.http import HttpResponse\n\"\"\" Utils \"\"\"\nfrom datetime import datetime\nimport json\n\n\ndef hello_world(request):\n    now = datetime.now().strftime('%dth, %b %Y - %H:%M hrs')\n    return HttpResponse(f'Hello Josselincita, the time is {now}')\n\n\ndef sorted_func(request):\n    numbers = [int(i) for i in request.GET['numbers'].split(',')]\n    sorted_int = sorted(numbers)\n    res_data = {\n        'status': 'OK',\n        'numbers': sorted_int,\n        'message': 'Integers sorted 
successfully'\n    }\n    \"\"\" import pdb\n    pdb.set_trace() \"\"\"\n    return HttpResponse(json.dumps(res_data), content_type='application/json')\n\n\ndef say_hi(request, name, age):\n    print(name, age)\n    if age < 12:\n        message = f'{name}, you are younger than {age} years old'\n    else:\n        message = f'{name}, you are older than {age} years old'\n    return HttpResponse(message)\n","repo_name":"josseline534/project-django","sub_path":"platziGram/platziGram/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"2175484461","text":"\"\"\"PG&E DLP data access\"\"\"\nimport os\nimport pycurl\nfrom io import BytesIO, StringIO, IOBase\nimport pandas as pd\nfrom datetime import datetime, timedelta\nimport numpy as np\nfrom zipfile import ZipFile\n\ndlpurl = \"https://www.pge.com/pge_global/forms/mads/profiles\"\ncachedir = \"__dlpcache__\"\n\nif not os.path.exists(cachedir):\n\tos.mkdir(cachedir)\n\ndef get_remote_file(url,out):\n\t\"\"\"Stream a file from the PG&E data archive\"\"\"\n\tc = pycurl.Curl()\n\tc.setopt(c.URL, url)\n\tc.setopt(c.WRITEDATA, out)\n\tc.perform()\n\tc.close()\n\ndef get_load_archive(year,cache=True,refresh=True):\n\t\"\"\"Copy a DLP archive for a previous year to the cache\"\"\"\n\tzipfile = f\"{cachedir}/{year}dlp.zip\"\n\twith open(zipfile,\"wb\") as zipfh:\n\t\tget_remote_file(f\"{dlpurl}/archive/{year}dlp.zip\",zipfh)\n\twith ZipFile(zipfile, 'r') as zipObj:\n\t\tfiles = zipObj.namelist()\n\t\tfor file in files:\n\t\t\tif file.endswith('.dlp'):\n\t\t\t\tzipObj.extract(file, f\"{cachedir}/{year}dlp\")\n\ndef get_load_profile(date,cache=True,refresh=False):\n\t\"\"\"Copy a DLP for a particular date to the cache and return a dataframe\"\"\"\n\tif date.year < datetime.now().year:\n\t\tget_load_archive(date.year)\n\n\tdatename = date.strftime('%Y%m%d')\n\tif not os.path.exists(f\"{cachedir}/{date.year}dlp\"):\n\t\tos.mkdir(f\"{cachedir}/{date.year}dlp\")\n\tcsvname = f\"{cachedir}/{date.year}dlp/{datename}.dlp\"\n\tif not cache or not os.path.exists(csvname) or refresh:\n\t\twith open(csvname,\"wb\") as csvfh:\n\t\t\tget_remote_file(f'{dlpurl}/{datename}.dlp',csvfh)\n\n\tdf = pd.read_csv(csvname).dropna(how='all').transpose()\n\tdf.columns = list(np.array(df[1:2])[0])\n\tassert(datename == df.index[0])\n\tdf.drop([datename,'Profile','Method'],inplace=True)\n\tdef get_time(date,time):\n\t\tt = time.split(':')\n\t\tt = (24+int(t[0]))*60 + int(t[1]) - 30\n\t\ty = int(date[0:4])\n\t\tm = int(date[4:6])\n\t\td = int(date[6:8])\n\t\tH = int(t/60) % 24\n\t\tM = t % 60\n\t\treturn datetime(y,m,d,H,M,0)\n\tdf['datetime'] = list(map(lambda t: datetime.strptime(datename+\" \"+t,\"%Y%m%d %H:%M\"),df.index))\n\tdf.set_index('datetime',inplace=True)\n\n\treturn df\n\ndef daterange(start_date, end_date):\n\t\"\"\"Obtain a date range\"\"\"\n\tfor n in range(int ((end_date - start_date).days+1)):\n\t\tyield start_date + timedelta(n)\n\ndef get_loads(start,stop,date_format='%m/%d/%y',show_progress=False):\n\t\"\"\"Obtain the loads for a date range as a dataframe\"\"\"\n\tif type(start) is str:\n\t\tstart = datetime.strptime(start,date_format)\n\tif type(stop) is str:\n\t\tstop = datetime.strptime(stop,date_format)\n\tblocks = []\n\tfor date in daterange(start,stop):\n\t\tif show_progress:\n\t\t\tprint(f\"Processing {date}...\",flush=True)\n\t\ttry:\n\t\t\tblocks.append(get_load_profile(date))\n\t\texcept Exception as err:\n\t\t\tprint(f\"ERROR: get_load_profile(date={date}): {err}\")\n\treturn 
pd.concat(blocks)\n\nif __name__ == '__main__':\n\tget_load_profile(datetime(2019,3,1,0,0,0))\n\tdata = get_loads('3/1/20','3/14/20')\n\tdata.to_csv('test_result.csv')\n","repo_name":"slacgismo/pgande_dlp_archive","sub_path":"src/pgande.py","file_name":"pgande.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9837962734","text":"from typing import Any, Dict, List, Optional, Union\n\nfrom huntflow_api_client.entities.base import (\n BaseEntity,\n CreateEntityMixin,\n GetEntityMixin,\n ListEntityMixin,\n)\nfrom huntflow_api_client.models.consts import AgreementState, ApplicantSearchField\nfrom huntflow_api_client.models.request.applicants import (\n ApplicantCreateRequest,\n ApplicantUpdateRequest,\n)\nfrom huntflow_api_client.models.response.applicants import (\n ApplicantCreateResponse,\n ApplicantItem,\n ApplicantListResponse,\n ApplicantSearchByCursorResponse,\n)\n\n\nclass Applicant(BaseEntity, ListEntityMixin, CreateEntityMixin, GetEntityMixin):\n async def list(\n self,\n account_id: int,\n count: Optional[int] = 30,\n page: Optional[int] = 1,\n status: Optional[int] = None,\n vacancy_id: Optional[int] = None,\n agreement_state: Optional[AgreementState] = None,\n ) -> ApplicantListResponse:\n \"\"\"\n API method reference https://api.huntflow.ai/v2/docs#get-/accounts/-account_id-/applicants\n\n :param account_id: Organization ID\n :param count: Number of items per page\n :param page: Page number\n :param status: Vacancy status ID\n :param vacancy_id: Vacancy ID\n :param agreement_state: Agreement's state of applicant to personal data processing.\n Available if the Personal Data module is enabled for organization.\n Cannot be supplied if the status parameter is passed.\n :return: List of applicants with pagination\n \"\"\"\n params: Dict[str, Any] = {\"count\": count, \"page\": page}\n if status:\n params[\"status\"] = status\n if vacancy_id:\n params[\"vacancy_id\"] = vacancy_id\n if agreement_state:\n params[\"agreement_state\"] = agreement_state.value\n response = await self._api.request(\n \"GET\",\n f\"/accounts/{account_id}/applicants\",\n params=params,\n )\n return ApplicantListResponse.model_validate(response.json())\n\n async def create(\n self,\n account_id: int,\n data: ApplicantCreateRequest,\n ) -> ApplicantCreateResponse:\n \"\"\"\n API method reference https://api.huntflow.ai/v2/docs#post-/accounts/-account_id-/applicants\n\n :param account_id: Organization ID\n :param data: Applicant data\n :return: The created applicant\n \"\"\"\n response = await self._api.request(\n \"POST\",\n f\"/accounts/{account_id}/applicants\",\n json=data.jsonable_dict(exclude_none=True),\n )\n return ApplicantCreateResponse.model_validate(response.json())\n\n async def get(self, account_id: int, applicant_id: int) -> ApplicantItem:\n \"\"\"\n API method reference\n https://api.huntflow.ai/v2/docs#get-/accounts/-account_id-/applicants/-applicant_id-\n\n :param account_id: Organization ID\n :param applicant_id: Applicant ID\n :return: The specified applicant\n \"\"\"\n response = await self._api.request(\n \"GET\",\n f\"/accounts/{account_id}/applicants/{applicant_id}\",\n )\n return ApplicantItem.model_validate(response.json())\n\n async def patch(\n self,\n account_id: int,\n applicant_id: int,\n data: ApplicantUpdateRequest,\n ) -> ApplicantItem:\n \"\"\"\n API method reference\n https://api.huntflow.ai/v2/docs#patch-/accounts/-account_id-/applicants/-applicant_id-\n\n :param 
account_id: Organization ID\n :param applicant_id: Applicant ID\n :param data: Applicant data\n :return: The created applicant\n \"\"\"\n response = await self._api.request(\n \"PATCH\",\n f\"/accounts/{account_id}/applicants/{applicant_id}\",\n json=data.jsonable_dict(exclude_none=True),\n )\n return ApplicantItem.model_validate(response.json())\n\n async def delete(self, account_id: int, applicant_id: int) -> None:\n \"\"\"\n API method reference\n https://api.huntflow.ai/v2/docs#delete-/accounts/-account_id-/applicants/-applicant_id-\n\n :param account_id: Organization ID\n :param applicant_id: Applicant ID\n \"\"\"\n await self._api.request(\n \"DELETE\",\n f\"/accounts/{account_id}/applicants\" f\"/{applicant_id}\",\n )\n\n async def search_by_cursor(\n self,\n account_id: int,\n next_page_cursor: Optional[str] = None,\n query: Optional[str] = None,\n tag: Optional[List[int]] = None,\n status: Optional[List[int]] = None,\n rejection_reason: Optional[List[int]] = None,\n vacancy: Union[List[int], None] = None,\n only_current_status: bool = False,\n account_source: Optional[List[int]] = None,\n field: ApplicantSearchField = ApplicantSearchField.all,\n count: int = 30,\n ) -> ApplicantSearchByCursorResponse:\n \"\"\"\n API method reference:\n https://api.huntflow.ai/v2/docs#get-/accounts/-account_id-/applicants/search_by_cursor\n\n :param account_id: Organization ID\n :param next_page_cursor: A cursor to the next page,\n if specified, no other params will be included\n :param query: Search query\n :param tag: List of tag ID\n :param status: List of vacancy status ID\n :param rejection_reason: List of rejection reason ID\n :param vacancy: List of vacancy ID's or None\n - None - no filter for vacancies\n - [] - empty list means applicant is not assigned to any vacancy\n - [1, 2, 3] - applicants assigned to specified vacancies\n :param only_current_status: If the value is set to True,\n then applicants who are currently at this status will be displayed.\n :param account_source: List of resume source ID\n :param field: Search field\n :param count: Number of items per page\n\n :return: Returns a list of found applicants and a cursor to the next page\n \"\"\"\n\n path = f\"/accounts/{account_id}/applicants/search_by_cursor\"\n\n params: Dict[str, Any]\n if next_page_cursor is not None:\n params = {\"next_page_cursor\": next_page_cursor}\n else:\n params = {\n \"tag\": tag or [],\n \"status\": status or [],\n \"rejection_reason\": rejection_reason or [],\n \"only_current_status\": only_current_status,\n \"field\": field.value,\n \"count\": count,\n \"account_source\": account_source or [],\n }\n if query:\n params[\"q\"] = query\n\n if vacancy is not None:\n params[\"vacancy\"] = vacancy if vacancy else \"null\"\n\n response = await self._api.request(\"GET\", path, params=params)\n return ApplicantSearchByCursorResponse.model_validate(response.json())\n","repo_name":"huntflow/huntflow-api-client-python","sub_path":"huntflow_api_client/entities/applicants.py","file_name":"applicants.py","file_ext":"py","file_size_in_byte":6966,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"70575541529","text":"# -*- coding: utf8 -*-\nimport logging , sys , traceback2\nimport loghandler\nfrom django.conf import settings\n\nimport os\n\ndef init_log( name = None , screen = False , thread = True ):\n #settings.check( 'LOGDIR' , 'LOGLEVEL' )\n return init_logger( name , settings.LOGDIR , screen , thread )\n\ninit = init_log\n\ndef init_logger( logname , 
logdir , screen = True , thread = True ):\n    logobj = logging.getLogger( logname )\n    # check whether this logger needs cleanup\n    if logobj.handlers:\n        return logobj # logger already created, skip\n#    # it has handlers, so this logger object needs cleanup\n    logobj.info( 'Logger [%s] re-initialized' , logname )\n    for hdl in logobj.handlers[:]:\n        logobj.removeHandler( hdl )\n    \n    # initialize the log file handler\n    fn = '%s.log' % logname\n    hdlr = loghandler.DateFileHandler( os.path.join( logdir , fn ) )\n    fmts = '%(asctime)s ' + ( 'T%(thread)d ' if thread else '' ) + '%(levelname)s %(message)s'\n    formatter = logging.Formatter( fmts )\n    hdlr.setFormatter(formatter)\n    logobj.addHandler( hdlr )\n    \n    if screen:\n        # initialize the on-screen (console) handler\n        hdlr = logging.StreamHandler()\n        fmts = '%(asctime)s %(name)s:' + ( 'T%(thread)d ' if thread else '' ) + '%(levelname)s %(message)s'\n        formatter = logging.Formatter( fmts )\n        hdlr.setFormatter(formatter)\n        logobj.addHandler( hdlr )\n\n    logobj.setLevel( settings.LOGLEVEL )\n    return logobj\n\ndef _fmt_msg( *args , **kwargs ):\n    if len( args ) > 1:\n        msg = args[0] % args[1:]\n    elif len( args ) == 1:\n        msg = args[0]\n    else:\n        msg = ''\n    \n    block = kwargs.get( 'block' )\n    if type(block) is str:\n        # this is a block log entry\n        bin = kwargs.get( 'bin' , True )\n        if bin:\n            block = to_hex( block )\n        \n        if block:\n            block = '\\\n'+'='*40+'\\\n'+block+ ('\\\n' if block[-1] != '\\\n' else '' ) +'='*40 + '\\\n'\n        elif msg[-1] == '\\\n':\n            block = ''\n        else:\n            block = '\\\n'\n        \n        msg = msg + block\n    if msg[-1] == '\\\n':\n        msg = msg[:-1]\n    return msg\n    \ndef debug( logname , *args , **kwargs ):\n    if logname:\n        logger = init_log( logname )\n        logger.debug( _fmt_msg( *args , **kwargs ) )\n\ndef info( logname , *args , **kwargs ):\n    if logname:\n        logger = init_log( logname )\n        logger.info( _fmt_msg( *args , **kwargs ) )\n    \ndef warning( logname , *args , **kwargs ):\n    if logname:\n        logger = init_log( logname )\n        logger.warning( _fmt_msg( *args , **kwargs ) )\n    \ndef error( logname , *args , **kwargs ):\n    if logname:\n        logger = init_log( logname )\n        logger.error( _fmt_msg( *args , **kwargs ) )\n    \ndef critical( logname , *args , **kwargs ):\n    if logname:\n        logger = init_log( logname )\n        logger.critical( _fmt_msg( *args , **kwargs ) )\n\ndef exception( logname , *args , **kwargs ):\n    if logname:\n        logger = init_log( logname )\n        exc_msg = traceback2.format_exc( show_locals = True )\n        args = list( args )\n        if args:\n            args[0] += '\\\n%s'\n        else:\n            args.append( '%s' )\n        args.append( exc_msg )\n        logger.error( _fmt_msg( *args , **kwargs ) )\n    return ''\n\nif __name__ == \"__main__\":\n    init_log( 'zjxx' , True )\n    init_log( 'zjxx' , True )","repo_name":"chengdg/zjyw","sub_path":"src/zjyw_utils/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"14877919921","text":"import os\n#Chris E Williams\n#1/14/2021\n#Today we will declare variables, print variables, print type of data, learn some operators.\n# (#) This symbol is for comments, the computer will ignore these lines if they have the (#)\n\n\n#This is a program to find the average of 3 tests.\n\n#Declare and assign values\n#before I start my code, I want to clear my Terminal; I can do this with os.system('clear') (or 'cls' on Windows)\nos.system('cls')\ntest1=89\ntest2=62.5673\ntest3=82\nFlag=False \n\n#to display things on the screen, we use the function print.\n\nprint(type(test1), type(test2), type(Flag))\n\n#declare Sum to add the tests; the symbol for addition is +\nSum = test1 + test2+ test3\n# print(Sum)\n\n#for the average, we will use division\nAverage = Sum/3\n# print(Average)\n# if you use ctrl 
+ / (the forward slash), everything you highlight will become a comment. To change it back, simply do it again on the commented lines. \n#I want to print \"the average of three tests is (number)\"\n#any time you want to type letters, you have to use quotation marks.\n\nprint(\"The Average of 3 tests is\", Average)\n\nprint(\"Test1 =\", test1, end=\": \")\nprint(\"Test2 =\", test2, end=\": \")\nprint(\"Test3 =\", test3)","repo_name":"Chris-Williams25/Block_E-GameDesign","sub_path":"ClassStuff/JunkFromClass/Averaging_Grades.py","file_name":"Averaging_Grades.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"31905124966","text":"from artie_analysis import *\n\nif __name__ == \"__main__\":\n\n    artieI_cfg = {\n        \"inputs\": {\n            \"argon\": \"../outputs/artieI_ideal_argon_short_0.root\",\n            \"vacuum\": \"../outputs/artieI_ideal_vacuum_short_0.root\"\n        }\n    }\n\n    artie = ArtieAnalysis(\n        artieI_cfg\n    )\n\n    number_of_bins=100\n    energy_min=70\n    energy_max=200\n    save='artieI_short_high'\n    show=False\n\n    artie.plot_all(\n        artieI_cfg[\"inputs\"].keys(),\n        number_of_bins=number_of_bins,\n        energy_min=energy_min,\n        energy_max=energy_max,\n        save=save,\n        show=show\n    )","repo_name":"ARTIE-II/ArtieSim","sub_path":"analysis/artieI_analysis.py","file_name":"artieI_analysis.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"31004733457","text":"import requests\n\n\nif __name__ == '__main__':\n    url = \"http://www.httpbin.org/post\"\n    params_dict = {\n        \"name\":\"alice\",\n        \"age\":25\n    }\n    headers_dict = {\n        \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36\"\n    }\n    response = requests.post(url,data=params_dict,headers=headers_dict)\n    print(\"Response status code:\",response.status_code)\n    print(\"Final URL:\",response.url)\n    
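# response.text decodes the response body as text, using the charset requests infers from the headers
    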
print(\"响应内容\",response.text)","repo_name":"SpCrazy/crazy","sub_path":"code/SpiderDay03/requests_demo/requests_post.py","file_name":"requests_post.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25799847245","text":"import time\n\ndef parseAsLocalTime(s, fmt='%Y-%m-%d %H:%M', fail=True):\n try:\n dt = time.strptime(s, fmt)\n except ValueError:\n if fail:\n raise\n else:\n return None\n return time.mktime(dt)\n","repo_name":"skogsbaer/snooze-apple-mail","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74283937687","text":"from keras.utils import to_categorical\nfrom keras import layers\nfrom keras import models\n\n\"\"\" (MNIST) It’s a set of 60,000 training\nimages, plus 10,000 test images, assembled by the National Institute of Standards and\nTechnology (the NIST in MNIST) in the 1980s. It's like the hello-world of programming in ML \"\"\"\n\nfrom keras.datasets import mnist\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n\nnetwork = models.Sequential()\n\"\"\" layer is a data-processing module that\nyou can think of as a filter for data. Some data goes in, and it comes out in a more useful form. Specifically, layers extract representations out of the data fed into them—hopefully, representations that are more meaningful for the problem at hand. \"\"\"\nnetwork.add(layers.Dense(512, activation='relu', input_shape=(28 * 28,)))\nnetwork.add(layers.Dense(10, activation='softmax'))\n\"\"\" DENSE means the layers are densely connected (meanse that they are fully connected) \"\"\"\n\nnetwork.compile(optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\"\"\" 1: A loss function—How the network will be able to measure its performance on\nthe training data, and thus how it will be able to steer itself in the right direction.\n2: An optimizer—The mechanism through which the network will update itself\nbased on the data it sees and its loss function.\n3: Metrics to monitor during training and testing—Here, we’ll only care about accuracy(the fraction of the images that were correctly classified). \"\"\"\n\"\"\" Before training, we’ll preprocess the data by reshaping it into the shape the network\nexpects and scaling it so that all values are in the [0, 1] interval. 
\"\"\"\ntrain_images = train_images.reshape((60000, 28 * 28))\ntrain_images = train_images.astype('float32') / 255\ntest_images = test_images.reshape((10000, 28 * 28))\ntest_images = test_images.astype('float32') / 255\n\ntrain_labels = to_categorical(train_labels)\ntest_labels = to_categorical(test_labels)\nnetwork.fit(train_images, train_labels, epochs=5, batch_size=128)\ntest_loss, test_acc = network.evaluate(test_images, test_labels)\nprint('test_acc:', test_acc)\n","repo_name":"Himanshunitrr/LearningAI","sub_path":"MNIST.py","file_name":"MNIST.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21989324884","text":"from math import sqrt, pi, ceil, atan\n\nl = int(input())\nx1, y1 = map(int, input().split())\nx2, y2 = map(int, input().split())\n\nslope = -1\np = a = b = c = -1 # a := distance to origo x-axis, b := distance to origo y-axis, c := b, p := distance to origo\nif x1 - x2 == 0: # vertical straight line\n p = abs(x1)\nelif y1 - y2 == 0: # horizontal straight line\n p = abs(y1)\nelse: # Other, y = kx+m\n k = (y1 - y2)/(x1 - x2)\n m = y1 - k*x1\n a = k\n b = -1\n c = m # Line expressed in standard form! Ax + By + C = 0\n p = abs(c) / sqrt(a**2 + b**2)\n\nr = sqrt(l/pi)\nif p >= r: # Distance to wall is greater than needed radius\n print(ceil(r))\nelse:\n r = ceil(r)\n area = -1\n while r < 200:\n h = p\n b = (r**2 - h**2)**0.5\n if h == 0:\n angle = pi / 2\n if h > 0:\n angle = atan(b / h)\n area = (pi * r*r) + (b * h) - (angle * r * r)\n if area >= l:\n print(r)\n break\n\n r += 1\n","repo_name":"fr3632ho/kattis","sub_path":"get-off-my-lawn/lawn.py","file_name":"lawn.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5253393129","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('take-quiz', views.take_quiz, name='take-quiz'),\n path('signin', views.signin, name='signin'),\n path('quiz-list', views.quiz_list, name='quiz-list'),\n path('my-profile', views.my_account, name='learner-profile'),\n path('countdown', views.countdown, name='countdown'),\n path('signup', views.signup, name='learner-signup'),\n path('all-quiz', views.all_quiz, name='all-quiz'),\n path('quiz-result', views.result_page, name='quiz-result'),\n path('quiz-review', views.review_page, name='quiz-review'),\n path('generate', views.generate, name='generate-l'),\n path('', views.home_page, name='l-home'),\n path('practice-quiz', views.practice_quiz, name='practice-quiz'),\n path('logout', views.logout_l, name='l-logout'),\n path('live-list', views.live_list, name='live-list'),\n path('instructor-list', views.instructor_list, name='instructor-list'),\n path('instructor-profile', views.instructor_profile, name='instructor-profile')\n]\n","repo_name":"Subodh7300/Quiz-Emporium","sub_path":"quizsite/student/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9220276953","text":"import os\r\nimport random\r\nfrom PIL import Image\r\n\r\nfrom torch.utils import data\r\n\r\nimport torchvision\r\nimport torchvision.transforms as transforms\r\n\r\n\r\ndef create_dataloader(dataset='cifar10', batch_size=64, num_workers=1):\r\n if dataset == 'cifar10':\r\n transform = transforms.Compose([transforms.ToTensor(),\r\n transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])\r\n\r\n trainset = torchvision.datasets.CIFAR10(root='./data/', train=True, transform=transform, download=True)\r\n testset = torchvision.datasets.CIFAR10(root='./data/', train=False, transform=transform, download=True)\r\n\r\n trainloader = data.DataLoader(dataset=trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers)\r\n testloader = data.DataLoader(dataset=testset, batch_size=batch_size, shuffle=False, num_workers=num_workers)\r\n\r\n elif dataset == 'summer2winter':\r\n transform = transforms.Compose([transforms.ToTensor(),\r\n transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])\r\n\r\n trainset = Summer2WinterDataset(train=True, transform=transform)\r\n testset = Summer2WinterDataset(train=False, transform=transform)\r\n\r\n trainloader = data.DataLoader(dataset=trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers)\r\n testloader = data.DataLoader(dataset=testset, batch_size=1, shuffle=False, num_workers=num_workers)\r\n\r\n return trainloader, testloader\r\n\r\n\r\nclass Summer2WinterDataset(data.Dataset):\r\n def __init__(self, train: bool=True, transform=None):\r\n self.transform = transform\r\n dataset_dir = './data/summer2winter_yosemite/'\r\n\r\n # Implement the dataset for unpaired image-to-image translation.\r\n # Check the dataset directory and implement the proper dataset.\r\n # This dataset have to load the train or test files depending on the 'train' option.\r\n\r\n ### YOUR CODE HERE (~ 10 lines)\r\n self.train =train\r\n if self.train:\r\n self.image_list_A = os.listdir(dataset_dir+'trainA')\r\n self.folderA = dataset_dir +'trainA'\r\n self.image_list_B = os.listdir(dataset_dir + 'trainB')\r\n self.folderB = dataset_dir + 'trainB'\r\n else:\r\n self.image_list_A = os.listdir(dataset_dir + 'testA')\r\n self.folderA = dataset_dir + 'testA'\r\n self.image_list_B = os.listdir(dataset_dir + 'testB')\r\n 
self.folderB = dataset_dir + 'testB'\r\n\r\n self.image_list_A.sort()\r\n self.image_list_B.sort()\r\n self.transform = transforms.Compose([transforms.ToTensor(),\r\n transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])\r\n ### END YOUR CODE\r\n\r\n def __getitem__(self, index):\r\n\r\n # The number of images in domain A and domain B are different.\r\n # You have to sample the index to load data from different pairs.\r\n\r\n ### YOUR CODE HERE (~ 2 lines)\r\n image_A= Image.open(os.path.join(self.folderA, self.image_list_A[index]))\r\n image_B = Image.open(os.path.join(self.folderB, self.image_list_B[random.randint(0,len(self.image_list_B)-1)]))\r\n\r\n ### END YOUR CODE\r\n\r\n return self.transform(image_A), self.transform(image_B)\r\n\r\n def __len__(self):\r\n return len(self.image_list_A)\r\n\r\n\r\nclass FolderDataset(data.Dataset):\r\n def __init__(self, folder):\r\n self.folder = folder\r\n self.image_list = os.listdir(folder)\r\n self.transform = transforms.Compose([transforms.ToTensor(),\r\n transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])\r\n\r\n def __getitem__(self, index):\r\n image = Image.open(os.path.join(self.folder, self.image_list[index]))\r\n return self.transform(image)\r\n\r\n def __len__(self):\r\n return len(self.image_list)\r\n","repo_name":"thanhkaist/GAN","sub_path":"dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":3959,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"71542508888","text":"'''\n @Author: JoeyforJoy & ylheng\n @Date: 2022-03-25 15:10:15\n @LastEditTime: 2022-03-29 11:13:51\n @LastEditors: JoeyforJoy\n @Description: Transfer rosbag to synchronized image and pcd files.\n @Example: \n # message should be broadcast first\n rosrun b2x time_sync_cam2.py ${img1_topic} ${img2_topic} --output_dir ${output_dir}\n'''\n\nimport numpy as np\nimport rospy\nimport message_filters\nfrom sensor_msgs.msg import Image, CompressedImage\n\nimport os\nimport sys\nPARENT_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"..\")\nsys.path.append(PARENT_DIR)\nfrom utils import *\nimport argparse\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Transfer rosbag to synchronized image and pcd files.\")\n parser.add_argument(\"topic_img1\", type=str, help = \"the name of the image1 topic\")\n parser.add_argument(\"topic_img2\", type=str, help = \"the name of the image2 topic\")\n parser.add_argument(\"--output_dir\", type=str, default=\"./data/synchronized\", \n help = \"the root directory of the output files\")\n parser.add_argument(\"--img1_dir_label\", type=str, default=\"image1\", \n help = \"the subdirectory name of output images\")\n parser.add_argument(\"--img2_dir_label\", type=str, default=\"image2\", \n help = \"the subdirectory name of output images\")\n parser.add_argument(\"--tot\", type=float, default=0.01, \n help = \"the tolerence of time synchronization\")\n return parser.parse_args()\n\nclass callBackClass:\n def __init__(self, output_dir, img1_subdir=\"image1\", img2_subdir=\"image2\",\n img1_compressed = True, img2_compressed = True):\n self.output_dir = output_dir\n self.img1_dir = os.path.join(self.output_dir, img1_subdir)\n self.img2_dir = os.path.join(self.output_dir, img2_subdir)\n \n os.makedirs(self.output_dir, exist_ok=True)\n os.makedirs(self.img1_dir, exist_ok=True)\n os.makedirs(self.img2_dir, exist_ok=True)\n \n self.img1_compressed = img1_compressed\n self.img2_compressed = 
img2_compressed\n\n self.count = 0\n self.max_count = 1000000\n\n def __call__(self, img1_msg, img2_msg):\n frame_name = \"%06d\" % (self.count)\n # print(\"frame name: %s\\ttimestampe: %s\" % (frame_name, img1_msg.header.stamp))\n\n # transfer img1 msg 2 cv img\n dumpImageMsg(img1_msg, self.img1_dir, frame_name, compressed = self.img1_compressed)\n dumpImageMsg(img2_msg, self.img2_dir, frame_name, compressed = self.img2_compressed)\n\n self.count = (self.count + 1) % self.max_count\n\nif __name__ == \"__main__\":\n rospy.init_node('time_sync_lidar_cam')\n\n args = parse_args()\n\n image1_sub = createImgMsgFilterSubsciber(args.topic_img1)\n image2_sub = createImgMsgFilterSubsciber(args.topic_img2)\n ts = message_filters.ApproximateTimeSynchronizer([image1_sub, image2_sub], 10, args.tot, allow_headerless=True)\n\n img1_compressed = isCompressedImage(args.topic_img1)\n img2_compressed = isCompressedImage(args.topic_img2)\n callback = callBackClass(args.output_dir, img1_compressed = img1_compressed, img2_compressed = img2_compressed)\n ts.registerCallback(callback)\n rospy.spin()\n","repo_name":"YuanxianH/b2x","sub_path":"src/b2x/scripts/time_sync_cam2.py","file_name":"time_sync_cam2.py","file_ext":"py","file_size_in_byte":3254,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"15175318169","text":"import numpy as np\nimport pandas as pd\nfrom scipy.special import comb\n\nfrom LV_real_multispec_com import LV_pars, max_spec, LV_multi_spec\n\n# simple summary for number of communities\nn_specs = np.arange(2,7)\ncom_sum = pd.DataFrame()\n# distinct communities from papers\ndist_com = [sum(LV_multi_spec.n_spec == i) for i in range(max_spec+1)]\ncom_sum[\"dist_com\"] = dist_com\n# maximal number of communities\ncom_sum[\"max_com\"] = [int(sum(dist_com * comb(np.arange(0,max_spec +1),i)))\n for i in range(0,max_spec+1)]\n# communities for which all parameters exist and are nonzero\ncom_sum[\"full_com\"] = [len(mat) for mat in LV_pars[\"matrix\"]]\n# communities for which we can compute NFD parameters\n[sum(comp) for comp in LV_pars[\"NFD_comp\"]]\ncom_sum[\"NFD_comp\"] = [len(ND) for ND in LV_pars[\"ND\"]]\n# communities with stable equilibrium\ncom_sum[\"coex\"] = [sum(coex) for coex in LV_pars[\"real_coex\"]]\ncom_sum[\"no_coex\"] = com_sum[\"full_com\"]-com_sum[\"coex\"]\n\n\n\n# number of communities, for which invasion is not possible, or does not\n# predict coexistnece, but can coexist\ncoex_real = LV_pars[\"real_coex\"]\nNFD_comp = LV_pars[\"NFD_comp\"]\ncoex_invasion = LV_pars[\"coex_invasion\"]\n\n\ncoex_no_inv = [coex_real[i] & (~NFD_comp[i]) for i in n_specs]\ninv_wrong = [coex_real[i][NFD_comp[i]] != coex_invasion[i] for i in n_specs]\ncom_sum[\"no_inv\"] = 0\ncom_sum[\"no_inv\"].iloc[n_specs] = [sum(c) for c in coex_no_inv]\ncom_sum[\"inv_wrong\"] = 0\ncom_sum[\"inv_wrong\"].iloc[n_specs] = [sum(c) for c in inv_wrong]\ncom_sum[\"NFD_coex\"] = com_sum[\"coex\"]-com_sum[\"no_inv\"]\ncom_sum[\"NFD_no_coex\"] = com_sum[\"NFD_comp\"] -com_sum[\"NFD_coex\"]\ncom_sum = com_sum.T\n\ncom_sum[\"total\"] = np.sum(com_sum.values, axis = 1)\nprint(com_sum)\ncom_sum.index = [\"Original matrices\", \"Subcommunities\",\n \"Complete\\n int. matrix\", \"NFD computed\", \"coexistence\",\n \"comp. exclusion\", \"no invasion analysis\", \"invasion wrong\",\n \"NFD coexistence\", \"NFD comp. 
excl\"]\ndel(com_sum[0])\ndel(com_sum[1])\ncom_sum.to_csv(\"Table_S2.csv\", index = True) \n","repo_name":"juergspaak/multi_species_NFD","sub_path":"Table_S2.py","file_name":"Table_S2.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32377607324","text":"from math import sqrt, pi\n\n\ndef configure_constants(units):\n\n units.define('bolometric_luminosity = 3.0128e+28 * W = L_bol0')\n units.define('solar_luminosity = 3.828e+26 * W = L_sun = L_sol')\n units.define('earth_mass = 5.97216787e+27 * g = M_earth')\n units.define('jupiter_mass = 1.8981246e+30 * g = M_jup')\n units.define('solar_mass = 1.9889e+33 * g = M_sun = M_sol')\n units.define('earth_radius = 6.3781e+08 * cm = R_earth')\n units.define('jupiter_radius = 7.1492e+09 * cm = R_jup')\n units.define('solar_radius = 6.957e+10 * cm = R_sun = R_sol')\n units.define(\n 'radiation_constant = 7.56591469318689378e-015 * erg / cm^3 / K^4 = ar')\n\n\ndef configure_units(units, unit_d, unit_l, unit_t):\n\n density = unit_d * units(\"g / cm**3\")\n velocity = (unit_l / unit_t) * units(\"cm / s\")\n magnetic_field = sqrt(4.0 * pi * unit_d * (unit_l / unit_t)**2) * units(\"G\")\n momentum = density * velocity\n acceleration = (unit_l / unit_t**2) * units(\"cm / s**2\")\n energy = unit_d * ((unit_l / unit_t)**2) * units(\"erg / cm**3\")\n time = unit_t * units(\"s\")\n length = unit_l * units(\"cm\")\n mass = density * length**3\n temperature = 1.0 * units(\"K\")\n grav_potential = velocity**2\n\n library = {\n 'unit_d': unit_d,\n 'unit_l': unit_l,\n 'unit_t': unit_t,\n 'density': density,\n 'velocity': velocity,\n 'velocity_*': velocity,\n 'momentum': momentum,\n 'momentum_*': momentum,\n 'magnetic_field': magnetic_field,\n 'B_left': magnetic_field,\n 'B_left_*': magnetic_field,\n 'B_right': magnetic_field,\n 'B_right_*': magnetic_field,\n 'B_field': magnetic_field,\n 'B_field_*': magnetic_field,\n 'B_*_left': magnetic_field,\n 'B_*_right': magnetic_field,\n 'acceleration': acceleration,\n 'grav_acceleration': acceleration,\n 'grav_acceleration_*': acceleration,\n 'grav_potential': grav_potential,\n 'energy': energy,\n 'internal_energy': energy,\n 'thermal_pressure': energy,\n 'pressure': energy,\n 'radiative_energy': energy,\n 'radiative_energy_*': energy,\n 'time': time,\n 'length': length,\n 'x': length,\n 'y': length,\n 'z': length,\n 'position': length,\n 'position_*': length,\n 'dx': length,\n 'mass': mass,\n 'temperature': temperature\n }\n return library\n\n\ndef additional_variables(data):\n \"\"\"\n Here are some additional variables that are to be computed every time data\n is loaded.\n\n It is recommended to place your variables in a `try/except` block, which\n will prevent errors if the variables are not found, for instance when\n loading data from a different simulation.\n \"\"\"\n\n # Magnetic field\n try:\n data['hydro']['B_field'] = 0.5 * (data['hydro']['B_left'] +\n data['hydro']['B_right'])\n except KeyError:\n pass\n\n # Mass\n try:\n data['hydro']['mass'] = (data['hydro']['density'] *\n data['amr']['dx']**3).to('M_sun')\n except KeyError:\n pass\n","repo_name":"osyris-project/osyris","sub_path":"src/osyris/config/defaults.py","file_name":"defaults.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"31"} +{"seq_id":"72445389849","text":"\"\"\"\n#Create set of pulses for single qubit randomized benchmarking sequence. 
\n\nCreated on Tue Feb 07 15:01:37 2012\n\n@authors: Colm Ryan and Marcus Silva\n\"\"\"\nimport numpy as np\nfrom functools import reduce\n\nimport csv\n\ndef memoize(function):\n\tcache = {}\n\tdef decorated(*args):\n\t\tif args not in cache:\n\t\t\tcache[args] = function(*args)\n\t\treturn cache[args]\n\treturn decorated\n\n@memoize\ndef pauli_multiply(P1, P2):\n '''\n Multiplication table for single qubit cliffords. Note this assumes C1 is applied first. \n '''\n tmpMult = np.dot(Paulis[P2].matrix,Paulis[P1].matrix)\n checkArray = np.array([np.abs(np.trace(np.dot(tmpMult.transpose().conj(),Paulis[x].matrix))) for x in range(1,5)])\n return checkArray.argmax()+1\n\n\n#Number of gates that we want\ngateLengths = np.array([2, 4, 8, 16, 32, 64, 96, 128, 192, 256, 320])\n\n#Number of randomizations\nnumRandomizations = 32\n\n#Single qubit paulis\nX = np.array([[0, 1],[1, 0]])\nY = np.array([[0, -1j],[1j, 0]])\nZ = np.array([[1, 0],[0, -1]]);\nI = np.eye(2)\n\n#Basically a structure to contain some infor about the Cliffords\nclass Pauli(object):\n def __init__(self, matrix, inverse):\n self.matrix = matrix\n self.inverse = inverse\n \n#Basis Cliffords\nPaulis = {}\nPaulis[1] = Pauli(I, 1)\nPaulis[2] = Pauli(X, 2)\nPaulis[3] = Pauli(Y, 3)\nPaulis[4] = Pauli(Z, 4)\n\ntargetGate = 1\n\n#Generate random sequence of Paulis for each number of gates we want to look at and repeat numRandomization times\nrandPauliLists = [np.random.randint(1,5, gatect-1).tolist() for gatect in gateLengths for randct in range(numRandomizations) ] \n\n#Interleave gate of interest\n#interLeavedGateLists = [np.vstack((tmpGateList, targetGate*np.ones_like(tmpGateList))).flatten(order='F').tolist() for tmpGateList in randPauliLists]\n \n#For each sequence calculate inverse and the X sequence and append the final Clifford\nrandomISeqs = []\n#randomXSeqs = []\nfor tmpPauliSeq in randPauliLists:\n totalPauli = reduce(pauli_multiply, tmpPauliSeq)\n inversePauli = Paulis[totalPauli].inverse\n# inverseCliffX = clifford_multiply(inverseCliff, 2)\n randomISeqs.append(tmpPauliSeq + [inversePauli])\n# randomXSeqs.append(tmpSeq + [inverseCliffX]) \n \n\n#Write out the files now\nwith open('PauliTwirl_ISeqs.txt','wt') as ISeqFID:\n writer = csv.writer(ISeqFID)\n writer.writerows(randomISeqs)\n\n#with open('PauliTwirl_XSeqs.txt','wt') as XSeqFID:\n# writer = csv.writer(XSeqFID)\n# writer.writerows(randomXSeqs)\n\n\n\n\n\n \n\n \n \n \n \n","repo_name":"BBN-Q/Qlab","sub_path":"experiments/muWaveDetection/sequences/CreatePauliTwirlSeq.py","file_name":"CreatePauliTwirlSeq.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"31"} +{"seq_id":"43093494100","text":"import pymongo\nimport json\nfrom getCardSet import get_setids, default_setids_path\n\nport = 27017\nusername = None\npassword = None\nclient = pymongo.MongoClient(host='localhost', port=27017)\ndb = client['artifact']\n\n\ndef load_cardset(setid):\n with open('save/' + setid + '.json') as f:\n cardset = json.load(f)\n return cardset\n\n\ndef extract_cardset_info(cardset_json):\n cardset_info = cardset_json['set_info']\n set_id = cardset_info['set_id']\n set_name = cardset_info['name']['english']\n return set_id, set_name\n\n\nsetids = get_setids(default_setids_path)\nfor set_id in setids:\n cardset = load_cardset(set_id)['card_set']\n set_id, set_name = extract_cardset_info(cardset)\n collection = db[str(set_id)]\n card_list = cardset['card_list']\n for card in card_list:\n 
collection.insert_one(card)\n","repo_name":"PlumPeanut/ArtifactCard-Python3","sub_path":"dataBase.py","file_name":"dataBase.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"23819006565","text":"import ssl\nimport socket\n\ndef verify_ssl_certificate(hostname):\n # Create a default SSL context\n context = ssl.create_default_context()\n\n try:\n # Create an SSL socket using the context and the target hostname\n s = context.wrap_socket(socket.socket(), server_hostname=hostname)\n # Connect to the target hostname on port 443\n s.connect((hostname, 443))\n # Get the peer certificate\n certificate = s.getpeercert()\n # If the connection and certificate are valid, print a message and return True\n print(\"SSL certificate for {} is valid.\".format(hostname))\n return True\n except ssl.SSLError as e:\n # If there's an SSL error, print an error message and return False\n print(\"SSL certificate for {} is not valid: {}\".format(hostname, e))\n return False\n\n# Verify the SSL certificate for the target hostname\nverify_ssl_certificate(\"example.com\")\n","repo_name":"paulinhoneto/mismatched-ssl","sub_path":"m-ssl.py","file_name":"m-ssl.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13756555766","text":"# -*- coding: utf-8 -*-\nimport pandas as pd\n#%% Combining loop and function\n#%% Why we need to combine loop and function\n'''\n1. We can perform similar task easily by using function. \n We can do stuff repeatly and regularily by using loop.\n If combing two together, we can easily peform similar task repeatly and regularily.\n'''\n\n#%% Two type of loop+function\n'''\n1. looping outside a function:\n def func():\n ......\n ......\n \n for ...... :\n func()\n \n2. looping inside a function: \n def func(...):\n for ......:\n ....\n ....\n \n3. looping inside and outside a function:\n def func(...):\n ....\n ....\n for ..... :\n ....\n ....\n \n for ..... :\n func()\n \n'''\n\n#1. looping outside a function: 最常用\ndef get_word_len_single(string):\n return [string, len(string)]\n \ndata = ['apple', 'ball', 'cat', 'door']\noutcome = []\nfor i in data:\n outcome.append(get_word_len_single(i))\nprint(outcome)\n\n#2. looping inside a function: \ndef get_word_len_multi(lis):\n result = []\n for string in lis:\n result.append([string, len(string)])\n return result\n \ndata = ['apple', 'ball', 'cat', 'door']\noutcome = get_word_len_multi(data)\nprint(outcome)\n\n#3. looping inside & outside a function:\ndef count_char(string):\n result = {}\n for char in string:\n if char not in result:\n result[char] = 1\n else:\n result[char] += 1\n return [string, result]\n \n \ndata = ['apple', 'ball', 'cat', 'door']\noutcome = []\nfor i in data:\n outcome.append(count_char(i))\nprint(outcome)\n#%% Upgraded loop+function\n'''\n1. map(func, iterable): 等同於looping outside a function\n map是python本身的function, 所以適用於所有iterable object\n2. Series.apply(func): pandas裡面的map, \n 等同於looping outside a function. \n Each element is the input of the function\n3. DataFrame.apply(func, axis): Eache row(axis=1)/column(axis=0) is the input of the function\n4. DataFrame.applymap(func): Each cell is the input of the function\n\n'''\n##1. 
map\ndef get_word_len_single(string):\n return [string, len(string)]\n\noutcome = list(map(get_word_len_single, data))\nprint(outcome)\n#----等同於\ndata = ['apple', 'ball', 'cat', 'door']\noutcome = []\nfor i in data:\n outcome.append(get_word_len_single(i))\nprint(outcome)\n\n#2. Series.apply()\ndef get_word_len_single(string):\n return [string, len(string)]\n\ndata = pd.Series(['apple', 'ball', 'cat', 'door'])\noutcome = data.apply(get_word_len_single)\nprint(outcome)\n#----等同於\noutcome = []\nfor i in data:\n outcome.append(get_word_len_single(i))\noutcome = pd.Series(outcome)\nprint(outcome)\n\n#3. DataFrame.apply()\ndef get_max_min(lis):\n return [min(lis), max(lis)]\n\n##axis=1\ndf = pd.DataFrame(data = [[1,2,3],[4,5,6],[7,8,9]],\n index = ['row_1', 'row_2', 'row_3'],\n columns = ['col_1', 'col_2', 'col_3'])\noutcome = df.apply(get_max_min, axis=1)\nprint(df)\nprint(outcome)\n#----等同於\noutcome = []\nfor idx, row in df.iterrows():\n outcome.append(get_max_min(row))\noutcome = pd.DataFrame(outcome, \n index = df.index)\nprint(df)\nprint(outcome)\n\n##axis=0\ndf = pd.DataFrame(data = [[1,2,3],[4,5,6],[7,8,9]],\n index = ['row_1', 'row_2', 'row_3'],\n columns = ['col_1', 'col_2', 'col_3'])\noutcome = df.apply(get_max_min, axis=0)\nprint(df)\nprint(outcome)\n#----等同於\noutcome = []\nfor col_name, col in df.iteritems():\n outcome.append(get_max_min(col))\noutcome = pd.DataFrame(outcome, \n index = df.columns).T\nprint(df)\nprint(outcome)\n\n\n#4. DataFrame.applymap(func)\ndf_C = pd.DataFrame(data = [[1,2,3],[4,5,6],[7,8,9]],\n index = ['row_1', 'row_2', 'row_3'],\n columns = ['col_1', 'col_2', 'col_3'])\ndef C_to_F(cell):\n out = cell * 1.8 + 32\n return out\ndf_F = df_C.applymap(C_to_F)\nprint(df_F)\n#----等同於\ndf_C = pd.DataFrame(data = [[1,2,3],[4,5,6],[7,8,9]],\n index = ['row_1', 'row_2', 'row_3'],\n columns = ['col_1', 'col_2', 'col_3'])\ndf_F = df_C.copy()\nfor row_idx in range(df_C.shape[0]):\n for col_idx in range(df_C.shape[1]):\n df_F.iat[row_idx, col_idx] = C_to_F(df_C.iat[row_idx, col_idx])\n\nprint(df_F)\n\n\n\n","repo_name":"chienchunliao/Python-Materials","sub_path":"additional material/loop+function_additional material.py","file_name":"loop+function_additional material.py","file_ext":"py","file_size_in_byte":4409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12946021098","text":"import numpy as np\nimport copy\nimport os\nimport smplx\nimport torch\nfrom os.path import join, exists\nfrom psbody.mesh import Mesh, MeshViewer, MeshViewers\nfrom lib.utils import filter_cloth_pose\n\nnp.random.seed(123)\n\nclass demo(object):\n def __init__(self, model, name, dataset, data_dir, datadir_root, n_sample, save_obj,\n sample_option='normal', smpl_model_folder='', vis=True):\n self.n_sample = n_sample\n self.sample_option = sample_option\n self.name = name\n self.data_dir = data_dir\n self.datadir_root = datadir_root\n self.model = model\n self.dataset = dataset\n self.save_obj = save_obj\n self.vis = vis\n\n self.smpl_model = smplx.body_models.create(model_type='smpl',\n model_path=smpl_model_folder,\n gender='neutral')\n\n script_dir = os.path.dirname(os.path.realpath(__file__))\n self.clothing_verts_idx = np.load(join(script_dir, 'data', 'clothing_verts_idx.npy'))\n self.ref_mesh = Mesh(filename=join(script_dir, 'data', 'template_mesh.obj'))\n self.minimal_shape = self.ref_mesh.v\n\n self.rot = np.load(join(script_dir, 'data', 'demo_data', 'demo_pose_params.npz'))['rot'] # 216 dim pose vector\n self.pose = 
np.load(join(script_dir, 'data', 'demo_data', 'demo_pose_params.npz'))['pose']\n\n train_stats = np.load(join(script_dir, 'data', 'demo_data', 'trainset_stats.npz'))\n self.train_mean = train_stats['mean']\n self.train_std = train_stats['std']\n\n self.results_dir = join(script_dir, 'results', name)\n if not exists(self.results_dir):\n os.makedirs(self.results_dir)\n\n def sample_vary_pose(self):\n '''\n fix clothing type, sample sevearl poses, under each pose sample latent code N times\n '''\n full_pose = self.pose # take the corresponding full 72-dim pose params, for later reposing\n rot = filter_cloth_pose(self.rot) # only keep pose params from clo-related joints; then take one pose instance\n clotype = np.array([1, 0, 0, 0]) # one-hot clothing type label\n clotype_repeated = np.repeat(clotype[np.newaxis, :], len(rot), axis=0)\n\n # get latent embedding of the conditions\n pose_emb, clotype_emb = self.model.encode_only_condition(rot, clotype_repeated)\n clotype_emb = clotype_emb[0]\n\n obj_dir = join(self.results_dir, 'sample_vary_pose')\n\n print('\\n=============== Running demo: fix z, clotype, change pose ===============')\n print('\\nFound {} different pose, for each we generate {} samples\\n'.format(len(rot), self.n_sample))\n\n # sample latent space\n z_samples = np.random.normal(loc=0.0, scale=1.0, size=(self.n_sample, self.model.nz))\n\n for idx, pose_emb_i in enumerate(pose_emb):\n full_pose_repeated = np.repeat(full_pose[np.newaxis, idx, :], self.n_sample, axis=0)\n # concat z with conditions\n z_sample_c = np.array([np.concatenate([sample.reshape(1, -1), pose_emb_i.reshape(1, -1), clotype_emb.reshape(1, -1)], axis=1)\n for sample in z_samples]).reshape(self.n_sample, -1)\n\n predictions = self.model.decode(z_sample_c, cond=pose_emb_i.reshape(1, -1), cond2=clotype_emb.reshape(1, -1))\n predictions = predictions * self.train_std + self.train_mean\n\n # exclude head, fingers and toes\n disp_masked = np.zeros_like(predictions)\n disp_masked[:, self.clothing_verts_idx, :] = predictions[:, self.clothing_verts_idx, :]\n\n predictions_fullbody = disp_masked + self.minimal_shape\n\n predictions_fullbody_posed = self.pose_result_onepose_multisample(predictions_fullbody, full_pose_repeated, pose_idx=idx,\n save_obj=self.save_obj, obj_dir=obj_dir)\n if self.vis:\n minimal_shape_posed = self.pose_result_onepose_multisample(np.array([self.minimal_shape]), full_pose_repeated, pose_idx=idx,\n save_obj=False)\n self.vis_meshviewer(mesh1=predictions_fullbody_posed, mesh2=minimal_shape_posed, mesh3=None,\n n_sample=self.n_sample, titlebar='Sample vary pose')\n\n def vis_meshviewer(self, mesh1, mesh2, mesh3, n_sample, titlebar='titlebar', disp_value=False, values_to_disp=None):\n from psbody.mesh import Mesh, MeshViewer, MeshViewers\n\n if mesh3 is not None:\n viewer = MeshViewers(shape=(1, 3), titlebar=titlebar)\n for x in range(n_sample):\n viewer[0][0].static_meshes = [Mesh(mesh1[x], self.ref_mesh.f)]\n viewer[0][1].static_meshes = [Mesh(mesh2[x], self.ref_mesh.f)]\n viewer[0][2].static_meshes = [Mesh(mesh3[x], self.ref_mesh.f)]\n if disp_value is False:\n input('frame {}, Press key for next'.format(x))\n else:\n input('Current value: {}'.format(values_to_disp[x]))\n else:\n viewer = MeshViewers(shape=(1, 2), titlebar=titlebar)\n for x in range(n_sample):\n viewer[0][0].static_meshes = [Mesh(mesh1[x], self.ref_mesh.f)]\n viewer[0][1].static_meshes = [Mesh(mesh2[x], self.ref_mesh.f)]\n if disp_value is False:\n input('frame {}, press key for next'.format(x))\n else:\n input('Current value: 
{}'.format(values_to_disp[x]))\n\n def pose_result(self, verts, pose_params, save_obj, cloth_type=None, obj_dir=None):\n '''\n :param verts: [N, 6890, 3]\n :param pose_params: [N, 72]\n '''\n if verts.shape[0] != 1: # minimal shape: pose it to every pose\n assert verts.shape[0] == pose_params.shape[0] # otherwise the number of results should equal the number of pose identities\n\n verts_posed = []\n\n if save_obj:\n if not exists(obj_dir):\n os.makedirs(obj_dir)\n print('saving results as .obj files to {}...'.format(obj_dir))\n\n if verts.shape[0] == 1:\n self.smpl_model.v_template[:] = torch.from_numpy(verts[0])\n for i in range(len(pose_params)):\n # model.pose[:] = pose_params[i]\n self.smpl_model.body_pose[:] = torch.from_numpy(pose_params[i][3:])\n self.smpl_model.global_orient[:] = torch.from_numpy(pose_params[i][:3])\n verts_out = self.smpl_model().vertices.detach().cpu().numpy()\n verts_posed.append(verts_out)\n if save_obj:\n if cloth_type is not None:\n Mesh(verts_out.squeeze(), self.smpl_model.faces).write_obj(join(obj_dir, '{}_{:0>4d}.obj').format(cloth_type, i))\n else:\n Mesh(verts_out.squeeze(), self.smpl_model.faces).write_obj(join(obj_dir, '{:0>4d}.obj').format(i))\n else:\n for i in range(len(verts)):\n self.smpl_model.v_template[:] = torch.from_numpy(verts[i])\n self.smpl_model.body_pose[:] = torch.from_numpy(pose_params[i][3:])\n self.smpl_model.global_orient[:] = torch.from_numpy(pose_params[i][:3])\n verts_out = self.smpl_model().vertices.detach().cpu().numpy()\n verts_posed.append(verts_out)\n if save_obj:\n if cloth_type is not None:\n Mesh(verts_out.squeeze(), self.smpl_model.faces).write_obj(join(obj_dir, '{}_{:0>4d}.obj').format(cloth_type, i))\n else:\n Mesh(verts_out.squeeze(), self.smpl_model.faces).write_obj(join(obj_dir, '{:0>4d}.obj').format(i))\n\n return verts_posed\n\n def pose_result_onepose_multisample(self, verts, pose_params, pose_idx, save_obj, obj_dir=None):\n '''\n :param verts: [N, 6890, 3]\n :param pose_params: [N, 72]\n '''\n if verts.shape[0] != 1: # minimal shape: pose it to every pose\n assert verts.shape[0] == pose_params.shape[0] # otherwise the number of results should equal the number of pose identities\n\n verts_posed = []\n\n if save_obj:\n if not exists(obj_dir):\n os.makedirs(obj_dir)\n print('saving results as .obj files to {}...'.format(obj_dir))\n\n if verts.shape[0] == 1:\n self.smpl_model.v_template[:] = torch.from_numpy(verts[0])\n for i in range(len(pose_params)):\n self.smpl_model.body_pose[:] = torch.from_numpy(pose_params[i][3:])\n self.smpl_model.global_orient[:] = torch.from_numpy(pose_params[i][:3])\n verts_out = self.smpl_model().vertices.detach().cpu().numpy()\n verts_posed.append(verts_out)\n if save_obj:\n Mesh(verts_out, self.smpl_model.faces).write_obj(join(obj_dir, 'pose{}_{:0>4d}.obj').format(pose_idx, i))\n\n else:\n for i in range(len(verts)):\n self.smpl_model.v_template[:] = torch.from_numpy(verts[i])\n self.smpl_model.body_pose[:] = torch.from_numpy(pose_params[i][3:])\n self.smpl_model.global_orient[:] = torch.from_numpy(pose_params[i][:3])\n verts_out = self.smpl_model().vertices.detach().cpu().numpy()\n verts_posed.append(verts_out)\n if save_obj:\n Mesh(verts_out.squeeze(), self.smpl_model.faces).write_obj(join(obj_dir, 'pose{}_{:0>4d}.obj').format(pose_idx, i))\n\n return verts_posed\n\n\n def run(self):\n 
self.sample_vary_pose()\n\n","repo_name":"yuxwind/CAPE","sub_path":"demos.py","file_name":"demos.py","file_ext":"py","file_size_in_byte":9752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"45362874938","text":"#!/usr/bin/env python3\n\nimport sys\nfrom collections import defaultdict\nimport os\nimport time\nimport argparse\n\ndef parse_dimacs(filename):\n clauses = []\n with open(filename, 'r') as input_file:\n for line in input_file:\n if line[0] in ['c', 'p']:\n continue\n literals = list(map(int, line.split()))\n assert literals[-1] == 0\n literals = literals[:-1]\n clauses.append(literals)\n return clauses\n\n# Jersolow-Wang method\ndef jersolow_wang_method(cnf):\n literal_weight = defaultdict(int)\n for clause in cnf:\n for literal in clause:\n literal_weight[literal] += 2 ** -len(clause)\n return max(literal_weight, key=literal_weight.get)\n\n# Jersolow-Wang 2-sided method (consider only positive literals)\n# this is faster by 50% relative improvement in speed\n# ref: http://www.cril.univ-artois.fr/~coste/Articles/coste-etal-sat05.pdf\ndef jersolow_wang_2_sided_method(cnf):\n literal_weight = defaultdict(int)\n for clause in cnf:\n for literal in clause:\n literal_weight[abs(literal)] += 2 ** -len(clause)\n return max(literal_weight, key=literal_weight.get)\n\n# Boolean Constrain Propagation\n# we set unit to true and so we need to update the cnf by the following rules:\n# - Clauses that contain unit are removed (due to \"or\")\n# - Update clauses by removing -unit from them if it exist (due to \"or\")\ndef bcp(cnf, unit):\n new_cnf = []\n for clause in cnf:\n if unit in clause:\n continue\n if -unit in clause:\n new_clause = [literal for literal in clause if literal != -unit]\n # base case: conjunct containing an empty disjunct so False\n # but we should continue later because there might be another path\n if not new_clause:\n return -1\n new_cnf.append(new_clause)\n else:\n new_cnf.append(clause)\n return new_cnf\n\n# This implements the while loop of the BCP function\ndef assign_unit(cnf):\n I = [] # contains the bool assignments for each variable\n unit_clauses = [clause for clause in cnf if len(clause) == 1]\n while unit_clauses:\n unit = unit_clauses[0][0]\n cnf = bcp(cnf, unit) # assign true to unit\n I += [unit]\n if cnf == -1:\n return -1, []\n # base case: empty conjunct so it is SAT\n if not cnf:\n return cnf, I\n unit_clauses = [clause for clause in cnf if len(clause) == 1] # update\n return cnf, I\n\n# DPLL algorithm is here\ndef backtrack(cnf, I):\n cnf, unit_I = assign_unit(cnf)\n I = I + unit_I\n if cnf == -1:\n return []\n if not cnf:\n \treturn I\n selected_literal = jersolow_wang_2_sided_method(cnf)\n res = backtrack(bcp(cnf, selected_literal), I + [selected_literal])\n # if no solution when assigning to True, try to assign to False\n if not res:\n res = backtrack(bcp(cnf, -selected_literal), I + [-selected_literal])\n return res\n\ndef run_benchmarks(fname):\n print('Running on benchmarks...')\n start_time = time.time()\n with open(fname, 'w') as out_file:\n for filename in os.listdir(\"benchmarks\"):\n clauses = parse_dimacs(os.path.join(\"benchmarks\", filename))\n assignment = backtrack(clauses, [])\n if assignment:\n out_file.write('SAT')\n else:\n out_file.write('UNSAT')\n out_file.write('\\n')\n end_time = time.time()\n print('Execution time: %.2f seconds' % (end_time - start_time))\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--run_benchmarks', 
action='store_true',\n help='Run the sat solver over all files in the benchmarks folder')\n parser.add_argument('--input_file', default=None,\n help='input file following DIMACS format (ignored if run_benchmarks is set to True')\n args = parser.parse_args()\n if args.run_benchmarks:\n run_benchmarks('benchmarks-results.log')\n elif args.input_file is not None:\n f = args.input_file\n assert os.path.exists(f), '{} does not exists'.format(f)\n clauses = parse_dimacs(f)\n assignment = backtrack(clauses, [])\n if assignment:\n print('SAT')\n assignment.sort(key=lambda x: abs(x))\n print(assignment)\n else:\n print('UNSAT')\n else:\n print('Please either choose an input file or run the benchmarks. Type --help for more details')\n","repo_name":"mmz33/DPLL-SAT-Solver","sub_path":"sat_dpll.py","file_name":"sat_dpll.py","file_ext":"py","file_size_in_byte":4142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"69921767449","text":"# accounts/urls.py\n\nfrom django.urls import path, re_path\nfrom .views.users import active_users, current_user, Login, logout, Register, users, EditUser\nfrom .views.skins import available_skins, ActiveUserSkin, PurchasedUserSkins\nfrom .views.stats import Stats\nfrom .views.wallet import WalletAPI\n\nurlpatterns = [\n # Users\n path('users/current/', current_user),\n path('users/', users),\n path('users/active/', active_users),\n re_path(r'signup|register/$', Register.as_view(), name='account-create'),\n re_path(r'signin|login/$', Login.as_view(), name='account-login'),\n re_path(r'signout|logout/$', logout, name='account-logout'),\n path('users/edit/', EditUser.as_view()),\n # Skins\n path('skins/', available_skins),\n path('skins/active/', ActiveUserSkin.as_view()),\n path('skins/purchased/', PurchasedUserSkins.as_view()),\n # Stats\n re_path(r'stats|statistics/$', Stats.as_view()),\n # Wallet\n path('wallet/', WalletAPI.as_view())\n]\n","repo_name":"cs188-software-design-security-w20/project-reiher-s-revenge","sub_path":"backend/accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11782649992","text":"from tkinter import *\r\nimport ctypes\r\nio = ctypes.CDLL('./io.so')\r\nopre = ctypes.CDLL('./array_operations.so')\r\n\r\nio.read_line.argtypes = (ctypes.POINTER(ctypes.c_char), ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int))\r\nio.read_array.argtypes = (ctypes.POINTER(ctypes.c_char), ctypes.POINTER(ctypes.c_int), ctypes.c_int)\r\nopre.get_count_of_squares.argtypes = (ctypes.POINTER(ctypes.c_int), ctypes.c_int)\r\n\r\ndef move_array():\r\n str = array_ent.get()\r\n n = n_ent.get()\r\n if (len(n) > 0):\r\n n = int(n)\r\n else:\r\n n = io.get_count(str.encode('utf-8'))\r\n arr = (ctypes.c_int * n)()\r\n io.read_array(str.encode('utf-8'), arr, n)\r\n k = k_ent.get()\r\n if (len(k) > 0):\r\n opre.move_array(arr, n, int(k))\r\n myText.set(list(arr))\r\n\r\ndef place_array():\r\n str = array_ent.get()\r\n n = n_ent.get()\r\n if (len(n) > 0):\r\n n = int(n)\r\n else:\r\n n = io.get_count(str.encode('utf-8'))\r\n arr = (ctypes.c_int * n)()\r\n io.read_array(str.encode('utf-8'), arr, n)\r\n new_n = 0\r\n new_n = opre.get_count_of_squares(arr, n)\r\n new_arr = (ctypes.c_int * new_n)()\r\n opre.place_in_array_squares(arr, n, new_arr, new_n)\r\n myText.set(list(new_arr))\r\n\r\ndef clear_all():\r\n array_ent.delete(0, 'end')\r\n n_ent.delete(0, 'end')\r\n k_ent.delete(0, 
'end')\r\n\r\n\r\nmaster = Tk()\r\nmaster.configure()\r\nmyText = StringVar()\r\n\r\narray_ent = Entry(master, width=40)\r\nn_ent = Entry(master, width=7)\r\nLabel(master, text=\"Array\").grid(row=2, column=0)\r\narray_ent.grid(row=3, column=0)\r\nLabel(master, text=\"Element count\").grid(row=2, column=1)\r\nn_ent.grid(row=3, column=1)\r\n\r\nk_ent = Entry(master, width=7)\r\nLabel(master, text=\"K\").grid(row=5, column=1)\r\nk_ent.grid(row=7, column=1)\r\n\r\nLabel(master, text=\"--------------------\").grid(row=5, column=0)\r\n\r\nget_move = Button(master, text=\"Shift cyclic array left by k positions\", command=move_array)\r\nget_move.grid(row=7, column=0)\r\nLabel(master, text=\"--------------------\").grid(row=8, column=0)\r\nget_place = Button(master, text=\"Move perfect squares out of the array\", command=place_array)\r\nget_place.grid(row=9, column=0)\r\nLabel(master, text=\"--------------------\").grid(row=10, column=0)\r\nButton(master, text=\"Clear\", command=clear_all).grid(row=11, column=1)\r\nresult = Label(master, text=\"Result\", textvariable=myText, background=\"white\", width=30).grid(row=11, column=0)\r\n\r\nmainloop()\r\n","repo_name":"xanderkov/ics7-cProg","sub_path":"lab_12_01_2/src/UI.py","file_name":"UI.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"27126022627","text":"# GENERAL BOT FUNCTIONS\n\n# bot.py\nimport googletrans\nimport pytube\n\nimport BeatRequests\nimport Bugs\nfrom Bugs import BugHandler\nfrom Beans import BeanHandler\nfrom Movies import MovieHandler\nimport Music\nimport glob\nimport math\nimport os\nimport random\nimport time\nimport asyncio\nimport datetime\nimport feedparser\nfrom bs4 import BeautifulSoup\n\nimport discord\nfrom discord.ext import commands\nfrom dotenv import load_dotenv\nfrom googletrans import Translator\n\nload_dotenv()\nTOKEN = os.getenv('DISCORD_TOKEN')\nGUILD = os.getenv('DISCORD_GUILD')\nADMINID = int(os.getenv('DISCORD_ADMINID'))\n\nidentifier = os.getenv('IDENTIFIER_STR')\n\nintents = discord.Intents().default()\nintents.members = True\n\nbot = commands.Bot(command_prefix='+', intents=intents)\nbot.remove_command('help')\n\nbeanHandler = BeanHandler()\nbugHandler = BugHandler()\nmovieHandler = MovieHandler(0)\nreqManager = BeatRequests.BSReqHandler()\n\nmusicDict = dict()\n# service_urls=['translate.googleapis.com']\ntranslator = Translator()\n\n\n# START OF BOT COMMANDS\n\n@bot.event\nasync def on_ready():\n    currentSongIndex = 0\n    print(\n        f'{bot.user} is Online!'\n    )\n\n\n# HELP CMDS ----------------------------------------------------------------------------- ###\n\n@bot.command(name=\"help\")\nasync def help(ctx):\n    print(str(ctx.author.id))\n    outStr = \"COMMANDS:\"\n    outStr += \"\\n+musicHelp: Explains how to request, skip, and manage songs\"\n    outStr += \"\\n+movieHelp: Explains how to add, remove, and view requested movies\"\n    outStr += \"\\n+beanHelp: Explains the Beanking System(tm)\"\n    outStr += \"\\n+sCasm(comment): Makes a comment sarcastic\"\n    outStr += \"\\n+myIQ: Accurate IQ reading\"\n    outStr += \"\\n+pfpGrabMe: Grabs ya PFP son\"\n    outStr += \"\\n+pfpGrab(Mention User): Grabs someone's PFP son\"\n    outStr += \"\\n+goodBot: He is a good bot after all asd\"\n    await ctx.send(outStr)\n\n\n@bot.command()\nasync def beanHelp(ctx):\n    outStr = \"BEAN COMMANDS:\"\n    outStr += \"\\n+beanCounter: Shows your Beank Statement\"\n    outStr += \"\\n+beanMe: High Quality Beanking(c)\"\n    await 
ctx.send(outStr)\n\n\n@bot.command()\nasync def movieHelp(ctx):\n outStr = \"MOVIE COMMANDS:\"\n outStr += \"\\n+movieLS: Lists currently requested movies\"\n outStr += \"\\n+movieADD(name): Adds a movie to the list\"\n outStr += \"\\n+movieDEL(index): Removes a movie at index from the list\"\n outStr += \"\\n+movieRAND: Picks a movie! ( Don't use this one yet ;) )\"\n await ctx.send(outStr)\n\n\n@bot.command(aliases=['songHelp'])\nasync def musicHelp(ctx):\n outStr = \"MUSIC COMMANDS:\"\n outStr += \"\\n+play(search term / YouTube link): Plays a song or adds it to the queue\"\n outStr += \"\\n+skip: Skips the current song\"\n outStr += \"\\n+queue: Lists the current song queue\"\n await ctx.send(outStr)\n\n\n# FUN CMDS ------------------------------------------------------------------------------ ###\n\n@bot.command()\nasync def arabify(ctx, *args):\n sarcStr = reverseChars(args)\n await ctx.send(sarcStr)\n\n\n@bot.command(aliases=[\"CAT\", \"CATME\", \"catme\", \"cat\"])\nasync def catMe(ctx):\n embed = discord.Embed(\n title='Random Image 🐈',\n description='Random',\n colour=discord.Colour.purple()\n )\n embed.set_image(url='https://source.unsplash.com/1600x900/?cat')\n embed.set_footer(text=\"\")\n await ctx.send(embed=embed)\n\n\n@bot.command(aliases=[\"XKCD\", \"xkcd\", \"xme\", \"XME\", \"XkMe\", \"XKME\"])\nasync def xkme(ctx):\n Feed = feedparser.parse(\"https://xkcd.com/rss.xml\")\n pointer = Feed.entries[0]\n soup = BeautifulSoup(pointer.description, \"html.parser\")\n\n embed = discord.Embed(\n title=\"XKCD \" + pointer.link.split('/')[3] + \" - \" + pointer.title,\n colour=discord.Colour.dark_gray()\n )\n embed.set_image(url=soup.img[\"src\"])\n embed.set_footer(text=soup.img[\"alt\"])\n await ctx.send(embed=embed)\n\n\n@bot.command(aliases=[\"Troll\", \"TROLL\"])\nasync def troll(ctx, user: discord.User):\n if ctx.author.id != ADMINID:\n return\n for i in range(10):\n await asyncio.sleep(1)\n await ctx.send(f\"HEY <@{user.id}>\")\n\n\n@bot.command()\nasync def goom(ctx):\n destStr = random.choice(list(googletrans.LANGCODES.items()))[1]\n result = translator.translate(\"Good Morning\", src=\"en\", dest=destStr)\n await ctx.send(result.text)\n\n\n@bot.command()\nasync def sCasm(ctx, *args):\n sarcStr = sarcasify(args)\n await ctx.send(sarcStr)\n i = beanHandler.account_from_id(ctx.author.id)\n gain = random.randint(5, 20)\n beanHandler.add_beans(gain)\n\n\n@bot.command()\nasync def doomsgay(ctx):\n if ctx.author.id != ADMINID:\n return\n doomTime = datetime.datetime(2025, 9, 9, 0, 0, 0) - datetime.datetime.now()\n outStr = \"Ricky will become homosexual in \"\n years = math.floor(doomTime.days / 365.25)\n days = doomTime.days - math.floor(years * 365.25)\n hours = math.floor(doomTime.seconds / 3600)\n minutes = math.floor(doomTime.seconds / 60) - hours * 60\n outStr += str(years) + \" years, \"\n outStr += str(days) + \" days, \"\n outStr += str(hours) + \" hours, \"\n outStr += str(minutes) + \" minutes\"\n await ctx.send(outStr)\n\n\n@bot.command()\nasync def myIQ(ctx):\n if ctx.author.id == ADMINID:\n # g*mer IQ\n await ctx.send(\"Loochis Daddy's IQ is: 300\")\n else:\n # pleb IQ\n random.seed(ctx.author.id)\n await ctx.send(ctx.author.name + \"'s IQ is: \" + str(random.randint(10, 90)))\n\n\n@bot.command()\nasync def pfpGrabMe(ctx):\n await ctx.send(str(ctx.author.avatar_url))\n\n\n@bot.command()\nasync def pfpGrabYou(ctx, user: discord.User):\n await ctx.send(str(user.avatar_url))\n\n\n@bot.command()\nasync def cringe(ctx):\n await 
ctx.send(str.upper(random.choice(ctx.guild.members).name) + \" IS CRINGE!\")\n i = beanHandler.account_from_id(ctx.author.id)\n gain = random.randint(5, 20)\n beanHandler.add_beans(gain)\n await ctx.send(\"+\" + str(gain) + \" BND\")\n\n\n@bot.command()\nasync def goodBot(ctx):\n await ctx.send(\"thanks B0s\")\n i = beanHandler.account_from_id(ctx.author.id)\n gain = random.randint(5, 20)\n beanHandler.add_beans(gain)\n await ctx.send(\"+\" + str(gain) + \" BND\")\n\n\n# BEAN CMDS ----------------------------------------------------------------------------- ###\n\n@bot.command()\nasync def beanMe(ctx):\n i = beanHandler.account_from_id(ctx.author.id)\n gain = random.randint(5, 20)\n oldBal = beanHandler.beanAccount.beans\n beanHandler.add_beans(gain)\n if i == 0:\n await ctx.send(ctx.author.name + \"'s Bean Account has been succesfully created!\")\n\n await ctx.send(str.upper(ctx.author.name) + \" GOT BEANED: \\n```\\nOld Balance: \" + numFormat(\n oldBal) + \"Transaction: \" + numFormat(gain) + lineFormat() + \"New Balance: \" + balFormat(beanHandler) + \"```\")\n\n\n@bot.command()\nasync def beanYou(ctx, user: discord.User, *args):\n outAcc = beanHandler.account_from_id(ctx.author.id)\n gain = int(args[0])\n oldBal = beanHandler.beanAccount.beans\n newBal = oldBal - gain\n if outAcc == 0:\n await ctx.send(ctx.author.name + \"'s Bean Account has been succesfully created!\")\n if beanHandler.beanAccount.beans - gain < 0:\n await ctx.send(ctx.author.name + \", You dont have enough BND to perform this Beansaction\")\n return\n beanHandler.add_beans(-gain)\n inAcc = beanHandler.account_from_id(user.id)\n if inAcc == 0:\n await ctx.send(user.name + \"'s Bean Account has been succesfully created!\")\n\n await ctx.send(str.upper(ctx.author.name) + \" BEANED \" + str.upper(user.name) + \"\\n```\\nOld Balance: \" + numFormat(\n oldBal) + \"Transaction: \" + numFormat(-gain) + lineFormat() + \"New Balance: \" + numFormat(newBal) + \"```\")\n oldBal = beanHandler.beanAccount.beans\n beanHandler.add_beans(gain)\n await ctx.send(\n str.upper(user.name) + \" GOT BEANED BY \" + str.upper(ctx.author.name) + \"\\n```\\nOld Balance: \" + numFormat(\n oldBal) + \"Transaction: \" + numFormat(gain) + lineFormat() + \"New Balance: \" + balFormat(\n beanHandler) + \"```\")\n\n\n@bot.command()\nasync def beanCounter(ctx):\n i = beanHandler.account_from_id(ctx.author.id)\n if i == 0:\n await ctx.send(ctx.author.name + \"'s Bean Account has been succesfully created!\")\n\n await ctx.send(ctx.author.name + \"'s Bean Account: \\n```\\nBalance: \" + balFormat(beanHandler) + \"```\")\n\n\n# BUG CMDS ------------------------------------------------------------------------------ ###\n\n@bot.command(aliases=[\"sawbug\", \"SAWBUG\", \"sawbugs\", \"SAWBUGS\", \"sawBugs\"])\nasync def sawBug(ctx, *args):\n gain = 1\n if len(args) != 0 and int(args[0]) > 1:\n gain = int(args[0])\n i = bugHandler.account_from_id(ctx.author.id)\n bugHandler.add_sightings(gain)\n if i == 0:\n await ctx.send(ctx.author.name + \"Saw their first bug!\\nWELCOME TO THE THUNDERDOME\")\n\n if gain == 1:\n await ctx.send(str.upper(ctx.author.name) + \" saw a bug!\")\n else:\n await ctx.send(str.upper(ctx.author.name) + \" saw \" + str(gain) + \" bugs!\")\n\n\n@bot.command(aliases=[\"killedbug\", \"KILLEDBUG\", \"killedbugs\", \"KILLEDBUGS\", \"killedBugs\"])\nasync def killedBug(ctx, *args):\n gain = 1\n if len(args) != 0 and int(args[0]) > 1:\n gain = int(args[0])\n i = bugHandler.account_from_id(ctx.author.id)\n bugHandler.add_kills(gain)\n 
bugHandler.add_sightings(gain)\n    if i == 0:\n        await ctx.send(ctx.author.name + \" Killed their first bug!\\nWELCOME TO THE THUNDERDOME\")\n\n    if gain == 1:\n        await ctx.send(str.upper(ctx.author.name) + \" \" + random.choice(Bugs.KILLED_SYNONYMS) + \" a bug!\")\n    else:\n        await ctx.send(\n            str.upper(ctx.author.name) + \" \" + random.choice(Bugs.KILLED_SYNONYMS) + \" \" + str(gain) + \" bugs!\")\n\n@bot.command(aliases=[\"killedseenbug\", \"KILLEDSEENBUG\"])\nasync def killedSeenBug(ctx):\n    bugHandler.account_from_id(ctx.author.id)\n    if bugHandler.bugAccount.kills >= bugHandler.bugAccount.sightings:\n        await ctx.send(\"Somethin' Ain't right here\")\n        return\n    bugHandler.add_kills(1)\n\n    await ctx.send(\n        str.upper(ctx.author.name) + \" \" + random.choice(Bugs.KILLED_SYNONYMS) + \" One that got away!\")\n\n\n@bot.command()\nasync def bugStats(ctx, user: discord.User = None):\n    if user is None:\n        user = ctx.author\n    i = bugHandler.account_from_id(user.id)\n    if i == 0:\n        await ctx.send(user.name + \" hasn't committed bug crimes yet :(\")\n        return\n    if bugHandler.bugAccount.kills != 0:\n        await ctx.send(user.name + \"'s Bug crimes: \\n```\\nKills: \" + str(bugHandler.bugAccount.kills) + \"\\nSightings: \"\n                       + str(bugHandler.bugAccount.sightings) + \"\\nK/S: \"\n                       + '{0:.3g}'.format(bugHandler.bugAccount.kills / bugHandler.bugAccount.sightings) + \"```\")\n    else:\n        await ctx.send(\n            user.name + \" is a pacifist :( \\n```\\nKills: \" + str(bugHandler.bugAccount.kills) + \"\\nSightings: \"\n            + str(bugHandler.bugAccount.sightings) + \"```\")\n\n\n# MOVIE CMDS ---------------------------------------------------------------------------- ###\n\n@bot.command()\nasync def movieLS(ctx):\n    movieHandler = MovieHandler(ctx.guild.id)\n    movieHandler.get_movies()\n    if len(movieHandler.movies) != 0:\n        await ctx.send(\"Here are the currently requested movies:\\n```\" + movieHandler.moviesToOrderedString() + \"\\n```\")\n    else:\n        await ctx.send(\"There are no requested movies!\")\n\n\n@bot.command()\nasync def movieADD(ctx, *args):\n    movieHandler = MovieHandler(ctx.guild.id)\n    movieHandler.get_movies()\n    name = ' '.join([x for x in args])\n    movieHandler.add_movie(name)\n\n    await ctx.send(\"Successfully Added **\" + name + \"**\")\n    await ctx.send(\"Here are the currently requested movies:\\n```\" + movieHandler.moviesToOrderedString() + \"\\n```\")\n\n\n@bot.command()\nasync def movieDEL(ctx, *args):\n    movieHandler = MovieHandler(ctx.guild.id)\n    movieHandler.get_movies()\n    try:\n        name = movieHandler.del_movie(int(args[0]))\n    except:\n        await ctx.send(\"Must be a valid index!\")\n        return\n\n    await ctx.send(\"Successfully Deleted **\" + name + \"**\")\n    if len(movieHandler.movies) != 0:\n        await ctx.send(\"Here are the currently requested movies:\\n```\" + movieHandler.moviesToOrderedString() + \"\\n```\")\n    else:\n        await ctx.send(\"There are no requested movies!\")\n\n\n@bot.command()\nasync def movieRAND(ctx):\n    movieHandler = MovieHandler(ctx.guild.id)\n    movieHandler.get_movies()\n    if len(movieHandler.movies) != 0:\n        await ctx.send(\"PICKING FROM A HAT...\")\n        await asyncio.sleep(2)\n        await ctx.send(\"3...\")\n        await asyncio.sleep(2)\n        await ctx.send(\"2...\")\n        await asyncio.sleep(2)\n        await ctx.send(\"1...\")\n        await asyncio.sleep(2)\n        await ctx.send(\"0.5...\")\n        await asyncio.sleep(2)\n        await ctx.send(\"0.25...\")\n        await asyncio.sleep(2)\n        await ctx.send(\"0.125...\")\n        await asyncio.sleep(2)\n        await ctx.send(\"0.0625...\")\n        await asyncio.sleep(2)\n        await ctx.send(\"0... 
FRICK IT ROUNDED DO-\")\n        await ctx.send(\"The Chosen Movie is: **\" + random.choice(movieHandler.movies) + \"**\")\n    else:\n        await ctx.send(\"There are no requested movies!\")\n\n\n# MUSIC CMDS ---------------------------------------------------------------------------- ###\n\n@bot.command(aliases=['p', 'P', 'Play', 'PLAY'])\nasync def play(ctx, *args):\n    video = Music.getVideo(args)\n    if video is None:\n        await ctx.send(\"ERR: Cannot find song.\")\n        return\n\n    guildID = ctx.guild.id\n    if str(guildID) not in musicDict:\n        musicDict[str(guildID)] = []\n\n    if ctx.author.voice is None:\n        await ctx.send(\"ERR: User not in channel.\")\n        return\n\n    voice_channel = ctx.author.voice.channel\n\n    bot_channel = None\n    if ctx.guild.voice_client is not None:\n        bot_channel = ctx.guild.voice_client.channel\n\n    if bot_channel is not None:\n        if voice_channel == bot_channel:\n            vc = ctx.guild.voice_client\n        else:\n            await ctx.send(\"ERR: Bot is in another channel, permission denied.\")\n            return\n    else:\n        await ctx.send(\"Joined VC\")\n        vc = await voice_channel.connect()\n\n    print(video)\n    if len(video) == 1:\n        musicDict[str(guildID)].append(video[0])\n        if vc.is_playing():\n            await ctx.send(\"Queued: **\" + video[0].title + \"**\")\n    else:\n        for v in video:\n            musicDict[str(guildID)].append(v)\n        await ctx.send(\"Queued \" + str(len(video)) + \" videos\")\n\n    print(len(musicDict[str(guildID)]))\n    if not vc.is_playing():\n        await playNext(ctx)\n\n\n@bot.command(aliases=['s', 'S', 'Skip', 'SKIP'])\nasync def skip(ctx):\n    if ctx.author.voice is None:\n        await ctx.send(\"ERR: User not in channel.\")\n        return\n\n    voice_channel = ctx.author.voice.channel\n    bot_channel = None\n    if ctx.guild.voice_client is not None:\n        bot_channel = ctx.guild.voice_client.channel\n\n    if bot_channel is not None:\n        if voice_channel == bot_channel:\n            vc = ctx.guild.voice_client\n        else:\n            await ctx.send(\"ERR: Bot is in another channel, permission denied.\")\n            return\n    else:\n        await ctx.send(\"ERR: Bot not in channel.\")\n        return\n\n    guildID = ctx.guild.id\n    if not musicDict.get(str(guildID)):\n        await ctx.send(\"Nothing in Queue!\")\n        return\n\n    try:\n        # del musicDict[str(guildID)][0]\n        voice_channel = ctx.message.guild.voice_client\n        voice_channel.stop()\n        await ctx.send(\"Skipped!\")\n    except Exception:\n        await ctx.send(\"ERR: Nothing playing.\")\n        return\n\n    # await playNext(ctx)\n\n\nasync def playNext(ctx):\n    guildID = ctx.guild.id\n    if len(musicDict[str(guildID)]) >= 1:\n        await ctx.send(\"Now Playing: **\" + musicDict[str(guildID)][0].title + \"**\")\n        vc = ctx.guild.voice_client\n        Music.getYTFile(musicDict[str(guildID)][0], ctx.guild.id)\n        vc.play(discord.FFmpegPCMAudio(source=\"Audio/\" + str(ctx.guild.id) + \".mp4\"))\n        while vc.is_playing():\n            await asyncio.sleep(1)\n        del musicDict[str(guildID)][0]\n        print(\"Deleted.\")\n        await playNext(ctx)\n    else:\n        await ctx.send(\"Queue Finished!\")\n\n\n@bot.command(aliases=['q', 'Q', 'Queue'])\nasync def queue(ctx, *args):\n    guildID = ctx.guild.id\n    if str(guildID) not in musicDict:\n        musicDict[str(guildID)] = []\n    if not musicDict[str(guildID)]:\n        await ctx.send(\"Nothing in Queue!\")\n        return\n\n    pageNum = 1\n    if args:\n        try:\n            pageNum = int(args[0])\n        except ValueError:\n            await ctx.send(\"Invalid Page Number\")\n\n    outStr = pageListFormatter([x.title for x in musicDict[str(guildID)]], pageNum)\n\n    await ctx.send(outStr)\n\n\n# BEAT SABER REQUEST FUNCS -------------------------------------------------------------- ###\n\n@bot.command(aliases=[\"BS\", \"beatsaber\", 
\"BEATSABER\", \"bs\", \"bsrequest\", \"BEATSABERREQUEST\"])\nasync def BeatSaber(ctx, *args):\n msg = await ctx.send(\"Searching...\")\n url = reqManager.getBeatsaverPage(' '.join(args))\n if url[0:8] == \"https://\":\n await msg.edit(content=\"Verifying...\")\n bsSong = BeatRequests.BSSong(url)\n reqStatus = reqManager.add_req(bsSong)\n if not reqStatus:\n await msg.edit(content=\"Song already in queue!\")\n return\n\n embed = discord.Embed(\n title=\"[{}] \".format(bsSong.id) + bsSong.name,\n description=\"Mapped By: {}\".format(bsSong.mapper),\n colour=discord.Colour.red()\n )\n embed.set_image(url=bsSong.coverArt)\n embed.set_footer(text=bsSong.description)\n embed.add_field(name=\"Votes\", value=\"\\👍 \" + str(bsSong.upvotes) + \" | \\👎\" + str(bsSong.downvotes),\n inline=False)\n await msg.edit(content=\"Successfully added!\", embed=embed)\n else:\n await msg.edit(content=url)\n\n\n@bot.command(aliases=[\"BSLS\", \"beatsaberlist\", \"BEATSABERLIST\", \"bsls\", \"bslist\"])\nasync def BeatSaberList(ctx, *args):\n pageNum = 1\n if args:\n try:\n pageNum = int(args[0])\n except:\n await ctx.send(\"Invalid Page Number\")\n\n reqManager.get_reqs()\n if not reqManager.requests:\n await ctx.send(\"Nothing in Queue!\")\n return\n\n print([x.split(\"\u001F\")[1] for x in reqManager.requests])\n outStr = pageListFormatter([x.split(\"\u001F\")[1] for x in reqManager.requests], pageNum)\n await ctx.send(outStr)\n\n\n# HELPER FUNCS -------------------------------------------------------------------------- ###\n\ndef pageListFormatter(pagedList, pageNum):\n maxPageNum = math.floor(len(pagedList) / 10.0) + 1\n if pageNum < 1:\n pageNum = 1\n if pageNum > maxPageNum:\n pageNum = maxPageNum\n\n outStr = \"Queue Page \" + str(pageNum) + \"/\" + str(maxPageNum) + \":\\n```\"\n outStr += \"\\n>> \" + pagedList[0]\n for i in range((pageNum - 1) * 10, min(pageNum * 10, len(pagedList))):\n outStr += \"\\n(\" + str(i + 1) + \"). 
\" + pagedList[i]\n outStr += \"```\"\n return outStr\n\n\ndef sarcasify(*args):\n random.seed(time.time())\n outStr = '\"'\n for arg in args:\n for argSt in arg:\n for argCh in argSt:\n outStr += random.choice([str.lower(argCh), str.upper(argCh)])\n outStr += \" \"\n outStr += '\"'\n return outStr\n\n\ndef reverseChars(*args):\n outStr = u'\\u202B'\n for arg in args:\n for argSt in arg:\n for argCh in argSt:\n outStr += argCh\n outStr += \" \"\n outStr += u'\\u202B'\n return outStr[::-1]\n\n\ndef balFormat(bHandler):\n return str(bHandler.beanAccount.beans) + \" BND\\n\"\n\n\ndef numFormat(num):\n return str(num) + \" BND\\n\"\n\n\ndef lineFormat():\n return \"-----------------------\\n\"\n\n\nbot.run(TOKEN)\n","repo_name":"Loochis/LoochisBot","sub_path":"LoochisBot.py","file_name":"LoochisBot.py","file_ext":"py","file_size_in_byte":19816,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"18632807620","text":"\nfrom sklearn import tree, metrics\n\nimport csv\n\n\ndef DT_2(alpha, delta)->int:\n\n #build And Train the tree\n\n class_weight_dict={'0':(1-delta) , '1':delta} #delta value is 0.8\n DT2 = tree.DecisionTreeClassifier(criterion=\"entropy\",min_samples_split=9 , class_weight=class_weight_dict)\n DT2.fit(train_data,train_results)\n\n\n #Test the tree\n #DT2_results = DT2.predict(test_data)\n DT2_prob_results=DT2.predict_proba(test_data)\n #Generate prediction tree\n\n\n delta_weighted_pred_result=[]\n for c_prob in DT2_prob_results:\n if alpha*c_prob[1]> c_prob[0]: # calculate classification by new rule alpha*|T|>|F|\n delta_weighted_pred_result.append('1')\n else:\n delta_weighted_pred_result.append('0')\n conf_mat1=metrics.confusion_matrix(test_results,delta_weighted_pred_result)\n\n conf_mat1[0][0],conf_mat1[1][1]=conf_mat1[1][1],conf_mat1[0][0]\n errw=conf_mat1[0][1]+4*conf_mat1[1][0]\n\n print(conf_mat1)\n\n return errw\n\n\n\n\n\n#Read Train file\n\n\ntrain_file = open('train.csv')\ndata = csv.reader(train_file, delimiter=',')\ntrain_data, train_results = [], []\nfor row in data:\n train_data.append(row[0:8])\n train_results.append(row[8])\ntrain_data, train_results=train_data[1:len(train_data)],train_results[1:len(train_results)]\n\n\n#Load test file\ntest_file = open('test.csv')\ndata = csv.reader(test_file, delimiter=',')\ntest_data,test_results = [], []\nfor row in data:\n test_data.append(row[0:8])\n test_results.append(row[8])\ntest_data, test_results=test_data[1:len(test_data)], test_results[1:len(test_results)]\n\n\ndelta=0.8\nalpha=4\nDT_2(alpha,delta)\n\n\n\n","repo_name":"omertaub7/intro_ai_3","sub_path":"DT2.py","file_name":"DT2.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11913564222","text":"import urllib\nimport urlparse\n\nfrom openerp.osv import orm\n\n\nclass MailMail(orm.Model):\n\n _inherit = 'mail.mail'\n\n def _get_unsubscribe_url(\n self, cr, uid, mail, email_to, msg=None, context=None):\n '''\n Override native method to manage unsubscribe URL for distribution list\n case of newsletter.\n '''\n mml = mail.mailing_id\n if mml.distribution_list_id and mml.distribution_list_id.newsletter:\n res_id = mail.res_id\n if mail.model != 'res.partner':\n mail_obj = self.pool[mail.model]\n partner_path = mml.distribution_list_id.partner_path\n if partner_path in mail_obj._columns.keys():\n curr_obj = self.pool[mail.model]\n p_val = curr_obj.read(\n cr, uid, res_id, [partner_path], context=context)\n 
","repo_name":"omertaub7/intro_ai_3","sub_path":"DT2.py","file_name":"DT2.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"11913564222","text":"import urllib\nimport urlparse\n\nfrom openerp.osv import orm\n\n\nclass MailMail(orm.Model):\n\n    _inherit = 'mail.mail'\n\n    def _get_unsubscribe_url(\n            self, cr, uid, mail, email_to, msg=None, context=None):\n        '''\n        Override the native method to manage the unsubscribe URL for the\n        distribution list case of a newsletter.\n        '''\n        mml = mail.mailing_id\n        if mml.distribution_list_id and mml.distribution_list_id.newsletter:\n            res_id = mail.res_id\n            if mail.model != 'res.partner':\n                mail_obj = self.pool[mail.model]\n                partner_path = mml.distribution_list_id.partner_path\n                if partner_path in mail_obj._columns.keys():\n                    curr_obj = self.pool[mail.model]\n                    p_val = curr_obj.read(\n                        cr, uid, res_id, [partner_path], context=context)\n                    # get partner_id\n                    res_id = p_val[partner_path][0]\n                else:\n                    # do not set a URL for the newsletter if there is no partner_id\n                    return False\n            param_obj = self.pool['ir.config_parameter']\n            base_url = param_obj.get_param(\n                cr, uid, 'web.base.url')\n            vals = {\n                'db': cr.dbname,\n                'res_id': res_id,\n                'email': email_to,\n            }\n            url = urlparse.urljoin(\n                base_url, 'mail/newsletter/%(mailing_id)s/'\n                'unsubscribe?%(params)s' % {\n                    'mailing_id': mail.mailing_id.id,\n                    'params': urllib.urlencode(vals)\n                }\n            )\n            return '<a href=\"%s\">%s</a>' % (\n                url, msg or 'Click to unsubscribe')\n        else:\n            return super(MailMail, self)._get_unsubscribe_url(\n                cr, uid, mail, email_to, msg=msg, context=context)\n
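\n\n# Illustration (editor's addition with hypothetical values, not part of the\n# original module) of the URL shape built above:\n#\n# >>> urlparse.urljoin('https://example.com',\n# ...                  'mail/newsletter/7/unsubscribe?' + urllib.urlencode(\n# ...                      {'db': 'mydb', 'res_id': 42, 'email': 'a@b.com'}))\n# 'https://example.com/mail/newsletter/7/unsubscribe?db=mydb&res_id=42&email=a%40b.com'\n# (query-parameter order may vary with dict ordering)\n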
","repo_name":"acsone/acsone-addons","sub_path":"mass_mailing_distribution_list/mail_mail.py","file_name":"mail_mail.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"31"}
{"seq_id":"8078837413","text":"# eve door & window: wireless contact sensor\n\nimport digi\nimport digi.on as on\nimport digi.util as util\nfrom digi.digilite.matter import Controller\n\nmatter_device = Controller()\n\ndef report():\n    closed = matter_device.cluster('booleanstate').read('state-value', endpoints=1)[0][\"value\"]\n    digi.model.patch({\n        \"obs\": {\n            \"closed\": closed\n        }\n    })\n    digi.pool.load([{\"closed\": closed}])\n\nloader = util.Loader(load_fn=report)\n\n@on.meta\ndef do_meta(meta):\n    matter_device.pair(meta.get(\"matter_code\", \"\"))\n\n    i = meta.get(\"report_interval\", -1)\n    if i < 0:\n        digi.logger.info(\"Stop loader\")\n        loader.stop()\n    else:\n        loader.start()\n\n\nif __name__ == '__main__':\n    digi.run()\n","repo_name":"digi-project/digilite","sub_path":"matter/examples/door-sensor/driver/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"}
{"seq_id":"42400383149","text":"'''\nTake the raw MSCOCO annotation files (like instances_train2014.json) and create a csv where each line is an example:\n12746,0,1,0,0,0,...1,0\n(12746 is the image id and the rest is the ground truth of each class)\n'''\nimport json\nimport os\nimport sys\n\nimport numpy as np\n\nfrom collections import defaultdict\nfrom pprint import pprint\n\n\ndef categories(json_data):\n    cats = dict()\n    for i, category in enumerate(sorted(json_data['categories'], key=lambda x: x['id'])):\n        cats[category['id']] = {'name': category['name'], 'norm_id': i}\n\n    assert len(cats) == 80\n    return cats\n\n\ndef write_categories(root_dir, cats):\n    '''\n    normalized_id,name,original_id\n    '''\n    categories_file = os.path.join(root_dir, 'annotations', 'categories.csv')\n\n    with open(categories_file, 'w+') as f_out:\n        for key in sorted(cats, key=lambda x: cats[x]['norm_id']):\n            line = '%s,%s,%s\\n' % (cats[key]['norm_id'], cats[key]['name'], key)\n            f_out.write(line)\n\n\ndef write_dataset(root_dir, original_name, id_to_label):\n    dataset_file = os.path.join(root_dir, 'annotations', 'multilabel_%s.csv' % original_name)\n    with open(dataset_file, 'w+') as f_out:\n\n        for img_id in sorted(id_to_label):\n            ground_truth = [str(int(gt)) for gt in id_to_label[img_id]]\n            line = str(img_id) + ',' + ','.join(ground_truth) + '\\n'\n            f_out.write(line)\n\n\ndef create_csv(root_dir, original_name):\n\n    annotations_file = os.path.join(root_dir, 'annotations', 'instances_%s.json' % original_name)\n    with open(annotations_file, 'r') as f_in:\n        json_data = json.load(f_in)\n\n    # create categories file\n    cats = categories(json_data)\n    nb_classes = len(cats)\n\n    # init image_ids\n    id_to_label = dict()\n    for img in json_data['images']:\n        img_id = img['id']\n        id_to_label[img_id] = np.zeros(nb_classes)\n\n    # fill with data\n    stats = defaultdict(int)\n    for annot in json_data['annotations']:\n        cat = annot['category_id']\n        norm_cat = cats[cat]['norm_id']\n        stats[cat] += 1\n\n        img_id = annot['image_id']\n\n        id_to_label[img_id][norm_cat] = 1\n    pprint(stats)\n\n    # write categories file\n    write_categories(root_dir, cats)\n\n    # write data csv file\n    write_dataset(root_dir, original_name, id_to_label)\n\n\n# python3 pp_multilabel.py /share/DEEPLEARNING/datasets/mscoco train2017\nif __name__ == '__main__':\n    create_csv(sys.argv[1], sys.argv[2])\n","repo_name":"lcalem/partial-labels","sub_path":"data/coco/preprocessing/pp_multilabel.py","file_name":"pp_multilabel.py","file_ext":"py","file_size_in_byte":2454,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"31"}
{"seq_id":"5091960940","text":"import requests\n\nURL_SEARCH = \"https://affiliate-api.flipkart.net/affiliate/1.0/search.json\"\n\nheaders = {'Fk-Affiliate-Id': 'sujithejg',\n           'Fk-Affiliate-Token': '1e3c864a20654c95ab56a300906e1d69',\n           'Content-Type': 'application/json',\n           }\n\ndef query(query_str):\n    params = {\n        'query': query_str,\n        'resultCount': 5\n    }\n    response = requests.get(URL_SEARCH, headers=headers, params=params)\n    res = response.json()\n    print(res)\n    product_id = res['products'][0]['productBaseInfoV1']\n    print('\n')\n    print(product_id)\n    print('\n')\n    print(product_id.get(\"productId\"))\n    print('\n')\n\nif __name__ == \"__main__\":\n    query('sony mobile')\n    print(\"\n\n\n\n QUERYING IPOD \n\n\n\n\")\n    query('ipod')\n","repo_name":"snehadasa/Graphit_project","sub_path":"query_api/query_flipkart_id.py","file_name":"query_flipkart_id.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"43476103555","text":"import time\n\nclass node:\n    def __init__(self, data=None):\n        self.data = data\n        self.point = None\n\nclass linklist:\n    def __init__(self):\n        self.head = None\n\n\ndef quicksort(q):\n    if len(q) <= 1:\n        return q\n    else:\n        firstpart = []\n        secondpart = []\n        pivot = q[len(q) - 1]\n        for i in range(len(q) - 1):\n            if q[i] <= pivot:\n                firstpart.append(q[i])\n            else:\n                secondpart.append(q[i])\n        return quicksort(firstpart) + [pivot] + quicksort(secondpart)\n\n\ndef mergesort(q):\n    # without this base case the recursion never terminates\n    if len(q) <= 1:\n        return q\n    mid = len(q) // 2\n    left = q[0:mid]\n    right = q[mid:]\n    return merge(mergesort(left), mergesort(right))\n\n\ndef merge(left, right):\n    res = []\n    i = 0\n    j = 0\n    # boolean 'and', not the bitwise '&', which binds tighter than '<'\n    while i < len(left) and j < len(right):\n        if left[i] < right[j]:\n            res.append(left[i])\n            i += 1\n        else:\n            res.append(right[j])\n            j += 1\n    # append whatever remains on either side\n    res.extend(left[i:])\n    res.extend(right[j:])\n    return res\n\n\ndef selectsort(q):\n    for i in range(len(q)-1):\n        for j in range(i,len(q)):\n            if q[i]